From ccf501a420d12d79b803ccf7334d0db978e4724e Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 18 Oct 2021 04:51:11 +0000 Subject: [PATCH 001/445] Initial implementation of /report, fixing #13 --- src/client_server/mod.rs | 2 + src/client_server/report.rs | 75 +++++++++++++++++++++++++++++++++++++ src/main.rs | 1 + 3 files changed, 78 insertions(+) create mode 100644 src/client_server/report.rs diff --git a/src/client_server/mod.rs b/src/client_server/mod.rs index e0c340f1..115ddaf6 100644 --- a/src/client_server/mod.rs +++ b/src/client_server/mod.rs @@ -16,6 +16,7 @@ mod profile; mod push; mod read_marker; mod redact; +mod report; mod room; mod search; mod session; @@ -47,6 +48,7 @@ pub use profile::*; pub use push::*; pub use read_marker::*; pub use redact::*; +pub use report::*; pub use room::*; pub use search::*; pub use session::*; diff --git a/src/client_server/report.rs b/src/client_server/report.rs new file mode 100644 index 00000000..e56cbc9f --- /dev/null +++ b/src/client_server/report.rs @@ -0,0 +1,75 @@ +use std::sync::Arc; + +use crate::{database::admin::AdminCommand, database::DatabaseGuard, ConduitResult, Error, Ruma}; +use ruma::{ + api::client::{error::ErrorKind, r0::room::report_content}, + events::room::message, + Int, +}; + +#[cfg(feature = "conduit_bin")] +use rocket::post; + +/// # `POST /_matrix/client/r0/rooms/{roomId}/report/{eventId}` +/// +/// Reports an inappropriate event to homeserver admins +/// +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/rooms/<_>/report/<_>", data = "") +)] +#[tracing::instrument(skip(db, body))] +pub async fn report_event_route( + db: DatabaseGuard, + body: Ruma>, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let pdu = match db.rooms.get_pdu(&body.event_id) { + Ok(pdu) if !pdu.is_none() => pdu, + _ => { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid Event ID", + )) + } + } + .unwrap(); + + if body.score >= Int::from(0) && body.score <= Int::from(-100) { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid score, must be within 0 to -100", + )); + }; + + if body.reason.chars().count() > 160 { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Reason too long, should be 160 characters or fewer", + )); + }; + + let mutex_state = Arc::clone( + db.globals + .roomid_mutex_state + .write() + .unwrap() + .entry(body.room_id.clone()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + db.admin.send(AdminCommand::SendMessage( + message::RoomMessageEventContent::text_plain(format!( + "Report received from: {}\r\n\r\nEvent ID: {}\r\nRoom ID: {}\r\nSent By: {}\r\n\r\nReport Score: {}\r\nReport Reason: {}", + sender_user, pdu.event_id, pdu.room_id, pdu.sender, body.score, body.reason, + )), + )); + + drop(state_lock); + + db.flush()?; + + Ok(report_content::Response {}.into()) +} diff --git a/src/main.rs b/src/main.rs index 84dfb1fc..56faa3e7 100644 --- a/src/main.rs +++ b/src/main.rs @@ -101,6 +101,7 @@ fn setup_rocket(config: Figment, data: Arc>) -> rocket::Rocket< client_server::create_typing_event_route, client_server::create_room_route, client_server::redact_event_route, + client_server::report_event_route, client_server::create_alias_route, client_server::delete_alias_route, client_server::get_alias_route, From 1541b93f457de2d5fb8c37739d6791fa3f60312b Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 18 Oct 2021 05:38:41 +0000 Subject: [PATCH 002/445] Make reports 
look nicer and reduce spam potential, increase max report length to 1000 characters --- src/client_server/report.rs | 39 ++++++++++++++++++++++++++++++------- 1 file changed, 32 insertions(+), 7 deletions(-) diff --git a/src/client_server/report.rs b/src/client_server/report.rs index e56cbc9f..7f66fa13 100644 --- a/src/client_server/report.rs +++ b/src/client_server/report.rs @@ -8,7 +8,7 @@ use ruma::{ }; #[cfg(feature = "conduit_bin")] -use rocket::post; +use rocket::{http::RawStr, post}; /// # `POST /_matrix/client/r0/rooms/{roomId}/report/{eventId}` /// @@ -43,10 +43,10 @@ pub async fn report_event_route( )); }; - if body.reason.chars().count() > 160 { + if body.reason.chars().count() > 1000 { return Err(Error::BadRequest( ErrorKind::InvalidParam, - "Reason too long, should be 160 characters or fewer", + "Reason too long, should be 1000 characters or fewer", )); }; @@ -61,10 +61,35 @@ pub async fn report_event_route( let state_lock = mutex_state.lock().await; db.admin.send(AdminCommand::SendMessage( - message::RoomMessageEventContent::text_plain(format!( - "Report received from: {}\r\n\r\nEvent ID: {}\r\nRoom ID: {}\r\nSent By: {}\r\n\r\nReport Score: {}\r\nReport Reason: {}", - sender_user, pdu.event_id, pdu.room_id, pdu.sender, body.score, body.reason, - )), + message::RoomMessageEventContent::text_html( + format!( + concat!( + "Report received from: {}\r\n\r\n", + "Event ID: {}\r\n", + "Room ID: {}\r\n", + "Sent By: {}\r\n\r\n", + "Report Score: {}\r\n", + "Report Reason: {}" + ), + sender_user, pdu.event_id, pdu.room_id, pdu.sender, body.score, body.reason + ) + .to_owned(), + format!( + concat!( + "
<details><summary>Report received from: {}</summary>",
+                    "Event Info<br><br>Event ID: {}<br>Room ID: {}<br>Sent By: {}",
+                    "<br><br>Report Info<br><br>Report Score: {}",
+                    "<br>Report Reason: {}<br></details>\
" + ), + sender_user, + pdu.event_id, + pdu.room_id, + pdu.sender, + body.score, + RawStr::new(&body.reason).html_escape() + ) + .to_owned(), + ), )); drop(state_lock); From 50f931a2fda72d94a6190092dac18f2268c96af1 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Wed, 20 Oct 2021 11:12:06 +0000 Subject: [PATCH 003/445] Cleanup and fix validation in report.rs, lower max report length, better html --- src/client_server/report.rs | 53 +++++++++++++------------------------ 1 file changed, 18 insertions(+), 35 deletions(-) diff --git a/src/client_server/report.rs b/src/client_server/report.rs index 7f66fa13..3dcb4d1c 100644 --- a/src/client_server/report.rs +++ b/src/client_server/report.rs @@ -1,5 +1,3 @@ -use std::sync::Arc; - use crate::{database::admin::AdminCommand, database::DatabaseGuard, ConduitResult, Error, Ruma}; use ruma::{ api::client::{error::ErrorKind, r0::room::report_content}, @@ -25,62 +23,49 @@ pub async fn report_event_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let pdu = match db.rooms.get_pdu(&body.event_id) { - Ok(pdu) if !pdu.is_none() => pdu, + let pdu = match db.rooms.get_pdu(&body.event_id)? { + Some(pdu) => pdu, _ => { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Invalid Event ID", )) } - } - .unwrap(); + }; - if body.score >= Int::from(0) && body.score <= Int::from(-100) { + if body.score > Int::from(0) || body.score < Int::from(-100) { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Invalid score, must be within 0 to -100", )); }; - if body.reason.chars().count() > 1000 { + if body.reason.chars().count() > 250 { return Err(Error::BadRequest( ErrorKind::InvalidParam, - "Reason too long, should be 1000 characters or fewer", + "Reason too long, should be 250 characters or fewer", )); }; - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(body.room_id.clone()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - db.admin.send(AdminCommand::SendMessage( message::RoomMessageEventContent::text_html( format!( - concat!( - "Report received from: {}\r\n\r\n", - "Event ID: {}\r\n", - "Room ID: {}\r\n", - "Sent By: {}\r\n\r\n", - "Report Score: {}\r\n", - "Report Reason: {}" - ), + "Report received from: {}\n\n\ + Event ID: {}\n\ + Room ID: {}\n\ + Sent By: {}\n\n\ + Report Score: {}\n\ + Report Reason: {}", sender_user, pdu.event_id, pdu.room_id, pdu.sender, body.score, body.reason ) .to_owned(), format!( - concat!( - "
<details><summary>Report received from: {}</summary>",
-                    "Event Info<br><br>Event ID: {}<br>Room ID: {}<br>Sent By: {}",
-                    "<br><br>Report Info<br><br>Report Score: {}",
-                    "<br>Report Reason: {}<br></details>\
" - ), + "
<details><summary>Report received from: <a href=\"https://matrix.to/#/{0}\">{0}\
+                </a></summary><ul><li>Event Info<ul><li>Event ID: <code>{1}</code>\
+                <a href=\"https://matrix.to/#/{2}/{1}\">🔗</a></li><li>Room ID: <code>{2}</code>\
+                </li><li>Sent By: <a href=\"https://matrix.to/#/{3}\">{3}</a></li></ul></li><li>\
+                Report Info<ul><li>Report Score: {4}</li><li>Report Reason: {5}</li></ul></li>\
+                </ul></details>\
", sender_user, pdu.event_id, pdu.room_id, @@ -92,8 +77,6 @@ pub async fn report_event_route( ), )); - drop(state_lock); - db.flush()?; Ok(report_content::Response {}.into()) From bbe16f84679061f1f4af5c1ab76f519279a234c0 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 24 Oct 2021 00:45:02 +0000 Subject: [PATCH 004/445] Update Ruma --- Cargo.toml | 2 +- src/client_server/room.rs | 13 +++++++++---- 2 files changed, 10 insertions(+), 5 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index dae68bf1..13a7af44 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle request # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "58cdcae1f9a8f4824bcbec1de1bb13e659c66804", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "e7f01ca55a1eff437bad754bf0554cc09f44ec2a", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 2d1fe237..ec09eec8 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -22,10 +22,10 @@ use ruma::{ }, EventType, }, - serde::JsonObject, + serde::{JsonObject}, RoomAliasId, RoomId, RoomVersionId, }; -use serde_json::value::to_raw_value; +use serde_json::{value::to_raw_value}; use std::{cmp::max, collections::BTreeMap, convert::TryFrom, sync::Arc}; use tracing::{info, warn}; @@ -102,9 +102,14 @@ pub async fn create_room_route( } })?; + let creation_content = match body.creation_content.clone() { + Some(content) => content.deserialize().expect("Invalid creation content"), + None => create_room::CreationContent::new(), + }; + let mut content = RoomCreateEventContent::new(sender_user.clone()); - content.federate = body.creation_content.federate; - content.predecessor = body.creation_content.predecessor.clone(); + content.federate = creation_content.federate; + content.predecessor = creation_content.predecessor.clone(); content.room_version = match body.room_version.clone() { Some(room_version) => { if room_version == RoomVersionId::Version5 || room_version == RoomVersionId::Version6 { From 8087a26a35fdcd495e28e8bff401fa3ba2afd9ef Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 24 Oct 2021 20:26:51 +0000 Subject: [PATCH 005/445] Make createRoom follow spec for m.room.create, allowing creation of spaces --- src/client_server/room.rs | 65 +++++++++++++++++++++++++++++++-------- 1 file changed, 53 insertions(+), 12 deletions(-) diff --git a/src/client_server/room.rs 
b/src/client_server/room.rs index ec09eec8..5e59e81d 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -22,11 +22,16 @@ use ruma::{ }, EventType, }, - serde::{JsonObject}, + serde::{CanonicalJsonObject, JsonObject, Raw}, RoomAliasId, RoomId, RoomVersionId, }; -use serde_json::{value::to_raw_value}; -use std::{cmp::max, collections::BTreeMap, convert::TryFrom, sync::Arc}; +use serde_json::{json, value::to_raw_value}; +use std::{ + cmp::max, + collections::BTreeMap, + convert::{TryFrom, TryInto}, + sync::Arc, +}; use tracing::{info, warn}; #[cfg(feature = "conduit_bin")] @@ -102,15 +107,7 @@ pub async fn create_room_route( } })?; - let creation_content = match body.creation_content.clone() { - Some(content) => content.deserialize().expect("Invalid creation content"), - None => create_room::CreationContent::new(), - }; - - let mut content = RoomCreateEventContent::new(sender_user.clone()); - content.federate = creation_content.federate; - content.predecessor = creation_content.predecessor.clone(); - content.room_version = match body.room_version.clone() { + let room_version = match body.room_version.clone() { Some(room_version) => { if room_version == RoomVersionId::Version5 || room_version == RoomVersionId::Version6 { room_version @@ -124,6 +121,50 @@ pub async fn create_room_route( None => RoomVersionId::Version6, }; + let content = match &body.creation_content { + Some(content) => { + let mut content = content + .deserialize_as::() + .expect("Invalid creation content"); + content.insert( + "creator".into(), + json!(sender_user.clone()).try_into().unwrap(), + ); + content.insert( + "room_version".into(), + json!(room_version.as_str()).try_into().unwrap(), + ); + content + } + None => { + let mut content = Raw::::from_json( + to_raw_value(&RoomCreateEventContent::new(sender_user.clone())).unwrap(), + ) + .deserialize_as::() + .unwrap(); + content.insert( + "room_version".into(), + json!(room_version.as_str()).try_into().unwrap(), + ); + content + } + }; + + // Validate creation content + match Raw::::from_json( + to_raw_value(&content).expect("Invalid creation content"), + ) + .deserialize_as::() + { + Ok(_t) => {} + Err(_e) => { + return Err(Error::BadRequest( + ErrorKind::BadJson, + "Invalid creation content", + )) + } + }; + // 1. The room create event db.rooms.build_and_append_pdu( PduBuilder { From d5d25fb064449cb42a0243248e6fc2020bf77fe2 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 24 Oct 2021 22:13:08 +0000 Subject: [PATCH 006/445] Preserve all m.room.create entries when performing room upgrades --- src/client_server/room.rs | 37 ++++++++++++++++++++++++++++--------- 1 file changed, 28 insertions(+), 9 deletions(-) diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 5e59e81d..0c62d2d6 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -478,7 +478,7 @@ pub async fn get_room_aliases_route( .into()) } -/// # `GET /_matrix/client/r0/rooms/{roomId}/upgrade` +/// # `POST /_matrix/client/r0/rooms/{roomId}/upgrade` /// /// Upgrades the room. /// @@ -556,16 +556,15 @@ pub async fn upgrade_room_route( ); let state_lock = mutex_state.lock().await; - // Get the old room federations status - let federate = serde_json::from_str::( + // Get the old room creation event + let mut create_event_content = serde_json::from_str::( db.rooms .room_state_get(&body.room_id, &EventType::RoomCreate, "")? .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? 
.content .get(), ) - .map_err(|_| Error::bad_database("Invalid room event in database."))? - .federate; + .map_err(|_| Error::bad_database("Invalid room event in database."))?; // Use the m.room.tombstone event as the predecessor let predecessor = Some(ruma::events::room::create::PreviousRoom::new( @@ -574,10 +573,30 @@ pub async fn upgrade_room_route( )); // Send a m.room.create event containing a predecessor field and the applicable room_version - let mut create_event_content = RoomCreateEventContent::new(sender_user.clone()); - create_event_content.federate = federate; - create_event_content.room_version = body.new_version.clone(); - create_event_content.predecessor = predecessor; + create_event_content.insert( + "creator".into(), + json!(sender_user.clone()).try_into().unwrap(), + ); + create_event_content.insert( + "room_version".into(), + json!(body.new_version.clone()).try_into().unwrap(), + ); + create_event_content.insert("predecessor".into(), json!(predecessor).try_into().unwrap()); + + // Validate creation event content + match Raw::::from_json( + to_raw_value(&create_event_content).expect("Error forming creation event"), + ) + .deserialize_as::() + { + Ok(_t) => {} + Err(_e) => { + return Err(Error::BadRequest( + ErrorKind::BadJson, + "Error forming creation event", + )) + } + }; db.rooms.build_and_append_pdu( PduBuilder { From 743bdbe96125881418feb8583edb75ca703da4fc Mon Sep 17 00:00:00 2001 From: Andrew Morgan Date: Tue, 26 Oct 2021 13:30:02 +0000 Subject: [PATCH 007/445] Add 'Federation publicRoom Name/topic keys are correct' test to sytest whitelist --- tests/sytest/sytest-whitelist | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/sytest/sytest-whitelist b/tests/sytest/sytest-whitelist index eda851ad..5afc3fd9 100644 --- a/tests/sytest/sytest-whitelist +++ b/tests/sytest/sytest-whitelist @@ -510,3 +510,4 @@ remote user can join room with version 5 remote user can join room with version 6 setting 'm.room.name' respects room powerlevel setting 'm.room.power_levels' respects room powerlevel +Federation publicRoom Name/topic keys are correct From 86177faae7f812136d02d08fe2f6533eabe28642 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 7 Nov 2021 07:57:15 +0000 Subject: [PATCH 008/445] Fix join panic bug --- src/client_server/membership.rs | 2 +- src/server_server.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 732f6162..ec685ec9 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -934,7 +934,7 @@ pub(crate) async fn invite_helper<'a>( unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone()); unsigned.insert( "prev_sender".to_owned(), - serde_json::from_str(prev_pdu.sender.as_str()).expect("UserId is valid string"), + to_raw_value(&prev_pdu.sender).expect("UserId is valid"), ); } diff --git a/src/server_server.rs b/src/server_server.rs index 68e262b4..482edf0f 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -2721,7 +2721,7 @@ pub fn create_join_event_template_route( unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone()); unsigned.insert( "prev_sender".to_owned(), - serde_json::from_str(prev_pdu.sender.as_str()).expect("UserId is valid string"), + to_raw_value(&prev_pdu.sender).expect("UserId is valid"), ); } From c4bce1d0c7ee0ba9c88fdccb11ac79112c19075b Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Tue, 9 Nov 2021 16:12:44 +0000 Subject: [PATCH 009/445] Cleanup room.rs; 
replace unwraps with map_err --- src/client_server/room.rs | 86 +++++++++++++++++++++++---------------- 1 file changed, 50 insertions(+), 36 deletions(-) diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 0c62d2d6..47c7ee6f 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -22,7 +22,7 @@ use ruma::{ }, EventType, }, - serde::{CanonicalJsonObject, JsonObject, Raw}, + serde::{CanonicalJsonObject, JsonObject}, RoomAliasId, RoomId, RoomVersionId, }; use serde_json::{json, value::to_raw_value}; @@ -128,42 +128,48 @@ pub async fn create_room_route( .expect("Invalid creation content"); content.insert( "creator".into(), - json!(sender_user.clone()).try_into().unwrap(), + json!(&sender_user).try_into().map_err(|_| { + Error::BadRequest(ErrorKind::BadJson, "Invalid creation content") + })?, ); content.insert( "room_version".into(), - json!(room_version.as_str()).try_into().unwrap(), + json!(room_version.as_str()).try_into().map_err(|_| { + Error::BadRequest(ErrorKind::BadJson, "Invalid creation content") + })?, ); content } None => { - let mut content = Raw::::from_json( - to_raw_value(&RoomCreateEventContent::new(sender_user.clone())).unwrap(), + let mut content = serde_json::from_str::( + to_raw_value(&RoomCreateEventContent::new(sender_user.clone())) + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid creation content"))? + .get(), ) - .deserialize_as::() .unwrap(); content.insert( "room_version".into(), - json!(room_version.as_str()).try_into().unwrap(), + json!(room_version.as_str()).try_into().map_err(|_| { + Error::BadRequest(ErrorKind::BadJson, "Invalid creation content") + })?, ); content } }; // Validate creation content - match Raw::::from_json( - to_raw_value(&content).expect("Invalid creation content"), - ) - .deserialize_as::() - { - Ok(_t) => {} - Err(_e) => { - return Err(Error::BadRequest( - ErrorKind::BadJson, - "Invalid creation content", - )) - } - }; + let de_result = serde_json::from_str::( + to_raw_value(&content) + .expect("Invalid creation content") + .get(), + ); + + if let Err(_) = de_result { + return Err(Error::BadRequest( + ErrorKind::BadJson, + "Invalid creation content", + )); + } // 1. 
The room create event db.rooms.build_and_append_pdu( @@ -575,28 +581,36 @@ pub async fn upgrade_room_route( // Send a m.room.create event containing a predecessor field and the applicable room_version create_event_content.insert( "creator".into(), - json!(sender_user.clone()).try_into().unwrap(), + json!(&sender_user) + .try_into() + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?, ); create_event_content.insert( "room_version".into(), - json!(body.new_version.clone()).try_into().unwrap(), + json!(&body.new_version) + .try_into() + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?, + ); + create_event_content.insert( + "predecessor".into(), + json!(predecessor) + .try_into() + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?, ); - create_event_content.insert("predecessor".into(), json!(predecessor).try_into().unwrap()); // Validate creation event content - match Raw::::from_json( - to_raw_value(&create_event_content).expect("Error forming creation event"), - ) - .deserialize_as::() - { - Ok(_t) => {} - Err(_e) => { - return Err(Error::BadRequest( - ErrorKind::BadJson, - "Error forming creation event", - )) - } - }; + let de_result = serde_json::from_str::( + to_raw_value(&create_event_content) + .expect("Error forming creation event") + .get(), + ); + + if let Err(_) = de_result { + return Err(Error::BadRequest( + ErrorKind::BadJson, + "Error forming creation event", + )); + } db.rooms.build_and_append_pdu( PduBuilder { From 109892b4b754e1666d4f00d9aec6356b46093668 Mon Sep 17 00:00:00 2001 From: Moritz Bitsch Date: Fri, 1 Oct 2021 15:53:16 +0200 Subject: [PATCH 010/445] Implement turn server settings this fills out the infos in /_matrix/client/r0/voip/turnServer with values specified in the server config --- src/client_server/voip.rs | 14 +++++++------- src/database.rs | 12 ++++++++++++ src/database/globals.rs | 16 ++++++++++++++++ 3 files changed, 35 insertions(+), 7 deletions(-) diff --git a/src/client_server/voip.rs b/src/client_server/voip.rs index 2a7f28e1..83f39a48 100644 --- a/src/client_server/voip.rs +++ b/src/client_server/voip.rs @@ -1,4 +1,4 @@ -use crate::ConduitResult; +use crate::{database::DatabaseGuard, ConduitResult}; use ruma::api::client::r0::voip::get_turn_server_info; use std::time::Duration; @@ -9,13 +9,13 @@ use rocket::get; /// /// TODO: Returns information about the recommended turn server. 
#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/voip/turnServer"))] -#[tracing::instrument] -pub async fn turn_server_route() -> ConduitResult { +#[tracing::instrument(skip(db))] +pub async fn turn_server_route(db: DatabaseGuard) -> ConduitResult { Ok(get_turn_server_info::Response { - username: "".to_owned(), - password: "".to_owned(), - uris: Vec::new(), - ttl: Duration::from_secs(60 * 60 * 24), + username: db.globals.turn_username().clone(), + password: db.globals.turn_password().clone(), + uris: db.globals.turn_uris().to_vec(), + ttl: Duration::from_secs(db.globals.turn_ttl()), } .into()) } diff --git a/src/database.rs b/src/database.rs index 8cf4f640..85213c00 100644 --- a/src/database.rs +++ b/src/database.rs @@ -74,6 +74,14 @@ pub struct Config { trusted_servers: Vec>, #[serde(default = "default_log")] pub log: String, + #[serde(default)] + turn_username: String, + #[serde(default)] + turn_password: String, + #[serde(default = "Vec::new")] + turn_uris: Vec, + #[serde(default = "default_turn_ttl")] + turn_ttl: u64, #[serde(flatten)] catchall: BTreeMap, @@ -131,6 +139,10 @@ fn default_log() -> String { "info,state_res=warn,rocket=off,_=off,sled=off".to_owned() } +fn default_turn_ttl() -> u64 { + 60 * 60 * 24 +} + #[cfg(feature = "sled")] pub type Engine = abstraction::sled::Engine; diff --git a/src/database/globals.rs b/src/database/globals.rs index f1cbbd92..7338f1ed 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -226,6 +226,22 @@ impl Globals { self.jwt_decoding_key.as_ref() } + pub fn turn_password(&self) -> &String { + &self.config.turn_password + } + + pub fn turn_ttl(&self) -> u64 { + self.config.turn_ttl + } + + pub fn turn_uris(&self) -> &[String] { + &self.config.turn_uris + } + + pub fn turn_username(&self) -> &String { + &self.config.turn_username + } + /// TODO: the key valid until timestamp is only honored in room version > 4 /// Remove the outdated keys and insert the new ones. 
/// From 9fccbb014a3297961fd169ce12363564e56afbc3 Mon Sep 17 00:00:00 2001 From: Moritz Bitsch Date: Sat, 2 Oct 2021 00:37:39 +0200 Subject: [PATCH 011/445] Implement TURN server authentication with hmac This is a prefered method to allow limited access to the TURN server --- Cargo.lock | 35 +++++++++++++++++++++++++++ Cargo.toml | 3 +++ src/client_server/voip.rs | 51 +++++++++++++++++++++++++++++++++------ src/database.rs | 2 ++ src/database/globals.rs | 4 +++ 5 files changed, 88 insertions(+), 7 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 293bcff7..68293896 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -245,6 +245,7 @@ dependencies = [ "crossbeam", "directories", "heed", + "hmac", "http", "image", "jsonwebtoken", @@ -266,6 +267,7 @@ dependencies = [ "serde", "serde_json", "serde_yaml", + "sha-1", "sled", "thiserror", "thread_local", @@ -428,6 +430,16 @@ dependencies = [ "lazy_static", ] +[[package]] +name = "crypto-mac" +version = "0.11.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" +dependencies = [ + "generic-array", + "subtle", +] + [[package]] name = "curve25519-dalek" version = "3.2.0" @@ -897,6 +909,16 @@ dependencies = [ "libc", ] +[[package]] +name = "hmac" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" +dependencies = [ + "crypto-mac", + "digest", +] + [[package]] name = "hostname" version = "0.3.1" @@ -2422,6 +2444,19 @@ dependencies = [ "yaml-rust", ] +[[package]] +name = "sha-1" +version = "0.9.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" +dependencies = [ + "block-buffer", + "cfg-if 1.0.0", + "cpufeatures", + "digest", + "opaque-debug", +] + [[package]] name = "sha1" version = "0.6.0" diff --git a/Cargo.toml b/Cargo.toml index 13a7af44..fc83d11b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -79,6 +79,9 @@ num_cpus = "1.13.0" threadpool = "1.8.1" heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true } thread_local = "1.1.3" +# used for TURN server authentication +hmac = "0.11.0" +sha-1 = "0.9.8" [features] default = ["conduit_bin", "backend_sqlite"] diff --git a/src/client_server/voip.rs b/src/client_server/voip.rs index 83f39a48..9c3b20d4 100644 --- a/src/client_server/voip.rs +++ b/src/client_server/voip.rs @@ -1,6 +1,11 @@ -use crate::{database::DatabaseGuard, ConduitResult}; +use crate::{database::DatabaseGuard, ConduitResult, Ruma}; +use hmac::{Hmac, Mac, NewMac}; use ruma::api::client::r0::voip::get_turn_server_info; -use std::time::Duration; +use ruma::SecondsSinceUnixEpoch; +use sha1::Sha1; +use std::time::{Duration, SystemTime}; + +type HmacSha1 = Hmac; #[cfg(feature = "conduit_bin")] use rocket::get; @@ -8,12 +13,44 @@ use rocket::get; /// # `GET /_matrix/client/r0/voip/turnServer` /// /// TODO: Returns information about the recommended turn server. 
-#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/voip/turnServer"))] -#[tracing::instrument(skip(db))] -pub async fn turn_server_route(db: DatabaseGuard) -> ConduitResult { +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/voip/turnServer", data = "") +)] +#[tracing::instrument(skip(body, db))] +pub async fn turn_server_route( + body: Ruma, + db: DatabaseGuard, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let turn_secret = db.globals.turn_secret(); + + let (username, password) = if turn_secret != "" { + let expiry = SecondsSinceUnixEpoch::from_system_time( + SystemTime::now() + Duration::from_secs(db.globals.turn_ttl()), + ) + .expect("time is valid"); + + let username: String = format!("{}:{}", expiry.get(), sender_user); + + let mut mac = HmacSha1::new_from_slice(turn_secret.as_bytes()) + .expect("HMAC can take key of any size"); + mac.update(username.as_bytes()); + + let password: String = base64::encode_config(mac.finalize().into_bytes(), base64::STANDARD); + + (username, password) + } else { + ( + db.globals.turn_username().clone(), + db.globals.turn_password().clone(), + ) + }; + Ok(get_turn_server_info::Response { - username: db.globals.turn_username().clone(), - password: db.globals.turn_password().clone(), + username: username, + password: password, uris: db.globals.turn_uris().to_vec(), ttl: Duration::from_secs(db.globals.turn_ttl()), } diff --git a/src/database.rs b/src/database.rs index 85213c00..080e24b3 100644 --- a/src/database.rs +++ b/src/database.rs @@ -80,6 +80,8 @@ pub struct Config { turn_password: String, #[serde(default = "Vec::new")] turn_uris: Vec, + #[serde(default)] + turn_secret: String, #[serde(default = "default_turn_ttl")] turn_ttl: u64, diff --git a/src/database/globals.rs b/src/database/globals.rs index 7338f1ed..05ecb568 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -242,6 +242,10 @@ impl Globals { &self.config.turn_username } + pub fn turn_secret(&self) -> &String { + &self.config.turn_secret + } + /// TODO: the key valid until timestamp is only honored in room version > 4 /// Remove the outdated keys and insert the new ones. 
/// From 2fff720df38c83673269fa597361c5631e991c9a Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Sun, 21 Nov 2021 17:34:08 +0000 Subject: [PATCH 012/445] CI: New Multiarch builds and Docker images + cargo clippy/test output now integrated into GitLab --- .dockerignore | 2 + .gitlab-ci.yml | 366 +++++++++++------------- Cargo.lock | 36 +-- Cargo.toml | 3 +- DEPLOY.md | 52 ++-- Dockerfile | 135 ++++----- docker/README.md | 107 ++++--- docker/ci-binaries-packaging.Dockerfile | 48 ++-- docker/healthcheck.sh | 6 +- 9 files changed, 351 insertions(+), 404 deletions(-) diff --git a/.dockerignore b/.dockerignore index 80b30721..933b380f 100644 --- a/.dockerignore +++ b/.dockerignore @@ -14,6 +14,8 @@ docker-compose* # Git folder .git .gitea +.gitlab +.github # Dot files .env diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 386986fd..6f2e0fe3 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -9,7 +9,6 @@ variables: FF_USE_FASTZIP: 1 CACHE_COMPRESSION_LEVEL: fastest - # --------------------------------------------------------------------- # # Cargo: Compiling for different architectures # # --------------------------------------------------------------------- # @@ -20,7 +19,7 @@ variables: rules: - if: '$CI_COMMIT_BRANCH == "master"' - if: '$CI_COMMIT_BRANCH == "next"' - - if: '$CI_COMMIT_TAG' + - if: "$CI_COMMIT_TAG" interruptible: true image: "rust:latest" tags: ["docker"] @@ -28,258 +27,209 @@ variables: paths: - cargohome - target/ - key: "build_cache-$TARGET-release" + key: "build_cache--$TARGET--$CI_COMMIT_BRANCH--release" variables: - CARGO_PROFILE_RELEASE_LTO=true - CARGO_PROFILE_RELEASE_CODEGEN_UNITS=1 + CARGO_PROFILE_RELEASE_LTO: "true" + CARGO_PROFILE_RELEASE_CODEGEN_UNITS: "1" before_script: - 'echo "Building for target $TARGET"' - 'mkdir -p cargohome && CARGOHOME="cargohome"' - - "cat /etc/*-release && rustc --version && cargo --version" # Print version info for debugging - - 'apt-get update -yqq' - - 'echo "Installing packages: $NEEDED_PACKAGES"' - - "apt-get install -yqq --no-install-recommends $NEEDED_PACKAGES" + - "rustc --version && cargo --version && rustup show" # Print version info for debugging - "rustup target add $TARGET" script: - time cargo build --target $TARGET --release - - 'mv "target/$TARGET/release/conduit" "conduit-$TARGET"' + - 'cp "target/$TARGET/release/conduit" "conduit-$TARGET"' artifacts: expire_in: never - -build:release:cargo:x86_64-unknown-linux-gnu: +build:release:cargo:x86_64-unknown-linux-musl-with-debug: extends: .build-cargo-shared-settings + image: messense/rust-musl-cross:x86_64-musl variables: - TARGET: "x86_64-unknown-linux-gnu" + CARGO_PROFILE_RELEASE_DEBUG: 2 # Enable debug info for flamegraph profiling + TARGET: "x86_64-unknown-linux-musl" + after_script: + - "mv ./conduit-x86_64-unknown-linux-musl ./conduit-x86_64-unknown-linux-musl-with-debug" artifacts: - name: "conduit-x86_64-unknown-linux-gnu" + name: "conduit-x86_64-unknown-linux-musl-with-debug" paths: - - "conduit-x86_64-unknown-linux-gnu" - expose_as: "Conduit for x86_64-unknown-linux-gnu" + - "conduit-x86_64-unknown-linux-musl-with-debug" + expose_as: "Conduit for x86_64-unknown-linux-musl-with-debug" -build:release:cargo:armv7-unknown-linux-gnueabihf: +build:release:cargo:x86_64-unknown-linux-musl: extends: .build-cargo-shared-settings + image: messense/rust-musl-cross:x86_64-musl variables: - TARGET: "armv7-unknown-linux-gnueabihf" - NEEDED_PACKAGES: "build-essential gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf libc6-dev-armhf-cross" - 
CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_LINKER: arm-linux-gnueabihf-gcc - CC_armv7_unknown_linux_gnueabihf: arm-linux-gnueabihf-gcc - CXX_armv7_unknown_linux_gnueabihf: arm-linux-gnueabihf-g++ + TARGET: "x86_64-unknown-linux-musl" artifacts: - name: "conduit-armv7-unknown-linux-gnueabihf" + name: "conduit-x86_64-unknown-linux-musl" paths: - - "conduit-armv7-unknown-linux-gnueabihf" - expose_as: "Conduit for armv7-unknown-linux-gnueabihf" + - "conduit-x86_64-unknown-linux-musl" + expose_as: "Conduit for x86_64-unknown-linux-musl" -build:release:cargo:aarch64-unknown-linux-gnu: +build:release:cargo:arm-unknown-linux-musleabihf: extends: .build-cargo-shared-settings + image: messense/rust-musl-cross:arm-musleabihf variables: - TARGET: "aarch64-unknown-linux-gnu" - NEEDED_PACKAGES: "build-essential gcc-10-aarch64-linux-gnu g++-aarch64-linux-gnu libc6-dev-arm64-cross" - CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER: aarch64-linux-gnu-gcc - CC_aarch64_unknown_linux_gnu: aarch64-linux-gnu-gcc - CXX_aarch64_unknown_linux_gnu: aarch64-linux-gnu-g++ - TARGET_CC: "/usr/bin/aarch64-linux-gnu-gcc-10" - TARGET_AR: "/usr/bin/aarch64-linux-gnu-gcc-ar-10" + TARGET: "arm-unknown-linux-musleabihf" artifacts: - name: "conduit-aarch64-unknown-linux-gnu" + name: "conduit-arm-unknown-linux-musleabihf" paths: - - "conduit-aarch64-unknown-linux-gnu" - expose_as: "Conduit for aarch64-unknown-linux-gnu" + - "conduit-arm-unknown-linux-musleabihf" + expose_as: "Conduit for arm-unknown-linux-musleabihf" -build:release:cargo:x86_64-unknown-linux-musl: +build:release:cargo:armv7-unknown-linux-musleabihf: extends: .build-cargo-shared-settings - image: "rust:alpine" + image: messense/rust-musl-cross:armv7-musleabihf variables: - TARGET: "x86_64-unknown-linux-musl" - before_script: - - 'echo "Building for target $TARGET"' - - 'mkdir -p cargohome && CARGOHOME="cargohome"' - - "cat /etc/*-release && rustc --version && cargo --version" # Print version info for debugging - - "rustup target add $TARGET" - - "apk add libc-dev" + TARGET: "armv7-unknown-linux-musleabihf" artifacts: - name: "conduit-x86_64-unknown-linux-musl" + name: "conduit-armv7-unknown-linux-musleabihf" paths: - - "conduit-x86_64-unknown-linux-musl" - expose_as: "Conduit for x86_64-unknown-linux-musl" - + - "conduit-armv7-unknown-linux-musleabihf" + expose_as: "Conduit for armv7-unknown-linux-musleabihf" +build:release:cargo:aarch64-unknown-linux-musl: + extends: .build-cargo-shared-settings + image: messense/rust-musl-cross:aarch64-musl + variables: + TARGET: "aarch64-unknown-linux-musl" + artifacts: + name: "conduit-aarch64-unknown-linux-musl" + paths: + - "conduit-aarch64-unknown-linux-musl" + expose_as: "Conduit for aarch64-unknown-linux-musl" .cargo-debug-shared-settings: extends: ".build-cargo-shared-settings" rules: - - if: '$CI_COMMIT_BRANCH' - - if: '$CI_COMMIT_TAG' + - if: '$CI_COMMIT_BRANCH != "master"' cache: - key: "build_cache-$TARGET-debug" + key: "build_cache--$TARGET--$CI_COMMIT_BRANCH--debug" script: - "time cargo build --target $TARGET" - 'mv "target/$TARGET/debug/conduit" "conduit-debug-$TARGET"' artifacts: expire_in: 4 weeks -build:debug:cargo:x86_64-unknown-linux-gnu: - extends: ".cargo-debug-shared-settings" - variables: - TARGET: "x86_64-unknown-linux-gnu" - artifacts: - name: "conduit-debug-x86_64-unknown-linux-gnu" - paths: - - "conduit-debug-x86_64-unknown-linux-gnu" - expose_as: "Conduit DEBUG for x86_64-unknown-linux-gnu" - build:debug:cargo:x86_64-unknown-linux-musl: extends: ".cargo-debug-shared-settings" - image: "rust:alpine" 
+ image: messense/rust-musl-cross:x86_64-musl variables: TARGET: "x86_64-unknown-linux-musl" - before_script: - - 'echo "Building for target $TARGET"' - - 'mkdir -p cargohome && CARGOHOME="cargohome"' - - "cat /etc/*-release && rustc --version && cargo --version" # Print version info for debugging - - "rustup target add $TARGET" - - "apk add libc-dev" artifacts: name: "conduit-debug-x86_64-unknown-linux-musl" paths: - "conduit-debug-x86_64-unknown-linux-musl" expose_as: "Conduit DEBUG for x86_64-unknown-linux-musl" - - -# --------------------------------------------------------------------- # -# Cargo: Compiling deb packages for different architectures # -# --------------------------------------------------------------------- # - - -.build-cargo-deb-shared-settings: - stage: "build" - needs: [ ] - rules: - - if: '$CI_COMMIT_BRANCH == "master"' - - if: '$CI_COMMIT_BRANCH == "next"' - - if: '$CI_COMMIT_TAG' - interruptible: true - image: "rust:latest" - tags: ["docker"] - cache: - paths: - - cargohome - - target/ - key: "build_cache-deb-$TARGET" - before_script: - - 'echo "Building debian package for target $TARGET"' - - 'mkdir -p cargohome && CARGOHOME="cargohome"' - - "cat /etc/*-release && rustc --version && cargo --version" # Print version info for debugging - - 'apt-get update -yqq' - - 'echo "Installing packages: $NEEDED_PACKAGES"' - - "apt-get install -yqq --no-install-recommends $NEEDED_PACKAGES" - - "rustup target add $TARGET" - - "cargo install cargo-deb" - script: - - time cargo deb --target $TARGET - - 'mv target/$TARGET/debian/*.deb "conduit-$TARGET.deb"' - -build:cargo-deb:x86_64-unknown-linux-gnu: - extends: .build-cargo-deb-shared-settings - variables: - TARGET: "x86_64-unknown-linux-gnu" - NEEDED_PACKAGES: "" - artifacts: - name: "conduit-x86_64-unknown-linux-gnu.deb" - paths: - - "conduit-x86_64-unknown-linux-gnu.deb" - expose_as: "Debian Package x86_64" - - # --------------------------------------------------------------------- # # Create and publish docker image # # --------------------------------------------------------------------- # -# Build a docker image by packaging up the x86_64-unknown-linux-musl binary into an alpine image .docker-shared-settings: stage: "build docker image" - needs: [] - interruptible: true - image: - name: "gcr.io/kaniko-project/executor:debug" - entrypoint: [""] + image: jdrouet/docker-with-buildx:stable tags: ["docker"] - variables: - # Configure Kaniko Caching: https://cloud.google.com/build/docs/kaniko-cache - KANIKO_CACHE_ARGS: "--cache=true --cache-copy-layers=true --cache-ttl=120h --cache-repo $CI_REGISTRY_IMAGE/kaniko-ci-cache" - before_script: - - "mkdir -p /kaniko/.docker" - - 'echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"},\"$DOCKER_HUB\":{\"username\":\"$DOCKER_HUB_USER\",\"password\":\"$DOCKER_HUB_PASSWORD\"}}}" > /kaniko/.docker/config.json' - - -build:docker:next: - extends: .docker-shared-settings + services: + - docker:dind needs: - "build:release:cargo:x86_64-unknown-linux-musl" + - "build:release:cargo:arm-unknown-linux-musleabihf" + - "build:release:cargo:armv7-unknown-linux-musleabihf" + - "build:release:cargo:aarch64-unknown-linux-musl" + variables: + DOCKER_HOST: tcp://docker:2375/ + DOCKER_TLS_CERTDIR: "" + DOCKER_DRIVER: overlay2 + PLATFORMS: "linux/arm/v6,linux/arm/v7,linux/arm64/v8,linux/amd64" + DOCKER_FILE: "docker/ci-binaries-packaging.Dockerfile" + before_script: + - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY + # Only 
log in to Dockerhub if the credentials are given: + - if [ -n "${DOCKER_HUB}" ]; then docker login -u "$DOCKER_HUB_USER" -p "$DOCKER_HUB_PASSWORD" "$DOCKER_HUB"; fi script: + # Prepare buildx to build multiarch stuff: + - docker context create 'ci-context' + - docker buildx create --name 'multiarch-builder' --use 'ci-context' + # Copy binaries to their docker arch path + - mkdir -p linux/ && mv ./conduit-x86_64-unknown-linux-musl linux/amd64 + - mkdir -p linux/arm/ && mv ./conduit-arm-unknown-linux-musleabihf linux/arm/v6 + - mkdir -p linux/arm/ && mv ./conduit-armv7-unknown-linux-musleabihf linux/arm/v7 + - mkdir -p linux/arm64/ && mv ./conduit-aarch64-unknown-linux-musl linux/arm64/v8 + # Actually create multiarch image: - > - /kaniko/executor - $KANIKO_CACHE_ARGS - --force - --context $CI_PROJECT_DIR + docker buildx build + --pull + --push --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) --build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA" - --dockerfile "$CI_PROJECT_DIR/docker/ci-binaries-packaging.Dockerfile" - --destination "$CI_REGISTRY_IMAGE/conduit:next" - --destination "$CI_REGISTRY_IMAGE/conduit:next-alpine" - --destination "$CI_REGISTRY_IMAGE/conduit:commit-$CI_COMMIT_SHORT_SHA" - --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next" - --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next-alpine" - --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:commit-$CI_COMMIT_SHORT_SHA" + --platform "$PLATFORMS" + --tag "$GL_IMAGE_TAG" + --tag "$GL_IMAGE_TAG-commit-$CI_COMMIT_SHORT_SHA" + --file "$DOCKER_FILE" . + # Only try to push to docker hub, if auth data for dockerhub exists: + - if [ -n "${DOCKER_HUB}" ]; then docker push "$DH_IMAGE_TAG"; fi + - if [ -n "${DOCKER_HUB}" ]; then docker push "$DH_IMAGE_TAG-commit-$CI_COMMIT_SHORT_SHA"; fi + +build:docker:next: + extends: .docker-shared-settings rules: - if: '$CI_COMMIT_BRANCH == "next"' - + variables: + GL_IMAGE_TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:next" + DH_IMAGE_TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next" build:docker:master: extends: .docker-shared-settings - needs: - - "build:release:cargo:x86_64-unknown-linux-musl" - script: - - > - /kaniko/executor - $KANIKO_CACHE_ARGS - --context $CI_PROJECT_DIR - --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') - --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) - --build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA" - --dockerfile "$CI_PROJECT_DIR/docker/ci-binaries-packaging.Dockerfile" - --destination "$CI_REGISTRY_IMAGE/conduit:latest" - --destination "$CI_REGISTRY_IMAGE/conduit:latest-alpine" - --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:latest" - --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:latest-alpine" rules: - if: '$CI_COMMIT_BRANCH == "master"' - - -build:docker:tags: - extends: .docker-shared-settings - needs: - - "build:release:cargo:x86_64-unknown-linux-musl" - script: - - > - /kaniko/executor - $KANIKO_CACHE_ARGS - --context $CI_PROJECT_DIR - --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') - --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) - --build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA" - --dockerfile "$CI_PROJECT_DIR/docker/ci-binaries-packaging.Dockerfile" - --destination "$CI_REGISTRY_IMAGE/conduit:$CI_COMMIT_TAG" - --destination "$CI_REGISTRY_IMAGE/conduit:$CI_COMMIT_TAG-alpine" - --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:$CI_COMMIT_TAG" - --destination 
"$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:$CI_COMMIT_TAG-alpine" - rules: - - if: '$CI_COMMIT_TAG' - - + variables: + GL_IMAGE_TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:latest" + DH_IMAGE_TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:latest" + +## Build a docker image by packaging up the x86_64-unknown-linux-musl binary into an alpine image +#.docker-shared-settings: +# stage: "build docker image" +# needs: [] +# interruptible: true +# image: +# name: "gcr.io/kaniko-project/executor:debug" +# entrypoint: [""] +# tags: ["docker"] +# variables: +# # Configure Kaniko Caching: https://cloud.google.com/build/docs/kaniko-cache +# KANIKO_CACHE_ARGS: "--cache=true --cache-copy-layers=true --cache-ttl=120h --cache-repo $CI_REGISTRY_IMAGE/kaniko-ci-cache" +# before_script: +# - "mkdir -p /kaniko/.docker" +# - 'echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"},\"$DOCKER_HUB\":{\"username\":\"$DOCKER_HUB_USER\",\"password\":\"$DOCKER_HUB_PASSWORD\"}}}" > /kaniko/.docker/config.json' +# +# +#build:docker:next: +# extends: .docker-shared-settings +# needs: +# - "build:release:cargo:x86_64-unknown-linux-musl" +# script: +# - > +# /kaniko/executor +# $KANIKO_CACHE_ARGS +# --force +# --context $CI_PROJECT_DIR +# --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') +# --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) +# --build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA" +# --dockerfile "$CI_PROJECT_DIR/docker/ci-binaries-packaging.Dockerfile" +# --destination "$CI_REGISTRY_IMAGE/conduit:next" +# --destination "$CI_REGISTRY_IMAGE/conduit:next-alpine" +# --destination "$CI_REGISTRY_IMAGE/conduit:commit-$CI_COMMIT_SHORT_SHA" +# --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next" +# --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next-alpine" +# --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:commit-$CI_COMMIT_SHORT_SHA" +# rules: +# - if: '$CI_COMMIT_BRANCH == "next"' +# +# # --------------------------------------------------------------------- # # Run tests # @@ -287,9 +237,9 @@ build:docker:tags: test:cargo: stage: "test" - needs: [ ] + needs: [] image: "rust:latest" - tags: [ "docker" ] + tags: ["docker"] variables: CARGO_HOME: "cargohome" cache: @@ -301,13 +251,20 @@ test:cargo: before_script: - mkdir -p $CARGO_HOME && echo "using $CARGO_HOME to cache cargo deps" - apt-get update -yqq - - apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config + - apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config wget - rustup component add clippy rustfmt + - wget "https://faulty-storage.de/gitlab-report" + - chmod +x ./gitlab-report script: - - rustc --version && cargo --version # Print version info for debugging + - rustc --version && cargo --version # Print version info for debugging - cargo fmt --all -- --check - - cargo test --workspace --verbose --locked - - cargo clippy + - "cargo test --color always --workspace --verbose --locked --no-fail-fast -- -Z unstable-options --format json | ./gitlab-report -p test > $CI_PROJECT_DIR/report.xml" + - "cargo clippy --color always --verbose --message-format=json | ./gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json" + artifacts: + when: always + reports: + junit: report.xml + codequality: gl-code-quality-report.json test:sytest: stage: "test" @@ -316,8 +273,8 @@ test:sytest: - "build:debug:cargo:x86_64-unknown-linux-musl" image: name: "valkum/sytest-conduit:latest" - 
entrypoint: [ "" ] - tags: [ "docker" ] + entrypoint: [""] + tags: ["docker"] variables: PLUGINS: "https://github.com/valkum/sytest_conduit/archive/master.tar.gz" before_script: @@ -330,7 +287,7 @@ test:sytest: script: - "SYTEST_EXIT_CODE=0" - "/bootstrap.sh conduit || SYTEST_EXIT_CODE=1" - - "perl /sytest/tap-to-junit-xml.pl --puretap --input /logs/results.tap --output $CI_PROJECT_DIR/sytest.xml \"Sytest\" && cp /logs/results.tap $CI_PROJECT_DIR/results.tap" + - 'perl /sytest/tap-to-junit-xml.pl --puretap --input /logs/results.tap --output $CI_PROJECT_DIR/sytest.xml "Sytest" && cp /logs/results.tap $CI_PROJECT_DIR/results.tap' - "exit $SYTEST_EXIT_CODE" artifacts: when: always @@ -340,7 +297,6 @@ test:sytest: reports: junit: "$CI_PROJECT_DIR/sytest.xml" - # --------------------------------------------------------------------- # # Store binaries as package so they have download urls # # --------------------------------------------------------------------- # @@ -348,25 +304,31 @@ test:sytest: publish:package: stage: "upload artifacts" needs: - - "build:release:cargo:x86_64-unknown-linux-gnu" - - "build:release:cargo:armv7-unknown-linux-gnueabihf" - - "build:release:cargo:aarch64-unknown-linux-gnu" - "build:release:cargo:x86_64-unknown-linux-musl" - - "build:cargo-deb:x86_64-unknown-linux-gnu" + - "build:release:cargo:arm-unknown-linux-musleabihf" + - "build:release:cargo:armv7-unknown-linux-musleabihf" + - "build:release:cargo:aarch64-unknown-linux-musl" + # - "build:cargo-deb:x86_64-unknown-linux-gnu" rules: - if: '$CI_COMMIT_BRANCH == "master"' - if: '$CI_COMMIT_BRANCH == "next"' - - if: '$CI_COMMIT_TAG' + - if: "$CI_COMMIT_TAG" image: curlimages/curl:latest tags: ["docker"] variables: GIT_STRATEGY: "none" # Don't need a clean copy of the code, we just operate on artifacts script: - 'BASE_URL="${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_PIPELINE_ID}"' - - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-gnu "${BASE_URL}/conduit-x86_64-unknown-linux-gnu"' - - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-armv7-unknown-linux-gnueabihf "${BASE_URL}/conduit-armv7-unknown-linux-gnueabihf"' - - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-aarch64-unknown-linux-gnu "${BASE_URL}/conduit-aarch64-unknown-linux-gnu"' - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-musl "${BASE_URL}/conduit-x86_64-unknown-linux-musl"' - - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-gnu.deb "${BASE_URL}/conduit-x86_64-unknown-linux-gnu.deb"' - + - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-arm-unknown-linux-musleabihf "${BASE_URL}/conduit-arm-unknown-linux-musleabihf"' + - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-armv7-unknown-linux-musleabihf "${BASE_URL}/conduit-armv7-unknown-linux-musleabihf"' + - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-aarch64-unknown-linux-musl "${BASE_URL}/conduit-aarch64-unknown-linux-musl"' +# Avoid duplicate pipelines +# See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines +workflow: + rules: + - if: '$CI_PIPELINE_SOURCE == "merge_request_event"' + - if: "$CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS" + when: never + - if: "$CI_COMMIT_BRANCH" diff --git a/Cargo.lock b/Cargo.lock index 293bcff7..166d67fc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1968,7 
+1968,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "assign", "js_int", @@ -1989,7 +1989,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "bytes", "http", @@ -2005,7 +2005,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2016,7 +2016,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "ruma-api", "ruma-common", @@ -2030,7 +2030,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.12.3" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "assign", "bytes", @@ -2050,7 +2050,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "indexmap", "js_int", @@ -2065,7 +2065,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "indoc", "js_int", @@ -2081,7 +2081,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2092,7 +2092,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "js_int", "ruma-api", @@ 
-2107,7 +2107,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "paste", "percent-encoding", @@ -2122,7 +2122,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2132,7 +2132,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "thiserror", ] @@ -2140,7 +2140,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "js_int", "ruma-api", @@ -2153,7 +2153,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "js_int", "ruma-api", @@ -2168,7 +2168,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "bytes", "form_urlencoded", @@ -2182,7 +2182,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2193,7 +2193,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2210,7 +2210,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.4.1" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +source = 
"git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" dependencies = [ "itertools 0.10.1", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 13a7af44..d0dd6413 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -120,13 +120,12 @@ maintainer-scripts = "debian/" systemd-units = { unit-name = "matrix-conduit" } [profile.dev] -lto = 'thin' +lto = 'off' incremental = true [profile.release] lto = 'thin' incremental = true - codegen-units=32 # If you want to make flamegraphs, enable debug info: # debug = true diff --git a/DEPLOY.md b/DEPLOY.md index 84dd2beb..6470c902 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -2,25 +2,30 @@ ## Getting help -If you run into any problems while setting up Conduit, write an email to `timo@koesters.xyz`, ask us in `#conduit:matrix.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new). +If you run into any problems while setting up Conduit, write an email to `timo@koesters.xyz`, ask us +in `#conduit:matrix.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new). ## Installing Conduit -You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the right url: +Although you might be able to compile Conduit for Windows, we do recommend running it on a linux server. We therefore +only offer Linux binaries. -| CPU Architecture | GNU (Ubuntu, Debian, ArchLinux, ...) | MUSL (Alpine, ... ) | -| -------------------- | ------------------------------------- | ----------------------- | -| x84_64 / amd64 | [Download][x84_64-gnu] | [Download][x84_64-musl] | -| armv7 (Raspberry Pi) | [Download][armv7-gnu] | - | -| armv8 / aarch64 | [Download][armv8-gnu] | - | +You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the right url: -[x84_64-gnu]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-gnu?job=build:release:cargo:x86_64-unknown-linux-gnu +| CPU Architecture | Download link | +| ------------------------------------------- | ----------------------- | +| x84_64 / amd64 (Most servers and computers) | [Download][x84_64-musl] | +| armv6 | [Download][armv6-musl] | +| armv7 (e.g. 
Raspberry Pi by default) | [Download][armv7-musl] | +| armv8 / aarch64 | [Download][armv8-musl] | [x84_64-musl]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-musl?job=build:release:cargo:x86_64-unknown-linux-musl -[armv7-gnu]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-armv7-unknown-linux-gnueabihf?job=build:release:cargo:armv7-unknown-linux-gnueabihf +[armv6-musl]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-arm-unknown-linux-musleabihf?job=build:release:cargo:arm-unknown-linux-musleabihf + +[armv7-musl]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-armv7-unknown-linux-musleabihf?job=build:release:cargo:armv7-unknown-linux-musleabihf -[armv8-gnu]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-aarch64-unknown-linux-gnu?job=build:release:cargo:aarch64-unknown-linux-gnu +[armv8-musl]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-aarch64-unknown-linux-musl?job=build:release:cargo:aarch64-unknown-linux-musl ```bash $ sudo wget -O /usr/local/bin/matrix-conduit @@ -32,15 +37,15 @@ Alternatively, you may compile the binary yourself using ```bash $ cargo build --release ``` + Note that this currently requires Rust 1.50. If you want to cross compile Conduit to another architecture, read the [Cross-Compile Guide](CROSS_COMPILE.md). - ## Adding a Conduit user -While Conduit can run as any user it is usually better to use dedicated users for different services. -This also allows you to make sure that the file permissions are correctly set up. +While Conduit can run as any user it is usually better to use dedicated users for different services. This also allows +you to make sure that the file permissions are correctly set up. In Debian you can use this command to create a Conduit user: @@ -50,9 +55,8 @@ sudo adduser --system conduit --no-create-home ## Setting up a systemd service -Now we'll set up a systemd service for Conduit, so it's easy to start/stop -Conduit and set it to autostart when your server reboots. Simply paste the -default systemd service you can find below into +Now we'll set up a systemd service for Conduit, so it's easy to start/stop Conduit and set it to autostart when your +server reboots. Simply paste the default systemd service you can find below into `/etc/systemd/system/conduit.service`. ```systemd @@ -77,10 +81,10 @@ Finally, run $ sudo systemctl daemon-reload ``` - ## Creating the Conduit configuration file -Now we need to create the Conduit's config file in `/etc/matrix-conduit/conduit.toml`. Paste this in **and take a moment to read it. You need to change at least the server name.** +Now we need to create the Conduit's config file in `/etc/matrix-conduit/conduit.toml`. Paste this in **and take a moment +to read it. You need to change at least the server name.** ```toml [global] @@ -128,8 +132,8 @@ address = "127.0.0.1" # This makes sure Conduit can only be reached using the re ## Setting the correct file permissions -As we are using a Conduit specific user we need to allow it to read the config. -To do that you can run this command on Debian: +As we are using a Conduit specific user we need to allow it to read the config. 
To do that you can run this command on +Debian: ```bash sudo chown -R conduit:nogroup /etc/matrix-conduit @@ -142,7 +146,6 @@ sudo mkdir -p /var/lib/matrix-conduit/conduit_db sudo chown -R conduit:nogroup /var/lib/matrix-conduit/conduit_db ``` - ## Setting up the Reverse Proxy This depends on whether you use Apache, Nginx or another web server. @@ -171,11 +174,9 @@ ProxyPassReverse /_matrix/ http://127.0.0.1:6167/_matrix/ $ sudo systemctl reload apache2 ``` - ### Nginx -If you use Nginx and not Apache, add the following server section inside the -http section of `/etc/nginx/nginx.conf` +If you use Nginx and not Apache, add the following server section inside the http section of `/etc/nginx/nginx.conf` ```nginx server { @@ -198,13 +199,13 @@ server { include /etc/letsencrypt/options-ssl-nginx.conf; } ``` + **You need to make some edits again.** When you are done, run ```bash $ sudo systemctl reload nginx ``` - ## SSL Certificate The easiest way to get an SSL certificate, if you don't have one already, is to install `certbot` and run this: @@ -213,7 +214,6 @@ The easiest way to get an SSL certificate, if you don't have one already, is to $ sudo certbot -d your.server.name ``` - ## You're done! Now you can start Conduit with: diff --git a/Dockerfile b/Dockerfile index f4b176f5..d137353a 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,75 +1,66 @@ -# Using multistage build: -# https://docs.docker.com/develop/develop-images/multistage-build/ -# https://whitfin.io/speeding-up-rust-docker-builds/ +# syntax=docker/dockerfile:1 +FROM docker.io/rust:1.53-alpine AS builder +WORKDIR /usr/src/conduit +# Install required packages to build Conduit and it's dependencies +RUN apk add musl-dev -########################## BUILD IMAGE ########################## -# Alpine build image to build Conduit's statically compiled binary -FROM alpine:3.14 as builder +# == Build dependencies without our own code separately for caching == +# +# Need a fake main.rs since Cargo refuses to build anything otherwise. +# +# See https://github.com/rust-lang/cargo/issues/2644 for a Cargo feature +# request that would allow just dependencies to be compiled, presumably +# regardless of whether source files are available. +RUN mkdir src && touch src/lib.rs && echo 'fn main() {}' > src/main.rs +COPY Cargo.toml Cargo.lock ./ +RUN cargo build --release && rm -r src + +# Copy over actual Conduit sources +COPY src src + +# main.rs and lib.rs need their timestamp updated for this to work correctly since +# otherwise the build with the fake main.rs from above is newer than the +# source files (COPY preserves timestamps). +# +# Builds conduit and places the binary at /usr/src/conduit/target/release/conduit +RUN touch src/main.rs && touch src/lib.rs && cargo build --release -# Install packages needed for building all crates -RUN apk add --no-cache \ - cargo \ - openssl-dev - -# Specifies if the local project is build or if Conduit gets build -# from the official git repository. Defaults to the git repo. -ARG LOCAL=false -# Specifies which revision/commit is build. Defaults to HEAD -ARG GIT_REF=origin/master - -# Copy project files from current folder -COPY . . -# Build it from the copied local files or from the official git repository -RUN if [[ $LOCAL == "true" ]]; then \ - mv ./docker/healthcheck.sh . ; \ - echo "Building from local source..." ; \ - cargo install --path . ; \ - else \ - echo "Building revision '${GIT_REF}' from online source..." 
; \ - cargo install --git "https://gitlab.com/famedly/conduit.git" --rev ${GIT_REF} ; \ - echo "Loadings healthcheck script from online source..." ; \ - wget "https://gitlab.com/famedly/conduit/-/raw/${GIT_REF#origin/}/docker/healthcheck.sh" ; \ - fi - -########################## RUNTIME IMAGE ########################## -# Create new stage with a minimal image for the actual -# runtime image/container -FROM alpine:3.14 - -ARG CREATED -ARG VERSION -ARG GIT_REF=origin/master -ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" -# Labels according to https://github.com/opencontainers/image-spec/blob/master/annotations.md -# including a custom label specifying the build command -LABEL org.opencontainers.image.created=${CREATED} \ - org.opencontainers.image.authors="Conduit Contributors" \ - org.opencontainers.image.title="Conduit" \ - org.opencontainers.image.version=${VERSION} \ - org.opencontainers.image.vendor="Conduit Contributors" \ - org.opencontainers.image.description="A Matrix homeserver written in Rust" \ - org.opencontainers.image.url="https://conduit.rs/" \ - org.opencontainers.image.revision=${GIT_REF} \ - org.opencontainers.image.source="https://gitlab.com/famedly/conduit.git" \ - org.opencontainers.image.licenses="Apache-2.0" \ - org.opencontainers.image.documentation="" \ - org.opencontainers.image.ref.name="" \ - org.label-schema.docker.build="docker build . -t matrixconduit/matrix-conduit:latest --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)" \ - maintainer="Weasy666" - -# Standard port on which Conduit launches. You still need to map the port when using the docker command or docker-compose. + +# --------------------------------------------------------------------------------------------------------------- +# Stuff below this line actually ends up in the resulting docker image +# --------------------------------------------------------------------------------------------------------------- +FROM docker.io/alpine:3.14 AS runner + +# Standard port on which Conduit launches. +# You still need to map the port when using the docker command or docker-compose. EXPOSE 6167 -# Copy config files from context and the binary from -# the "builder" stage to the current stage into folder -# /srv/conduit and create data folder for database +# Note from @jfowl: I would like to remove this in the future and just have the Docker version be configured with envs. +ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" + +# Conduit needs: +# ca-certificates: for https +# libgcc: Apparently this is needed, even if I (@jfowl) don't know exactly why. But whatever, it's not that big. 
+RUN apk add --no-cache \ + ca-certificates \ + curl \ + libgcc + + +# Created directory for the database and media files RUN mkdir -p /srv/conduit/.local/share/conduit -COPY --from=builder /root/.cargo/bin/conduit /srv/conduit/ -COPY --from=builder ./healthcheck.sh /srv/conduit/ +# Test if Conduit is still alive, uses the same endpoint as Element +COPY ./docker/healthcheck.sh /srv/conduit/ +HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh + +# Copy over the actual Conduit binary from the builder stage +COPY --from=builder /usr/src/conduit/target/release/conduit /srv/conduit/ + +# Improve security: Don't run stuff as root, that does not need to run as root: # Add www-data user and group with UID 82, as used by alpine # https://git.alpinelinux.org/aports/tree/main/nginx/nginx.pre-install RUN set -x ; \ @@ -79,19 +70,13 @@ RUN set -x ; \ # Change ownership of Conduit files to www-data user and group RUN chown -cR www-data:www-data /srv/conduit +RUN chmod +x /srv/conduit/healthcheck.sh -# Install packages needed to run Conduit -RUN apk add --no-cache \ - ca-certificates \ - curl \ - libgcc - -# Test if Conduit is still alive, uses the same endpoint as Element -HEALTHCHECK --start-period=5s --interval=60s CMD ./healthcheck.sh - -# Set user to www-data +# Change user to www-data USER www-data # Set container home directory WORKDIR /srv/conduit -# Run Conduit -ENTRYPOINT [ "/srv/conduit/conduit" ] + +# Run Conduit and print backtraces on panics +ENV RUST_BACKTRACE=1 +ENTRYPOINT [ "/srv/conduit/conduit" ] \ No newline at end of file diff --git a/docker/README.md b/docker/README.md index 0e834820..19d9dca6 100644 --- a/docker/README.md +++ b/docker/README.md @@ -2,53 +2,41 @@ > **Note:** To run and use Conduit you should probably use it with a Domain or Subdomain behind a reverse proxy (like Nginx, Traefik, Apache, ...) with a Lets Encrypt certificate. - ## Docker ### Build & Dockerfile The Dockerfile provided by Conduit has two stages, each of which creates an image. -1. **Builder:** Builds the binary from local context or by cloning a git revision from the official repository. -2. **Runtime:** Copies the built binary from **Builder** and sets up the runtime environment, like creating a volume to persist the database and applying the correct permissions. -The Dockerfile includes a few build arguments that should be supplied when building it. - -``` Dockerfile -ARG LOCAL=false -ARG CREATED -ARG VERSION -ARG GIT_REF=origin/master -``` - -- **CREATED:** Date and time as string (date-time as defined by RFC 3339). Will be used to create the Open Container Initiative compliant label `org.opencontainers.image.created`. Supply by it like this `$(date -u +'%Y-%m-%dT%H:%M:%SZ')` -- **VERSION:** The SemVer version of Conduit, which is in the image. Will be used to create the Open Container Initiative compliant label `org.opencontainers.image.version`. If you have a `Cargo.toml` in your build context, you can get it with `$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)` -- **LOCAL:** *(Optional)* A boolean value, specifies if the local build context should be used, or if the official repository will be cloned. If not supplied with the build command, it will default to `false`. -- **GIT_REF:** *(Optional)* A git ref, like `HEAD` or a commit ID. The supplied ref will be used to create the Open Container Initiative compliant label `org.opencontainers.image.revision` and will be the ref that is cloned from the repository when not building from the local context. 
If not supplied with the build command, it will default to `origin/master`. +1. **Builder:** Builds the binary from local context or by cloning a git revision from the official repository. +2. **Runner:** Copies the built binary from **Builder** and sets up the runtime environment, like creating a volume to persist the database and applying the correct permissions. To build the image you can use the following command -``` bash -docker build . -t matrixconduit/matrix-conduit:latest --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) +```bash +docker build --tag matrixconduit/matrix-conduit:latest . ``` which also will tag the resulting image as `matrixconduit/matrix-conduit:latest`. -**Note:** it ommits the two optional `build-arg`s. - ### Run After building the image you can simply run it with -``` bash +```bash docker run -d -p 8448:6167 -v ~/conduit.toml:/srv/conduit/conduit.toml -v db:/srv/conduit/.local/share/conduit matrixconduit/matrix-conduit:latest ``` or you can skip the build step and pull the image from one of the following registries: -| Registry | Image | Size | -| --------------- | ------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | -| Docker Hub | [matrixconduit/matrix-conduit:latest](https://hub.docker.com/r/matrixconduit/matrix-conduit) | ![Image Size](https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest) | -| GitLab Registry | [registry.gitlab.com/famedly/conduit/conduit:latest](https://gitlab.com/famedly/conduit/container_registry/2134341) | ![Image Size](https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest) | +| Registry | Image | Size | +| --------------- | --------------------------------------------------------------- | --------------------- | +| Docker Hub | [matrixconduit/matrix-conduit:latest][dh] | ![Image Size][shield] | +| GitLab Registry | [registry.gitlab.com/famedly/conduit/matrix-conduit:latest][gl] | ![Image Size][shield] | + +[dh]: https://hub.docker.com/r/matrixconduit/matrix-conduit +[gl]: https://gitlab.com/famedly/conduit/container_registry/ +[shield]: https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` config file, an example can be found [here](../conduit-example.toml). You can pass in different env vars to change config values on the fly. You can even configure Conduit completely by using env vars, but for that you need @@ -56,29 +44,26 @@ to pass `-e CONDUIT_CONFIG=""` into your container. For an overview of possible If you just want to test Conduit for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it. - ## Docker-compose If the docker command is not for you or your setup, you can also use one of the provided `docker-compose` files. Depending on your proxy setup, use the [`docker-compose.traefik.yml`](docker-compose.traefik.yml) and [`docker-compose.override.traefik.yml`](docker-compose.override.traefik.yml) for Traefik (don't forget to remove `.traefik` from the filenames) or the normal [`docker-compose.yml`](../docker-compose.yml) for every other reverse proxy. Additional info about deploying Conduit can be found [here](../DEPLOY.md). 
- ### Build To build the Conduit image with docker-compose, you first need to open and modify the `docker-compose.yml` file. There you need to comment the `image:` option and uncomment the `build:` option. Then call docker-compose with: -``` bash -CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up +```bash +docker-compose up ``` -This will also start the container right afterwards, so if want it to run in detached mode, you also should use the `-d` flag. For possible `build-args`, please take a look at the above `Build & Dockerfile` section. - +This will also start the container right afterwards, so if want it to run in detached mode, you also should use the `-d` flag. ### Run If you already have built the image or want to use one from the registries, you can just start the container and everything else in the compose file in detached mode with: -``` bash +```bash docker-compose up -d ``` @@ -101,32 +86,36 @@ So...step by step: 3. Create the `conduit.toml` config file, an example can be found [here](../conduit-example.toml), or set `CONDUIT_CONFIG=""` and configure Conduit per env vars. 4. Uncomment the `element-web` service if you want to host your own Element Web Client and create a `element_config.json`. 5. Create the files needed by the `well-known` service. - - `./nginx/matrix.conf` (relative to the compose file, you can change this, but then also need to change the volume mapping) - ```nginx - server { - server_name .; - listen 80 default_server; - - location /.well-known/matrix/ { - root /var/www; - default_type application/json; - add_header Access-Control-Allow-Origin *; - } - } - ``` - - `./nginx/www/.well-known/matrix/client` (relative to the compose file, you can change this, but then also need to change the volume mapping) - ```json - { - "m.homeserver": { - "base_url": "https://." - } - } - ``` - - `./nginx/www/.well-known/matrix/server` (relative to the compose file, you can change this, but then also need to change the volume mapping) - ```json - { - "m.server": ".:443" - } - ``` + + - `./nginx/matrix.conf` (relative to the compose file, you can change this, but then also need to change the volume mapping) + + ```nginx + server { + server_name .; + listen 80 default_server; + + location /.well-known/matrix/ { + root /var/www; + default_type application/json; + add_header Access-Control-Allow-Origin *; + } + } + ``` + + - `./nginx/www/.well-known/matrix/client` (relative to the compose file, you can change this, but then also need to change the volume mapping) + ```json + { + "m.homeserver": { + "base_url": "https://." + } + } + ``` + - `./nginx/www/.well-known/matrix/server` (relative to the compose file, you can change this, but then also need to change the volume mapping) + ```json + { + "m.server": ".:443" + } + ``` + 6. Run `docker-compose up -d` 7. Connect to your homeserver with your preferred client and create a user. You should do this immediatly after starting Conduit, because the first created user is the admin. diff --git a/docker/ci-binaries-packaging.Dockerfile b/docker/ci-binaries-packaging.Dockerfile index fb674396..b51df7c1 100644 --- a/docker/ci-binaries-packaging.Dockerfile +++ b/docker/ci-binaries-packaging.Dockerfile @@ -1,3 +1,4 @@ +# syntax=docker/dockerfile:1 # --------------------------------------------------------------------------------------------------------- # This Dockerfile is intended to be built as part of Conduit's CI pipeline. 
# It does not build Conduit in Docker, but just copies the matching build artifact from the build job. @@ -7,20 +8,26 @@ # Credit's for the original Dockerfile: Weasy666. # --------------------------------------------------------------------------------------------------------- -FROM alpine:3.14 +FROM docker.io/alpine:3.14 AS runner -# Install packages needed to run Conduit +# Standard port on which Conduit launches. +# You still need to map the port when using the docker command or docker-compose. +EXPOSE 6167 + +# Note from @jfowl: I would like to remove this in the future and just have the Docker version be configured with envs. +ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" + +# Conduit needs: +# ca-certificates: for https +# libgcc: Apparently this is needed, even if I (@jfowl) don't know exactly why. But whatever, it's not that big. RUN apk add --no-cache \ ca-certificates \ - curl \ libgcc + ARG CREATED ARG VERSION ARG GIT_REF - -ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" - # Labels according to https://github.com/opencontainers/image-spec/blob/master/annotations.md # including a custom label specifying the build command LABEL org.opencontainers.image.created=${CREATED} \ @@ -33,19 +40,24 @@ LABEL org.opencontainers.image.created=${CREATED} \ org.opencontainers.image.revision=${GIT_REF} \ org.opencontainers.image.source="https://gitlab.com/famedly/conduit.git" \ org.opencontainers.image.licenses="Apache-2.0" \ - org.opencontainers.image.documentation="" \ + org.opencontainers.image.documentation="https://gitlab.com/famedly/conduit" \ org.opencontainers.image.ref.name="" -# Standard port on which Conduit launches. You still need to map the port when using the docker command or docker-compose. -EXPOSE 6167 - -# create data folder for database +# Created directory for the database and media files RUN mkdir -p /srv/conduit/.local/share/conduit -# Copy the Conduit binary into the image at the latest possible moment to maximise caching: -COPY ./conduit-x86_64-unknown-linux-musl /srv/conduit/conduit +# Test if Conduit is still alive, uses the same endpoint as Element COPY ./docker/healthcheck.sh /srv/conduit/ +HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh + + +# Depending on the target platform (e.g. "linux/arm/v7", "linux/arm64/v8", or "linux/amd64") +# copy the matching binary into this docker image +ARG TARGETPLATFORM +COPY ./$TARGETPLATFORM /srv/conduit/conduit + +# Improve security: Don't run stuff as root, that does not need to run as root: # Add www-data user and group with UID 82, as used by alpine # https://git.alpinelinux.org/aports/tree/main/nginx/nginx.pre-install RUN set -x ; \ @@ -57,13 +69,11 @@ RUN set -x ; \ RUN chown -cR www-data:www-data /srv/conduit RUN chmod +x /srv/conduit/healthcheck.sh - -# Test if Conduit is still alive, uses the same endpoint as Element -HEALTHCHECK --start-period=5s --interval=60s CMD ./healthcheck.sh - -# Set user to www-data +# Change user to www-data USER www-data # Set container home directory WORKDIR /srv/conduit -# Run Conduit + +# Run Conduit and print backtraces on panics +ENV RUST_BACKTRACE=1 ENTRYPOINT [ "/srv/conduit/conduit" ] diff --git a/docker/healthcheck.sh b/docker/healthcheck.sh index 568838ec..7ca04602 100644 --- a/docker/healthcheck.sh +++ b/docker/healthcheck.sh @@ -7,7 +7,7 @@ fi # The actual health check. # We try to first get a response on HTTP and when that fails on HTTPS and when that fails, we exit with code 1. -# TODO: Change this to a single curl call. 
Do we have a config value that we can check for that? -curl --fail -s "http://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \ - curl -k --fail -s "https://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \ +# TODO: Change this to a single wget call. Do we have a config value that we can check for that? +wget --no-verbose --tries=1 --spider "http://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \ + wget --no-verbose --tries=1 --spider "https://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \ exit 1 From 9bfc7b34b6d72def7da19ccd1decbe1ac2c7e6db Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Thu, 25 Nov 2021 22:36:44 +0000 Subject: [PATCH 013/445] Fixes for !225 --- .gitlab-ci.yml | 87 +++++++++---------------- DEPLOY.md | 25 ++++--- Dockerfile | 4 +- docker/ci-binaries-packaging.Dockerfile | 2 +- 4 files changed, 45 insertions(+), 73 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 6f2e0fe3..a8d43842 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -142,8 +142,12 @@ build:debug:cargo:x86_64-unknown-linux-musl: DOCKER_HOST: tcp://docker:2375/ DOCKER_TLS_CERTDIR: "" DOCKER_DRIVER: overlay2 - PLATFORMS: "linux/arm/v6,linux/arm/v7,linux/arm64/v8,linux/amd64" + PLATFORMS: "linux/arm/v6,linux/arm/v7,linux/arm64,linux/amd64" DOCKER_FILE: "docker/ci-binaries-packaging.Dockerfile" + cache: + paths: + - docker_cache + key: "$CI_JOB_NAME" before_script: - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY # Only log in to Dockerhub if the credentials are given: @@ -156,80 +160,51 @@ build:debug:cargo:x86_64-unknown-linux-musl: - mkdir -p linux/ && mv ./conduit-x86_64-unknown-linux-musl linux/amd64 - mkdir -p linux/arm/ && mv ./conduit-arm-unknown-linux-musleabihf linux/arm/v6 - mkdir -p linux/arm/ && mv ./conduit-armv7-unknown-linux-musleabihf linux/arm/v7 - - mkdir -p linux/arm64/ && mv ./conduit-aarch64-unknown-linux-musl linux/arm64/v8 - # Actually create multiarch image: + - mv ./conduit-aarch64-unknown-linux-musl linux/arm64 + - 'export CREATED=$(date -u +''%Y-%m-%dT%H:%M:%SZ'') && echo "Docker image creation date: $CREATED"' + # Build and push image: - > docker buildx build --pull --push - --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') + --cache-from=type=local,src=$CI_PROJECT_DIR/docker_cache + --cache-to=type=local,dest=$CI_PROJECT_DIR/docker_cache + --build-arg CREATED=$CREATED --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) --build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA" --platform "$PLATFORMS" - --tag "$GL_IMAGE_TAG" - --tag "$GL_IMAGE_TAG-commit-$CI_COMMIT_SHORT_SHA" + --tag "$TAG" + --tag "$TAG-alpine" + --tag "$TAG-commit-$CI_COMMIT_SHORT_SHA" --file "$DOCKER_FILE" . 
- # Only try to push to docker hub, if auth data for dockerhub exists: - - if [ -n "${DOCKER_HUB}" ]; then docker push "$DH_IMAGE_TAG"; fi - - if [ -n "${DOCKER_HUB}" ]; then docker push "$DH_IMAGE_TAG-commit-$CI_COMMIT_SHORT_SHA"; fi -build:docker:next: +docker:next:gitlab: extends: .docker-shared-settings rules: - if: '$CI_COMMIT_BRANCH == "next"' variables: - GL_IMAGE_TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:next" - DH_IMAGE_TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next" + TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:next" -build:docker:master: +docker:next:dockerhub: + extends: .docker-shared-settings + rules: + - if: '$CI_COMMIT_BRANCH == "next" && $DOCKER_HUB' + variables: + TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next" + +docker:master:gitlab: extends: .docker-shared-settings rules: - if: '$CI_COMMIT_BRANCH == "master"' variables: - GL_IMAGE_TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:latest" - DH_IMAGE_TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:latest" + TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:latest" -## Build a docker image by packaging up the x86_64-unknown-linux-musl binary into an alpine image -#.docker-shared-settings: -# stage: "build docker image" -# needs: [] -# interruptible: true -# image: -# name: "gcr.io/kaniko-project/executor:debug" -# entrypoint: [""] -# tags: ["docker"] -# variables: -# # Configure Kaniko Caching: https://cloud.google.com/build/docs/kaniko-cache -# KANIKO_CACHE_ARGS: "--cache=true --cache-copy-layers=true --cache-ttl=120h --cache-repo $CI_REGISTRY_IMAGE/kaniko-ci-cache" -# before_script: -# - "mkdir -p /kaniko/.docker" -# - 'echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"},\"$DOCKER_HUB\":{\"username\":\"$DOCKER_HUB_USER\",\"password\":\"$DOCKER_HUB_PASSWORD\"}}}" > /kaniko/.docker/config.json' -# -# -#build:docker:next: -# extends: .docker-shared-settings -# needs: -# - "build:release:cargo:x86_64-unknown-linux-musl" -# script: -# - > -# /kaniko/executor -# $KANIKO_CACHE_ARGS -# --force -# --context $CI_PROJECT_DIR -# --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') -# --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) -# --build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA" -# --dockerfile "$CI_PROJECT_DIR/docker/ci-binaries-packaging.Dockerfile" -# --destination "$CI_REGISTRY_IMAGE/conduit:next" -# --destination "$CI_REGISTRY_IMAGE/conduit:next-alpine" -# --destination "$CI_REGISTRY_IMAGE/conduit:commit-$CI_COMMIT_SHORT_SHA" -# --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next" -# --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next-alpine" -# --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:commit-$CI_COMMIT_SHORT_SHA" -# rules: -# - if: '$CI_COMMIT_BRANCH == "next"' -# -# +docker:master:dockerhub: + extends: .docker-shared-settings + rules: + - if: '$CI_COMMIT_BRANCH == "master" && $DOCKER_HUB' + variables: + TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:latest" # --------------------------------------------------------------------- # # Run tests # diff --git a/DEPLOY.md b/DEPLOY.md index 6470c902..0058b93d 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -12,20 +12,17 @@ only offer Linux binaries. You may simply download the binary that fits your machine. Run `uname -m` to see what you need. 
Now copy the right url: -| CPU Architecture | Download link | -| ------------------------------------------- | ----------------------- | -| x84_64 / amd64 (Most servers and computers) | [Download][x84_64-musl] | -| armv6 | [Download][armv6-musl] | -| armv7 (e.g. Raspberry Pi by default) | [Download][armv7-musl] | -| armv8 / aarch64 | [Download][armv8-musl] | - -[x84_64-musl]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-musl?job=build:release:cargo:x86_64-unknown-linux-musl - -[armv6-musl]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-arm-unknown-linux-musleabihf?job=build:release:cargo:arm-unknown-linux-musleabihf - -[armv7-musl]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-armv7-unknown-linux-musleabihf?job=build:release:cargo:armv7-unknown-linux-musleabihf - -[armv8-musl]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-aarch64-unknown-linux-musl?job=build:release:cargo:aarch64-unknown-linux-musl +| CPU Architecture | Download stable version | +| ------------------------------------------- | ------------------------------ | +| x84_64 / amd64 (Most servers and computers) | [Download][x84_64-musl-master] | +| armv6 | [Download][armv6-musl-master] | +| armv7 (e.g. Raspberry Pi by default) | [Download][armv7-musl-master] | +| armv8 / aarch64 | [Download][armv8-musl-master] | + +[x84_64-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-musl?job=build:release:cargo:x86_64-unknown-linux-musl +[armv6-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-arm-unknown-linux-musleabihf?job=build:release:cargo:arm-unknown-linux-musleabihf +[armv7-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-armv7-unknown-linux-musleabihf?job=build:release:cargo:armv7-unknown-linux-musleabihf +[armv8-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-aarch64-unknown-linux-musl?job=build:release:cargo:aarch64-unknown-linux-musl ```bash $ sudo wget -O /usr/local/bin/matrix-conduit diff --git a/Dockerfile b/Dockerfile index d137353a..6a9ea732 100644 --- a/Dockerfile +++ b/Dockerfile @@ -54,11 +54,11 @@ RUN apk add --no-cache \ RUN mkdir -p /srv/conduit/.local/share/conduit # Test if Conduit is still alive, uses the same endpoint as Element -COPY ./docker/healthcheck.sh /srv/conduit/ +COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh # Copy over the actual Conduit binary from the builder stage -COPY --from=builder /usr/src/conduit/target/release/conduit /srv/conduit/ +COPY --from=builder /usr/src/conduit/target/release/conduit /srv/conduit/conduit # Improve security: Don't run stuff as root, that does not need to run as root: # Add www-data user and group with UID 82, as used by alpine diff --git a/docker/ci-binaries-packaging.Dockerfile b/docker/ci-binaries-packaging.Dockerfile index b51df7c1..4ab874dd 100644 --- a/docker/ci-binaries-packaging.Dockerfile +++ b/docker/ci-binaries-packaging.Dockerfile @@ -47,7 +47,7 @@ LABEL org.opencontainers.image.created=${CREATED} \ RUN mkdir -p /srv/conduit/.local/share/conduit # Test if Conduit is still alive, uses the same endpoint as Element -COPY ./docker/healthcheck.sh /srv/conduit/ +COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh From 
f91216dd3ce5f842c1c441d0bae5a852e689bccf Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Tue, 14 Dec 2021 11:16:02 +0100 Subject: [PATCH 014/445] CI: Optionally use sccache for compilation This moves compiler caching for incremental builds away from GitLab caching the whole target/ folder to caching each code unit in S3. This alleviates the need to zip and unzip and just caches on the fly. This feature is optional and gated behind the SCCACHE_BIN_URL env variable --- .gitlab-ci.yml | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index a8d43842..664b5ea3 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -26,16 +26,19 @@ variables: cache: paths: - cargohome - - target/ - key: "build_cache--$TARGET--$CI_COMMIT_BRANCH--release" + key: "build_cache--$TARGET--$CI_COMMIT_BRANCH" variables: CARGO_PROFILE_RELEASE_LTO: "true" CARGO_PROFILE_RELEASE_CODEGEN_UNITS: "1" + CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow + CARGO_HOME: $CI_PROJECT_DIR/cargohome before_script: - 'echo "Building for target $TARGET"' - - 'mkdir -p cargohome && CARGOHOME="cargohome"' + - "mkdir -p $CARGO_HOME" - "rustc --version && cargo --version && rustup show" # Print version info for debugging - "rustup target add $TARGET" + # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: + - if [ -n "${SCCACHE_BIN_URL}" ]; then curl $SCCACHE_BIN_URL --output /sccache && chmod +x /sccache && export RUSTC_WRAPPER=/sccache; fi script: - time cargo build --target $TARGET --release - 'cp "target/$TARGET/release/conduit" "conduit-$TARGET"' @@ -216,20 +219,20 @@ test:cargo: image: "rust:latest" tags: ["docker"] variables: - CARGO_HOME: "cargohome" + CARGO_HOME: "$CI_PROJECT_DIR/cargohome" + CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow cache: paths: - - target - cargohome - key: test_cache + key: "test_cache--$CI_COMMIT_BRANCH" interruptible: true before_script: - - mkdir -p $CARGO_HOME && echo "using $CARGO_HOME to cache cargo deps" + - mkdir -p $CARGO_HOME - apt-get update -yqq - apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config wget - rustup component add clippy rustfmt - - wget "https://faulty-storage.de/gitlab-report" - - chmod +x ./gitlab-report + # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: + - if [ -n "${SCCACHE_BIN_URL}" ]; then curl $SCCACHE_BIN_URL --output /sccache && chmod +x /sccache && export RUSTC_WRAPPER=/sccache; fi script: - rustc --version && cargo --version # Print version info for debugging - cargo fmt --all -- --check From adb518fa0df35ba85c2ff1c96a539dda085f8991 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Tue, 14 Dec 2021 11:16:40 +0100 Subject: [PATCH 015/445] CI: Use curl instead of wget The Rust Docker image already comes with curl, so there is no need to install wget.
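As an aside on the sccache setup these two CI patches touch: the pipeline only downloads the sccache binary and points RUSTC_WRAPPER at it; the S3 side of sccache is configured entirely through environment variables that are not part of these patches. A minimal sketch of what such GitLab CI variables could look like, with the bucket name, endpoint, and region as placeholder assumptions:

```yaml
# Hypothetical CI variables for sccache's S3 backend -- not part of these patches.
# sccache reads its S3 configuration from the environment when it wraps rustc.
variables:
  SCCACHE_BUCKET: "conduit-sccache"    # assumed bucket name
  SCCACHE_ENDPOINT: "s3.example.com"   # assumed S3-compatible endpoint
  SCCACHE_REGION: "us-east-1"          # assumed region
  # AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY would normally be supplied as
  # protected CI/CD variables, and SCCACHE_BIN_URL points at a statically
  # linked sccache binary that the before_script downloads.
```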
--- .gitlab-ci.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 664b5ea3..1dedd8ff 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -229,8 +229,9 @@ test:cargo: before_script: - mkdir -p $CARGO_HOME - apt-get update -yqq - - apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config wget + - apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config - rustup component add clippy rustfmt + - curl "https://faulty-storage.de/gitlab-report" --output ./gitlab-report && chmod +x ./gitlab-report # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: - if [ -n "${SCCACHE_BIN_URL}" ]; then curl $SCCACHE_BIN_URL --output /sccache && chmod +x /sccache && export RUSTC_WRAPPER=/sccache; fi script: From 339a26f56c84da242d753a1894589f5923b0fd7e Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Wed, 15 Dec 2021 10:14:20 +0000 Subject: [PATCH 016/445] Update docker images --- Dockerfile | 7 +++--- docker/ci-binaries-packaging.Dockerfile | 31 ++++++++++++------------- 2 files changed, 18 insertions(+), 20 deletions(-) diff --git a/Dockerfile b/Dockerfile index 6a9ea732..5812fdf9 100644 --- a/Dockerfile +++ b/Dockerfile @@ -32,7 +32,7 @@ RUN touch src/main.rs && touch src/lib.rs && cargo build --release # --------------------------------------------------------------------------------------------------------------- # Stuff below this line actually ends up in the resulting docker image # --------------------------------------------------------------------------------------------------------------- -FROM docker.io/alpine:3.14 AS runner +FROM docker.io/alpine:3.15.0 AS runner # Standard port on which Conduit launches. # You still need to map the port when using the docker command or docker-compose. @@ -45,9 +45,8 @@ ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" # ca-certificates: for https # libgcc: Apparently this is needed, even if I (@jfowl) don't know exactly why. But whatever, it's not that big. RUN apk add --no-cache \ - ca-certificates \ - curl \ - libgcc + ca-certificates \ + libgcc # Created directory for the database and media files diff --git a/docker/ci-binaries-packaging.Dockerfile b/docker/ci-binaries-packaging.Dockerfile index 4ab874dd..f4603105 100644 --- a/docker/ci-binaries-packaging.Dockerfile +++ b/docker/ci-binaries-packaging.Dockerfile @@ -1,14 +1,13 @@ # syntax=docker/dockerfile:1 # --------------------------------------------------------------------------------------------------------- # This Dockerfile is intended to be built as part of Conduit's CI pipeline. -# It does not build Conduit in Docker, but just copies the matching build artifact from the build job. -# As a consequence, this is not a multiarch capable image. It always expects and packages a x86_64 binary. +# It does not build Conduit in Docker, but just copies the matching build artifact from the build jobs. # # It is mostly based on the normal Conduit Dockerfile, but adjusted in a few places to maximise caching. # Credit's for the original Dockerfile: Weasy666. # --------------------------------------------------------------------------------------------------------- -FROM docker.io/alpine:3.14 AS runner +FROM docker.io/alpine:3.15.0 AS runner # Standard port on which Conduit launches. # You still need to map the port when using the docker command or docker-compose. 
@@ -21,8 +20,8 @@ ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" # ca-certificates: for https # libgcc: Apparently this is needed, even if I (@jfowl) don't know exactly why. But whatever, it's not that big. RUN apk add --no-cache \ - ca-certificates \ - libgcc + ca-certificates \ + libgcc ARG CREATED @@ -31,17 +30,17 @@ ARG GIT_REF # Labels according to https://github.com/opencontainers/image-spec/blob/master/annotations.md # including a custom label specifying the build command LABEL org.opencontainers.image.created=${CREATED} \ - org.opencontainers.image.authors="Conduit Contributors" \ - org.opencontainers.image.title="Conduit" \ - org.opencontainers.image.version=${VERSION} \ - org.opencontainers.image.vendor="Conduit Contributors" \ - org.opencontainers.image.description="A Matrix homeserver written in Rust" \ - org.opencontainers.image.url="https://conduit.rs/" \ - org.opencontainers.image.revision=${GIT_REF} \ - org.opencontainers.image.source="https://gitlab.com/famedly/conduit.git" \ - org.opencontainers.image.licenses="Apache-2.0" \ - org.opencontainers.image.documentation="https://gitlab.com/famedly/conduit" \ - org.opencontainers.image.ref.name="" + org.opencontainers.image.authors="Conduit Contributors" \ + org.opencontainers.image.title="Conduit" \ + org.opencontainers.image.version=${VERSION} \ + org.opencontainers.image.vendor="Conduit Contributors" \ + org.opencontainers.image.description="A Matrix homeserver written in Rust" \ + org.opencontainers.image.url="https://conduit.rs/" \ + org.opencontainers.image.revision=${GIT_REF} \ + org.opencontainers.image.source="https://gitlab.com/famedly/conduit.git" \ + org.opencontainers.image.licenses="Apache-2.0" \ + org.opencontainers.image.documentation="https://gitlab.com/famedly/conduit" \ + org.opencontainers.image.ref.name="" # Created directory for the database and media files RUN mkdir -p /srv/conduit/.local/share/conduit From 1fc616320a2aa8ab02edbfca7620773f69abf797 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Fri, 26 Nov 2021 19:28:47 +0100 Subject: [PATCH 017/445] Use struct init shorthand --- src/client_server/voip.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client_server/voip.rs b/src/client_server/voip.rs index 9c3b20d4..c9a98d9f 100644 --- a/src/client_server/voip.rs +++ b/src/client_server/voip.rs @@ -49,8 +49,8 @@ pub async fn turn_server_route( }; Ok(get_turn_server_info::Response { - username: username, - password: password, + username, + password, uris: db.globals.turn_uris().to_vec(), ttl: Duration::from_secs(db.globals.turn_ttl()), } From 892a0525f20a0a815e7d12f45a7c5a623de7844d Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Fri, 26 Nov 2021 20:36:40 +0100 Subject: [PATCH 018/445] Upgrade Ruma --- Cargo.lock | 43 ++++------ Cargo.toml | 2 +- src/client_server/account.rs | 15 ++-- src/client_server/capabilities.rs | 6 +- src/client_server/directory.rs | 2 +- src/client_server/keys.rs | 22 ++--- src/client_server/membership.rs | 41 ++++----- src/client_server/message.rs | 2 +- src/client_server/report.rs | 6 +- src/client_server/room.rs | 26 +++--- src/client_server/state.rs | 4 +- src/client_server/sync.rs | 27 +++--- src/client_server/voip.rs | 2 +- src/database.rs | 11 +-- src/database/admin.rs | 17 ++-- src/database/globals.rs | 14 +-- src/database/key_backups.rs | 6 +- src/database/pusher.rs | 4 +- src/database/rooms.rs | 138 +++++++++++++++--------------- src/database/rooms/edus.rs | 21 +++-- src/database/sending.rs | 4 +- src/database/users.rs | 35 ++++---- 
src/pdu.rs | 28 +++--- src/ruma_wrapper.rs | 6 +- src/server_server.rs | 119 ++++++++++++-------------- 25 files changed, 297 insertions(+), 304 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9682f2fe..8b25b478 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1516,12 +1516,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "paste" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acbf547ad0c65e31259204bd90935776d1c693cec2f4ff7abb7a1bbbd40dfe58" - [[package]] name = "pear" version = "0.2.3" @@ -1990,7 +1984,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ "assign", "js_int", @@ -2011,7 +2005,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ "bytes", "http", @@ -2027,7 +2021,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2038,7 +2032,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ "ruma-api", "ruma-common", @@ -2052,7 +2046,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.12.3" -source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ "assign", "bytes", @@ -2072,7 +2066,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ "indexmap", "js_int", @@ -2087,7 +2081,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ "indoc", "js_int", @@ -2103,7 +2097,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = 
"git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2114,7 +2108,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ "js_int", "ruma-api", @@ -2129,9 +2123,8 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ - "paste", "percent-encoding", "rand 0.8.4", "ruma-identifiers-macros", @@ -2144,7 +2137,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2154,7 +2147,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ "thiserror", ] @@ -2162,7 +2155,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ "js_int", "ruma-api", @@ -2175,7 +2168,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ "js_int", "ruma-api", @@ -2190,7 +2183,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ "bytes", "form_urlencoded", @@ -2204,7 +2197,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2215,7 +2208,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.9.0" -source 
= "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2232,7 +2225,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.4.1" -source = "git+https://github.com/ruma/ruma?rev=e7f01ca55a1eff437bad754bf0554cc09f44ec2a#e7f01ca55a1eff437bad754bf0554cc09f44ec2a" +source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" dependencies = [ "itertools 0.10.1", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 91c7e259..b24afb5c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle request # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "e7f01ca55a1eff437bad754bf0554cc09f44ec2a", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "bba7d624425da2c65a834bbd0e633b7577488cdf", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 4b3ad0d4..d7c2f63e 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -11,10 +11,9 @@ use ruma::{ error::ErrorKind, r0::{ account::{ - change_password, deactivate, get_username_availability, register, whoami, - ThirdPartyIdRemovalStatus, + change_password, deactivate, get_3pids, get_username_availability, register, + whoami, ThirdPartyIdRemovalStatus, }, - contact::get_contacts, uiaa::{AuthFlow, AuthType, UiaaInfo}, }, }, @@ -282,7 +281,7 @@ pub async fn register_route( let mut content = RoomCreateEventContent::new(conduit_user.clone()); content.federate = true; content.predecessor = None; - content.room_version = RoomVersionId::Version6; + content.room_version = RoomVersionId::V6; // 1. 
The room create event db.rooms.build_and_append_pdu( @@ -433,7 +432,7 @@ pub async fn register_route( )?; // Room alias - let alias: RoomAliasId = format!("#admins:{}", db.globals.server_name()) + let alias: Box = format!("#admins:{}", db.globals.server_name()) .try_into() .expect("#admins:server_name is a valid alias name"); @@ -757,9 +756,9 @@ pub async fn deactivate_route( get("/_matrix/client/r0/account/3pid", data = "") )] pub async fn third_party_route( - body: Ruma, -) -> ConduitResult { + body: Ruma, +) -> ConduitResult { let _sender_user = body.sender_user.as_ref().expect("user is authenticated"); - Ok(get_contacts::Response::new(Vec::new()).into()) + Ok(get_3pids::Response::new(Vec::new()).into()) } diff --git a/src/client_server/capabilities.rs b/src/client_server/capabilities.rs index f86b23b5..c69b7cb2 100644 --- a/src/client_server/capabilities.rs +++ b/src/client_server/capabilities.rs @@ -22,12 +22,12 @@ pub async fn get_capabilities_route( _body: Ruma, ) -> ConduitResult { let mut available = BTreeMap::new(); - available.insert(RoomVersionId::Version5, RoomVersionStability::Stable); - available.insert(RoomVersionId::Version6, RoomVersionStability::Stable); + available.insert(RoomVersionId::V5, RoomVersionStability::Stable); + available.insert(RoomVersionId::V6, RoomVersionStability::Stable); let mut capabilities = Capabilities::new(); capabilities.room_versions = RoomVersionsCapability { - default: RoomVersionId::Version6, + default: RoomVersionId::V6, available, }; diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 490f7524..5a1bc494 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -167,7 +167,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( other_server, federation::directory::get_public_rooms_filtered::v1::Request { limit, - since: since.as_deref(), + since, filter: Filter { generic_search_term: filter.generic_search_term.as_deref(), }, diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index a44f5e9c..08ea6e76 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -316,7 +316,7 @@ pub async fn get_key_changes_route( pub(crate) async fn get_keys_helper bool>( sender_user: Option<&UserId>, - device_keys_input: &BTreeMap>>, + device_keys_input: &BTreeMap, Vec>>, allowed_signatures: F, db: &Database, ) -> Result { @@ -328,6 +328,8 @@ pub(crate) async fn get_keys_helper bool>( let mut get_over_federation = HashMap::new(); for (user_id, device_ids) in device_keys_input { + let user_id: &UserId = &**user_id; + if user_id.server_name() != db.globals.server_name() { get_over_federation .entry(user_id.server_name()) @@ -355,11 +357,11 @@ pub(crate) async fn get_keys_helper bool>( container.insert(device_id, keys); } } - device_keys.insert(user_id.clone(), container); + device_keys.insert(user_id.to_owned(), container); } else { for device_id in device_ids { let mut container = BTreeMap::new(); - if let Some(mut keys) = db.users.get_device_keys(&user_id.clone(), device_id)? { + if let Some(mut keys) = db.users.get_device_keys(user_id, device_id)? 
{ let metadata = db.users.get_device_metadata(user_id, device_id)?.ok_or( Error::BadRequest( ErrorKind::InvalidParam, @@ -371,24 +373,24 @@ pub(crate) async fn get_keys_helper bool>( device_display_name: metadata.display_name, }; - container.insert(device_id.clone(), keys); + container.insert(device_id.to_owned(), keys); } - device_keys.insert(user_id.clone(), container); + device_keys.insert(user_id.to_owned(), container); } } if let Some(master_key) = db.users.get_master_key(user_id, &allowed_signatures)? { - master_keys.insert(user_id.clone(), master_key); + master_keys.insert(user_id.to_owned(), master_key); } if let Some(self_signing_key) = db .users .get_self_signing_key(user_id, &allowed_signatures)? { - self_signing_keys.insert(user_id.clone(), self_signing_key); + self_signing_keys.insert(user_id.to_owned(), self_signing_key); } if Some(user_id) == sender_user { if let Some(user_signing_key) = db.users.get_user_signing_key(user_id)? { - user_signing_keys.insert(user_id.clone(), user_signing_key); + user_signing_keys.insert(user_id.to_owned(), user_signing_key); } } } @@ -400,7 +402,7 @@ pub(crate) async fn get_keys_helper bool>( .map(|(server, vec)| async move { let mut device_keys_input_fed = BTreeMap::new(); for (user_id, keys) in vec { - device_keys_input_fed.insert(user_id.clone(), keys.clone()); + device_keys_input_fed.insert(user_id.to_owned(), keys.clone()); } ( server, @@ -440,7 +442,7 @@ pub(crate) async fn get_keys_helper bool>( } pub(crate) async fn claim_keys_helper( - one_time_keys_input: &BTreeMap, DeviceKeyAlgorithm>>, + one_time_keys_input: &BTreeMap, BTreeMap, DeviceKeyAlgorithm>>, db: &Database, ) -> Result { let mut one_time_keys = BTreeMap::new(); diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index ec685ec9..f65287da 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -64,7 +64,7 @@ pub async fn join_room_by_id_route( .filter_map(|event| serde_json::from_str(event.json().get()).ok()) .filter_map(|event: serde_json::Value| event.get("sender").cloned()) .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::try_from(sender).ok()) + .filter_map(|sender| Box::::try_from(sender).ok()) .map(|user| user.server_name().to_owned()) .collect(); @@ -72,7 +72,7 @@ pub async fn join_room_by_id_route( let ret = join_room_by_id_helper( &db, - body.sender_user.as_ref(), + body.sender_user.as_deref(), &body.room_id, &servers, body.third_party_signed.as_ref(), @@ -101,7 +101,7 @@ pub async fn join_room_by_id_or_alias_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let (servers, room_id) = match RoomId::try_from(body.room_id_or_alias.clone()) { + let (servers, room_id) = match Box::::try_from(body.room_id_or_alias.clone()) { Ok(room_id) => { let mut servers: HashSet<_> = db .rooms @@ -111,7 +111,7 @@ pub async fn join_room_by_id_or_alias_route( .filter_map(|event| serde_json::from_str(event.json().get()).ok()) .filter_map(|event: serde_json::Value| event.get("sender").cloned()) .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::try_from(sender).ok()) + .filter_map(|sender| Box::::try_from(sender).ok()) .map(|user| user.server_name().to_owned()) .collect(); @@ -127,7 +127,7 @@ pub async fn join_room_by_id_or_alias_route( let join_room_response = join_room_by_id_helper( &db, - body.sender_user.as_ref(), + body.sender_user.as_deref(), &room_id, &servers, 
body.third_party_signed.as_ref(), @@ -531,7 +531,7 @@ async fn join_room_by_id_helper( .roomid_mutex_state .write() .unwrap() - .entry(room_id.clone()) + .entry(room_id.to_owned()) .or_default(), ); let state_lock = mutex_state.lock().await; @@ -551,7 +551,7 @@ async fn join_room_by_id_helper( federation::membership::create_join_event_template::v1::Request { room_id, user_id: sender_user, - ver: &[RoomVersionId::Version5, RoomVersionId::Version6], + ver: &[RoomVersionId::V5, RoomVersionId::V6], }, ) .await; @@ -567,8 +567,7 @@ async fn join_room_by_id_helper( let room_version = match make_join_response.room_version { Some(room_version) - if room_version == RoomVersionId::Version5 - || room_version == RoomVersionId::Version6 => + if room_version == RoomVersionId::V5 || room_version == RoomVersionId::V6 => { room_version } @@ -620,7 +619,7 @@ async fn join_room_by_id_helper( .expect("event is valid, we just created it"); // Generate event id - let event_id = EventId::try_from(&*format!( + let event_id = Box::::try_from(&*format!( "${}", ruma::signatures::reference_hash(&join_event_stub, &room_version) .expect("ruma can calculate reference hashes") @@ -776,7 +775,7 @@ async fn join_room_by_id_helper( db.flush()?; - Ok(join_room_by_id::Response::new(room_id.clone()).into()) + Ok(join_room_by_id::Response::new(room_id.to_owned()).into()) } fn validate_and_add_event_id( @@ -784,12 +783,12 @@ fn validate_and_add_event_id( room_version: &RoomVersionId, pub_key_map: &RwLock>>, db: &Database, -) -> Result<(EventId, CanonicalJsonObject)> { +) -> Result<(Box, CanonicalJsonObject)> { let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); Error::BadServerResponse("Invalid PDU in server response") })?; - let event_id = EventId::try_from(&*format!( + let event_id = Box::::try_from(&*format!( "${}", ruma::signatures::reference_hash(&value, room_version) .expect("ruma can calculate reference hashes") @@ -856,7 +855,7 @@ pub(crate) async fn invite_helper<'a>( .roomid_mutex_state .write() .unwrap() - .entry(room_id.clone()) + .entry(room_id.to_owned()) .or_default(), ); let state_lock = mutex_state.lock().await; @@ -892,9 +891,7 @@ pub(crate) async fn invite_helper<'a>( // If there was no create event yet, assume we are creating a version 6 room right now let room_version_id = create_event_content - .map_or(RoomVersionId::Version6, |create_event| { - create_event.room_version - }); + .map_or(RoomVersionId::V6, |create_event| create_event.room_version); let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); @@ -939,9 +936,9 @@ pub(crate) async fn invite_helper<'a>( } let pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater"), - room_id: room_id.clone(), - sender: sender_user.clone(), + event_id: ruma::event_id!("$thiswillbefilledinlater").to_owned(), + room_id: room_id.to_owned(), + sender: sender_user.to_owned(), origin_server_ts: utils::millis_since_unix_epoch() .try_into() .expect("time is valid"), @@ -1014,7 +1011,7 @@ pub(crate) async fn invite_helper<'a>( }; // Generate event id - let expected_event_id = EventId::try_from(&*format!( + let expected_event_id = Box::::try_from(&*format!( "${}", ruma::signatures::reference_hash(&pdu_json, &room_version_id) .expect("ruma can calculate reference hashes") @@ -1100,7 +1097,7 @@ pub(crate) async fn invite_helper<'a>( .roomid_mutex_state .write() .unwrap() - .entry(room_id.clone()) + .entry(room_id.to_owned()) 
.or_default(), ); let state_lock = mutex_state.lock().await; diff --git a/src/client_server/message.rs b/src/client_server/message.rs index abbbe8ea..0d006101 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -67,7 +67,7 @@ pub async fn send_message_event_route( )); } - let event_id = EventId::try_from( + let event_id = Box::::try_from( utils::string_from_bytes(&response) .map_err(|_| Error::bad_database("Invalid txnid bytes in database."))?, ) diff --git a/src/client_server/report.rs b/src/client_server/report.rs index 3dcb4d1c..2e6527d4 100644 --- a/src/client_server/report.rs +++ b/src/client_server/report.rs @@ -57,8 +57,7 @@ pub async fn report_event_route( Report Score: {}\n\ Report Reason: {}", sender_user, pdu.event_id, pdu.room_id, pdu.sender, body.score, body.reason - ) - .to_owned(), + ), format!( "
Report received from: {0}\
  • Event Info
    • Event ID: {1}\ @@ -72,8 +71,7 @@ pub async fn report_event_route( pdu.sender, body.score, RawStr::new(&body.reason).html_escape() - ) - .to_owned(), + ), ), )); diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 47c7ee6f..97b3f482 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -88,14 +88,17 @@ pub async fn create_room_route( )); } - let alias: Option = + let alias: Option> = body.room_alias_name .as_ref() .map_or(Ok(None), |localpart| { // TODO: Check for invalid characters and maximum length - let alias = - RoomAliasId::try_from(format!("#{}:{}", localpart, db.globals.server_name())) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; + let alias = Box::::try_from(format!( + "#{}:{}", + localpart, + db.globals.server_name(), + )) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; if db.rooms.id_from_alias(&alias)?.is_some() { Err(Error::BadRequest( @@ -109,7 +112,7 @@ pub async fn create_room_route( let room_version = match body.room_version.clone() { Some(room_version) => { - if room_version == RoomVersionId::Version5 || room_version == RoomVersionId::Version6 { + if room_version == RoomVersionId::V5 || room_version == RoomVersionId::V6 { room_version } else { return Err(Error::BadRequest( @@ -118,7 +121,7 @@ pub async fn create_room_route( )); } } - None => RoomVersionId::Version6, + None => RoomVersionId::V6, }; let content = match &body.creation_content { @@ -164,7 +167,7 @@ pub async fn create_room_route( .get(), ); - if let Err(_) = de_result { + if de_result.is_err() { return Err(Error::BadRequest( ErrorKind::BadJson, "Invalid creation content", @@ -269,7 +272,7 @@ pub async fn create_room_route( PduBuilder { event_type: EventType::RoomCanonicalAlias, content: to_raw_value(&RoomCanonicalAliasEventContent { - alias: Some(room_alias_id.clone()), + alias: Some(room_alias_id.to_owned()), alt_aliases: vec![], }) .expect("We checked that alias earlier, it must be fine"), @@ -505,10 +508,7 @@ pub async fn upgrade_room_route( ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !matches!( - body.new_version, - RoomVersionId::Version5 | RoomVersionId::Version6 - ) { + if !matches!(body.new_version, RoomVersionId::V5 | RoomVersionId::V6) { return Err(Error::BadRequest( ErrorKind::UnsupportedRoomVersion, "This server does not support that room version.", @@ -605,7 +605,7 @@ pub async fn upgrade_room_route( .get(), ); - if let Err(_) = de_result { + if de_result.is_err() { return Err(Error::BadRequest( ErrorKind::BadJson, "Error forming creation event", diff --git a/src/client_server/state.rs b/src/client_server/state.rs index 307bccab..0ba20620 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -267,7 +267,7 @@ async fn send_state_event_for_key_helper( event_type: EventType, json: &Raw, state_key: String, -) -> Result { +) -> Result> { let sender_user = sender; // TODO: Review this check, error if event is unparsable, use event type, allow alias if it @@ -303,7 +303,7 @@ async fn send_state_event_for_key_helper( .roomid_mutex_state .write() .unwrap() - .entry(room_id.clone()) + .entry(room_id.to_owned()) .or_default(), ); let state_lock = mutex_state.lock().await; diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 65c07bc9..1060d917 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -54,15 +54,17 @@ use rocket::{get, tokio}; /// `since` will be cached #[cfg_attr( 
feature = "conduit_bin", - get("/_matrix/client/r0/sync", data = "") + get("/_matrix/client/r0/sync", data = "") )] -#[tracing::instrument(skip(db, body))] +#[tracing::instrument(skip(db, req))] pub async fn sync_events_route( db: DatabaseGuard, - body: Ruma>, + req: Ruma>, ) -> Result, RumaResponse> { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + let body = req.body; + + let sender_user = req.sender_user.expect("user is authenticated"); + let sender_device = req.sender_device.expect("user is authenticated"); let arc_db = Arc::new(db); @@ -132,7 +134,7 @@ pub async fn sync_events_route( async fn sync_helper_wrapper( db: Arc, - sender_user: UserId, + sender_user: Box, sender_device: Box, since: Option, full_state: bool, @@ -176,7 +178,7 @@ async fn sync_helper_wrapper( async fn sync_helper( db: Arc, - sender_user: UserId, + sender_user: Box, sender_device: Box, since: Option, full_state: bool, @@ -296,9 +298,10 @@ async fn sync_helper( })?; if let Some(state_key) = &pdu.state_key { - let user_id = UserId::try_from(state_key.clone()).map_err(|_| { - Error::bad_database("Invalid UserId in member PDU.") - })?; + let user_id = + Box::::try_from(state_key.clone()).map_err(|_| { + Error::bad_database("Invalid UserId in member PDU.") + })?; // The membership was and still is invite or join if matches!( @@ -424,7 +427,7 @@ async fn sync_helper( } if let Some(state_key) = &state_event.state_key { - let user_id = UserId::try_from(state_key.clone()) + let user_id = Box::::try_from(state_key.clone()) .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; if user_id == sender_user { @@ -793,7 +796,7 @@ fn share_encrypted_room( ) -> Result { Ok(db .rooms - .get_shared_rooms(vec![sender_user.clone(), user_id.clone()])? + .get_shared_rooms(vec![sender_user.to_owned(), user_id.to_owned()])? .filter_map(|r| r.ok()) .filter(|room_id| room_id != ignore_room) .filter_map(|other_room_id| { diff --git a/src/client_server/voip.rs b/src/client_server/voip.rs index c9a98d9f..66a85f0f 100644 --- a/src/client_server/voip.rs +++ b/src/client_server/voip.rs @@ -26,7 +26,7 @@ pub async fn turn_server_route( let turn_secret = db.globals.turn_secret(); - let (username, password) = if turn_secret != "" { + let (username, password) = if !turn_secret.is_empty() { let expiry = SecondsSinceUnixEpoch::from_system_time( SystemTime::now() + Duration::from_secs(db.globals.turn_ttl()), ) diff --git a/src/database.rs b/src/database.rs index 080e24b3..056d49ad 100644 --- a/src/database.rs +++ b/src/database.rs @@ -477,7 +477,8 @@ impl Database { // Set room member count for (roomid, _) in db.rooms.roomid_shortstatehash.iter() { let room_id = - RoomId::try_from(utils::string_from_bytes(&roomid).unwrap()).unwrap(); + Box::::try_from(utils::string_from_bytes(&roomid).unwrap()) + .unwrap(); db.rooms.update_joined_count(&room_id, &db)?; } @@ -489,7 +490,7 @@ impl Database { if db.globals.database_version()? 
< 7 { // Upgrade state store - let mut last_roomstates: HashMap = HashMap::new(); + let mut last_roomstates: HashMap, u64> = HashMap::new(); let mut current_sstatehash: Option = None; let mut current_room = None; let mut current_state = HashSet::new(); @@ -570,7 +571,7 @@ impl Database { if let Some(current_sstatehash) = current_sstatehash { handle_state( current_sstatehash, - current_room.as_ref().unwrap(), + current_room.as_deref().unwrap(), current_state, &mut last_roomstates, )?; @@ -587,7 +588,7 @@ impl Database { .unwrap() .unwrap(); let event_id = - EventId::try_from(utils::string_from_bytes(&event_id).unwrap()) + Box::::try_from(utils::string_from_bytes(&event_id).unwrap()) .unwrap(); let pdu = db.rooms.get_pdu(&event_id).unwrap().unwrap(); @@ -604,7 +605,7 @@ impl Database { if let Some(current_sstatehash) = current_sstatehash { handle_state( current_sstatehash, - current_room.as_ref().unwrap(), + current_room.as_deref().unwrap(), current_state, &mut last_roomstates, )?; diff --git a/src/database/admin.rs b/src/database/admin.rs index 8d8559a5..07a487e2 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -1,13 +1,10 @@ -use std::{ - convert::{TryFrom, TryInto}, - sync::Arc, -}; +use std::{convert::TryFrom, sync::Arc}; use crate::{pdu::PduBuilder, Database}; use rocket::futures::{channel::mpsc, stream::StreamExt}; use ruma::{ events::{room::message::RoomMessageEventContent, EventType}, - UserId, + RoomAliasId, UserId, }; use serde_json::value::to_raw_value; use tokio::sync::{MutexGuard, RwLock, RwLockReadGuard}; @@ -37,15 +34,17 @@ impl Admin { let guard = db.read().await; let conduit_user = - UserId::try_from(format!("@conduit:{}", guard.globals.server_name())) + Box::::try_from(format!("@conduit:{}", guard.globals.server_name())) .expect("@conduit:server_name is valid"); let conduit_room = guard .rooms .id_from_alias( - &format!("#admins:{}", guard.globals.server_name()) - .try_into() - .expect("#admins:server_name is a valid room alias"), + &Box::::try_from(format!( + "#admins:{}", + guard.globals.server_name() + )) + .expect("#admins:server_name is a valid room alias"), ) .unwrap(); diff --git a/src/database/globals.rs b/src/database/globals.rs index 05ecb568..098d8197 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -40,13 +40,13 @@ pub struct Globals { dns_resolver: TokioAsyncResolver, jwt_decoding_key: Option>, pub(super) server_signingkeys: Arc, - pub bad_event_ratelimiter: Arc>>, + pub bad_event_ratelimiter: Arc, RateLimitState>>>, pub bad_signature_ratelimiter: Arc, RateLimitState>>>, pub servername_ratelimiter: Arc, Arc>>>, - pub sync_receivers: RwLock), SyncHandle>>, - pub roomid_mutex_insert: RwLock>>>, - pub roomid_mutex_state: RwLock>>>, - pub roomid_mutex_federation: RwLock>>>, // this lock will be held longer + pub sync_receivers: RwLock, Box), SyncHandle>>, + pub roomid_mutex_insert: RwLock, Arc>>>, + pub roomid_mutex_state: RwLock, Arc>>>, + pub roomid_mutex_federation: RwLock, Arc>>>, // this lock will be held longer pub rotate: RotationHandler, } @@ -254,7 +254,7 @@ impl Globals { &self, origin: &ServerName, new_keys: ServerSigningKeys, - ) -> Result> { + ) -> Result, VerifyKey>> { // Not atomic, but this is not critical let signingkeys = self.server_signingkeys.get(origin.as_bytes())?; @@ -293,7 +293,7 @@ impl Globals { pub fn signing_keys_for( &self, origin: &ServerName, - ) -> Result> { + ) -> Result, VerifyKey>> { let signingkeys = self .server_signingkeys .get(origin.as_bytes())? 
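// A minimal standalone sketch of how the `roomid_mutex_*` maps declared above are
// used at the `.entry(room_id.to_owned()).or_default()` call sites elsewhere in
// this series: clone the per-room Arc while briefly holding the map's write lock,
// then await the tokio mutex outside of it. Names and the String key are
// illustrative stand-ins (the real maps are keyed by Box<RoomId>).
use std::{collections::HashMap, sync::Arc};
use tokio::sync::{Mutex, OwnedMutexGuard};

#[derive(Default)]
struct RoomLocks {
    inner: std::sync::RwLock<HashMap<String, Arc<Mutex<()>>>>,
}

impl RoomLocks {
    async fn lock_room(&self, room_id: &str) -> OwnedMutexGuard<()> {
        let mutex = Arc::clone(
            self.inner
                .write()
                .unwrap()
                .entry(room_id.to_owned())
                .or_default(),
        );
        // The guard stays alive for as long as this room's state is being modified.
        mutex.lock_owned().await
    }
}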
diff --git a/src/database/key_backups.rs b/src/database/key_backups.rs index 98ea0111..3010a37b 100644 --- a/src/database/key_backups.rs +++ b/src/database/key_backups.rs @@ -209,13 +209,13 @@ impl KeyBackups { &self, user_id: &UserId, version: &str, - ) -> Result> { + ) -> Result, RoomKeyBackup>> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(version.as_bytes()); prefix.push(0xff); - let mut rooms = BTreeMap::::new(); + let mut rooms = BTreeMap::, RoomKeyBackup>::new(); for result in self .backupkeyid_backup @@ -231,7 +231,7 @@ impl KeyBackups { Error::bad_database("backupkeyid_backup session_id is invalid.") })?; - let room_id = RoomId::try_from( + let room_id = Box::::try_from( utils::string_from_bytes(parts.next().ok_or_else(|| { Error::bad_database("backupkeyid_backup key is invalid.") })?) diff --git a/src/database/pusher.rs b/src/database/pusher.rs index f53f137b..97ca85d8 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -234,7 +234,7 @@ pub fn get_actions<'a>( db: &Database, ) -> Result<&'a [Action]> { let ctx = PushConditionRoomCtx { - room_id: room_id.clone(), + room_id: room_id.to_owned(), member_count: 10_u32.into(), // TODO: get member count efficiently user_display_name: db .users @@ -277,7 +277,7 @@ async fn send_notice( let mut data_minus_url = pusher.data.clone(); // The url must be stripped off according to spec data_minus_url.url = None; - device.data = Some(data_minus_url); + device.data = data_minus_url; // Tweaks are only added if the format is NOT event_id_only if !event_id_only { diff --git a/src/database/rooms.rs b/src/database/rooms.rs index c5b795bd..ebd0941b 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -107,14 +107,14 @@ pub struct Rooms { /// RoomId + EventId -> Parent PDU EventId. pub(super) referencedevents: Arc, - pub(super) pdu_cache: Mutex>>, + pub(super) pdu_cache: Mutex, Arc>>, pub(super) shorteventid_cache: Mutex>>, pub(super) auth_chain_cache: Mutex, Arc>>>, - pub(super) eventidshort_cache: Mutex>, + pub(super) eventidshort_cache: Mutex, u64>>, pub(super) statekeyshort_cache: Mutex>, pub(super) shortstatekey_cache: Mutex>, - pub(super) our_real_users_cache: RwLock>>>, - pub(super) appservice_in_room_cache: RwLock>>, + pub(super) our_real_users_cache: RwLock, Arc>>>>, + pub(super) appservice_in_room_cache: RwLock, HashMap>>, pub(super) stateinfo_cache: Mutex< LruCache< u64, @@ -434,7 +434,7 @@ impl Rooms { None => continue, }; - let user_id = match UserId::try_from(state_key) { + let user_id = match Box::::try_from(state_key) { Ok(id) => id, Err(_) => continue, }; @@ -742,7 +742,7 @@ impl Rooms { self.eventidshort_cache .lock() .unwrap() - .insert(event_id.clone(), short); + .insert(event_id.to_owned(), short); Ok(short) } @@ -871,8 +871,8 @@ impl Rooms { .get(&shorteventid.to_be_bytes())? .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - let event_id = Arc::new( - EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + let event_id = Arc::from( + Box::::try_from(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") })?) .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?, @@ -1112,7 +1112,7 @@ impl Rooms { self.pdu_cache .lock() .unwrap() - .insert(event_id.clone(), Arc::clone(&pdu)); + .insert(event_id.to_owned(), Arc::clone(&pdu)); Ok(Some(pdu)) } else { Ok(None) @@ -1162,14 +1162,14 @@ impl Rooms { /// Returns the leaf pdus of a room. 
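// The hunks above repeatedly turn raw database bytes into owned ruma identifiers.
// With the ruma revision pinned in this series, owned IDs are boxed unsized types
// (Box<EventId>, Box<RoomId>, ...), built through TryFrom<String>. A minimal
// standalone sketch of that conversion, with simplified error values:
use std::convert::TryFrom;
use ruma::{EventId, RoomId};

fn event_id_from_db_bytes(bytes: &[u8]) -> Result<Box<EventId>, &'static str> {
    let s = String::from_utf8(bytes.to_vec()).map_err(|_| "invalid unicode in db")?;
    Box::<EventId>::try_from(s).map_err(|_| "invalid event id in db")
}

fn room_id_from_db_bytes(bytes: &[u8]) -> Result<Box<RoomId>, &'static str> {
    let s = String::from_utf8(bytes.to_vec()).map_err(|_| "invalid unicode in db")?;
    Box::<RoomId>::try_from(s).map_err(|_| "invalid room id in db")
}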
#[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result> { + pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); self.roomid_pduleaves .scan_prefix(prefix) .map(|(_, bytes)| { - EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + Box::::try_from(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") })?) .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) @@ -1178,7 +1178,7 @@ impl Rooms { } #[tracing::instrument(skip(self, room_id, event_ids))] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> { + pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Box]) -> Result<()> { for prev in event_ids { let mut key = room_id.as_bytes().to_vec(); key.extend_from_slice(prev.as_bytes()); @@ -1193,7 +1193,7 @@ impl Rooms { /// The provided `event_ids` become the new leaves, this allows a room to have multiple /// `prev_events`. #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> { + pub fn replace_pdu_leaves(&self, room_id: &RoomId, event_ids: &[Box]) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -1261,7 +1261,7 @@ impl Rooms { &self, pdu: &PduEvent, mut pdu_json: CanonicalJsonObject, - leaves: &[EventId], + leaves: &[Box], db: &Database, ) -> Result> { let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); @@ -1420,7 +1420,7 @@ impl Rooms { } // if the state_key fails - let target_user_id = UserId::try_from(state_key.clone()) + let target_user_id = Box::::try_from(state_key.clone()) .expect("This state_key was previously validated"); let content = serde_json::from_str::(pdu.content.get()) @@ -1476,9 +1476,11 @@ impl Rooms { if body.starts_with(&format!("@conduit:{}: ", db.globals.server_name())) && self .id_from_alias( - &format!("#admins:{}", db.globals.server_name()) - .try_into() - .expect("#admins:server_name is a valid room alias"), + &Box::::try_from(format!( + "#admins:{}", + db.globals.server_name() + )) + .expect("#admins:server_name is a valid room alias"), )? .as_ref() == Some(&pdu.room_id) @@ -1528,7 +1530,7 @@ impl Rooms { } "get_auth_chain" => { if args.len() == 1 { - if let Ok(event_id) = EventId::try_from(args[0]) { + if let Ok(event_id) = Box::::try_from(args[0]) { if let Some(event) = db.rooms.get_pdu_json(&event_id)? { let room_id_str = event .get("room_id") @@ -1539,12 +1541,12 @@ impl Rooms { ) })?; - let room_id = RoomId::try_from(room_id_str) + let room_id = Box::::try_from(room_id_str) .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; let start = Instant::now(); let count = server_server::get_auth_chain( &room_id, - vec![Arc::new(event_id)], + vec![Arc::from(event_id)], db, )? 
.count(); @@ -1567,12 +1569,12 @@ impl Rooms { let string = body[1..body.len() - 1].join("\n"); match serde_json::from_str(&string) { Ok(value) => { - let event_id = EventId::try_from(&*format!( + let event_id = Box::::try_from(&*format!( "${}", // Anything higher than version3 behaves the same ruma::signatures::reference_hash( &value, - &RoomVersionId::Version6 + &RoomVersionId::V6 ) .expect("ruma can calculate reference hashes") )) @@ -1622,7 +1624,7 @@ impl Rooms { } "get_pdu" => { if args.len() == 1 { - if let Ok(event_id) = EventId::try_from(args[0]) { + if let Ok(event_id) = Box::::try_from(args[0]) { let mut outlier = false; let mut pdu_json = db.rooms.get_non_outlier_pdu_json(&event_id)?; @@ -1948,7 +1950,7 @@ impl Rooms { room_id: &RoomId, db: &Database, _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex - ) -> Result { + ) -> Result> { let PduBuilder { event_type, content, @@ -1985,9 +1987,7 @@ impl Rooms { // If there was no create event yet, assume we are creating a version 6 room right now let room_version_id = create_event_content - .map_or(RoomVersionId::Version6, |create_event| { - create_event.room_version - }); + .map_or(RoomVersionId::V6, |create_event| create_event.room_version); let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); let auth_events = @@ -2016,9 +2016,9 @@ impl Rooms { } let mut pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater"), - room_id: room_id.clone(), - sender: sender.clone(), + event_id: ruma::event_id!("$thiswillbefilledinlater").to_owned(), + room_id: room_id.to_owned(), + sender: sender.to_owned(), origin_server_ts: utils::millis_since_unix_epoch() .try_into() .expect("time is valid"), @@ -2083,7 +2083,7 @@ impl Rooms { .expect("event is valid, we just created it"); // Generate event id - pdu.event_id = EventId::try_from(&*format!( + pdu.event_id = Box::::try_from(&*format!( "${}", ruma::signatures::reference_hash(&pdu_json, &room_version_id) .expect("ruma can calculate reference hashes") @@ -2206,7 +2206,7 @@ impl Rooms { let mut first_pdu_id = prefix.clone(); first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - let user_id = user_id.clone(); + let user_id = user_id.to_owned(); Ok(self .pduid_pdu @@ -2243,7 +2243,7 @@ impl Rooms { let current: &[u8] = ¤t; - let user_id = user_id.clone(); + let user_id = user_id.to_owned(); Ok(self .pduid_pdu @@ -2280,7 +2280,7 @@ impl Rooms { let current: &[u8] = ¤t; - let user_id = user_id.clone(); + let user_id = user_id.to_owned(); Ok(self .pduid_pdu @@ -2412,7 +2412,7 @@ impl Rooms { for room_ids in direct_event.content.0.values_mut() { if room_ids.iter().any(|r| r == &predecessor.room_id) { - room_ids.push(room_id.clone()); + room_ids.push(room_id.to_owned()); room_ids_updated = true; } } @@ -2451,7 +2451,11 @@ impl Rooms { EventType::IgnoredUserList, )? 
.map_or(false, |ignored| { - ignored.content.ignored_users.contains(sender) + ignored + .content + .ignored_users + .iter() + .any(|user| user == sender) }); if is_ignored { @@ -2537,7 +2541,7 @@ impl Rooms { self.our_real_users_cache .write() .unwrap() - .insert(room_id.clone(), Arc::new(real_users)); + .insert(room_id.to_owned(), Arc::new(real_users)); for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { if !joined_servers.remove(&old_joined_server) { @@ -2582,7 +2586,7 @@ impl Rooms { &self, room_id: &RoomId, db: &Database, - ) -> Result>> { + ) -> Result>>> { let maybe = self .our_real_users_cache .read() @@ -2650,7 +2654,7 @@ impl Rooms { self.appservice_in_room_cache .write() .unwrap() - .entry(room_id.clone()) + .entry(room_id.to_owned()) .or_default() .insert(appservice.0.clone(), in_room); @@ -2694,7 +2698,7 @@ impl Rooms { .roomid_mutex_state .write() .unwrap() - .entry(room_id.clone()) + .entry(room_id.to_owned()) .or_default(), ); let state_lock = mutex_state.lock().await; @@ -2754,7 +2758,7 @@ impl Rooms { .filter_map(|event| serde_json::from_str(event.json().get()).ok()) .filter_map(|event: serde_json::Value| event.get("sender").cloned()) .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::try_from(sender).ok()) + .filter_map(|sender| Box::::try_from(sender).ok()) .map(|user| user.server_name().to_owned()) .collect(); @@ -2778,9 +2782,7 @@ impl Rooms { let (make_leave_response, remote_server) = make_leave_response_and_server?; let room_version_id = match make_leave_response.room_version { - Some(version) - if version == RoomVersionId::Version5 || version == RoomVersionId::Version6 => - { + Some(version) if version == RoomVersionId::V5 || version == RoomVersionId::V6 => { version } _ => return Err(Error::BadServerResponse("Room version is not supported")), @@ -2817,7 +2819,7 @@ impl Rooms { .expect("event is valid, we just created it"); // Generate event id - let event_id = EventId::try_from(&*format!( + let event_id = Box::::try_from(&*format!( "${}", ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) .expect("ruma can calculate reference hashes") @@ -2902,11 +2904,11 @@ impl Rooms { } #[tracing::instrument(skip(self))] - pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result> { + pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result>> { self.alias_roomid .get(alias.alias().as_bytes())? .map(|bytes| { - RoomId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + Box::::try_from(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("Room ID in alias_roomid is invalid unicode.") })?) 
.map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) @@ -2918,7 +2920,7 @@ impl Rooms { pub fn room_aliases<'a>( &'a self, room_id: &RoomId, - ) -> impl Iterator> + 'a { + ) -> impl Iterator>> + 'a { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -2947,9 +2949,9 @@ impl Rooms { } #[tracing::instrument(skip(self))] - pub fn public_rooms(&self) -> impl Iterator> + '_ { + pub fn public_rooms(&self) -> impl Iterator>> + '_ { self.publicroomids.iter().map(|(bytes, _)| { - RoomId::try_from( + Box::::try_from( utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("Room ID in publicroomids is invalid unicode.") })?, @@ -3009,8 +3011,8 @@ impl Rooms { #[tracing::instrument(skip(self))] pub fn get_shared_rooms<'a>( &'a self, - users: Vec, - ) -> Result> + 'a> { + users: Vec>, + ) -> Result>> + 'a> { let iterators = users.into_iter().map(move |user_id| { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); @@ -3037,7 +3039,7 @@ impl Rooms { Ok(utils::common_elements(iterators, Ord::cmp) .expect("users is not empty") .map(|bytes| { - RoomId::try_from(utils::string_from_bytes(&*bytes).map_err(|_| { + Box::::try_from(utils::string_from_bytes(&*bytes).map_err(|_| { Error::bad_database("Invalid RoomId bytes in userroomid_joined") })?) .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) @@ -3082,12 +3084,12 @@ impl Rooms { pub fn server_rooms<'a>( &'a self, server: &ServerName, - ) -> impl Iterator> + 'a { + ) -> impl Iterator>> + 'a { let mut prefix = server.as_bytes().to_vec(); prefix.push(0xff); self.serverroomids.scan_prefix(prefix).map(|(key, _)| { - RoomId::try_from( + Box::::try_from( utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() @@ -3104,12 +3106,12 @@ impl Rooms { pub fn room_members<'a>( &'a self, room_id: &RoomId, - ) -> impl Iterator> + 'a { + ) -> impl Iterator>> + 'a { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { - UserId::try_from( + Box::::try_from( utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() @@ -3150,14 +3152,14 @@ impl Rooms { pub fn room_useroncejoined<'a>( &'a self, room_id: &RoomId, - ) -> impl Iterator> + 'a { + ) -> impl Iterator>> + 'a { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); self.roomuseroncejoinedids .scan_prefix(prefix) .map(|(key, _)| { - UserId::try_from( + Box::::try_from( utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() @@ -3176,14 +3178,14 @@ impl Rooms { pub fn room_members_invited<'a>( &'a self, room_id: &RoomId, - ) -> impl Iterator> + 'a { + ) -> impl Iterator>> + 'a { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); self.roomuserid_invitecount .scan_prefix(prefix) .map(|(key, _)| { - UserId::try_from( + Box::::try_from( utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() @@ -3232,11 +3234,11 @@ impl Rooms { pub fn rooms_joined<'a>( &'a self, user_id: &UserId, - ) -> impl Iterator> + 'a { + ) -> impl Iterator>> + 'a { self.userroomid_joined .scan_prefix(user_id.as_bytes().to_vec()) .map(|(key, _)| { - RoomId::try_from( + Box::::try_from( utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() @@ -3255,14 +3257,14 @@ impl Rooms { pub fn rooms_invited<'a>( &'a self, user_id: &UserId, - ) -> impl Iterator>)>> + 'a { + ) -> impl Iterator, Vec>)>> + 'a { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); self.userroomid_invitestate .scan_prefix(prefix) .map(|(key, state)| { - let room_id = 
RoomId::try_from( + let room_id = Box::::try_from( utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() @@ -3328,14 +3330,14 @@ impl Rooms { pub fn rooms_left<'a>( &'a self, user_id: &UserId, - ) -> impl Iterator>)>> + 'a { + ) -> impl Iterator, Vec>)>> + 'a { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); self.userroomid_leftstate .scan_prefix(prefix) .map(|(key, state)| { - let room_id = RoomId::try_from( + let room_id = Box::::try_from( utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 9a27e437..365211b6 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -76,8 +76,13 @@ impl RoomEdus { &'a self, room_id: &RoomId, since: u64, - ) -> impl Iterator)>> + 'a - { + ) -> impl Iterator< + Item = Result<( + Box, + u64, + Raw, + )>, + > + 'a { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); let prefix2 = prefix.clone(); @@ -92,7 +97,7 @@ impl RoomEdus { let count = utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::()]) .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; - let user_id = UserId::try_from( + let user_id = Box::::try_from( utils::string_from_bytes(&k[prefix.len() + mem::size_of::() + 1..]) .map_err(|_| { Error::bad_database("Invalid readreceiptid userid bytes in db.") @@ -309,7 +314,7 @@ impl RoomEdus { .typingid_userid .scan_prefix(prefix) .map(|(_, user_id)| { - UserId::try_from(utils::string_from_bytes(&user_id).map_err(|_| { + Box::::try_from(utils::string_from_bytes(&user_id).map_err(|_| { Error::bad_database("User ID in typingid_userid is invalid unicode.") })?) .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid.")) @@ -449,7 +454,7 @@ impl RoomEdus { { // Send new presence events to set the user offline let count = globals.next_count()?.to_be_bytes(); - let user_id = utils::string_from_bytes(&user_id_bytes) + let user_id: Box<_> = utils::string_from_bytes(&user_id_bytes) .map_err(|_| { Error::bad_database("Invalid UserId bytes in userid_lastpresenceupdate.") })? 
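// Several of the iterators above recover the trailing identifier from a
// `prefix 0xff ... 0xff id` key by taking the last 0xff-separated segment.
// A standalone sketch of that key-splitting step (names are illustrative):
fn trailing_segment(key: &[u8]) -> Option<&[u8]> {
    // rsplit yields segments starting from the end of the key
    key.rsplit(|&b| b == 0xff).next()
}

fn trailing_utf8(key: &[u8]) -> Option<String> {
    trailing_segment(key).and_then(|bytes| String::from_utf8(bytes.to_vec()).ok())
}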
@@ -475,7 +480,7 @@ impl RoomEdus { presence: PresenceState::Offline, status_msg: None, }, - sender: user_id.clone(), + sender: user_id.to_owned(), }) .expect("PresenceEvent can be serialized"), )?; @@ -498,7 +503,7 @@ impl RoomEdus { since: u64, _rooms: &super::Rooms, _globals: &super::super::globals::Globals, - ) -> Result> { + ) -> Result, PresenceEvent>> { //self.presence_maintain(rooms, globals)?; let mut prefix = room_id.as_bytes().to_vec(); @@ -513,7 +518,7 @@ impl RoomEdus { .iter_from(&*first_possible_edu, false) .take_while(|(key, _)| key.starts_with(&prefix)) { - let user_id = UserId::try_from( + let user_id = Box::::try_from( utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() diff --git a/src/database/sending.rs b/src/database/sending.rs index bf0cc2c1..c27b5731 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -397,7 +397,7 @@ impl Sending { // Because synapse resyncs, we can just insert dummy data let edu = Edu::DeviceListUpdate(DeviceListUpdateContent { user_id, - device_id: device_id!("dummy"), + device_id: device_id!("dummy").to_owned(), device_display_name: Some("Dummy".to_owned()), stream_id: uint!(1), prev_id: Vec::new(), @@ -584,7 +584,7 @@ impl Sending { } let userid = - UserId::try_from(utils::string_from_bytes(user).map_err(|_| { + Box::::try_from(utils::string_from_bytes(user).map_err(|_| { ( kind.clone(), Error::bad_database("Invalid push user string in db."), diff --git a/src/database/users.rs b/src/database/users.rs index d0da0714..4a084722 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -63,11 +63,11 @@ impl Users { globals: &super::globals::Globals, ) -> Result { let admin_room_alias_id = - RoomAliasId::try_from(format!("#admins:{}", globals.server_name())) + Box::::try_from(format!("#admins:{}", globals.server_name())) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; let admin_room_id = rooms.id_from_alias(&admin_room_alias_id)?.unwrap(); - Ok(rooms.is_joined(user_id, &admin_room_id)?) + rooms.is_joined(user_id, &admin_room_id) } /// Create a new user account on this homeserver. @@ -85,7 +85,7 @@ impl Users { /// Find out which user an access token belongs to. #[tracing::instrument(skip(self, token))] - pub fn find_from_token(&self, token: &str) -> Result> { + pub fn find_from_token(&self, token: &str) -> Result, String)>> { self.token_userdeviceid .get(token.as_bytes())? .map_or(Ok(None), |bytes| { @@ -98,9 +98,11 @@ impl Users { })?; Ok(Some(( - UserId::try_from(utils::string_from_bytes(user_bytes).map_err(|_| { - Error::bad_database("User ID in token_userdeviceid is invalid unicode.") - })?) + Box::::try_from(utils::string_from_bytes(user_bytes).map_err( + |_| { + Error::bad_database("User ID in token_userdeviceid is invalid unicode.") + }, + )?) .map_err(|_| { Error::bad_database("User ID in token_userdeviceid is invalid.") })?, @@ -113,9 +115,9 @@ impl Users { /// Returns an iterator over all users on this homeserver. #[tracing::instrument(skip(self))] - pub fn iter(&self) -> impl Iterator> + '_ { + pub fn iter(&self) -> impl Iterator>> + '_ { self.userid_password.iter().map(|(bytes, _)| { - UserId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + Box::::try_from(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("User ID in userid_password is invalid unicode.") })?) .map_err(|_| Error::bad_database("User ID in userid_password is invalid.")) @@ -181,20 +183,21 @@ impl Users { /// Get the avatar_url of a user. 
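// The accessor below (and several others in this file) maps an optional raw value
// into a parsed one and then calls `transpose()` to turn `Option<Result<_, _>>`
// into `Result<Option<_>, _>`. A minimal standalone sketch of that shape:
fn parse_optional(raw: Option<&[u8]>) -> Result<Option<String>, &'static str> {
    raw.map(|bytes| String::from_utf8(bytes.to_vec()).map_err(|_| "value in db is invalid"))
        .transpose()
}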
#[tracing::instrument(skip(self, user_id))] - pub fn avatar_url(&self, user_id: &UserId) -> Result> { + pub fn avatar_url(&self, user_id: &UserId) -> Result>> { self.userid_avatarurl .get(user_id.as_bytes())? .map(|bytes| { let s = utils::string_from_bytes(&bytes) .map_err(|_| Error::bad_database("Avatar URL in db is invalid."))?; - MxcUri::try_from(s).map_err(|_| Error::bad_database("Avatar URL in db is invalid.")) + Box::::try_from(s) + .map_err(|_| Error::bad_database("Avatar URL in db is invalid.")) }) .transpose() } /// Sets a new avatar_url or removes it if avatar_url is None. #[tracing::instrument(skip(self, user_id, avatar_url))] - pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option) -> Result<()> { + pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option>) -> Result<()> { if let Some(avatar_url) = avatar_url { self.userid_avatarurl .insert(user_id.as_bytes(), avatar_url.to_string().as_bytes())?; @@ -409,7 +412,7 @@ impl Users { device_id: &DeviceId, key_algorithm: &DeviceKeyAlgorithm, globals: &super::globals::Globals, - ) -> Result> { + ) -> Result, OneTimeKey)>> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(device_id.as_bytes()); @@ -459,7 +462,7 @@ impl Users { .scan_prefix(userdeviceid) .map(|(bytes, _)| { Ok::<_, Error>( - serde_json::from_slice::( + serde_json::from_slice::>( &*bytes.rsplit(|&b| b == 0xff).next().ok_or_else(|| { Error::bad_database("OneTimeKey ID in db is invalid.") })?, @@ -632,7 +635,7 @@ impl Users { .ok_or_else(|| Error::bad_database("key in keyid_key has no signatures field."))? .as_object_mut() .ok_or_else(|| Error::bad_database("key in keyid_key has invalid signatures field."))? - .entry(sender_id.clone()) + .entry(sender_id.to_owned()) .or_insert_with(|| serde_json::Map::new().into()); signatures @@ -657,7 +660,7 @@ impl Users { user_or_room_id: &str, from: u64, to: Option, - ) -> impl Iterator> + 'a { + ) -> impl Iterator>> + 'a { let mut prefix = user_or_room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -683,7 +686,7 @@ impl Users { } }) .map(|(_, bytes)| { - UserId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + Box::::try_from(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("User ID in devicekeychangeid_userid is invalid unicode.") })?) .map_err(|_| Error::bad_database("User ID in devicekeychangeid_userid is invalid.")) diff --git a/src/pdu.rs b/src/pdu.rs index 0f99f43b..3c955976 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -13,7 +13,7 @@ use serde_json::{ json, value::{to_raw_value, RawValue as RawJsonValue}, }; -use std::{cmp::Ordering, collections::BTreeMap, convert::TryFrom}; +use std::{cmp::Ordering, collections::BTreeMap, convert::TryFrom, ops::Deref}; use tracing::warn; /// Content hashes of a PDU. 
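// The `ops::Deref` import added above is what lets the accessors further down hand
// out borrowed identifiers from boxed ones, e.g. iterating `&EventId` out of a
// `Vec<Box<EventId>>`. A standalone sketch of that mapping:
use std::ops::Deref;
use ruma::EventId;

fn borrowed_ids(ids: &[Box<EventId>]) -> impl Iterator<Item = &EventId> {
    ids.iter().map(Deref::deref)
}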
@@ -25,20 +25,20 @@ pub struct EventHash { #[derive(Clone, Deserialize, Serialize, Debug)] pub struct PduEvent { - pub event_id: EventId, - pub room_id: RoomId, - pub sender: UserId, + pub event_id: Box, + pub room_id: Box, + pub sender: Box, pub origin_server_ts: UInt, #[serde(rename = "type")] pub kind: EventType, pub content: Box, #[serde(skip_serializing_if = "Option::is_none")] pub state_key: Option, - pub prev_events: Vec, + pub prev_events: Vec>, pub depth: UInt, - pub auth_events: Vec, + pub auth_events: Vec>, #[serde(skip_serializing_if = "Option::is_none")] - pub redacts: Option, + pub redacts: Option>, #[serde(default, skip_serializing_if = "Option::is_none")] pub unsigned: Option>, pub hashes: EventHash, @@ -295,15 +295,15 @@ impl state_res::Event for PduEvent { } fn prev_events(&self) -> Box + '_> { - Box::new(self.prev_events.iter()) + Box::new(self.prev_events.iter().map(Deref::deref)) } fn auth_events(&self) -> Box + '_> { - Box::new(self.auth_events.iter()) + Box::new(self.auth_events.iter().map(Deref::deref)) } fn redacts(&self) -> Option<&EventId> { - self.redacts.as_ref() + self.redacts.as_deref() } } @@ -331,16 +331,16 @@ impl Ord for PduEvent { /// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. pub(crate) fn gen_event_id_canonical_json( pdu: &RawJsonValue, -) -> crate::Result<(EventId, CanonicalJsonObject)> { +) -> crate::Result<(Box, CanonicalJsonObject)> { let value = serde_json::from_str(pdu.get()).map_err(|e| { warn!("Error parsing incoming event {:?}: {:?}", pdu, e); Error::BadServerResponse("Invalid PDU in server response") })?; - let event_id = EventId::try_from(&*format!( + let event_id = Box::::try_from(&*format!( "${}", // Anything higher than version3 behaves the same - ruma::signatures::reference_hash(&value, &RoomVersionId::Version6) + ruma::signatures::reference_hash(&value, &RoomVersionId::V6) .expect("ruma can calculate reference hashes") )) .expect("ruma's reference hashes are valid event ids"); @@ -356,7 +356,7 @@ pub struct PduBuilder { pub content: Box, pub unsigned: Option>, pub state_key: Option, - pub redacts: Option, + pub redacts: Option>, } /// Direct conversion prevents loss of the empty `state_key` that ruma requires. diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 03c115cd..2cff2f5a 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -29,7 +29,7 @@ use { /// first. 
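// The event-id generation used by `gen_event_id_canonical_json` above (and repeated
// at the federation call sites in this series) hashes the canonical JSON and
// prefixes the result with '$'. A condensed standalone sketch, assuming the ruma
// revision pinned in Cargo.toml; the CanonicalJsonObject import path mirrors the
// one used elsewhere in this codebase and may need adjusting:
use std::convert::TryFrom;
use ruma::{serde::CanonicalJsonObject, signatures::reference_hash, EventId, RoomVersionId};

fn event_id_for(object: &CanonicalJsonObject, version: &RoomVersionId) -> Box<EventId> {
    let hash = reference_hash(object, version).expect("ruma can calculate reference hashes");
    Box::<EventId>::try_from(format!("${}", hash))
        .expect("ruma's reference hashes are valid event ids")
}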
pub struct Ruma { pub body: T::Incoming, - pub sender_user: Option, + pub sender_user: Option>, pub sender_device: Option>, pub sender_servername: Option>, // This is None when body is not a valid string @@ -86,7 +86,7 @@ where registration .get("as_token") .and_then(|as_token| as_token.as_str()) - .map_or(false, |as_token| token.as_deref() == Some(as_token)) + .map_or(false, |as_token| token == Some(as_token)) }) { match metadata.authentication { AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { @@ -103,7 +103,7 @@ where .unwrap() }, |string| { - UserId::try_from(string.expect("parsing to string always works")) + Box::::try_from(string.expect("parsing to string always works")) .unwrap() }, ); diff --git a/src/server_server.rs b/src/server_server.rs index 482edf0f..ec5bc345 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -552,7 +552,7 @@ pub fn get_server_keys_route(db: DatabaseGuard) -> Json { let mut verify_keys = BTreeMap::new(); verify_keys.insert( - ServerSigningKeyId::try_from( + Box::::try_from( format!("ed25519:{}", db.globals.keypair().version()).as_str(), ) .expect("found invalid server signing keys in DB"), @@ -736,7 +736,7 @@ pub async fn send_transaction_message_route( // 0. Check the server is in the room let room_id = match value .get("room_id") - .and_then(|id| RoomId::try_from(id.as_str()?).ok()) + .and_then(|id| Box::::try_from(id.as_str()?).ok()) { Some(id) => id, None => { @@ -1003,11 +1003,10 @@ pub(crate) async fn handle_incoming_pdu<'a>( // 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline events let mut graph = HashMap::new(); let mut eventid_info = HashMap::new(); - let mut todo_outlier_stack: Vec<_> = incoming_pdu + let mut todo_outlier_stack: Vec> = incoming_pdu .prev_events .iter() - .cloned() - .map(Arc::new) + .map(|x| Arc::from(&**x)) .collect(); let mut amount = 0; @@ -1027,7 +1026,7 @@ pub(crate) async fn handle_incoming_pdu<'a>( if amount > 100 { // Max limit reached warn!("Max prev event limit reached!"); - graph.insert((*prev_event_id).clone(), HashSet::new()); + graph.insert((*prev_event_id).to_owned(), HashSet::new()); continue; } @@ -1038,27 +1037,27 @@ pub(crate) async fn handle_incoming_pdu<'a>( amount += 1; for prev_prev in &pdu.prev_events { if !graph.contains_key(prev_prev) { - todo_outlier_stack.push(dbg!(Arc::new(prev_prev.clone()))); + todo_outlier_stack.push(dbg!(Arc::from(&**prev_prev))); } } graph.insert( - (*prev_event_id).clone(), + (*prev_event_id).to_owned(), pdu.prev_events.iter().cloned().collect(), ); } else { // Time based check failed - graph.insert((*prev_event_id).clone(), HashSet::new()); + graph.insert((*prev_event_id).to_owned(), HashSet::new()); } eventid_info.insert(prev_event_id.clone(), (pdu, json)); } else { // Get json failed - graph.insert((*prev_event_id).clone(), HashSet::new()); + graph.insert((*prev_event_id).to_owned(), HashSet::new()); } } else { // Fetch and handle failed - graph.insert((*prev_event_id).clone(), HashSet::new()); + graph.insert((*prev_event_id).to_owned(), HashSet::new()); } } @@ -1074,7 +1073,6 @@ pub(crate) async fn handle_incoming_pdu<'a>( .get(event_id) .map_or_else(|| uint!(0), |info| info.0.origin_server_ts), ), - ruma::event_id!("$notimportant"), )) }) .map_err(|_| "Error sorting prev events".to_owned())?; @@ -1084,7 +1082,7 @@ pub(crate) async fn handle_incoming_pdu<'a>( if errors >= 5 { break; } - if let Some((pdu, json)) = eventid_info.remove(&prev_id) { + if let Some((pdu, json)) = eventid_info.remove(&*prev_id) 
{ if pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { continue; } @@ -1200,8 +1198,7 @@ fn handle_outlier_pdu<'a>( &incoming_pdu .auth_events .iter() - .cloned() - .map(Arc::new) + .map(|x| Arc::from(&**x)) .collect::>(), create_event, room_id, @@ -1331,7 +1328,7 @@ async fn upgrade_outlier_to_timeline_pdu( let mut state_at_incoming_event = None; if incoming_pdu.prev_events.len() == 1 { - let prev_event = &incoming_pdu.prev_events[0]; + let prev_event = &*incoming_pdu.prev_events[0]; let prev_event_sstatehash = db .rooms .pdu_shortstatehash(prev_event) @@ -1353,7 +1350,7 @@ async fn upgrade_outlier_to_timeline_pdu( .get_or_create_shortstatekey(&prev_pdu.kind, state_key, &db.globals) .map_err(|_| "Failed to create shortstatekey.".to_owned())?; - state.insert(shortstatekey, Arc::new(prev_event.clone())); + state.insert(shortstatekey, Arc::from(prev_event)); // Now it's the state after the pdu } @@ -1397,7 +1394,7 @@ async fn upgrade_outlier_to_timeline_pdu( .rooms .get_or_create_shortstatekey(&prev_event.kind, state_key, &db.globals) .map_err(|_| "Failed to create shortstatekey.".to_owned())?; - leaf_state.insert(shortstatekey, Arc::new(prev_event.event_id.clone())); + leaf_state.insert(shortstatekey, Arc::from(&*prev_event.event_id)); // Now it's the state after the pdu } @@ -1410,14 +1407,14 @@ async fn upgrade_outlier_to_timeline_pdu( .get_statekey_from_short(k) .map_err(|_| "Failed to get_statekey_from_short.".to_owned())?; - state.insert(k, (*id).clone()); + state.insert(k, (*id).to_owned()); starting_events.push(id); } auth_chain_sets.push( get_auth_chain(room_id, starting_events, db) .map_err(|_| "Failed to load auth chain.".to_owned())? - .map(|event_id| (*event_id).clone()) + .map(|event_id| (*event_id).to_owned()) .collect(), ); @@ -1444,7 +1441,7 @@ async fn upgrade_outlier_to_timeline_pdu( .rooms .get_or_create_shortstatekey(&event_type, &state_key, &db.globals) .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; - Ok((shortstatekey, Arc::new(event_id))) + Ok((shortstatekey, Arc::from(event_id))) }) .collect::>()?, ), @@ -1479,8 +1476,7 @@ async fn upgrade_outlier_to_timeline_pdu( origin, &res.pdu_ids .iter() - .cloned() - .map(Arc::new) + .map(|x| Arc::from(&**x)) .collect::>(), create_event, room_id, @@ -1488,7 +1484,7 @@ async fn upgrade_outlier_to_timeline_pdu( ) .await; - let mut state = BTreeMap::new(); + let mut state: BTreeMap<_, Arc> = BTreeMap::new(); for (pdu, _) in state_vec { let state_key = pdu .state_key @@ -1502,7 +1498,7 @@ async fn upgrade_outlier_to_timeline_pdu( match state.entry(shortstatekey) { btree_map::Entry::Vacant(v) => { - v.insert(Arc::new(pdu.event_id.clone())); + v.insert(Arc::from(&*pdu.event_id)); } btree_map::Entry::Occupied(_) => return Err( "State event's type and state_key combination exists multiple times." 
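// The loop above folds fetched state events into a map keyed by the short state
// key, rejecting duplicate (type, state_key) pairs through the entry API. A
// standalone sketch of that shape, with u64 standing in for the short key:
use std::{
    collections::{btree_map, BTreeMap},
    sync::Arc,
};
use ruma::EventId;

fn insert_state_event(
    state: &mut BTreeMap<u64, Arc<EventId>>,
    short_key: u64,
    event_id: &EventId,
) -> Result<(), &'static str> {
    match state.entry(short_key) {
        btree_map::Entry::Vacant(v) => {
            // First event seen for this (type, state_key) pair
            v.insert(Arc::from(event_id));
            Ok(())
        }
        btree_map::Entry::Occupied(_) => {
            Err("state event's type and state_key combination exists multiple times")
        }
    }
}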
@@ -1577,7 +1573,7 @@ async fn upgrade_outlier_to_timeline_pdu( .roomid_mutex_state .write() .unwrap() - .entry(room_id.clone()) + .entry(room_id.to_owned()) .or_default(), ); let state_lock = mutex_state.lock().await; @@ -1715,7 +1711,7 @@ async fn upgrade_outlier_to_timeline_pdu( .rooms .get_or_create_shortstatekey(&leaf_pdu.kind, state_key, &db.globals) .map_err(|_| "Failed to create shortstatekey.".to_owned())?; - leaf_state.insert(shortstatekey, Arc::new(leaf_pdu.event_id.clone())); + leaf_state.insert(shortstatekey, Arc::from(&*leaf_pdu.event_id)); // Now it's the state after the pdu } @@ -1730,7 +1726,7 @@ async fn upgrade_outlier_to_timeline_pdu( .get_or_create_shortstatekey(&incoming_pdu.kind, state_key, &db.globals) .map_err(|_| "Failed to create shortstatekey.".to_owned())?; - state_after.insert(shortstatekey, Arc::new(incoming_pdu.event_id.clone())); + state_after.insert(shortstatekey, Arc::from(&*incoming_pdu.event_id)); } fork_states.push(state_after); @@ -1762,7 +1758,7 @@ async fn upgrade_outlier_to_timeline_pdu( db, ) .map_err(|_| "Failed to load auth chain.".to_owned())? - .map(|event_id| (*event_id).clone()) + .map(|event_id| (*event_id).to_owned()) .collect(), ); } @@ -1774,7 +1770,7 @@ async fn upgrade_outlier_to_timeline_pdu( .map(|(k, id)| { db.rooms .get_statekey_from_short(k) - .map(|k| (k, (*id).clone())) + .map(|k| (k, (*id).to_owned())) }) .collect::>>() }) @@ -1874,7 +1870,8 @@ pub(crate) fn fetch_and_handle_outliers<'a>( let mut pdus = vec![]; for id in events { - if let Some((time, tries)) = db.globals.bad_event_ratelimiter.read().unwrap().get(id) { + if let Some((time, tries)) = db.globals.bad_event_ratelimiter.read().unwrap().get(&**id) + { // Exponential backoff let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { @@ -1914,7 +1911,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( match crate::pdu::gen_event_id_canonical_json(&res.pdu) { Ok(t) => t, Err(_) => { - back_off((**id).clone()); + back_off((**id).to_owned()); continue; } }; @@ -1939,14 +1936,14 @@ pub(crate) fn fetch_and_handle_outliers<'a>( Ok((pdu, json)) => (pdu, Some(json)), Err(e) => { warn!("Authentication of event {} failed: {:?}", id, e); - back_off((**id).clone()); + back_off((**id).to_owned()); continue; } } } Err(_) => { warn!("Failed to fetch event: {}", id); - back_off((**id).clone()); + back_off((**id).to_owned()); continue; } } @@ -2128,7 +2125,7 @@ fn append_incoming_pdu( db: &Database, pdu: &PduEvent, pdu_json: CanonicalJsonObject, - new_room_leaves: HashSet, + new_room_leaves: HashSet>, state_ids_compressed: HashSet, soft_fail: bool, _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex @@ -2298,13 +2295,13 @@ fn get_auth_chain_inner( event_id: &EventId, db: &Database, ) -> Result> { - let mut todo = vec![event_id.clone()]; + let mut todo = vec![event_id.to_owned()]; let mut found = HashSet::new(); while let Some(event_id) = todo.pop() { match db.rooms.get_pdu(&event_id) { Ok(Some(pdu)) => { - if &pdu.room_id != room_id { + if pdu.room_id != room_id { return Err(Error::BadRequest(ErrorKind::Forbidden, "Evil event in db")); } for auth_event in &pdu.auth_events { @@ -2314,7 +2311,7 @@ fn get_auth_chain_inner( if !found.contains(&sauthevent) { found.insert(sauthevent); - todo.push(auth_event.clone()); + todo.push(auth_event.to_owned()); } } } @@ -2363,7 +2360,7 @@ pub fn get_event_route( .and_then(|val| val.as_str()) .ok_or_else(|| 
Error::bad_database("Invalid event in database"))?; - let room_id = RoomId::try_from(room_id_str) + let room_id = Box::::try_from(room_id_str) .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; if !db.rooms.server_in_room(sender_servername, &room_id)? { @@ -2417,7 +2414,7 @@ pub fn get_missing_events_route( .and_then(|val| val.as_str()) .ok_or_else(|| Error::bad_database("Invalid event in database"))?; - let event_room_id = RoomId::try_from(room_id_str) + let event_room_id = Box::::try_from(room_id_str) .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; if event_room_id != body.room_id { @@ -2436,7 +2433,7 @@ pub fn get_missing_events_route( continue; } queued_events.extend_from_slice( - &serde_json::from_value::>( + &serde_json::from_value::>>( serde_json::to_value(pdu.get("prev_events").cloned().ok_or_else(|| { Error::bad_database("Event in db has no prev_events field.") })?) @@ -2485,14 +2482,14 @@ pub fn get_event_authorization_route( .and_then(|val| val.as_str()) .ok_or_else(|| Error::bad_database("Invalid event in database"))?; - let room_id = RoomId::try_from(room_id_str) + let room_id = Box::::try_from(room_id_str) .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; if !db.rooms.server_in_room(sender_servername, &room_id)? { return Err(Error::BadRequest(ErrorKind::NotFound, "Event not found.")); } - let auth_chain_ids = get_auth_chain(&room_id, vec![Arc::new(body.event_id.clone())], &db)?; + let auth_chain_ids = get_auth_chain(&room_id, vec![Arc::from(&*body.event_id)], &db)?; Ok(get_event_authorization::v1::Response { auth_chain: auth_chain_ids @@ -2550,7 +2547,7 @@ pub fn get_room_state_route( }) .collect(); - let auth_chain_ids = get_auth_chain(&body.room_id, vec![Arc::new(body.event_id.clone())], &db)?; + let auth_chain_ids = get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db)?; Ok(get_room_state::v1::Response { auth_chain: auth_chain_ids @@ -2606,13 +2603,13 @@ pub fn get_room_state_ids_route( .rooms .state_full_ids(shortstatehash)? 
.into_iter() - .map(|(_, id)| (*id).clone()) + .map(|(_, id)| (*id).to_owned()) .collect(); - let auth_chain_ids = get_auth_chain(&body.room_id, vec![Arc::new(body.event_id.clone())], &db)?; + let auth_chain_ids = get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db)?; Ok(get_room_state_ids::v1::Response { - auth_chain_ids: auth_chain_ids.map(|id| (*id).clone()).collect(), + auth_chain_ids: auth_chain_ids.map(|id| (*id).to_owned()).collect(), pdu_ids, } .into()) @@ -2671,9 +2668,8 @@ pub fn create_join_event_template_route( }; // If there was no create event yet, assume we are creating a version 6 room right now - let room_version_id = create_event_content.map_or(RoomVersionId::Version6, |create_event| { - create_event.room_version - }); + let room_version_id = + create_event_content.map_or(RoomVersionId::V6, |create_event| create_event.room_version); let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); if !body.ver.contains(&room_version_id) { @@ -2726,7 +2722,7 @@ pub fn create_join_event_template_route( } let pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater"), + event_id: ruma::event_id!("$thiswillbefilledinlater").to_owned(), room_id: body.room_id.clone(), sender: body.user_id.clone(), origin_server_ts: utils::millis_since_unix_epoch() @@ -2838,7 +2834,7 @@ async fn create_join_event( .roomid_mutex_federation .write() .unwrap() - .entry(room_id.clone()) + .entry(room_id.to_owned()) .or_default(), ); let mutex_lock = mutex.lock().await; @@ -2937,8 +2933,7 @@ pub async fn create_invite_route( return Err(Error::bad_config("Federation is disabled.")); } - if body.room_version != RoomVersionId::Version5 && body.room_version != RoomVersionId::Version6 - { + if body.room_version != RoomVersionId::V5 && body.room_version != RoomVersionId::V6 { return Err(Error::BadRequest( ErrorKind::IncompatibleRoomVersion { room_version: body.room_version.clone(), @@ -2959,7 +2954,7 @@ pub async fn create_invite_route( .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Failed to sign event."))?; // Generate event id - let event_id = EventId::try_from(&*format!( + let event_id = Box::::try_from(&*format!( "${}", ruma::signatures::reference_hash(&signed_event, &body.room_version) .expect("ruma can calculate reference hashes") @@ -2972,7 +2967,7 @@ pub async fn create_invite_route( CanonicalJsonValue::String(event_id.into()), ); - let sender = serde_json::from_value( + let sender: Box<_> = serde_json::from_value( signed_event .get("sender") .ok_or(Error::BadRequest( @@ -2984,7 +2979,7 @@ pub async fn create_invite_route( ) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "sender is not a user id."))?; - let invited_user = serde_json::from_value( + let invited_user: Box<_> = serde_json::from_value( signed_event .get("state_key") .ok_or(Error::BadRequest( @@ -3263,7 +3258,7 @@ pub(crate) async fn fetch_required_signing_keys( // the PDUs and either cache the key or add it to the list that needs to be retrieved. 
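// The helper that follows implements the idea described in the comment above: for
// every signature found in the PDUs, either use a cached server key or remember
// that the key has to be requested from that server. A simplified standalone
// model of that decision, with plain strings standing in for the ruma id types:
use std::collections::{BTreeMap, HashSet};

fn cache_or_queue(
    needed: &[(String, String)],                // (server name, key id) pairs from signatures
    cached: &BTreeMap<String, HashSet<String>>, // key ids already known per server
    to_fetch: &mut BTreeMap<String, HashSet<String>>,
) {
    for (server, key_id) in needed {
        let already_cached = cached
            .get(server)
            .map_or(false, |keys| keys.contains(key_id));
        if !already_cached {
            // Queue this key id for the batch request to `server`
            to_fetch
                .entry(server.clone())
                .or_default()
                .insert(key_id.clone());
        }
    }
}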
fn get_server_keys_from_cache( pdu: &RawJsonValue, - servers: &mut BTreeMap, BTreeMap>, + servers: &mut BTreeMap, BTreeMap, QueryCriteria>>, room_version: &RoomVersionId, pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap>>, db: &Database, @@ -3273,7 +3268,7 @@ fn get_server_keys_from_cache( Error::BadServerResponse("Invalid PDU in server response") })?; - let event_id = EventId::try_from(&*format!( + let event_id = Box::::try_from(&*format!( "${}", ruma::signatures::reference_hash(&value, room_version) .expect("ruma can calculate reference hashes") @@ -3353,7 +3348,7 @@ pub(crate) async fn fetch_join_signing_keys( pub_key_map: &RwLock>>, db: &Database, ) -> Result<()> { - let mut servers: BTreeMap, BTreeMap> = + let mut servers: BTreeMap, BTreeMap, QueryCriteria>> = BTreeMap::new(); { @@ -3387,10 +3382,6 @@ pub(crate) async fn fetch_join_signing_keys( server, get_remote_server_keys_batch::v2::Request { server_keys: servers.clone(), - minimum_valid_until_ts: MilliSecondsSinceUnixEpoch::from_system_time( - SystemTime::now() + Duration::from_secs(60), - ) - .expect("time is valid"), }, ) .await From 41fef1da64ea792b9ae8827f04d72cf7bbc1c960 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 27 Nov 2021 00:30:00 +0100 Subject: [PATCH 019/445] Remove unnecessary .to_string() calls --- src/server_server.rs | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index ec5bc345..8a50d234 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -396,10 +396,7 @@ async fn find_actual_destination( } if let Some(port) = force_port { - FedDest::Named( - delegated_hostname, - format!(":{}", port.to_string()), - ) + FedDest::Named(delegated_hostname, format!(":{}", port)) } else { add_port_to_hostname(&delegated_hostname) } @@ -432,10 +429,7 @@ async fn find_actual_destination( } if let Some(port) = force_port { - FedDest::Named( - hostname.clone(), - format!(":{}", port.to_string()), - ) + FedDest::Named(hostname.clone(), format!(":{}", port)) } else { add_port_to_hostname(&hostname) } From bffddbd4879e950a52647126284acafde4c46df4 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 27 Nov 2021 00:30:28 +0100 Subject: [PATCH 020/445] Simplify identifier parsing code --- src/client_server/account.rs | 11 ++---- src/client_server/membership.rs | 39 +++++++++++--------- src/client_server/message.rs | 16 +++----- src/client_server/room.rs | 18 +++------ src/client_server/sync.rs | 11 +++--- src/database.rs | 15 +++----- src/database/admin.rs | 18 ++++----- src/database/key_backups.rs | 4 +- src/database/rooms.rs | 65 ++++++++++++++++----------------- src/database/rooms/edus.rs | 24 +++++------- src/database/sending.rs | 31 ++++++++-------- src/database/users.rs | 21 +++++------ src/pdu.rs | 7 ++-- src/ruma_wrapper.rs | 6 +-- src/server_server.rs | 40 ++++++++++---------- 15 files changed, 147 insertions(+), 179 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index d7c2f63e..3149187f 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -1,8 +1,4 @@ -use std::{ - collections::BTreeMap, - convert::{TryFrom, TryInto}, - sync::Arc, -}; +use std::{collections::BTreeMap, convert::TryInto, sync::Arc}; use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma}; @@ -396,9 +392,8 @@ pub async fn register_route( )?; // 6. 
Events implied by name and topic - let room_name = - Box::::try_from(format!("{} Admin Room", db.globals.server_name())) - .expect("Room name is valid"); + let room_name = RoomName::parse(format!("{} Admin Room", db.globals.server_name())) + .expect("Room name is valid"); db.rooms.build_and_append_pdu( PduBuilder { event_type: EventType::RoomName, diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index f65287da..6c7b7211 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -64,7 +64,7 @@ pub async fn join_room_by_id_route( .filter_map(|event| serde_json::from_str(event.json().get()).ok()) .filter_map(|event: serde_json::Value| event.get("sender").cloned()) .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| Box::::try_from(sender).ok()) + .filter_map(|sender| UserId::parse(sender).ok()) .map(|user| user.server_name().to_owned()) .collect(); @@ -92,16 +92,17 @@ pub async fn join_room_by_id_route( /// - If the server does not know about the room: asks other servers over federation #[cfg_attr( feature = "conduit_bin", - post("/_matrix/client/r0/join/<_>", data = "") + post("/_matrix/client/r0/join/<_>", data = "") )] -#[tracing::instrument(skip(db, body))] +#[tracing::instrument(skip(db, req))] pub async fn join_room_by_id_or_alias_route( db: DatabaseGuard, - body: Ruma>, + req: Ruma>, ) -> ConduitResult { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let body = req.body; + let sender_user = req.sender_user.as_ref().expect("user is authenticated"); - let (servers, room_id) = match Box::::try_from(body.room_id_or_alias.clone()) { + let (servers, room_id) = match Box::::try_from(body.room_id_or_alias) { Ok(room_id) => { let mut servers: HashSet<_> = db .rooms @@ -111,7 +112,7 @@ pub async fn join_room_by_id_or_alias_route( .filter_map(|event| serde_json::from_str(event.json().get()).ok()) .filter_map(|event: serde_json::Value| event.get("sender").cloned()) .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| Box::::try_from(sender).ok()) + .filter_map(|sender| UserId::parse(sender).ok()) .map(|user| user.server_name().to_owned()) .collect(); @@ -127,7 +128,7 @@ pub async fn join_room_by_id_or_alias_route( let join_room_response = join_room_by_id_helper( &db, - body.sender_user.as_deref(), + req.sender_user.as_deref(), &room_id, &servers, body.third_party_signed.as_ref(), @@ -619,12 +620,13 @@ async fn join_room_by_id_helper( .expect("event is valid, we just created it"); // Generate event id - let event_id = Box::::try_from(&*format!( + let event_id = format!( "${}", ruma::signatures::reference_hash(&join_event_stub, &room_version) .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); + ); + let event_id = <&EventId>::try_from(event_id.as_str()) + .expect("ruma's reference hashes are valid event ids"); // Add event_id back join_event_stub.insert( @@ -642,7 +644,7 @@ async fn join_room_by_id_helper( remote_server, federation::membership::create_join_event::v2::Request { room_id, - event_id: &event_id, + event_id, pdu: &PduEvent::convert_to_outgoing_federation_event(join_event.clone()), }, ) @@ -650,7 +652,7 @@ async fn join_room_by_id_helper( db.rooms.get_or_create_shortroomid(room_id, &db.globals)?; - let pdu = PduEvent::from_id_val(&event_id, join_event.clone()) + let pdu = PduEvent::from_id_val(event_id, join_event.clone()) .map_err(|_| Error::BadServerResponse("Invalid join event 
PDU."))?; let mut state = HashMap::new(); @@ -788,7 +790,7 @@ fn validate_and_add_event_id( error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); Error::BadServerResponse("Invalid PDU in server response") })?; - let event_id = Box::::try_from(&*format!( + let event_id = EventId::parse(format!( "${}", ruma::signatures::reference_hash(&value, room_version) .expect("ruma can calculate reference hashes") @@ -1011,12 +1013,13 @@ pub(crate) async fn invite_helper<'a>( }; // Generate event id - let expected_event_id = Box::::try_from(&*format!( + let expected_event_id = format!( "${}", ruma::signatures::reference_hash(&pdu_json, &room_version_id) .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); + ); + let expected_event_id = <&EventId>::try_from(expected_event_id.as_str()) + .expect("ruma's reference hashes are valid event ids"); let response = db .sending @@ -1025,7 +1028,7 @@ pub(crate) async fn invite_helper<'a>( user_id.server_name(), create_invite::v2::Request { room_id, - event_id: &expected_event_id, + event_id: expected_event_id, room_version: &room_version_id, event: &PduEvent::convert_to_outgoing_federation_event(pdu_json.clone()), invite_room_state: &invite_room_state, diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 0d006101..e5219433 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -5,13 +5,8 @@ use ruma::{ r0::message::{get_message_events, send_message_event}, }, events::EventType, - EventId, -}; -use std::{ - collections::BTreeMap, - convert::{TryFrom, TryInto}, - sync::Arc, }; +use std::{collections::BTreeMap, convert::TryInto, sync::Arc}; #[cfg(feature = "conduit_bin")] use rocket::{get, put}; @@ -67,11 +62,10 @@ pub async fn send_message_event_route( )); } - let event_id = Box::::try_from( - utils::string_from_bytes(&response) - .map_err(|_| Error::bad_database("Invalid txnid bytes in database."))?, - ) - .map_err(|_| Error::bad_database("Invalid event id in txnid data."))?; + let event_id = utils::string_from_bytes(&response) + .map_err(|_| Error::bad_database("Invalid txnid bytes in database."))? 
+ .try_into() + .map_err(|_| Error::bad_database("Invalid event id in txnid data."))?; return Ok(send_message_event::Response { event_id }.into()); } diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 97b3f482..83571f1d 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -26,12 +26,7 @@ use ruma::{ RoomAliasId, RoomId, RoomVersionId, }; use serde_json::{json, value::to_raw_value}; -use std::{ - cmp::max, - collections::BTreeMap, - convert::{TryFrom, TryInto}, - sync::Arc, -}; +use std::{cmp::max, collections::BTreeMap, convert::TryInto, sync::Arc}; use tracing::{info, warn}; #[cfg(feature = "conduit_bin")] @@ -93,12 +88,11 @@ pub async fn create_room_route( .as_ref() .map_or(Ok(None), |localpart| { // TODO: Check for invalid characters and maximum length - let alias = Box::::try_from(format!( - "#{}:{}", - localpart, - db.globals.server_name(), - )) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; + let alias = + RoomAliasId::parse(format!("#{}:{}", localpart, db.globals.server_name())) + .map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias.") + })?; if db.rooms.id_from_alias(&alias)?.is_some() { Err(Error::BadRequest( diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 1060d917..2e372f91 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -10,7 +10,7 @@ use ruma::{ }; use std::{ collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, - convert::{TryFrom, TryInto}, + convert::TryInto, sync::Arc, time::Duration, }; @@ -298,10 +298,9 @@ async fn sync_helper( })?; if let Some(state_key) = &pdu.state_key { - let user_id = - Box::::try_from(state_key.clone()).map_err(|_| { - Error::bad_database("Invalid UserId in member PDU.") - })?; + let user_id = UserId::parse(state_key.clone()).map_err(|_| { + Error::bad_database("Invalid UserId in member PDU.") + })?; // The membership was and still is invite or join if matches!( @@ -427,7 +426,7 @@ async fn sync_helper( } if let Some(state_key) = &state_event.state_key { - let user_id = Box::::try_from(state_key.clone()) + let user_id = UserId::parse(state_key.clone()) .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; if user_id == sender_user { diff --git a/src/database.rs b/src/database.rs index 056d49ad..84ca68dc 100644 --- a/src/database.rs +++ b/src/database.rs @@ -476,11 +476,9 @@ impl Database { if db.globals.database_version()? 
< 6 { // Set room member count for (roomid, _) in db.rooms.roomid_shortstatehash.iter() { - let room_id = - Box::::try_from(utils::string_from_bytes(&roomid).unwrap()) - .unwrap(); - - db.rooms.update_joined_count(&room_id, &db)?; + let string = utils::string_from_bytes(&roomid).unwrap(); + let room_id = <&RoomId>::try_from(string.as_str()).unwrap(); + db.rooms.update_joined_count(room_id, &db)?; } db.globals.bump_database_version(6)?; @@ -587,10 +585,9 @@ impl Database { .get(&seventid) .unwrap() .unwrap(); - let event_id = - Box::::try_from(utils::string_from_bytes(&event_id).unwrap()) - .unwrap(); - let pdu = db.rooms.get_pdu(&event_id).unwrap().unwrap(); + let string = utils::string_from_bytes(&event_id).unwrap(); + let event_id = <&EventId>::try_from(string.as_str()).unwrap(); + let pdu = db.rooms.get_pdu(event_id).unwrap().unwrap(); if Some(&pdu.room_id) != current_room.as_ref() { current_room = Some(pdu.room_id.clone()); diff --git a/src/database/admin.rs b/src/database/admin.rs index 07a487e2..1e5c47c9 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -1,10 +1,10 @@ -use std::{convert::TryFrom, sync::Arc}; +use std::{convert::TryInto, sync::Arc}; use crate::{pdu::PduBuilder, Database}; use rocket::futures::{channel::mpsc, stream::StreamExt}; use ruma::{ events::{room::message::RoomMessageEventContent, EventType}, - RoomAliasId, UserId, + UserId, }; use serde_json::value::to_raw_value; use tokio::sync::{MutexGuard, RwLock, RwLockReadGuard}; @@ -33,18 +33,16 @@ impl Admin { let guard = db.read().await; - let conduit_user = - Box::::try_from(format!("@conduit:{}", guard.globals.server_name())) - .expect("@conduit:server_name is valid"); + let conduit_user = UserId::parse(format!("@conduit:{}", guard.globals.server_name())) + .expect("@conduit:server_name is valid"); let conduit_room = guard .rooms .id_from_alias( - &Box::::try_from(format!( - "#admins:{}", - guard.globals.server_name() - )) - .expect("#admins:server_name is a valid room alias"), + format!("#admins:{}", guard.globals.server_name()) + .as_str() + .try_into() + .expect("#admins:server_name is a valid room alias"), ) .unwrap(); diff --git a/src/database/key_backups.rs b/src/database/key_backups.rs index 3010a37b..56963c08 100644 --- a/src/database/key_backups.rs +++ b/src/database/key_backups.rs @@ -6,7 +6,7 @@ use ruma::{ }, RoomId, UserId, }; -use std::{collections::BTreeMap, convert::TryFrom, sync::Arc}; +use std::{collections::BTreeMap, sync::Arc}; use super::abstraction::Tree; @@ -231,7 +231,7 @@ impl KeyBackups { Error::bad_database("backupkeyid_backup session_id is invalid.") })?; - let room_id = Box::::try_from( + let room_id = RoomId::parse( utils::string_from_bytes(parts.next().ok_or_else(|| { Error::bad_database("backupkeyid_backup key is invalid.") })?) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index ebd0941b..f8d2cad8 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -434,7 +434,7 @@ impl Rooms { None => continue, }; - let user_id = match Box::::try_from(state_key) { + let user_id = match UserId::parse(state_key) { Ok(id) => id, Err(_) => continue, }; @@ -871,12 +871,10 @@ impl Rooms { .get(&shorteventid.to_be_bytes())? .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - let event_id = Arc::from( - Box::::try_from(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?, - ); + let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") + })?) + .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; self.shorteventid_cache .lock() @@ -1169,7 +1167,7 @@ impl Rooms { self.roomid_pduleaves .scan_prefix(prefix) .map(|(_, bytes)| { - Box::::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + EventId::parse(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") })?) .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) @@ -1420,7 +1418,7 @@ impl Rooms { } // if the state_key fails - let target_user_id = Box::::try_from(state_key.clone()) + let target_user_id = UserId::parse(state_key.clone()) .expect("This state_key was previously validated"); let content = serde_json::from_str::(pdu.content.get()) @@ -1476,10 +1474,9 @@ impl Rooms { if body.starts_with(&format!("@conduit:{}: ", db.globals.server_name())) && self .id_from_alias( - &Box::::try_from(format!( - "#admins:{}", - db.globals.server_name() - )) + <&RoomAliasId>::try_from( + format!("#admins:{}", db.globals.server_name()).as_str(), + ) .expect("#admins:server_name is a valid room alias"), )? .as_ref() @@ -1530,7 +1527,7 @@ impl Rooms { } "get_auth_chain" => { if args.len() == 1 { - if let Ok(event_id) = Box::::try_from(args[0]) { + if let Ok(event_id) = EventId::parse_arc(args[0]) { if let Some(event) = db.rooms.get_pdu_json(&event_id)? { let room_id_str = event .get("room_id") @@ -1541,12 +1538,12 @@ impl Rooms { ) })?; - let room_id = Box::::try_from(room_id_str) - .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; + let room_id = <&RoomId>::try_from(room_id_str) + .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; let start = Instant::now(); let count = server_server::get_auth_chain( - &room_id, - vec![Arc::from(event_id)], + room_id, + vec![event_id], db, )? 
.count(); @@ -1569,7 +1566,7 @@ impl Rooms { let string = body[1..body.len() - 1].join("\n"); match serde_json::from_str(&string) { Ok(value) => { - let event_id = Box::::try_from(&*format!( + let event_id = EventId::parse(format!( "${}", // Anything higher than version3 behaves the same ruma::signatures::reference_hash( @@ -1624,7 +1621,7 @@ impl Rooms { } "get_pdu" => { if args.len() == 1 { - if let Ok(event_id) = Box::::try_from(args[0]) { + if let Ok(event_id) = EventId::parse(args[0]) { let mut outlier = false; let mut pdu_json = db.rooms.get_non_outlier_pdu_json(&event_id)?; @@ -2083,7 +2080,7 @@ impl Rooms { .expect("event is valid, we just created it"); // Generate event id - pdu.event_id = Box::::try_from(&*format!( + pdu.event_id = EventId::parse(format!( "${}", ruma::signatures::reference_hash(&pdu_json, &room_version_id) .expect("ruma can calculate reference hashes") @@ -2758,7 +2755,7 @@ impl Rooms { .filter_map(|event| serde_json::from_str(event.json().get()).ok()) .filter_map(|event: serde_json::Value| event.get("sender").cloned()) .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| Box::::try_from(sender).ok()) + .filter_map(|sender| UserId::parse(sender).ok()) .map(|user| user.server_name().to_owned()) .collect(); @@ -2819,7 +2816,7 @@ impl Rooms { .expect("event is valid, we just created it"); // Generate event id - let event_id = Box::::try_from(&*format!( + let event_id = EventId::parse(format!( "${}", ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) .expect("ruma can calculate reference hashes") @@ -2908,7 +2905,7 @@ impl Rooms { self.alias_roomid .get(alias.alias().as_bytes())? .map(|bytes| { - Box::::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("Room ID in alias_roomid is invalid unicode.") })?) .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) @@ -2951,7 +2948,7 @@ impl Rooms { #[tracing::instrument(skip(self))] pub fn public_rooms(&self) -> impl Iterator>> + '_ { self.publicroomids.iter().map(|(bytes, _)| { - Box::::try_from( + RoomId::parse( utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("Room ID in publicroomids is invalid unicode.") })?, @@ -3039,7 +3036,7 @@ impl Rooms { Ok(utils::common_elements(iterators, Ord::cmp) .expect("users is not empty") .map(|bytes| { - Box::::try_from(utils::string_from_bytes(&*bytes).map_err(|_| { + RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { Error::bad_database("Invalid RoomId bytes in userroomid_joined") })?) 
.map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) @@ -3056,7 +3053,7 @@ impl Rooms { prefix.push(0xff); self.roomserverids.scan_prefix(prefix).map(|(key, _)| { - Box::::try_from( + ServerName::parse( utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() @@ -3089,7 +3086,7 @@ impl Rooms { prefix.push(0xff); self.serverroomids.scan_prefix(prefix).map(|(key, _)| { - Box::::try_from( + RoomId::parse( utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() @@ -3111,7 +3108,7 @@ impl Rooms { prefix.push(0xff); self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { - Box::::try_from( + UserId::parse( utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() @@ -3159,7 +3156,7 @@ impl Rooms { self.roomuseroncejoinedids .scan_prefix(prefix) .map(|(key, _)| { - Box::::try_from( + UserId::parse( utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() @@ -3185,7 +3182,7 @@ impl Rooms { self.roomuserid_invitecount .scan_prefix(prefix) .map(|(key, _)| { - Box::::try_from( + UserId::parse( utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() @@ -3238,7 +3235,7 @@ impl Rooms { self.userroomid_joined .scan_prefix(user_id.as_bytes().to_vec()) .map(|(key, _)| { - Box::::try_from( + RoomId::parse( utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() @@ -3264,7 +3261,7 @@ impl Rooms { self.userroomid_invitestate .scan_prefix(prefix) .map(|(key, state)| { - let room_id = Box::::try_from( + let room_id = RoomId::parse( utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() @@ -3337,7 +3334,7 @@ impl Rooms { self.userroomid_leftstate .scan_prefix(prefix) .map(|(key, state)| { - let room_id = Box::::try_from( + let room_id = RoomId::parse( utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 365211b6..eb2d3427 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -11,7 +11,7 @@ use ruma::{ }; use std::{ collections::{HashMap, HashSet}, - convert::{TryFrom, TryInto}, + convert::TryInto, mem, sync::Arc, }; @@ -97,7 +97,7 @@ impl RoomEdus { let count = utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::()]) .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; - let user_id = Box::::try_from( + let user_id = UserId::parse( utils::string_from_bytes(&k[prefix.len() + mem::size_of::() + 1..]) .map_err(|_| { Error::bad_database("Invalid readreceiptid userid bytes in db.") @@ -310,17 +310,13 @@ impl RoomEdus { let mut user_ids = HashSet::new(); - for user_id in self - .typingid_userid - .scan_prefix(prefix) - .map(|(_, user_id)| { - Box::::try_from(utils::string_from_bytes(&user_id).map_err(|_| { - Error::bad_database("User ID in typingid_userid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid.")) - }) - { - user_ids.insert(user_id?); + for (_, user_id) in self.typingid_userid.scan_prefix(prefix) { + let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| { + Error::bad_database("User ID in typingid_userid is invalid unicode.") + })?) 
+ .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?; + + user_ids.insert(user_id); } Ok(SyncEphemeralRoomEvent { @@ -518,7 +514,7 @@ impl RoomEdus { .iter_from(&*first_possible_edu, false) .take_while(|(key, _)| key.starts_with(&prefix)) { - let user_id = Box::::try_from( + let user_id = UserId::parse( utils::string_from_bytes( key.rsplit(|&b| b == 0xff) .next() diff --git a/src/database/sending.rs b/src/database/sending.rs index c27b5731..1e180d43 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -1,6 +1,6 @@ use std::{ collections::{BTreeMap, HashMap, HashSet}, - convert::{TryFrom, TryInto}, + convert::TryInto, fmt::Debug, sync::Arc, time::{Duration, Instant}, @@ -583,19 +583,18 @@ impl Sending { } } - let userid = - Box::::try_from(utils::string_from_bytes(user).map_err(|_| { - ( - kind.clone(), - Error::bad_database("Invalid push user string in db."), - ) - })?) - .map_err(|_| { - ( - kind.clone(), - Error::bad_database("Invalid push user id in db."), - ) - })?; + let userid = UserId::parse(utils::string_from_bytes(user).map_err(|_| { + ( + kind.clone(), + Error::bad_database("Invalid push user string in db."), + ) + })?) + .map_err(|_| { + ( + kind.clone(), + Error::bad_database("Invalid push user id in db."), + ) + })?; let mut senderkey = user.clone(); senderkey.push(0xff); @@ -732,7 +731,7 @@ impl Sending { })?; ( - OutgoingKind::Appservice(Box::::try_from(server).map_err(|_| { + OutgoingKind::Appservice(ServerName::parse(server).map_err(|_| { Error::bad_database("Invalid server string in server_currenttransaction") })?), if value.is_empty() { @@ -771,7 +770,7 @@ impl Sending { })?; ( - OutgoingKind::Normal(Box::::try_from(server).map_err(|_| { + OutgoingKind::Normal(ServerName::parse(server).map_err(|_| { Error::bad_database("Invalid server string in server_currenttransaction") })?), if value.is_empty() { diff --git a/src/database/users.rs b/src/database/users.rs index 4a084722..d4bf4890 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -8,7 +8,7 @@ use ruma::{ DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, RoomAliasId, UInt, UserId, }; -use std::{collections::BTreeMap, convert::TryFrom, mem, sync::Arc}; +use std::{collections::BTreeMap, convert::TryInto, mem, sync::Arc}; use tracing::warn; use super::abstraction::Tree; @@ -62,9 +62,8 @@ impl Users { rooms: &super::rooms::Rooms, globals: &super::globals::Globals, ) -> Result { - let admin_room_alias_id = - Box::::try_from(format!("#admins:{}", globals.server_name())) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; + let admin_room_alias_id = RoomAliasId::parse(format!("#admins:{}", globals.server_name())) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; let admin_room_id = rooms.id_from_alias(&admin_room_alias_id)?.unwrap(); rooms.is_joined(user_id, &admin_room_id) @@ -98,11 +97,9 @@ impl Users { })?; Ok(Some(( - Box::::try_from(utils::string_from_bytes(user_bytes).map_err( - |_| { - Error::bad_database("User ID in token_userdeviceid is invalid unicode.") - }, - )?) + UserId::parse(utils::string_from_bytes(user_bytes).map_err(|_| { + Error::bad_database("User ID in token_userdeviceid is invalid unicode.") + })?) 
.map_err(|_| { Error::bad_database("User ID in token_userdeviceid is invalid.") })?, @@ -117,7 +114,7 @@ impl Users { #[tracing::instrument(skip(self))] pub fn iter(&self) -> impl Iterator>> + '_ { self.userid_password.iter().map(|(bytes, _)| { - Box::::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("User ID in userid_password is invalid unicode.") })?) .map_err(|_| Error::bad_database("User ID in userid_password is invalid.")) @@ -189,7 +186,7 @@ impl Users { .map(|bytes| { let s = utils::string_from_bytes(&bytes) .map_err(|_| Error::bad_database("Avatar URL in db is invalid."))?; - Box::::try_from(s) + s.try_into() .map_err(|_| Error::bad_database("Avatar URL in db is invalid.")) }) .transpose() @@ -686,7 +683,7 @@ impl Users { } }) .map(|(_, bytes)| { - Box::::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("User ID in devicekeychangeid_userid is invalid unicode.") })?) .map_err(|_| Error::bad_database("User ID in devicekeychangeid_userid is invalid.")) diff --git a/src/pdu.rs b/src/pdu.rs index 3c955976..c1f3d27d 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -13,7 +13,7 @@ use serde_json::{ json, value::{to_raw_value, RawValue as RawJsonValue}, }; -use std::{cmp::Ordering, collections::BTreeMap, convert::TryFrom, ops::Deref}; +use std::{cmp::Ordering, collections::BTreeMap, convert::TryInto, ops::Deref}; use tracing::warn; /// Content hashes of a PDU. @@ -337,12 +337,13 @@ pub(crate) fn gen_event_id_canonical_json( Error::BadServerResponse("Invalid PDU in server response") })?; - let event_id = Box::::try_from(&*format!( + let event_id = format!( "${}", // Anything higher than version3 behaves the same ruma::signatures::reference_hash(&value, &RoomVersionId::V6) .expect("ruma can calculate reference hashes") - )) + ) + .try_into() .expect("ruma's reference hashes are valid event ids"); Ok((event_id, value)) diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 2cff2f5a..4b8d5dea 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -20,7 +20,6 @@ use { }, ruma::api::{AuthScheme, IncomingRequest}, std::collections::BTreeMap, - std::convert::TryFrom, std::io::Cursor, tracing::{debug, warn}, }; @@ -103,8 +102,7 @@ where .unwrap() }, |string| { - Box::::try_from(string.expect("parsing to string always works")) - .unwrap() + UserId::parse(string.expect("parsing to string always works")).unwrap() }, ); @@ -171,7 +169,7 @@ where } }; - let origin = match Box::::try_from(origin_str) { + let origin = match ServerName::parse(origin_str) { Ok(s) => s, _ => { warn!( diff --git a/src/server_server.rs b/src/server_server.rs index 8a50d234..b0e3f0f6 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -544,12 +544,11 @@ pub fn get_server_keys_route(db: DatabaseGuard) -> Json { return Json("Federation is disabled.".to_owned()); } - let mut verify_keys = BTreeMap::new(); + let mut verify_keys: BTreeMap, VerifyKey> = BTreeMap::new(); verify_keys.insert( - Box::::try_from( - format!("ed25519:{}", db.globals.keypair().version()).as_str(), - ) - .expect("found invalid server signing keys in DB"), + format!("ed25519:{}", db.globals.keypair().version()) + .try_into() + .expect("found invalid server signing keys in DB"), VerifyKey { key: base64::encode_config(db.globals.keypair().public_key(), base64::STANDARD_NO_PAD), }, @@ -730,7 +729,7 @@ pub async fn send_transaction_message_route( // 0. 
Check the server is in the room let room_id = match value .get("room_id") - .and_then(|id| Box::::try_from(id.as_str()?).ok()) + .and_then(|id| RoomId::parse(id.as_str()?).ok()) { Some(id) => id, None => { @@ -2354,10 +2353,10 @@ pub fn get_event_route( .and_then(|val| val.as_str()) .ok_or_else(|| Error::bad_database("Invalid event in database"))?; - let room_id = Box::::try_from(room_id_str) + let room_id = <&RoomId>::try_from(room_id_str) .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - if !db.rooms.server_in_room(sender_servername, &room_id)? { + if !db.rooms.server_in_room(sender_servername, room_id)? { return Err(Error::BadRequest(ErrorKind::NotFound, "Event not found.")); } @@ -2408,7 +2407,7 @@ pub fn get_missing_events_route( .and_then(|val| val.as_str()) .ok_or_else(|| Error::bad_database("Invalid event in database"))?; - let event_room_id = Box::::try_from(room_id_str) + let event_room_id = <&RoomId>::try_from(room_id_str) .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; if event_room_id != body.room_id { @@ -2476,14 +2475,14 @@ pub fn get_event_authorization_route( .and_then(|val| val.as_str()) .ok_or_else(|| Error::bad_database("Invalid event in database"))?; - let room_id = Box::::try_from(room_id_str) + let room_id = <&RoomId>::try_from(room_id_str) .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - if !db.rooms.server_in_room(sender_servername, &room_id)? { + if !db.rooms.server_in_room(sender_servername, room_id)? { return Err(Error::BadRequest(ErrorKind::NotFound, "Event not found.")); } - let auth_chain_ids = get_auth_chain(&room_id, vec![Arc::from(&*body.event_id)], &db)?; + let auth_chain_ids = get_auth_chain(room_id, vec![Arc::from(&*body.event_id)], &db)?; Ok(get_event_authorization::v1::Response { auth_chain: auth_chain_ids @@ -2948,7 +2947,7 @@ pub async fn create_invite_route( .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Failed to sign event."))?; // Generate event id - let event_id = Box::::try_from(&*format!( + let event_id = EventId::parse(format!( "${}", ruma::signatures::reference_hash(&signed_event, &body.room_version) .expect("ruma can calculate reference hashes") @@ -3224,7 +3223,7 @@ pub(crate) async fn fetch_required_signing_keys( let fetch_res = fetch_signing_keys( db, - &Box::::try_from(&**signature_server).map_err(|_| { + signature_server.as_str().try_into().map_err(|_| { Error::BadServerResponse("Invalid servername in signatures of server response pdu.") })?, signature_ids, @@ -3262,19 +3261,20 @@ fn get_server_keys_from_cache( Error::BadServerResponse("Invalid PDU in server response") })?; - let event_id = Box::::try_from(&*format!( + let event_id = format!( "${}", ruma::signatures::reference_hash(&value, room_version) .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); + ); + let event_id = <&EventId>::try_from(event_id.as_str()) + .expect("ruma's reference hashes are valid event ids"); if let Some((time, tries)) = db .globals .bad_event_ratelimiter .read() .unwrap() - .get(&event_id) + .get(event_id) { // Exponential backoff let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); @@ -3308,7 +3308,7 @@ fn get_server_keys_from_cache( let contains_all_ids = |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); - let origin = &Box::::try_from(&**signature_server).map_err(|_| { + let origin = 
<&ServerName>::try_from(signature_server.as_str()).map_err(|_| { Error::BadServerResponse("Invalid servername in signatures of server response pdu.") })?; @@ -3327,7 +3327,7 @@ fn get_server_keys_from_cache( if !contains_all_ids(&result) { trace!("Signing key not loaded for {}", origin); - servers.insert(origin.clone(), BTreeMap::new()); + servers.insert(origin.to_owned(), BTreeMap::new()); } pub_key_map.insert(origin.to_string(), result); From 58ea081762adb5f14ecaadc3e16f7b6dddcaed43 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 27 Nov 2021 16:04:19 +0100 Subject: [PATCH 021/445] Use int! macro instead of Int::from --- src/client_server/report.rs | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/src/client_server/report.rs b/src/client_server/report.rs index 2e6527d4..ae069849 100644 --- a/src/client_server/report.rs +++ b/src/client_server/report.rs @@ -1,8 +1,11 @@ -use crate::{database::admin::AdminCommand, database::DatabaseGuard, ConduitResult, Error, Ruma}; +use crate::{ + database::{admin::AdminCommand, DatabaseGuard}, + ConduitResult, Error, Ruma, +}; use ruma::{ api::client::{error::ErrorKind, r0::room::report_content}, events::room::message, - Int, + int, }; #[cfg(feature = "conduit_bin")] @@ -33,7 +36,7 @@ pub async fn report_event_route( } }; - if body.score > Int::from(0) || body.score < Int::from(-100) { + if body.score > int!(0) || body.score < int!(-100) { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Invalid score, must be within 0 to -100", From f71245504726a717c8a85ecd99f83a7395bd3c2b Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 27 Nov 2021 16:35:59 +0100 Subject: [PATCH 022/445] Reduce EventId copying --- src/client_server/membership.rs | 3 ++- src/database/rooms.rs | 14 ++++++++++---- src/server_server.rs | 22 ++++++++-------------- 3 files changed, 20 insertions(+), 19 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 6c7b7211..e6c9d4b6 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -31,6 +31,7 @@ use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, convert::{TryFrom, TryInto}, + iter, sync::{Arc, RwLock}, time::{Duration, Instant}, }; @@ -740,7 +741,7 @@ async fn join_room_by_id_helper( db.rooms.append_pdu( &pdu, utils::to_canonical_object(&pdu).expect("Pdu is valid canonical object"), - &[pdu.event_id.clone()], + iter::once(&*pdu.event_id), db, )?; diff --git a/src/database/rooms.rs b/src/database/rooms.rs index f8d2cad8..4c092bf7 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -36,6 +36,8 @@ use std::{ borrow::Cow, collections::{BTreeMap, HashMap, HashSet}, convert::{TryFrom, TryInto}, + fmt::Debug, + iter, mem::size_of, sync::{Arc, Mutex, RwLock}, time::Instant, @@ -1191,7 +1193,11 @@ impl Rooms { /// The provided `event_ids` become the new leaves, this allows a room to have multiple /// `prev_events`. 
#[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves(&self, room_id: &RoomId, event_ids: &[Box]) -> Result<()> { + pub fn replace_pdu_leaves<'a>( + &self, + room_id: &RoomId, + event_ids: impl IntoIterator + Debug, + ) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -1255,11 +1261,11 @@ impl Rooms { /// /// Returns pdu id #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] - pub fn append_pdu( + pub fn append_pdu<'a>( &self, pdu: &PduEvent, mut pdu_json: CanonicalJsonObject, - leaves: &[Box], + leaves: impl IntoIterator + Debug, db: &Database, ) -> Result> { let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); @@ -2104,7 +2110,7 @@ impl Rooms { pdu_json, // Since this PDU references all pdu_leaves we can update the leaves // of the room - &[pdu.event_id.clone()], + iter::once(&*pdu.event_id), db, )?; diff --git a/src/server_server.rs b/src/server_server.rs index b0e3f0f6..ca6bb3fd 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -64,6 +64,7 @@ use std::{ future::Future, mem, net::{IpAddr, SocketAddr}, + ops::Deref, pin::Pin, sync::{Arc, RwLock, RwLockWriteGuard}, time::{Duration, Instant, SystemTime}, @@ -1636,7 +1637,7 @@ async fn upgrade_outlier_to_timeline_pdu( db, &incoming_pdu, val, - extremities, + extremities.iter().map(Deref::deref), state_ids_compressed, soft_fail, &state_lock, @@ -1821,7 +1822,7 @@ async fn upgrade_outlier_to_timeline_pdu( db, &incoming_pdu, val, - extremities, + extremities.iter().map(Deref::deref), state_ids_compressed, soft_fail, &state_lock, @@ -2114,11 +2115,11 @@ pub(crate) async fn fetch_signing_keys( /// Append the incoming event setting the state snapshot to the state from the /// server that sent the event. #[tracing::instrument(skip(db, pdu, pdu_json, new_room_leaves, state_ids_compressed, _mutex_lock))] -fn append_incoming_pdu( +fn append_incoming_pdu<'a>( db: &Database, pdu: &PduEvent, pdu_json: CanonicalJsonObject, - new_room_leaves: HashSet>, + new_room_leaves: impl IntoIterator + Clone + Debug, state_ids_compressed: HashSet, soft_fail: bool, _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex @@ -2135,19 +2136,12 @@ fn append_incoming_pdu( if soft_fail { db.rooms .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - db.rooms.replace_pdu_leaves( - &pdu.room_id, - &new_room_leaves.into_iter().collect::>(), - )?; + db.rooms + .replace_pdu_leaves(&pdu.room_id, new_room_leaves.clone())?; return Ok(None); } - let pdu_id = db.rooms.append_pdu( - pdu, - pdu_json, - &new_room_leaves.into_iter().collect::>(), - db, - )?; + let pdu_id = db.rooms.append_pdu(pdu, pdu_json, new_room_leaves, db)?; for appservice in db.appservice.all()? { if db.rooms.appservice_in_room(&pdu.room_id, &appservice, db)? 
{ From 0183d003d0bfd864eab08499fb7385b4c8e9df0a Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Wed, 15 Dec 2021 13:58:25 +0100 Subject: [PATCH 023/445] Revert rename of Ruma<_> parameters --- src/client_server/membership.rs | 12 ++++++------ src/client_server/push.rs | 10 +++++----- src/client_server/sync.rs | 13 ++++++------- 3 files changed, 17 insertions(+), 18 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index e6c9d4b6..e28f9a31 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -93,15 +93,15 @@ pub async fn join_room_by_id_route( /// - If the server does not know about the room: asks other servers over federation #[cfg_attr( feature = "conduit_bin", - post("/_matrix/client/r0/join/<_>", data = "") + post("/_matrix/client/r0/join/<_>", data = "") )] -#[tracing::instrument(skip(db, req))] +#[tracing::instrument(skip(db, body))] pub async fn join_room_by_id_or_alias_route( db: DatabaseGuard, - req: Ruma>, + body: Ruma>, ) -> ConduitResult { - let body = req.body; - let sender_user = req.sender_user.as_ref().expect("user is authenticated"); + let sender_user = body.sender_user.as_deref().expect("user is authenticated"); + let body = body.body; let (servers, room_id) = match Box::::try_from(body.room_id_or_alias) { Ok(room_id) => { @@ -129,7 +129,7 @@ pub async fn join_room_by_id_or_alias_route( let join_room_response = join_room_by_id_helper( &db, - req.sender_user.as_deref(), + Some(sender_user), &room_id, &servers, body.third_party_signed.as_ref(), diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 64f27f1c..a8ba1a2a 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -105,15 +105,15 @@ pub async fn get_pushrule_route( /// Creates a single specified push rule for this user. 
#[cfg_attr( feature = "conduit_bin", - put("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") + put("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") )] -#[tracing::instrument(skip(db, req))] +#[tracing::instrument(skip(db, body))] pub async fn set_pushrule_route( db: DatabaseGuard, - req: Ruma>, + body: Ruma>, ) -> ConduitResult { - let sender_user = req.sender_user.as_ref().expect("user is authenticated"); - let body = req.body; + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let body = body.body; if body.scope != "global" { return Err(Error::BadRequest( diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 2e372f91..9ba3b7fb 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -54,17 +54,16 @@ use rocket::{get, tokio}; /// `since` will be cached #[cfg_attr( feature = "conduit_bin", - get("/_matrix/client/r0/sync", data = "") + get("/_matrix/client/r0/sync", data = "") )] -#[tracing::instrument(skip(db, req))] +#[tracing::instrument(skip(db, body))] pub async fn sync_events_route( db: DatabaseGuard, - req: Ruma>, + body: Ruma>, ) -> Result, RumaResponse> { - let body = req.body; - - let sender_user = req.sender_user.expect("user is authenticated"); - let sender_device = req.sender_device.expect("user is authenticated"); + let sender_user = body.sender_user.expect("user is authenticated"); + let sender_device = body.sender_device.expect("user is authenticated"); + let body = body.body; let arc_db = Arc::new(db); From 34d3f74f363719ab60263da62477cf0cd56bbbb0 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 27 Nov 2021 17:44:52 +0100 Subject: [PATCH 024/445] Use Arc for EventIds in PDUs Upgrades Ruma again to make this work. --- Cargo.lock | 36 ++++++++++++++-------------- Cargo.toml | 4 ++-- src/client_server/account.rs | 4 ++++ src/client_server/membership.rs | 7 +++++- src/client_server/message.rs | 2 +- src/client_server/redact.rs | 4 +++- src/client_server/room.rs | 14 +++++------ src/client_server/state.rs | 4 +++- src/database/rooms.rs | 12 +++++----- src/pdu.rs | 28 ++++++++++++---------- src/server_server.rs | 42 +++++++++++++-------------------- 11 files changed, 81 insertions(+), 76 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8b25b478..fbf4b3f2 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1984,7 +1984,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" dependencies = [ "assign", "js_int", @@ -2005,7 +2005,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" dependencies = [ "bytes", "http", @@ -2021,7 +2021,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2032,7 +2032,7 @@ dependencies = [ 
[[package]] name = "ruma-appservice-api" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" dependencies = [ "ruma-api", "ruma-common", @@ -2046,7 +2046,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.12.3" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" dependencies = [ "assign", "bytes", @@ -2066,7 +2066,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" dependencies = [ "indexmap", "js_int", @@ -2081,7 +2081,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" dependencies = [ "indoc", "js_int", @@ -2097,7 +2097,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2108,7 +2108,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" dependencies = [ "js_int", "ruma-api", @@ -2123,7 +2123,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" dependencies = [ "percent-encoding", "rand 0.8.4", @@ -2137,7 +2137,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2147,7 +2147,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" 
dependencies = [ "thiserror", ] @@ -2155,7 +2155,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" dependencies = [ "js_int", "ruma-api", @@ -2168,7 +2168,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" dependencies = [ "js_int", "ruma-api", @@ -2183,7 +2183,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" dependencies = [ "bytes", "form_urlencoded", @@ -2197,7 +2197,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2208,7 +2208,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2225,7 +2225,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.4.1" -source = "git+https://github.com/ruma/ruma?rev=bba7d624425da2c65a834bbd0e633b7577488cdf#bba7d624425da2c65a834bbd0e633b7577488cdf" +source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" dependencies = [ "itertools 0.10.1", "js_int", diff --git a/Cargo.toml b/Cargo.toml index b24afb5c..02159e31 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle request # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "bba7d624425da2c65a834bbd0e633b7577488cdf", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "16f031fabb7871fcd738b0f25391193ee4ca28a9", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features 
= ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } @@ -40,7 +40,7 @@ serde_json = { version = "1.0.67", features = ["raw_value"] } # Used for appservice registration files serde_yaml = "0.8.20" # Used for pdu definition -serde = "1.0.130" +serde = { version = "1.0.130", features = ["rc"] } # Used for secure identifiers rand = "0.8.4" # Used to hash passwords diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 3149187f..c4e118c9 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -306,6 +306,7 @@ pub async fn register_route( third_party_invite: None, blurhash: None, reason: None, + join_authorized_via_users_server: None, }) .expect("event is valid, we just created it"), unsigned: None, @@ -463,6 +464,7 @@ pub async fn register_route( third_party_invite: None, blurhash: None, reason: None, + join_authorized_via_users_server: None, }) .expect("event is valid, we just created it"), unsigned: None, @@ -485,6 +487,7 @@ pub async fn register_route( third_party_invite: None, blurhash: None, reason: None, + join_authorized_via_users_server: None, }) .expect("event is valid, we just created it"), unsigned: None, @@ -701,6 +704,7 @@ pub async fn deactivate_route( third_party_invite: None, blurhash: None, reason: None, + join_authorized_via_users_server: None, }; let mutex_state = Arc::clone( diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index e28f9a31..cede51f0 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -286,6 +286,7 @@ pub async fn ban_user_route( third_party_invite: None, blurhash: db.users.blurhash(&body.user_id)?, reason: None, + join_authorized_via_users_server: None, }), |event| { serde_json::from_str(event.content.get()) @@ -604,6 +605,7 @@ async fn join_room_by_id_helper( third_party_invite: None, blurhash: db.users.blurhash(sender_user)?, reason: None, + join_authorized_via_users_server: None, }) .expect("event is valid, we just created it"), ); @@ -757,6 +759,7 @@ async fn join_room_by_id_helper( third_party_invite: None, blurhash: db.users.blurhash(sender_user)?, reason: None, + join_authorized_via_users_server: None, }; db.rooms.build_and_append_pdu( @@ -906,6 +909,7 @@ pub(crate) async fn invite_helper<'a>( third_party_invite: None, blurhash: None, reason: None, + join_authorized_via_users_server: None, }) .expect("member event is valid value"); @@ -939,7 +943,7 @@ pub(crate) async fn invite_helper<'a>( } let pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").to_owned(), + event_id: ruma::event_id!("$thiswillbefilledinlater").into(), room_id: room_id.to_owned(), sender: sender_user.to_owned(), origin_server_ts: utils::millis_since_unix_epoch() @@ -1117,6 +1121,7 @@ pub(crate) async fn invite_helper<'a>( third_party_invite: None, blurhash: db.users.blurhash(user_id)?, reason: None, + join_authorized_via_users_server: None, }) .expect("event is valid, we just created it"), unsigned: None, diff --git a/src/client_server/message.rs b/src/client_server/message.rs index e5219433..60c756a3 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -98,7 +98,7 @@ pub async fn send_message_event_route( 
db.flush()?; - Ok(send_message_event::Response::new(event_id).into()) + Ok(send_message_event::Response::new((*event_id).to_owned()).into()) } /// # `GET /_matrix/client/r0/rooms/{roomId}/messages` diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index 7435c5c5..85de2330 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -25,6 +25,7 @@ pub async fn redact_event_route( body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let body = body.body; let mutex_state = Arc::clone( db.globals @@ -45,7 +46,7 @@ pub async fn redact_event_route( .expect("event is valid, we just created it"), unsigned: None, state_key: None, - redacts: Some(body.event_id.clone()), + redacts: Some(body.event_id.into()), }, sender_user, &body.room_id, @@ -57,5 +58,6 @@ pub async fn redact_event_route( db.flush()?; + let event_id = (*event_id).to_owned(); Ok(redact_event::Response { event_id }.into()) } diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 83571f1d..52d25425 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -22,6 +22,7 @@ use ruma::{ }, EventType, }, + int, serde::{CanonicalJsonObject, JsonObject}, RoomAliasId, RoomId, RoomVersionId, }; @@ -195,6 +196,7 @@ pub async fn create_room_route( third_party_invite: None, blurhash: db.users.blurhash(sender_user)?, reason: None, + join_authorized_via_users_server: None, }) .expect("event is valid, we just created it"), unsigned: None, @@ -220,11 +222,11 @@ pub async fn create_room_route( }); let mut users = BTreeMap::new(); - users.insert(sender_user.clone(), 100.into()); + users.insert(sender_user.clone(), int!(100)); if preset == create_room::RoomPreset::TrustedPrivateChat { for invite_ in &body.invite { - users.insert(invite_.clone(), 100.into()); + users.insert(invite_.clone(), int!(100)); } } @@ -569,7 +571,7 @@ pub async fn upgrade_room_route( // Use the m.room.tombstone event as the predecessor let predecessor = Some(ruma::events::room::create::PreviousRoom::new( body.room_id.clone(), - tombstone_event_id, + (*tombstone_event_id).to_owned(), )); // Send a m.room.create event containing a predecessor field and the applicable room_version @@ -633,6 +635,7 @@ pub async fn upgrade_room_route( third_party_invite: None, blurhash: db.users.blurhash(sender_user)?, reason: None, + join_authorized_via_users_server: None, }) .expect("event is valid, we just created it"), unsigned: None, @@ -697,10 +700,7 @@ pub async fn upgrade_room_route( .map_err(|_| Error::bad_database("Invalid room event in database."))?; // Setting events_default and invite to the greater of 50 and users_default + 1 - let new_level = max( - 50.into(), - power_levels_event_content.users_default + 1.into(), - ); + let new_level = max(int!(50), power_levels_event_content.users_default + int!(1)); power_levels_event_content.events_default = new_level; power_levels_event_content.invite = new_level; diff --git a/src/client_server/state.rs b/src/client_server/state.rs index 0ba20620..e42694ae 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -52,6 +52,7 @@ pub async fn send_state_event_for_key_route( db.flush()?; + let event_id = (*event_id).to_owned(); Ok(send_state_event::Response { event_id }.into()) } @@ -93,6 +94,7 @@ pub async fn send_state_event_for_empty_key_route( db.flush()?; + let event_id = (*event_id).to_owned(); Ok(send_state_event::Response { event_id }.into()) } @@ -267,7 +269,7 @@ async fn 
send_state_event_for_key_helper( event_type: EventType, json: &Raw, state_key: String, -) -> Result> { +) -> Result> { let sender_user = sender; // TODO: Review this check, error if event is unparsable, use event type, allow alias if it diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 4c092bf7..fb9ecbf0 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1162,14 +1162,14 @@ impl Rooms { /// Returns the leaf pdus of a room. #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { + pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); self.roomid_pduleaves .scan_prefix(prefix) .map(|(_, bytes)| { - EventId::parse(utils::string_from_bytes(&bytes).map_err(|_| { + EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") })?) .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) @@ -1178,7 +1178,7 @@ impl Rooms { } #[tracing::instrument(skip(self, room_id, event_ids))] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Box]) -> Result<()> { + pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { for prev in event_ids { let mut key = room_id.as_bytes().to_vec(); key.extend_from_slice(prev.as_bytes()); @@ -1953,7 +1953,7 @@ impl Rooms { room_id: &RoomId, db: &Database, _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex - ) -> Result> { + ) -> Result> { let PduBuilder { event_type, content, @@ -2019,7 +2019,7 @@ impl Rooms { } let mut pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").to_owned(), + event_id: ruma::event_id!("$thiswillbefilledinlater").into(), room_id: room_id.to_owned(), sender: sender.to_owned(), origin_server_ts: utils::millis_since_unix_epoch() @@ -2086,7 +2086,7 @@ impl Rooms { .expect("event is valid, we just created it"); // Generate event id - pdu.event_id = EventId::parse(format!( + pdu.event_id = EventId::parse_arc(format!( "${}", ruma::signatures::reference_hash(&pdu_json, &room_version_id) .expect("ruma can calculate reference hashes") diff --git a/src/pdu.rs b/src/pdu.rs index c1f3d27d..db9375e4 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -13,7 +13,7 @@ use serde_json::{ json, value::{to_raw_value, RawValue as RawJsonValue}, }; -use std::{cmp::Ordering, collections::BTreeMap, convert::TryInto, ops::Deref}; +use std::{cmp::Ordering, collections::BTreeMap, convert::TryInto, sync::Arc}; use tracing::warn; /// Content hashes of a PDU. 
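A minimal sketch (not part of the patch) of why the event-id handling above moves from Box<EventId> to Arc<EventId>: the same id ends up held by the PDU, the room's leaf set and the state maps, and cloning an Arc only bumps a reference count instead of copying the string. It is presumably also why this patch enables serde's "rc" feature in Cargo.toml, since PduEvent derives Serialize/Deserialize. A plain Arc<str> stands in for Arc<EventId> here.

```rust
use std::{collections::HashSet, sync::Arc};

fn main() {
    // Stand-in for Arc<EventId>; a Matrix event id is a validated string.
    let event_id: Arc<str> = Arc::from("$someevent:example.org");

    // The leaf set (and any state map) can hold the same allocation.
    let mut leaves: HashSet<Arc<str>> = HashSet::new();
    leaves.insert(Arc::clone(&event_id));

    // Cloning only incremented the reference count; nothing was copied.
    assert_eq!(Arc::strong_count(&event_id), 2);
}
```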
@@ -25,7 +25,7 @@ pub struct EventHash { #[derive(Clone, Deserialize, Serialize, Debug)] pub struct PduEvent { - pub event_id: Box, + pub event_id: Arc, pub room_id: Box, pub sender: Box, pub origin_server_ts: UInt, @@ -34,11 +34,11 @@ pub struct PduEvent { pub content: Box, #[serde(skip_serializing_if = "Option::is_none")] pub state_key: Option, - pub prev_events: Vec>, + pub prev_events: Vec>, pub depth: UInt, - pub auth_events: Vec>, + pub auth_events: Vec>, #[serde(skip_serializing_if = "Option::is_none")] - pub redacts: Option>, + pub redacts: Option>, #[serde(default, skip_serializing_if = "Option::is_none")] pub unsigned: Option>, pub hashes: EventHash, @@ -266,7 +266,9 @@ impl PduEvent { } impl state_res::Event for PduEvent { - fn event_id(&self) -> &EventId { + type Id = Arc; + + fn event_id(&self) -> &Self::Id { &self.event_id } @@ -294,16 +296,16 @@ impl state_res::Event for PduEvent { self.state_key.as_deref() } - fn prev_events(&self) -> Box + '_> { - Box::new(self.prev_events.iter().map(Deref::deref)) + fn prev_events(&self) -> Box + '_> { + Box::new(self.prev_events.iter()) } - fn auth_events(&self) -> Box + '_> { - Box::new(self.auth_events.iter().map(Deref::deref)) + fn auth_events(&self) -> Box + '_> { + Box::new(self.auth_events.iter()) } - fn redacts(&self) -> Option<&EventId> { - self.redacts.as_deref() + fn redacts(&self) -> Option<&Self::Id> { + self.redacts.as_ref() } } @@ -357,7 +359,7 @@ pub struct PduBuilder { pub content: Box, pub unsigned: Option>, pub state_key: Option, - pub redacts: Option>, + pub redacts: Option>, } /// Direct conversion prevents loss of the empty `state_key` that ruma requires. diff --git a/src/server_server.rs b/src/server_server.rs index ca6bb3fd..594152ae 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -995,13 +995,9 @@ pub(crate) async fn handle_incoming_pdu<'a>( } // 9. Fetch any missing prev events doing all checks listed here starting at 1. 
These are timeline events - let mut graph = HashMap::new(); + let mut graph: HashMap, _> = HashMap::new(); let mut eventid_info = HashMap::new(); - let mut todo_outlier_stack: Vec> = incoming_pdu - .prev_events - .iter() - .map(|x| Arc::from(&**x)) - .collect(); + let mut todo_outlier_stack: Vec> = incoming_pdu.prev_events.clone(); let mut amount = 0; @@ -1020,7 +1016,7 @@ pub(crate) async fn handle_incoming_pdu<'a>( if amount > 100 { // Max limit reached warn!("Max prev event limit reached!"); - graph.insert((*prev_event_id).to_owned(), HashSet::new()); + graph.insert(prev_event_id.clone(), HashSet::new()); continue; } @@ -1031,27 +1027,27 @@ pub(crate) async fn handle_incoming_pdu<'a>( amount += 1; for prev_prev in &pdu.prev_events { if !graph.contains_key(prev_prev) { - todo_outlier_stack.push(dbg!(Arc::from(&**prev_prev))); + todo_outlier_stack.push(dbg!(prev_prev.clone())); } } graph.insert( - (*prev_event_id).to_owned(), + prev_event_id.clone(), pdu.prev_events.iter().cloned().collect(), ); } else { // Time based check failed - graph.insert((*prev_event_id).to_owned(), HashSet::new()); + graph.insert(prev_event_id.clone(), HashSet::new()); } eventid_info.insert(prev_event_id.clone(), (pdu, json)); } else { // Get json failed - graph.insert((*prev_event_id).to_owned(), HashSet::new()); + graph.insert(prev_event_id.clone(), HashSet::new()); } } else { // Fetch and handle failed - graph.insert((*prev_event_id).to_owned(), HashSet::new()); + graph.insert(prev_event_id.clone(), HashSet::new()); } } @@ -1401,14 +1397,13 @@ async fn upgrade_outlier_to_timeline_pdu( .get_statekey_from_short(k) .map_err(|_| "Failed to get_statekey_from_short.".to_owned())?; - state.insert(k, (*id).to_owned()); + state.insert(k, id.clone()); starting_events.push(id); } auth_chain_sets.push( get_auth_chain(room_id, starting_events, db) .map_err(|_| "Failed to load auth chain.".to_owned())? - .map(|event_id| (*event_id).to_owned()) .collect(), ); @@ -1435,7 +1430,7 @@ async fn upgrade_outlier_to_timeline_pdu( .rooms .get_or_create_shortstatekey(&event_type, &state_key, &db.globals) .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; - Ok((shortstatekey, Arc::from(event_id))) + Ok((shortstatekey, event_id)) }) .collect::>()?, ), @@ -1752,7 +1747,6 @@ async fn upgrade_outlier_to_timeline_pdu( db, ) .map_err(|_| "Failed to load auth chain.".to_owned())? 
- .map(|event_id| (*event_id).to_owned()) .collect(), ); } @@ -1761,11 +1755,7 @@ async fn upgrade_outlier_to_timeline_pdu( .into_iter() .map(|map| { map.into_iter() - .map(|(k, id)| { - db.rooms - .get_statekey_from_short(k) - .map(|k| (k, (*id).to_owned())) - }) + .map(|(k, id)| db.rooms.get_statekey_from_short(k).map(|k| (k, id))) .collect::>>() }) .collect::>() @@ -2136,8 +2126,7 @@ fn append_incoming_pdu<'a>( if soft_fail { db.rooms .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - db.rooms - .replace_pdu_leaves(&pdu.room_id, new_room_leaves.clone())?; + db.rooms.replace_pdu_leaves(&pdu.room_id, new_room_leaves)?; return Ok(None); } @@ -2282,7 +2271,7 @@ fn get_auth_chain_inner( event_id: &EventId, db: &Database, ) -> Result> { - let mut todo = vec![event_id.to_owned()]; + let mut todo = vec![Arc::from(event_id)]; let mut found = HashSet::new(); while let Some(event_id) = todo.pop() { @@ -2298,7 +2287,7 @@ fn get_auth_chain_inner( if !found.contains(&sauthevent) { found.insert(sauthevent); - todo.push(auth_event.to_owned()); + todo.push(auth_event.clone()); } } } @@ -2676,6 +2665,7 @@ pub fn create_join_event_template_route( membership: MembershipState::Join, third_party_invite: None, reason: None, + join_authorized_via_users_server: None, }) .expect("member event is valid value"); @@ -2709,7 +2699,7 @@ pub fn create_join_event_template_route( } let pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").to_owned(), + event_id: ruma::event_id!("$thiswillbefilledinlater").into(), room_id: body.room_id.clone(), sender: body.user_id.clone(), origin_server_ts: utils::millis_since_unix_epoch() From 3d25d46dc5b14c506692ea8a82151b6e4f39fafd Mon Sep 17 00:00:00 2001 From: Moritz Bitsch Date: Wed, 20 Oct 2021 06:20:34 +0200 Subject: [PATCH 025/445] Use simple BTreeMap to store uiaa requests some uiaa requests contain plaintext passwords which should never be persisted to disk. 
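In other words, pending interactive-auth state moves into a plain in-memory map. A minimal sketch of the shape this converges on (simplified: plain strings for the key parts and raw JSON bytes for the value; the patches below key the map first by raw bytes and later by the real UserId/DeviceId types):

```rust
use std::{collections::BTreeMap, sync::RwLock};

// Simplified key: (user id, device id, session id).
type Key = (String, String, String);

#[derive(Default)]
struct Uiaa {
    // Held in memory only, so a pending request (which may carry a
    // plaintext password) is never written to the database files.
    requests: RwLock<BTreeMap<Key, Vec<u8>>>,
}

impl Uiaa {
    fn set_request(&self, key: Key, request: Vec<u8>) {
        self.requests.write().unwrap().insert(key, request);
    }

    fn get_request(&self, key: &Key) -> Option<Vec<u8>> {
        self.requests.read().unwrap().get(key).cloned()
    }
}

fn main() {
    let uiaa = Uiaa::default();
    let key = ("@alice:example.org".into(), "DEVICE".into(), "session1".into());
    uiaa.set_request(key.clone(), br#"{"auth": "..."}"#.to_vec());
    assert!(uiaa.get_request(&key).is_some());
}
```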
Currently there is no cleanup implemented (you have to restart conduit) --- src/database.rs | 3 +-- src/database/uiaa.rs | 16 +++++++++------- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/src/database.rs b/src/database.rs index 84ca68dc..83b0fd5e 100644 --- a/src/database.rs +++ b/src/database.rs @@ -250,8 +250,7 @@ impl Database { }, uiaa: uiaa::Uiaa { userdevicesessionid_uiaainfo: builder.open_tree("userdevicesessionid_uiaainfo")?, - userdevicesessionid_uiaarequest: builder - .open_tree("userdevicesessionid_uiaarequest")?, + userdevicesessionid_uiaarequest: RwLock::new(BTreeMap::new()), }, rooms: rooms::Rooms { edus: rooms::RoomEdus { diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index 1c0fb566..2ecca93d 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -1,4 +1,6 @@ use std::sync::Arc; +use std::sync::RwLock; +use std::collections::BTreeMap; use crate::{client_server::SESSION_ID_LENGTH, utils, Error, Result}; use ruma::{ @@ -18,7 +20,7 @@ use super::abstraction::Tree; pub struct Uiaa { pub(super) userdevicesessionid_uiaainfo: Arc, // User-interactive authentication - pub(super) userdevicesessionid_uiaarequest: Arc, // UiaaRequest = canonical json value + pub(super) userdevicesessionid_uiaarequest: RwLock, Vec>>, // UiaaRequest = canonical json value } impl Uiaa { @@ -153,10 +155,10 @@ impl Uiaa { userdevicesessionid.push(0xff); userdevicesessionid.extend_from_slice(session.as_bytes()); - self.userdevicesessionid_uiaarequest.insert( - &userdevicesessionid, - &serde_json::to_vec(request).expect("json value to vec always works"), - )?; + self.userdevicesessionid_uiaarequest.write().unwrap().insert( + userdevicesessionid, + serde_json::to_vec(request).expect("json value to vec always works"), + ); Ok(()) } @@ -173,8 +175,8 @@ impl Uiaa { userdevicesessionid.push(0xff); userdevicesessionid.extend_from_slice(session.as_bytes()); - self.userdevicesessionid_uiaarequest - .get(&userdevicesessionid)? + self.userdevicesessionid_uiaarequest.read().unwrap() + .get(&userdevicesessionid) .map(|bytes| { serde_json::from_str::( &utils::string_from_bytes(&bytes) From fe8cfe05569e667b03ee855a2463964a5a029661 Mon Sep 17 00:00:00 2001 From: Moritz Bitsch Date: Tue, 14 Dec 2021 17:55:28 +0100 Subject: [PATCH 026/445] Add database migration to remove stored passwords uiaarequests can contain plaintext passwords, which were stored on disk --- src/database.rs | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/database.rs b/src/database.rs index 83b0fd5e..8b29b221 100644 --- a/src/database.rs +++ b/src/database.rs @@ -754,6 +754,15 @@ impl Database { println!("Migration: 9 -> 10 finished"); } + + if db.globals.database_version()? < 11 { + db._db + .open_tree("userdevicesessionid_uiaarequest")? 
+ .clear()?; + db.globals.bump_database_version(11)?; + + println!("Migration: 10 -> 11 finished"); + } } let guard = db.read().await; From 0725b69abb7453df534a764947b6015ffe8293c4 Mon Sep 17 00:00:00 2001 From: Moritz Bitsch Date: Sat, 18 Dec 2021 18:46:38 +0100 Subject: [PATCH 027/445] Clean up userdevicesessionid_uiaarequest BTreeMap There is no need to encode or decode anything as we are not saving to disk --- src/database/uiaa.rs | 52 ++++++++++++++++++++------------------------ 1 file changed, 24 insertions(+), 28 deletions(-) diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index 2ecca93d..461a3e27 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -1,6 +1,6 @@ +use std::collections::BTreeMap; use std::sync::Arc; use std::sync::RwLock; -use std::collections::BTreeMap; use crate::{client_server::SESSION_ID_LENGTH, utils, Error, Result}; use ruma::{ @@ -20,7 +20,8 @@ use super::abstraction::Tree; pub struct Uiaa { pub(super) userdevicesessionid_uiaainfo: Arc, // User-interactive authentication - pub(super) userdevicesessionid_uiaarequest: RwLock, Vec>>, // UiaaRequest = canonical json value + pub(super) userdevicesessionid_uiaarequest: + RwLock>, } impl Uiaa { @@ -149,16 +150,17 @@ impl Uiaa { session: &str, request: &CanonicalJsonValue, ) -> Result<()> { - let mut userdevicesessionid = user_id.as_bytes().to_vec(); - userdevicesessionid.push(0xff); - userdevicesessionid.extend_from_slice(device_id.as_bytes()); - userdevicesessionid.push(0xff); - userdevicesessionid.extend_from_slice(session.as_bytes()); - - self.userdevicesessionid_uiaarequest.write().unwrap().insert( - userdevicesessionid, - serde_json::to_vec(request).expect("json value to vec always works"), - ); + self.userdevicesessionid_uiaarequest + .write() + .unwrap() + .insert( + ( + user_id.to_owned(), + device_id.to_string(), + session.to_string(), + ), + request.to_owned(), + ); Ok(()) } @@ -169,22 +171,16 @@ impl Uiaa { device_id: &DeviceId, session: &str, ) -> Result> { - let mut userdevicesessionid = user_id.as_bytes().to_vec(); - userdevicesessionid.push(0xff); - userdevicesessionid.extend_from_slice(device_id.as_bytes()); - userdevicesessionid.push(0xff); - userdevicesessionid.extend_from_slice(session.as_bytes()); - - self.userdevicesessionid_uiaarequest.read().unwrap() - .get(&userdevicesessionid) - .map(|bytes| { - serde_json::from_str::( - &utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid uiaa request bytes in db."))?, - ) - .map_err(|_| Error::bad_database("Invalid uiaa request in db.")) - }) - .transpose() + Ok(self + .userdevicesessionid_uiaarequest + .read() + .unwrap() + .get(&( + user_id.to_owned(), + device_id.to_string(), + session.to_string(), + )) + .map(|j| j.to_owned())) } fn update_uiaa_session( From 720a54b3bb74301eaf08f54edd163995bf5ef7fa Mon Sep 17 00:00:00 2001 From: Moritz Bitsch Date: Sat, 18 Dec 2021 19:05:18 +0100 Subject: [PATCH 028/445] Use String to store UserId for uiaa request Fixes compilation error after ruma upgrade --- src/database/uiaa.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index 461a3e27..6a5f7a33 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -21,7 +21,7 @@ use super::abstraction::Tree; pub struct Uiaa { pub(super) userdevicesessionid_uiaainfo: Arc, // User-interactive authentication pub(super) userdevicesessionid_uiaarequest: - RwLock>, + RwLock>, } impl Uiaa { @@ -155,7 +155,7 @@ impl Uiaa { .unwrap() .insert( ( - user_id.to_owned(), + 
user_id.to_string(), device_id.to_string(), session.to_string(), ), @@ -176,7 +176,7 @@ impl Uiaa { .read() .unwrap() .get(&( - user_id.to_owned(), + user_id.to_string(), device_id.to_string(), session.to_string(), )) From 7857da8a0b6322618b12e4b41c6945bcd7dee9ef Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Mon, 20 Dec 2021 15:46:36 +0100 Subject: [PATCH 029/445] Add ability to remove an appservice --- APPSERVICES.md | 8 ++++++++ src/database/admin.rs | 4 ++++ src/database/appservice.rs | 9 +++++++++ src/database/rooms.rs | 9 +++++++++ 4 files changed, 30 insertions(+) diff --git a/APPSERVICES.md b/APPSERVICES.md index 26c34cc4..894bc6f4 100644 --- a/APPSERVICES.md +++ b/APPSERVICES.md @@ -42,6 +42,14 @@ could help. ## Appservice-specific instructions +### Remove an appservice + +To remove an appservice go to your admin room and execute + +```@conduit:your.server.name: unregister_appservice ``` + +where `` one of the output of `list_appservices`. + ### Tested appservices These appservices have been tested and work with Conduit without any extra steps: diff --git a/src/database/admin.rs b/src/database/admin.rs index 1e5c47c9..0702bcdd 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -12,6 +12,7 @@ use tracing::warn; pub enum AdminCommand { RegisterAppservice(serde_yaml::Value), + UnregisterAppservice(String), ListAppservices, SendMessage(RoomMessageEventContent), } @@ -96,6 +97,9 @@ impl Admin { AdminCommand::RegisterAppservice(yaml) => { guard.appservice.register_appservice(yaml).unwrap(); // TODO handle error } + AdminCommand::UnregisterAppservice(service_name) => { + guard.appservice.unregister_appservice(&service_name).unwrap(); // TODO: see above + } AdminCommand::ListAppservices => { if let Ok(appservices) = guard.appservice.iter_ids().map(|ids| ids.collect::>()) { let count = appservices.len(); diff --git a/src/database/appservice.rs b/src/database/appservice.rs index 7cc91372..caa48ad0 100644 --- a/src/database/appservice.rs +++ b/src/database/appservice.rs @@ -27,6 +27,15 @@ impl Appservice { Ok(()) } + /** + * Remove an appservice registration + * service_name is the name you send to register the service + */ + pub fn unregister_appservice(&self, service_name: &str) -> Result<()> { + self.id_appserviceregistrations.remove(service_name.as_bytes())?; + Ok(()) + } + pub fn get_registration(&self, id: &str) -> Result> { self.cached_registrations .read() diff --git a/src/database/rooms.rs b/src/database/rooms.rs index fb9ecbf0..612bd51d 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1528,6 +1528,15 @@ impl Rooms { )); } } + "unregister_appservice" => { + if args.len() == 1 { + db.admin.send(AdminCommand::UnregisterAppservice(args[0].to_owned())); + } else { + db.admin.send(AdminCommand::SendMessage( + RoomMessageEventContent::text_plain("Missing appservice identifier"), + )); + } + } "list_appservices" => { db.admin.send(AdminCommand::ListAppservices); } From b6c9582cf4e9255e0610a63849bb3c5113be16e2 Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Wed, 22 Dec 2021 13:09:56 +0100 Subject: [PATCH 030/445] Fix doc style comment according to Rust; VSCode added line breaks --- src/database/appservice.rs | 12 +++++++----- src/database/rooms.rs | 8 ++++++-- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/src/database/appservice.rs b/src/database/appservice.rs index caa48ad0..910964a4 100644 --- a/src/database/appservice.rs +++ b/src/database/appservice.rs @@ -27,12 +27,14 @@ impl Appservice { Ok(()) } - /** - * Remove an 
appservice registration - * service_name is the name you send to register the service - */ + /// Remove an appservice registration + /// + /// # Arguments + /// + /// * `service_name` - the name you send to register the service previously pub fn unregister_appservice(&self, service_name: &str) -> Result<()> { - self.id_appserviceregistrations.remove(service_name.as_bytes())?; + self.id_appserviceregistrations + .remove(service_name.as_bytes())?; Ok(()) } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 612bd51d..775e2f8d 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1530,10 +1530,14 @@ impl Rooms { } "unregister_appservice" => { if args.len() == 1 { - db.admin.send(AdminCommand::UnregisterAppservice(args[0].to_owned())); + db.admin.send(AdminCommand::UnregisterAppservice( + args[0].to_owned(), + )); } else { db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain("Missing appservice identifier"), + RoomMessageEventContent::text_plain( + "Missing appservice identifier", + ), )); } } From 7f2445be6ca7798ec25458e5447b23e7aeea1f7f Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Wed, 22 Dec 2021 16:48:27 +0100 Subject: [PATCH 031/445] On unregister_appservice(service_name), remove the appservice service_name from cache too --- src/database/appservice.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/database/appservice.rs b/src/database/appservice.rs index 910964a4..847d7479 100644 --- a/src/database/appservice.rs +++ b/src/database/appservice.rs @@ -35,6 +35,10 @@ impl Appservice { pub fn unregister_appservice(&self, service_name: &str) -> Result<()> { self.id_appserviceregistrations .remove(service_name.as_bytes())?; + self.cached_registrations. + write(). + unwrap(). + remove(service_name); Ok(()) } From c4a438460e0537e465f5b93514fd05b66a03ad37 Mon Sep 17 00:00:00 2001 From: Moritz Bitsch Date: Wed, 22 Dec 2021 19:26:23 +0100 Subject: [PATCH 032/445] Use Box to store UserID and DeviceID Userid and DeviceID are of unknown size, use Box to be able to store them into the userdevicesessionid_uiaarequest BTreeMap --- src/database/uiaa.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index 6a5f7a33..772dab9e 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -21,7 +21,7 @@ use super::abstraction::Tree; pub struct Uiaa { pub(super) userdevicesessionid_uiaainfo: Arc, // User-interactive authentication pub(super) userdevicesessionid_uiaarequest: - RwLock>, + RwLock, Box, String), CanonicalJsonValue>>, } impl Uiaa { @@ -155,8 +155,8 @@ impl Uiaa { .unwrap() .insert( ( - user_id.to_string(), - device_id.to_string(), + user_id.to_owned(), + device_id.to_owned(), session.to_string(), ), request.to_owned(), @@ -176,8 +176,8 @@ impl Uiaa { .read() .unwrap() .get(&( - user_id.to_string(), - device_id.to_string(), + user_id.to_owned(), + device_id.to_owned(), session.to_string(), )) .map(|j| j.to_owned())) From aba95b20f3b3c252e72ac87312b10df8068f7419 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Wed, 22 Dec 2021 19:41:33 +0100 Subject: [PATCH 033/445] Upgrade Ruma --- Cargo.lock | 56 ++++++++------- Cargo.toml | 4 +- src/client_server/keys.rs | 45 ++++++------ src/client_server/sync.rs | 2 + src/database/key_backups.rs | 65 +++++++++++------- src/database/users.rs | 133 ++++++++++++++++++++++-------------- 6 files changed, 184 insertions(+), 121 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index fbf4b3f2..69a026b2 100644 --- 
a/Cargo.lock +++ b/Cargo.lock @@ -938,7 +938,7 @@ checksum = "527e8c9ac747e28542699a951517aa9a6945af506cd1f2e1b53a576c17b6cc11" dependencies = [ "bytes", "fnv", - "itoa", + "itoa 0.4.8", ] [[package]] @@ -979,7 +979,7 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa", + "itoa 0.4.8", "pin-project-lite", "socket2 0.4.1", "tokio", @@ -1114,6 +1114,12 @@ version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" +[[package]] +name = "itoa" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" + [[package]] name = "jobserver" version = "0.1.24" @@ -1984,7 +1990,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "assign", "js_int", @@ -2005,7 +2011,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "bytes", "http", @@ -2021,7 +2027,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2032,7 +2038,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "ruma-api", "ruma-common", @@ -2046,7 +2052,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.12.3" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "assign", "bytes", @@ -2066,7 +2072,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "indexmap", "js_int", @@ -2081,7 +2087,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "indoc", "js_int", @@ -2097,7 +2103,7 @@ 
dependencies = [ [[package]] name = "ruma-events-macros" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2108,7 +2114,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "js_int", "ruma-api", @@ -2123,7 +2129,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "percent-encoding", "rand 0.8.4", @@ -2137,7 +2143,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2147,7 +2153,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "thiserror", ] @@ -2155,7 +2161,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "js_int", "ruma-api", @@ -2168,7 +2174,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "js_int", "ruma-api", @@ -2183,11 +2189,11 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "bytes", "form_urlencoded", - "itoa", + "itoa 0.4.8", "js_int", "ruma-serde-macros", "serde", @@ -2197,7 +2203,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = 
"git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2208,7 +2214,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2225,7 +2231,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.4.1" -source = "git+https://github.com/ruma/ruma?rev=16f031fabb7871fcd738b0f25391193ee4ca28a9#16f031fabb7871fcd738b0f25391193ee4ca28a9" +source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" dependencies = [ "itertools 0.10.1", "js_int", @@ -2404,11 +2410,11 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.67" +version = "1.0.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7f9e390c27c3c0ce8bc5d725f6e4d30a29d26659494aa4b17535f7522c5c950" +checksum = "bcbd0344bc6533bc7ec56df11d42fb70f1b912351c0825ccb7211b59d8af7cf5" dependencies = [ - "itoa", + "itoa 1.0.1", "ryu", "serde", ] @@ -2420,7 +2426,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "edfa57a7f8d9c1d260a549e7224100f6c43d43f9103e06dd8b4095a9b2b43ce9" dependencies = [ "form_urlencoded", - "itoa", + "itoa 0.4.8", "ryu", "serde", ] diff --git a/Cargo.toml b/Cargo.toml index 02159e31..e64e2751 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle request # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "16f031fabb7871fcd738b0f25391193ee4ca28a9", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "7cf3abbaf02995b03db74429090ca5af1cd71edc", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } @@ -36,7 +36,7 @@ http = "0.2.4" # Used to find data directory for default db path directories = "3.0.2" # Used for ruma wrapper -serde_json = { version = "1.0.67", features = ["raw_value"] } +serde_json = { version = "1.0.70", features = ["raw_value"] } # Used for appservice registration files serde_yaml = "0.8.20" # Used for pdu definition diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index 08ea6e76..be0675d8 100644 --- 
a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -15,7 +15,7 @@ use ruma::{ }, federation, }, - encryption::UnsignedDeviceInfo, + serde::Raw, DeviceId, DeviceKeyAlgorithm, UserId, }; use serde_json::json; @@ -42,16 +42,9 @@ pub async fn upload_keys_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); - if let Some(one_time_keys) = &body.one_time_keys { - for (key_key, key_value) in one_time_keys { - db.users.add_one_time_key( - sender_user, - sender_device, - key_key, - key_value, - &db.globals, - )?; - } + for (key_key, key_value) in &body.one_time_keys { + db.users + .add_one_time_key(sender_user, sender_device, key_key, key_value, &db.globals)?; } if let Some(device_keys) = &body.device_keys { @@ -350,10 +343,8 @@ pub(crate) async fn get_keys_helper bool>( Error::bad_database("all_device_keys contained nonexistent device.") })?; - keys.unsigned = UnsignedDeviceInfo { - device_display_name: metadata.display_name, - }; - + add_unsigned_device_display_name(&mut keys, metadata) + .map_err(|_| Error::bad_database("invalid device keys in database"))?; container.insert(device_id, keys); } } @@ -369,10 +360,8 @@ pub(crate) async fn get_keys_helper bool>( ), )?; - keys.unsigned = UnsignedDeviceInfo { - device_display_name: metadata.display_name, - }; - + add_unsigned_device_display_name(&mut keys, metadata) + .map_err(|_| Error::bad_database("invalid device keys in database"))?; container.insert(device_id.to_owned(), keys); } device_keys.insert(user_id.to_owned(), container); @@ -441,6 +430,24 @@ pub(crate) async fn get_keys_helper bool>( }) } +fn add_unsigned_device_display_name( + keys: &mut Raw, + metadata: ruma::api::client::r0::device::Device, +) -> serde_json::Result<()> { + if let Some(display_name) = metadata.display_name { + let mut object = keys.deserialize_as::>()?; + + let unsigned = object.entry("unsigned").or_insert_with(|| json!({})); + if let serde_json::Value::Object(unsigned_object) = unsigned { + unsigned_object.insert("device_display_name".to_owned(), display_name.into()); + } + + *keys = Raw::from_json(serde_json::value::to_raw_value(&object)?); + } + + Ok(()) +} + pub(crate) async fn claim_keys_helper( one_time_keys_input: &BTreeMap, BTreeMap, DeviceKeyAlgorithm>>, db: &Database, diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 9ba3b7fb..64588a2c 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -762,6 +762,8 @@ async fn sync_helper( .users .get_to_device_events(&sender_user, &sender_device)?, }, + // Fallback keys are not yet supported + device_unused_fallback_key_types: None, }; // TODO: Retry the endpoint instead of returning (waiting for #118) diff --git a/src/database/key_backups.rs b/src/database/key_backups.rs index 56963c08..b74bc408 100644 --- a/src/database/key_backups.rs +++ b/src/database/key_backups.rs @@ -4,8 +4,10 @@ use ruma::{ error::ErrorKind, r0::backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, }, + serde::Raw, RoomId, UserId, }; +use serde_json::json; use std::{collections::BTreeMap, sync::Arc}; use super::abstraction::Tree; @@ -20,7 +22,7 @@ impl KeyBackups { pub fn create_backup( &self, user_id: &UserId, - backup_metadata: &BackupAlgorithm, + backup_metadata: &Raw, globals: &super::globals::Globals, ) -> Result { let version = globals.next_count()?.to_string(); @@ -59,7 +61,7 @@ impl KeyBackups { &self, user_id: &UserId, version: &str, - backup_metadata: &BackupAlgorithm, + 
backup_metadata: &Raw, globals: &super::globals::Globals, ) -> Result { let mut key = user_id.as_bytes().to_vec(); @@ -73,12 +75,8 @@ impl KeyBackups { )); } - self.backupid_algorithm.insert( - &key, - serde_json::to_string(backup_metadata) - .expect("BackupAlgorithm::to_string always works") - .as_bytes(), - )?; + self.backupid_algorithm + .insert(&key, backup_metadata.json().get().as_bytes())?; self.backupid_etag .insert(&key, &globals.next_count()?.to_be_bytes())?; Ok(version.to_owned()) @@ -105,7 +103,10 @@ impl KeyBackups { .transpose() } - pub fn get_latest_backup(&self, user_id: &UserId) -> Result> { + pub fn get_latest_backup( + &self, + user_id: &UserId, + ) -> Result)>> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); let mut last_possible_key = prefix.clone(); @@ -133,7 +134,11 @@ impl KeyBackups { .transpose() } - pub fn get_backup(&self, user_id: &UserId, version: &str) -> Result> { + pub fn get_backup( + &self, + user_id: &UserId, + version: &str, + ) -> Result>> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(version.as_bytes()); @@ -152,7 +157,7 @@ impl KeyBackups { version: &str, room_id: &RoomId, session_id: &str, - key_data: &KeyBackupData, + key_data: &Raw, globals: &super::globals::Globals, ) -> Result<()> { let mut key = user_id.as_bytes().to_vec(); @@ -174,10 +179,8 @@ impl KeyBackups { key.push(0xff); key.extend_from_slice(session_id.as_bytes()); - self.backupkeyid_backup.insert( - &key, - &serde_json::to_vec(&key_data).expect("KeyBackupData::to_vec always works"), - )?; + self.backupkeyid_backup + .insert(&key, key_data.json().get().as_bytes())?; Ok(()) } @@ -209,13 +212,13 @@ impl KeyBackups { &self, user_id: &UserId, version: &str, - ) -> Result, RoomKeyBackup>> { + ) -> Result, Raw>> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(version.as_bytes()); prefix.push(0xff); - let mut rooms = BTreeMap::, RoomKeyBackup>::new(); + let mut rooms = BTreeMap::, Raw>::new(); for result in self .backupkeyid_backup @@ -241,7 +244,7 @@ impl KeyBackups { Error::bad_database("backupkeyid_backup room_id is invalid room id.") })?; - let key_data = serde_json::from_slice(&value).map_err(|_| { + let key_data: serde_json::Value = serde_json::from_slice(&value).map_err(|_| { Error::bad_database("KeyBackupData in backupkeyid_backup is invalid.") })?; @@ -249,13 +252,25 @@ impl KeyBackups { }) { let (room_id, session_id, key_data) = result?; - rooms - .entry(room_id) - .or_insert_with(|| RoomKeyBackup { + let room_key_backup = rooms.entry(room_id).or_insert_with(|| { + Raw::new(&RoomKeyBackup { sessions: BTreeMap::new(), }) - .sessions - .insert(session_id, key_data); + .expect("RoomKeyBackup serialization") + }); + + let mut object = room_key_backup + .deserialize_as::>() + .map_err(|_| Error::bad_database("RoomKeyBackup is not an object"))?; + + let sessions = object.entry("session").or_insert_with(|| json!({})); + if let serde_json::Value::Object(unsigned_object) = sessions { + unsigned_object.insert(session_id, key_data); + } + + *room_key_backup = Raw::from_json( + serde_json::value::to_raw_value(&object).expect("Value => RawValue serialization"), + ); } Ok(rooms) @@ -266,7 +281,7 @@ impl KeyBackups { user_id: &UserId, version: &str, room_id: &RoomId, - ) -> Result> { + ) -> Result>> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(version.as_bytes()); @@ -304,7 +319,7 @@ impl KeyBackups { version: &str, room_id: &RoomId, session_id: &str, - ) 
-> Result> { + ) -> Result>> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(version.as_bytes()); diff --git a/src/database/users.rs b/src/database/users.rs index d4bf4890..63a63f00 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -8,7 +8,12 @@ use ruma::{ DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, RoomAliasId, UInt, UserId, }; -use std::{collections::BTreeMap, convert::TryInto, mem, sync::Arc}; +use std::{ + collections::BTreeMap, + convert::{TryFrom, TryInto}, + mem, + sync::Arc, +}; use tracing::warn; use super::abstraction::Tree; @@ -359,7 +364,7 @@ impl Users { user_id: &UserId, device_id: &DeviceId, one_time_key_key: &DeviceKeyId, - one_time_key_value: &OneTimeKey, + one_time_key_value: &Raw, globals: &super::globals::Globals, ) -> Result<()> { let mut key = user_id.as_bytes().to_vec(); @@ -409,7 +414,7 @@ impl Users { device_id: &DeviceId, key_algorithm: &DeviceKeyAlgorithm, globals: &super::globals::Globals, - ) -> Result, OneTimeKey)>> { + ) -> Result, Raw)>> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(device_id.as_bytes()); @@ -480,7 +485,7 @@ impl Users { &self, user_id: &UserId, device_id: &DeviceId, - device_keys: &DeviceKeys, + device_keys: &Raw, rooms: &super::rooms::Rooms, globals: &super::globals::Globals, ) -> Result<()> { @@ -509,9 +514,9 @@ impl Users { pub fn add_cross_signing_keys( &self, user_id: &UserId, - master_key: &CrossSigningKey, - self_signing_key: &Option, - user_signing_key: &Option, + master_key: &Raw, + self_signing_key: &Option>, + user_signing_key: &Option>, rooms: &super::rooms::Rooms, globals: &super::globals::Globals, ) -> Result<()> { @@ -521,7 +526,12 @@ impl Users { prefix.push(0xff); // Master key - let mut master_key_ids = master_key.keys.values(); + let master_key_map = master_key + .deserialize() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid master key"))? + .keys; + let mut master_key_ids = master_key_map.values(); + let master_key_id = master_key_ids.next().ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Master key contained no key.", @@ -537,17 +547,21 @@ impl Users { let mut master_key_key = prefix.clone(); master_key_key.extend_from_slice(master_key_id.as_bytes()); - self.keyid_key.insert( - &master_key_key, - &serde_json::to_vec(&master_key).expect("CrossSigningKey::to_vec always works"), - )?; + self.keyid_key + .insert(&master_key_key, master_key.json().get().as_bytes())?; self.userid_masterkeyid .insert(user_id.as_bytes(), &master_key_key)?; // Self-signing key if let Some(self_signing_key) = self_signing_key { - let mut self_signing_key_ids = self_signing_key.keys.values(); + let self_signing_key_map = self_signing_key + .deserialize() + .map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Invalid self signing key") + })? 
+ .keys; + let mut self_signing_key_ids = self_signing_key_map.values(); let self_signing_key_id = self_signing_key_ids.next().ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Self signing key contained no key.", @@ -565,8 +579,7 @@ impl Users { self.keyid_key.insert( &self_signing_key_key, - &serde_json::to_vec(&self_signing_key) - .expect("CrossSigningKey::to_vec always works"), + self_signing_key.json().get().as_bytes(), )?; self.userid_selfsigningkeyid @@ -575,7 +588,13 @@ impl Users { // User-signing key if let Some(user_signing_key) = user_signing_key { - let mut user_signing_key_ids = user_signing_key.keys.values(); + let user_signing_key_map = user_signing_key + .deserialize() + .map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Invalid user signing key") + })? + .keys; + let mut user_signing_key_ids = user_signing_key_map.values(); let user_signing_key_id = user_signing_key_ids.next().ok_or(Error::BadRequest( ErrorKind::InvalidParam, "User signing key contained no key.", @@ -593,8 +612,7 @@ impl Users { self.keyid_key.insert( &user_signing_key_key, - &serde_json::to_vec(&user_signing_key) - .expect("CrossSigningKey::to_vec always works"), + user_signing_key.json().get().as_bytes(), )?; self.userid_usersigningkeyid @@ -727,7 +745,7 @@ impl Users { &self, user_id: &UserId, device_id: &DeviceId, - ) -> Result> { + ) -> Result>> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(device_id.as_bytes()); @@ -744,25 +762,19 @@ impl Users { &self, user_id: &UserId, allowed_signatures: F, - ) -> Result> { + ) -> Result>> { self.userid_masterkeyid .get(user_id.as_bytes())? .map_or(Ok(None), |key| { self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { - let mut cross_signing_key = serde_json::from_slice::(&bytes) - .map_err(|_| { - Error::bad_database("CrossSigningKey in db is invalid.") - })?; - - // A user is not allowed to see signatures from users other than himself and - // the target user - cross_signing_key.signatures = cross_signing_key - .signatures - .into_iter() - .filter(|(user, _)| allowed_signatures(user)) - .collect(); - - Ok(Some(cross_signing_key)) + let mut cross_signing_key = serde_json::from_slice::(&bytes) + .map_err(|_| Error::bad_database("CrossSigningKey in db is invalid."))?; + clean_signatures(&mut cross_signing_key, user_id, allowed_signatures)?; + + Ok(Some(Raw::from_json( + serde_json::value::to_raw_value(&cross_signing_key) + .expect("Value to RawValue serialization"), + ))) }) }) } @@ -772,31 +784,25 @@ impl Users { &self, user_id: &UserId, allowed_signatures: F, - ) -> Result> { + ) -> Result>> { self.userid_selfsigningkeyid .get(user_id.as_bytes())? 
.map_or(Ok(None), |key| { self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { - let mut cross_signing_key = serde_json::from_slice::(&bytes) - .map_err(|_| { - Error::bad_database("CrossSigningKey in db is invalid.") - })?; - - // A user is not allowed to see signatures from users other than himself and - // the target user - cross_signing_key.signatures = cross_signing_key - .signatures - .into_iter() - .filter(|(user, _)| user == user_id || allowed_signatures(user)) - .collect(); - - Ok(Some(cross_signing_key)) + let mut cross_signing_key = serde_json::from_slice::(&bytes) + .map_err(|_| Error::bad_database("CrossSigningKey in db is invalid."))?; + clean_signatures(&mut cross_signing_key, user_id, allowed_signatures)?; + + Ok(Some(Raw::from_json( + serde_json::value::to_raw_value(&cross_signing_key) + .expect("Value to RawValue serialization"), + ))) }) }) } #[tracing::instrument(skip(self, user_id))] - pub fn get_user_signing_key(&self, user_id: &UserId) -> Result> { + pub fn get_user_signing_key(&self, user_id: &UserId) -> Result>> { self.userid_usersigningkeyid .get(user_id.as_bytes())? .map_or(Ok(None), |key| { @@ -991,3 +997,30 @@ impl Users { Ok(()) } } + +/// Ensure that a user only sees signatures from themselves and the target user +fn clean_signatures bool>( + cross_signing_key: &mut serde_json::Value, + user_id: &UserId, + allowed_signatures: F, +) -> Result<(), Error> { + if let Some(signatures) = cross_signing_key + .get_mut("signatures") + .and_then(|v| v.as_object_mut()) + { + // Don't allocate for the full size of the current signatures, but require + // at most one resize if nothing is dropped + let new_capacity = signatures.len() / 2; + for (user, signature) in + mem::replace(signatures, serde_json::Map::with_capacity(new_capacity)) + { + let id = <&UserId>::try_from(user.as_str()) + .map_err(|_| Error::bad_database("Invalid user ID in database."))?; + if id == user_id || allowed_signatures(id) { + signatures.insert(user, signature); + } + } + } + + Ok(()) +} From a889e884e684aa433772d8d61ee965c062a38790 Mon Sep 17 00:00:00 2001 From: Tglman Date: Thu, 23 Dec 2021 22:16:40 +0000 Subject: [PATCH 034/445] refactor:moved key watch wake logic to specific module --- Cargo.toml | 2 +- src/database/abstraction.rs | 3 ++ src/database/abstraction/heed.rs | 48 ++++--------------------- src/database/abstraction/sqlite.rs | 46 ++++-------------------- src/database/abstraction/watchers.rs | 54 ++++++++++++++++++++++++++++ 5 files changed, 70 insertions(+), 83 deletions(-) create mode 100644 src/database/abstraction/watchers.rs diff --git a/Cargo.toml b/Cargo.toml index 02159e31..ceae6ae9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -87,7 +87,7 @@ sha-1 = "0.9.8" default = ["conduit_bin", "backend_sqlite"] backend_sled = ["sled"] backend_sqlite = ["sqlite"] -backend_heed = ["heed", "crossbeam"] +backend_heed = ["heed", "crossbeam", "parking_lot"] sqlite = ["rusqlite", "parking_lot", "crossbeam", "tokio/signal"] conduit_bin = [] # TODO: add rocket to this when it is optional diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs index 11bbc3b1..67b80d1a 100644 --- a/src/database/abstraction.rs +++ b/src/database/abstraction.rs @@ -12,6 +12,9 @@ pub mod sqlite; #[cfg(feature = "heed")] pub mod heed; +#[cfg(any(feature = "sqlite", feature = "heed"))] +pub mod watchers; + pub trait DatabaseEngine: Sized { fn open(config: &Config) -> Result>; fn open_tree(self: &Arc, name: &'static str) -> Result>; diff --git a/src/database/abstraction/heed.rs 
b/src/database/abstraction/heed.rs index e767e22b..83dafc57 100644 --- a/src/database/abstraction/heed.rs +++ b/src/database/abstraction/heed.rs @@ -1,15 +1,13 @@ -use super::super::Config; +use super::{super::Config, watchers::Watchers}; use crossbeam::channel::{bounded, Sender as ChannelSender}; use threadpool::ThreadPool; use crate::{Error, Result}; use std::{ - collections::HashMap, future::Future, pin::Pin, - sync::{Arc, Mutex, RwLock}, + sync::{Arc, Mutex}, }; -use tokio::sync::oneshot::Sender; use super::{DatabaseEngine, Tree}; @@ -23,7 +21,7 @@ pub struct Engine { pub struct EngineTree { engine: Arc, tree: Arc, - watchers: RwLock, Vec>>>, + watchers: Watchers, } fn convert_error(error: heed::Error) -> Error { @@ -60,7 +58,7 @@ impl DatabaseEngine for Engine { .create_database(Some(name)) .map_err(convert_error)?, ), - watchers: RwLock::new(HashMap::new()), + watchers: Default::default(), })) } @@ -145,29 +143,7 @@ impl Tree for EngineTree { .put(&mut txn, &key, &value) .map_err(convert_error)?; txn.commit().map_err(convert_error)?; - - let watchers = self.watchers.read().unwrap(); - let mut triggered = Vec::new(); - - for length in 0..=key.len() { - if watchers.contains_key(&key[..length]) { - triggered.push(&key[..length]); - } - } - - drop(watchers); - - if !triggered.is_empty() { - let mut watchers = self.watchers.write().unwrap(); - for prefix in triggered { - if let Some(txs) = watchers.remove(prefix) { - for tx in txs { - let _ = tx.send(()); - } - } - } - }; - + self.watchers.wake(key); Ok(()) } @@ -223,18 +199,6 @@ impl Tree for EngineTree { #[tracing::instrument(skip(self, prefix))] fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { - let (tx, rx) = tokio::sync::oneshot::channel(); - - self.watchers - .write() - .unwrap() - .entry(prefix.to_vec()) - .or_default() - .push(tx); - - Box::pin(async move { - // Tx is never destroyed - rx.await.unwrap(); - }) + self.watchers.watch(prefix) } } diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index 1d2038c5..1e6a2d89 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -1,17 +1,15 @@ -use super::{DatabaseEngine, Tree}; +use super::{watchers::Watchers, DatabaseEngine, Tree}; use crate::{database::Config, Result}; -use parking_lot::{Mutex, MutexGuard, RwLock}; +use parking_lot::{Mutex, MutexGuard}; use rusqlite::{Connection, DatabaseName::Main, OptionalExtension}; use std::{ cell::RefCell, - collections::{hash_map, HashMap}, future::Future, path::{Path, PathBuf}, pin::Pin, sync::Arc, }; use thread_local::ThreadLocal; -use tokio::sync::watch; use tracing::debug; thread_local! 
{ @@ -113,7 +111,7 @@ impl DatabaseEngine for Engine { Ok(Arc::new(SqliteTable { engine: Arc::clone(self), name: name.to_owned(), - watchers: RwLock::new(HashMap::new()), + watchers: Watchers::default(), })) } @@ -126,7 +124,7 @@ impl DatabaseEngine for Engine { pub struct SqliteTable { engine: Arc, name: String, - watchers: RwLock, (watch::Sender<()>, watch::Receiver<()>)>>, + watchers: Watchers, } type TupleOfBytes = (Vec, Vec); @@ -200,27 +198,7 @@ impl Tree for SqliteTable { let guard = self.engine.write_lock(); self.insert_with_guard(&guard, key, value)?; drop(guard); - - let watchers = self.watchers.read(); - let mut triggered = Vec::new(); - - for length in 0..=key.len() { - if watchers.contains_key(&key[..length]) { - triggered.push(&key[..length]); - } - } - - drop(watchers); - - if !triggered.is_empty() { - let mut watchers = self.watchers.write(); - for prefix in triggered { - if let Some(tx) = watchers.remove(prefix) { - let _ = tx.0.send(()); - } - } - }; - + self.watchers.wake(key); Ok(()) } @@ -365,19 +343,7 @@ impl Tree for SqliteTable { #[tracing::instrument(skip(self, prefix))] fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { - let mut rx = match self.watchers.write().entry(prefix.to_vec()) { - hash_map::Entry::Occupied(o) => o.get().1.clone(), - hash_map::Entry::Vacant(v) => { - let (tx, rx) = tokio::sync::watch::channel(()); - v.insert((tx, rx.clone())); - rx - } - }; - - Box::pin(async move { - // Tx is never destroyed - rx.changed().await.unwrap(); - }) + self.watchers.watch(prefix) } #[tracing::instrument(skip(self))] diff --git a/src/database/abstraction/watchers.rs b/src/database/abstraction/watchers.rs new file mode 100644 index 00000000..404f3f06 --- /dev/null +++ b/src/database/abstraction/watchers.rs @@ -0,0 +1,54 @@ +use parking_lot::RwLock; +use std::{ + collections::{hash_map, HashMap}, + future::Future, + pin::Pin, +}; +use tokio::sync::watch; + +#[derive(Default)] +pub(super) struct Watchers { + watchers: RwLock, (watch::Sender<()>, watch::Receiver<()>)>>, +} + +impl Watchers { + pub(super) fn watch<'a>( + &'a self, + prefix: &[u8], + ) -> Pin + Send + 'a>> { + let mut rx = match self.watchers.write().entry(prefix.to_vec()) { + hash_map::Entry::Occupied(o) => o.get().1.clone(), + hash_map::Entry::Vacant(v) => { + let (tx, rx) = tokio::sync::watch::channel(()); + v.insert((tx, rx.clone())); + rx + } + }; + + Box::pin(async move { + // Tx is never destroyed + rx.changed().await.unwrap(); + }) + } + pub(super) fn wake(&self, key: &[u8]) { + let watchers = self.watchers.read(); + let mut triggered = Vec::new(); + + for length in 0..=key.len() { + if watchers.contains_key(&key[..length]) { + triggered.push(&key[..length]); + } + } + + drop(watchers); + + if !triggered.is_empty() { + let mut watchers = self.watchers.write(); + for prefix in triggered { + if let Some(tx) = watchers.remove(prefix) { + let _ = tx.0.send(()); + } + } + }; + } +} From 7c1b2625cf8f315bced5e560574c0c64eedd368f Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Fri, 24 Dec 2021 23:06:54 +0100 Subject: [PATCH 035/445] Prepare to add an option to list local user accounts from your homeserver --- src/database/admin.rs | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/database/admin.rs b/src/database/admin.rs index 1e5c47c9..5ea872e8 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -13,6 +13,7 @@ use tracing::warn; pub enum AdminCommand { RegisterAppservice(serde_yaml::Value), ListAppservices, + CountUsers, 
SendMessage(RoomMessageEventContent), } @@ -93,6 +94,16 @@ impl Admin { let state_lock = mutex_state.lock().await; match event { + AdminCommand::CountUsers => { + // count() does not return an error on failure... + if let Ok(usercount) = guard.users.count() { + let message = format!("Found {} total user accounts", usercount); + send_message(RoomMessageEventContent::text_plain(message), guard, &state_lock); + } else { + // ... so we simply spit out a generic non-explaining-info in case count() did not return Ok() + send_message(RoomMessageEventContent::text_plain("Unable to count users"), guard, &state_lock); + } + } AdminCommand::RegisterAppservice(yaml) => { guard.appservice.register_appservice(yaml).unwrap(); // TODO handle error } From 567cf6dbe970ee5422cd38439498f7e5a86b89ac Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Sat, 25 Dec 2021 20:51:22 +0100 Subject: [PATCH 036/445] Add command count_local_users to database/rooms.rs --- src/database/rooms.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index fb9ecbf0..0236c839 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1531,6 +1531,9 @@ impl Rooms { "list_appservices" => { db.admin.send(AdminCommand::ListAppservices); } + "count_local_users" => { + db.admin.send(AdminCommand::CountUsers); + } "get_auth_chain" => { if args.len() == 1 { if let Ok(event_id) = EventId::parse_arc(args[0]) { From d21030566c174509c4030d2f6428ffe1109e6c1d Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Sat, 25 Dec 2021 21:29:03 +0100 Subject: [PATCH 037/445] Rename/Add count methods to count_local_users --- src/database/admin.rs | 2 +- src/database/users.rs | 12 ++++++++++++ 2 files changed, 13 insertions(+), 1 deletion(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index 5ea872e8..b18e50c3 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -96,7 +96,7 @@ impl Admin { match event { AdminCommand::CountUsers => { // count() does not return an error on failure... - if let Ok(usercount) = guard.users.count() { + if let Ok(usercount) = guard.users.count_local_users() { let message = format!("Found {} total user accounts", usercount); send_message(RoomMessageEventContent::text_plain(message), guard, &state_lock); } else { diff --git a/src/database/users.rs b/src/database/users.rs index d4bf4890..5a32f16a 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -77,11 +77,23 @@ impl Users { } /// Returns the number of users registered on this server. + /// It really returns all users, not only real ones with a + /// password to login but also bridge puppets... #[tracing::instrument(skip(self))] pub fn count(&self) -> Result { Ok(self.userid_password.iter().count()) } + /// This method will only count those local user accounts with + /// a password thus returning only real accounts on this instance. + #[tracing::instrument(skip(self))] + pub fn count_local_users(&self) -> Result { + self.userid_password.iter().map(|(key, value)| { + + }); + Ok(1) + } + /// Find out which user an access token belongs to. 
#[tracing::instrument(skip(self, token))] pub fn find_from_token(&self, token: &str) -> Result, String)>> { From 2281bcefc631e02c83800297a4838e127ded7973 Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Sun, 26 Dec 2021 11:06:28 +0100 Subject: [PATCH 038/445] Finalize count_local_users function --- src/database/users.rs | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/src/database/users.rs b/src/database/users.rs index 5a32f16a..1e103fa2 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -88,10 +88,8 @@ impl Users { /// a password thus returning only real accounts on this instance. #[tracing::instrument(skip(self))] pub fn count_local_users(&self) -> Result { - self.userid_password.iter().map(|(key, value)| { - - }); - Ok(1) + let n = self.userid_password.iter().filter(|(_, bytes)| bytes.len() > 0).count(); + Ok(n) } /// Find out which user an access token belongs to. From 39787b41cb341ca3d270cc00c9ac46b8f4bd384d Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Sun, 26 Dec 2021 12:04:38 +0100 Subject: [PATCH 039/445] Rename admin command CountUsers -> CountLocalUsers; Update comments --- src/database/admin.rs | 12 ++++++------ src/database/rooms.rs | 2 +- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index b18e50c3..330fecb1 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -13,7 +13,7 @@ use tracing::warn; pub enum AdminCommand { RegisterAppservice(serde_yaml::Value), ListAppservices, - CountUsers, + CountLocalUsers, SendMessage(RoomMessageEventContent), } @@ -94,14 +94,14 @@ impl Admin { let state_lock = mutex_state.lock().await; match event { - AdminCommand::CountUsers => { - // count() does not return an error on failure... + AdminCommand::CountLocalUsers => { + // count_local_users() only returns with OK(x) where x is the number of found accounts if let Ok(usercount) = guard.users.count_local_users() { - let message = format!("Found {} total user accounts", usercount); + let message = format!("Found {} local user account(s)", usercount); send_message(RoomMessageEventContent::text_plain(message), guard, &state_lock); } else { - // ... so we simply spit out a generic non-explaining-info in case count() did not return Ok() - send_message(RoomMessageEventContent::text_plain("Unable to count users"), guard, &state_lock); + // if count_local_users() only returns with OK(x), then why is this? 
;-) + send_message(RoomMessageEventContent::text_plain("Unable to count local users"), guard, &state_lock); } } AdminCommand::RegisterAppservice(yaml) => { diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 0236c839..b1dd103c 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1532,7 +1532,7 @@ impl Rooms { db.admin.send(AdminCommand::ListAppservices); } "count_local_users" => { - db.admin.send(AdminCommand::CountUsers); + db.admin.send(AdminCommand::CountLocalUsers); } "get_auth_chain" => { if args.len() == 1 { From a69eb277d46d074d2bb4fef82f4111f70845f874 Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Sun, 26 Dec 2021 20:00:31 +0100 Subject: [PATCH 040/445] Update count users: It's now list_local_users and contains the number and the usernames --- src/database/admin.rs | 22 ++++++++++++---------- src/database/rooms.rs | 4 ++-- src/database/users.rs | 14 ++++++++++++++ 3 files changed, 28 insertions(+), 12 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index 330fecb1..58d9e83e 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -13,7 +13,7 @@ use tracing::warn; pub enum AdminCommand { RegisterAppservice(serde_yaml::Value), ListAppservices, - CountLocalUsers, + ListLocalUsers, SendMessage(RoomMessageEventContent), } @@ -94,15 +94,17 @@ impl Admin { let state_lock = mutex_state.lock().await; match event { - AdminCommand::CountLocalUsers => { - // count_local_users() only returns with OK(x) where x is the number of found accounts - if let Ok(usercount) = guard.users.count_local_users() { - let message = format!("Found {} local user account(s)", usercount); - send_message(RoomMessageEventContent::text_plain(message), guard, &state_lock); - } else { - // if count_local_users() only returns with OK(x), then why is this? ;-) - send_message(RoomMessageEventContent::text_plain("Unable to count local users"), guard, &state_lock); - } + AdminCommand::ListLocalUsers => { + // collect all local users + let users = guard.users.iter_locals(); + + let mut msg: String = format!("Found {} local user account(s):\n", users.len()); + msg += &users.join("\n"); + + // send number of local users as plain text: + // TODO: send as Markdown + send_message(RoomMessageEventContent::text_plain(&msg), guard, &state_lock); + } AdminCommand::RegisterAppservice(yaml) => { guard.appservice.register_appservice(yaml).unwrap(); // TODO handle error diff --git a/src/database/rooms.rs b/src/database/rooms.rs index b1dd103c..4d839d38 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1531,8 +1531,8 @@ impl Rooms { "list_appservices" => { db.admin.send(AdminCommand::ListAppservices); } - "count_local_users" => { - db.admin.send(AdminCommand::CountLocalUsers); + "list_local_users" => { + db.admin.send(AdminCommand::ListLocalUsers); } "get_auth_chain" => { if args.len() == 1 { diff --git a/src/database/users.rs b/src/database/users.rs index 1e103fa2..d3e1fe43 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -84,6 +84,8 @@ impl Users { Ok(self.userid_password.iter().count()) } + /// The method is DEPRECATED and was replaced by iter_locals() + /// /// This method will only count those local user accounts with /// a password thus returning only real accounts on this instance. #[tracing::instrument(skip(self))] @@ -92,6 +94,7 @@ impl Users { Ok(n) } + /// Find out which user an access token belongs to. 
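[Aside, not part of the patch: to make the output of the new `list_local_users` admin command concrete, here is a self-contained sketch of the plain-text reply assembled in the `ListLocalUsers` arm above. The usernames are invented; the real handler gets them from `users.iter_locals()`.]

```rust
// Mirrors the formatting used in the AdminCommand::ListLocalUsers handler above.
fn format_user_list(users: &[String]) -> String {
    let mut msg: String = format!("Found {} local user account(s):\n", users.len());
    msg += &users.join("\n");
    msg
}

fn main() {
    let users = vec![
        "@alice:example.com".to_owned(),
        "@bob:example.com".to_owned(),
    ];
    assert_eq!(
        format_user_list(&users),
        "Found 2 local user account(s):\n@alice:example.com\n@bob:example.com"
    );
}
```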
#[tracing::instrument(skip(self, token))] pub fn find_from_token(&self, token: &str) -> Result, String)>> { @@ -131,6 +134,17 @@ impl Users { }) } + /// Returns a vector of local usernames + #[tracing::instrument(skip(self))] + pub fn iter_locals(&self) -> Vec { + self.userid_password.iter().filter(|(_, pw)| pw.len() > 0).map(|(username, _)| { + match utils::string_from_bytes(&username) { + Ok(s) => s, + Err(e) => e.to_string() + } + }).collect::>() + } + /// Returns the password hash for the given user. #[tracing::instrument(skip(self, user_id))] pub fn password_hash(&self, user_id: &UserId) -> Result> { From 8d51359668199585175e7e0095de66e96bc1a3e1 Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Sun, 26 Dec 2021 20:49:19 +0100 Subject: [PATCH 041/445] Fix typo and remove unneeded newline --- src/database/admin.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index 58d9e83e..5418f53a 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -95,7 +95,7 @@ impl Admin { match event { AdminCommand::ListLocalUsers => { - // collect all local users + // collect local users only let users = guard.users.iter_locals(); let mut msg: String = format!("Found {} local user account(s):\n", users.len()); @@ -104,7 +104,6 @@ impl Admin { // send number of local users as plain text: // TODO: send as Markdown send_message(RoomMessageEventContent::text_plain(&msg), guard, &state_lock); - } AdminCommand::RegisterAppservice(yaml) => { guard.appservice.register_appservice(yaml).unwrap(); // TODO handle error From b746f17e562ba02d9471d23c42c9bb8c9f4ee070 Mon Sep 17 00:00:00 2001 From: Ticho 34782694 Date: Fri, 7 Jan 2022 13:06:21 +0000 Subject: [PATCH 042/445] Make traefik+nginx config more self-contained The nginx instance which is serving the .well-known endpoints can serve the simple JSON replies directly from memory, instead of having them as external files on disk. --- docker/README.md | 30 ++++++++++++------------------ 1 file changed, 12 insertions(+), 18 deletions(-) diff --git a/docker/README.md b/docker/README.md index 19d9dca6..1f38d66a 100644 --- a/docker/README.md +++ b/docker/README.md @@ -94,26 +94,20 @@ So...step by step: server_name .; listen 80 default_server; - location /.well-known/matrix/ { - root /var/www; - default_type application/json; - add_header Access-Control-Allow-Origin *; + location /.well-known/matrix/server { + return 200 '{"m.server": ".:443"}'; + add_header Content-Type application/json; } - } - ``` - - `./nginx/www/.well-known/matrix/client` (relative to the compose file, you can change this, but then also need to change the volume mapping) - ```json - { - "m.homeserver": { - "base_url": "https://." 
- } - } - ``` - - `./nginx/www/.well-known/matrix/server` (relative to the compose file, you can change this, but then also need to change the volume mapping) - ```json - { - "m.server": ".:443" + location /.well-known/matrix/client { + return 200 '{"m.homeserver": {"base_url": "https://."}}'; + add_header Content-Type application/json; + add_header "Access-Control-Allow-Origin" *; + } + + location / { + return 404; + } } ``` From 349865d3ccb9ee78c9410de28e0d5d8c043ae0c8 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 13 Jan 2022 11:44:23 +0100 Subject: [PATCH 043/445] Upgrade Ruma --- Cargo.lock | 36 ++++++++++++++++++------------------ Cargo.toml | 2 +- src/client_server/message.rs | 4 ++-- 3 files changed, 21 insertions(+), 21 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 69a026b2..07cae94e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1990,7 +1990,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "assign", "js_int", @@ -2011,7 +2011,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "bytes", "http", @@ -2027,7 +2027,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2038,7 +2038,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "ruma-api", "ruma-common", @@ -2052,7 +2052,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.12.3" -source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "assign", "bytes", @@ -2072,7 +2072,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "indexmap", "js_int", @@ -2087,7 +2087,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = 
"git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "indoc", "js_int", @@ -2103,7 +2103,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2114,7 +2114,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "js_int", "ruma-api", @@ -2129,7 +2129,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "percent-encoding", "rand 0.8.4", @@ -2143,7 +2143,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2153,7 +2153,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "thiserror", ] @@ -2161,7 +2161,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "js_int", "ruma-api", @@ -2174,7 +2174,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "js_int", "ruma-api", @@ -2189,7 +2189,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "bytes", "form_urlencoded", @@ -2203,7 +2203,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.5.0" -source = 
"git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2214,7 +2214,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2231,7 +2231,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.4.1" -source = "git+https://github.com/ruma/ruma?rev=7cf3abbaf02995b03db74429090ca5af1cd71edc#7cf3abbaf02995b03db74429090ca5af1cd71edc" +source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ "itertools 0.10.1", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 7b3432c2..5e09dee0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle request # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "7cf3abbaf02995b03db74429090ca5af1cd71edc", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "f8ba7f795765bf4aeb4db06849f9fdde9c162ac3", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 60c756a3..da6ae875 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -160,7 +160,7 @@ pub async fn get_message_events_route( .collect(); let mut resp = get_message_events::Response::new(); - resp.start = Some(body.from.to_owned()); + resp.start = body.from.to_owned(); resp.end = end_token; resp.chunk = events_after; resp.state = Vec::new(); @@ -190,7 +190,7 @@ pub async fn get_message_events_route( .collect(); let mut resp = get_message_events::Response::new(); - resp.start = Some(body.from.to_owned()); + resp.start = body.from.to_owned(); resp.end = start_token; resp.chunk = events_before; resp.state = Vec::new(); From cf54185a1cfe6b7cbed4c8c472198360aa705663 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 13 Jan 2022 11:48:18 +0100 Subject: [PATCH 044/445] Use struct literals for consistency --- src/client_server/context.rs | 25 
+++++++++++++------------ src/client_server/message.rs | 22 ++++++++++++---------- src/client_server/unversioned.rs | 9 ++++----- 3 files changed, 29 insertions(+), 27 deletions(-) diff --git a/src/client_server/context.rs b/src/client_server/context.rs index 97fc4fd8..9bfec9e1 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -92,18 +92,19 @@ pub async fn get_context_route( .map(|(_, pdu)| pdu.to_room_event()) .collect(); - let mut resp = get_context::Response::new(); - resp.start = start_token; - resp.end = end_token; - resp.events_before = events_before; - resp.event = Some(base_event); - resp.events_after = events_after; - resp.state = db // TODO: State at event - .rooms - .room_state_full(&body.room_id)? - .values() - .map(|pdu| pdu.to_state_event()) - .collect(); + let resp = get_context::Response { + start: start_token, + end: end_token, + events_before, + event: Some(base_event), + events_after, + state: db // TODO: State at event + .rooms + .room_state_full(&body.room_id)? + .values() + .map(|pdu| pdu.to_state_event()) + .collect(), + }; Ok(resp.into()) } diff --git a/src/client_server/message.rs b/src/client_server/message.rs index da6ae875..cbce019e 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -159,11 +159,12 @@ pub async fn get_message_events_route( .map(|(_, pdu)| pdu.to_room_event()) .collect(); - let mut resp = get_message_events::Response::new(); - resp.start = body.from.to_owned(); - resp.end = end_token; - resp.chunk = events_after; - resp.state = Vec::new(); + let resp = get_message_events::Response { + start: body.from.to_owned(), + end: end_token, + chunk: events_after, + state: Vec::new(), + }; Ok(resp.into()) } @@ -189,11 +190,12 @@ pub async fn get_message_events_route( .map(|(_, pdu)| pdu.to_room_event()) .collect(); - let mut resp = get_message_events::Response::new(); - resp.start = body.from.to_owned(); - resp.end = start_token; - resp.chunk = events_before; - resp.state = Vec::new(); + let resp = get_message_events::Response { + start: body.from.to_owned(), + end: start_token, + chunk: events_before, + state: Vec::new(), + }; Ok(resp.into()) } diff --git a/src/client_server/unversioned.rs b/src/client_server/unversioned.rs index f2624bbc..f17d8cf3 100644 --- a/src/client_server/unversioned.rs +++ b/src/client_server/unversioned.rs @@ -17,11 +17,10 @@ use rocket::get; #[cfg_attr(feature = "conduit_bin", get("/_matrix/client/versions"))] #[tracing::instrument] pub async fn get_supported_versions_route() -> ConduitResult { - let mut resp = - get_supported_versions::Response::new(vec!["r0.5.0".to_owned(), "r0.6.0".to_owned()]); - - resp.unstable_features - .insert("org.matrix.e2e_cross_signing".to_owned(), true); + let resp = get_supported_versions::Response { + versions: vec!["r0.5.0".to_owned(), "r0.6.0".to_owned()], + unstable_features: [("org.matrix.e2e_cross_signing".to_owned(), true)].into(), + }; Ok(resp.into()) } From 84862352bacd7172602f1b8200a774d668a9f087 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 13 Jan 2022 11:48:40 +0100 Subject: [PATCH 045/445] Replace to_string calls on string literals with to_owned --- src/database/uiaa.rs | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index 772dab9e..5e11467e 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -154,11 +154,7 @@ impl Uiaa { .write() .unwrap() .insert( - ( - user_id.to_owned(), - device_id.to_owned(), - session.to_string(), - ), + 
(user_id.to_owned(), device_id.to_owned(), session.to_owned()), request.to_owned(), ); @@ -175,11 +171,7 @@ impl Uiaa { .userdevicesessionid_uiaarequest .read() .unwrap() - .get(&( - user_id.to_owned(), - device_id.to_owned(), - session.to_string(), - )) + .get(&(user_id.to_owned(), device_id.to_owned(), session.to_owned())) .map(|j| j.to_owned())) } From bcf4ede0bc356efb4bd8b8909ca3db0ab157f97e Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 13 Jan 2022 12:06:20 +0100 Subject: [PATCH 046/445] Restore compatibility with Rust 1.53 --- src/client_server/unversioned.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/client_server/unversioned.rs b/src/client_server/unversioned.rs index f17d8cf3..ea685b4b 100644 --- a/src/client_server/unversioned.rs +++ b/src/client_server/unversioned.rs @@ -1,3 +1,5 @@ +use std::{collections::BTreeMap, iter::FromIterator}; + use crate::ConduitResult; use ruma::api::client::unversioned::get_supported_versions; @@ -19,7 +21,7 @@ use rocket::get; pub async fn get_supported_versions_route() -> ConduitResult { let resp = get_supported_versions::Response { versions: vec!["r0.5.0".to_owned(), "r0.6.0".to_owned()], - unstable_features: [("org.matrix.e2e_cross_signing".to_owned(), true)].into(), + unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]), }; Ok(resp.into()) From eecd664c43c652f7fe4afc06154b346fc6a45b58 Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Thu, 13 Jan 2022 12:26:23 +0100 Subject: [PATCH 047/445] Reformat code --- src/database/appservice.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/database/appservice.rs b/src/database/appservice.rs index 847d7479..88de1f33 100644 --- a/src/database/appservice.rs +++ b/src/database/appservice.rs @@ -28,17 +28,17 @@ impl Appservice { } /// Remove an appservice registration - /// + /// /// # Arguments - /// + /// /// * `service_name` - the name you send to register the service previously pub fn unregister_appservice(&self, service_name: &str) -> Result<()> { self.id_appserviceregistrations .remove(service_name.as_bytes())?; - self.cached_registrations. - write(). - unwrap(). 
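[Aside, not part of the patch: a tiny standalone version of the Rust 1.53 compatibility fix above. The commit replaces the array-to-map `.into()` conversion, which needs a newer toolchain than 1.53 (exact version not stated in the patch), with `BTreeMap::from_iter`, which already compiles there because arrays can be iterated by value.]

```rust
// Standalone version of the unversioned.rs change above: build the
// unstable_features map with FromIterator instead of `[...].into()`.
use std::{collections::BTreeMap, iter::FromIterator};

fn main() {
    let unstable_features: BTreeMap<String, bool> =
        BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]);
    assert_eq!(
        unstable_features.get("org.matrix.e2e_cross_signing"),
        Some(&true)
    );
}
```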
- remove(service_name); + self.cached_registrations + .write() + .unwrap() + .remove(service_name); Ok(()) } From 1d647a1a9a0a3075ee1bdbe2a039d22ee73baa2f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 16 Oct 2021 15:19:25 +0200 Subject: [PATCH 048/445] improvement: allow rocksdb again --- Cargo.lock | 723 +++++++++++++++++----------- Cargo.toml | 4 +- src/database.rs | 11 +- src/database/abstraction.rs | 5 +- src/database/abstraction/rocksdb.rs | 183 +++++++ src/database/abstraction/sqlite.rs | 14 +- src/error.rs | 6 + src/utils.rs | 11 + 8 files changed, 662 insertions(+), 295 deletions(-) create mode 100644 src/database/abstraction/rocksdb.rs diff --git a/Cargo.lock b/Cargo.lock index 07cae94e..794445f9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -10,9 +10,9 @@ checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" [[package]] name = "ahash" -version = "0.7.4" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43bb833f0bf979d8475d38fbf09ed3b8a55e1885fe93ad3f93239fc6a4f17b98" +checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ "getrandom 0.2.3", "once_cell", @@ -78,9 +78,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.51" +version = "0.1.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44318e776df68115a881de9a8fd1b9e53368d7a4a5ce4cc48517da3393233a5e" +checksum = "061a7acccaa286c011ddc30970520b98fa40e00c9d644633fb26b5fc63a265e3" dependencies = [ "proc-macro2", "quote", @@ -89,9 +89,9 @@ dependencies = [ [[package]] name = "atomic" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3410529e8288c463bedb5930f82833bc0c90e5d2fe639a56582a4d09220b281" +checksum = "b88d82667eca772c4aa12f0f1348b3ae643424c8876448f3f7bd5787032e234c" dependencies = [ "autocfg", ] @@ -146,6 +146,25 @@ dependencies = [ "serde", ] +[[package]] +name = "bindgen" +version = "0.59.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" +dependencies = [ + "bitflags", + "cexpr", + "clang-sys", + "lazy_static", + "lazycell", + "peeking_take_while", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", +] + [[package]] name = "bitflags" version = "1.3.2" @@ -174,15 +193,15 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.7.0" +version = "3.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c59e7af012c713f529e7a3ee57ce9b31ddd858d4b512923602f74608b009631" +checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899" [[package]] name = "bytemuck" -version = "1.7.2" +version = "1.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72957246c41db82b8ef88a5486143830adeb8227ef9837740bdec67724cf2c5b" +checksum = "439989e6b8c38d1b6570a384ef1e49c8848128f5a97f3914baef02920842712f" [[package]] name = "byteorder" @@ -198,13 +217,22 @@ checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" [[package]] name = "cc" -version = "1.0.70" +version = "1.0.72" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d26a6ce4b6a484fa3edb70f7efa6fc430fd2b87285fe8b84304fd0936faa0dc0" +checksum = "22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee" dependencies = [ "jobserver", ] +[[package]] +name = "cexpr" +version = "0.6.0" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", +] + [[package]] name = "cfg-if" version = "0.1.10" @@ -230,6 +258,17 @@ dependencies = [ "winapi", ] +[[package]] +name = "clang-sys" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fa66045b9cb23c2e9c1520732030608b02ee07e5cfaa5a521ec15ded7fa24c90" +dependencies = [ + "glob", + "libc", + "libloading", +] + [[package]] name = "color_quant" version = "1.1.0" @@ -259,11 +298,12 @@ dependencies = [ "reqwest", "ring", "rocket", + "rocksdb", "ruma", "rusqlite", "rust-argon2", - "rustls", - "rustls-native-certs", + "rustls 0.19.1", + "rustls-native-certs 0.5.0", "serde", "serde_json", "serde_yaml", @@ -275,22 +315,22 @@ dependencies = [ "tokio", "tracing", "tracing-flame", - "tracing-subscriber", + "tracing-subscriber 0.2.25", "trust-dns-resolver", "webpki 0.22.0", ] [[package]] name = "const-oid" -version = "0.6.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44c32f031ea41b4291d695026c023b95d59db2d8a2c7640800ed56bc8f510f22" +checksum = "9d6f2aa4d0537bcc1c74df8755072bd31c1ef1a3a1b85a68e8404a8c353b7b8b" [[package]] name = "const_fn" -version = "0.4.8" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f92cfa0fd5690b3cf8c1ef2cabbd9b7ef22fa53cf5e1f92b05103f6d5d1cf6e7" +checksum = "fbdcdcb6d86f71c5e97409ad45898af11cbc995b4ee8112d59095a28d376c935" [[package]] name = "constant_time_eq" @@ -311,9 +351,9 @@ dependencies = [ [[package]] name = "core-foundation" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a89e2ae426ea83155dccf10c0fa6b1463ef6d5fcb44cee0b224a408fa640a62" +checksum = "6888e10551bb93e424d8df1d07f1a8b4fceb0001a3a4b048bfc47554946f47b3" dependencies = [ "core-foundation-sys", "libc", @@ -321,9 +361,9 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b" +checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" [[package]] name = "cpufeatures" @@ -336,9 +376,9 @@ dependencies = [ [[package]] name = "crc32fast" -version = "1.2.1" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" +checksum = "738c290dfaea84fc1ca15ad9c168d083b05a714e1efddd8edaab678dc28d2836" dependencies = [ "cfg-if 1.0.0", ] @@ -353,18 +393,18 @@ dependencies = [ "crossbeam-channel", "crossbeam-deque", "crossbeam-epoch", - "crossbeam-queue 0.3.2", - "crossbeam-utils 0.8.5", + "crossbeam-queue 0.3.3", + "crossbeam-utils 0.8.6", ] [[package]] name = "crossbeam-channel" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ed27e177f16d65f0f0c22a213e17c696ace5dd64b14258b52f9417ccb52db4" +checksum = "e54ea8bc3fb1ee042f5aace6e3c6e025d3874866da222930f70ce62aceba0bfa" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.5", + "crossbeam-utils 0.8.6", ] [[package]] @@ -375,17 +415,17 @@ checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" dependencies = [ "cfg-if 1.0.0", "crossbeam-epoch", - "crossbeam-utils 0.8.5", + "crossbeam-utils 0.8.6", ] [[package]] 
name = "crossbeam-epoch" -version = "0.9.5" +version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec02e091aa634e2c3ada4a392989e7c3116673ef0ac5b72232439094d73b7fd" +checksum = "97242a70df9b89a65d0b6df3c4bf5b9ce03c5b7309019777fbde37e7537f8762" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.5", + "crossbeam-utils 0.8.6", "lazy_static", "memoffset", "scopeguard", @@ -402,12 +442,12 @@ dependencies = [ [[package]] name = "crossbeam-queue" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b10ddc024425c88c2ad148c1b0fd53f4c6d38db9697c9f1588381212fa657c9" +checksum = "b979d76c9fcb84dffc80a73f7290da0f83e4c95773494674cb44b76d13a7a110" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.5", + "crossbeam-utils 0.8.6", ] [[package]] @@ -422,9 +462,9 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d82cfc11ce7f2c3faef78d8a684447b40d503d9681acebed6cb728d45940c4db" +checksum = "cfcae03edb34f947e64acdb1c33ec169824e20657e9ecb61cef6c8c74dcb8120" dependencies = [ "cfg-if 1.0.0", "lazy_static", @@ -471,9 +511,9 @@ dependencies = [ [[package]] name = "der" -version = "0.4.1" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e21d2d0f22cde6e88694108429775c0219760a07779bf96503b434a03d7412" +checksum = "79b71cca7d95d7681a4b3b9cdf63c8dbc3730d0584c2c74e31416d64a90493f4" dependencies = [ "const-oid", ] @@ -546,17 +586,11 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" -[[package]] -name = "dtoa" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56899898ce76aaf4a0f24d914c97ea6ed976d42fec6ad33fcbb0a1103e07b2b0" - [[package]] name = "ed25519" -version = "1.2.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4620d40f6d2601794401d6dd95a5cf69b6c157852539470eeda433a99b3c0efc" +checksum = "74e1069e39f1454367eb2de793ed062fac4c35c2934b76a81d90dd9abcd28816" dependencies = [ "signature", ] @@ -583,9 +617,9 @@ checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" [[package]] name = "encoding_rs" -version = "0.8.28" +version = "0.8.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80df024fbc5ac80f87dfef0d9f5209a252f2a497f7f42944cff24d8253cac065" +checksum = "7896dc8abb250ffdda33912550faa54c88ec8b998dec0b2c55ab224921ce11df" dependencies = [ "cfg-if 1.0.0", ] @@ -614,6 +648,15 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" +[[package]] +name = "fastrand" +version = "1.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "779d043b6a0b90cc4c0ed7ee380a6504394cee7efd7db050e3774eee387324b2" +dependencies = [ + "instant", +] + [[package]] name = "figment" version = "0.10.6" @@ -656,9 +699,9 @@ dependencies = [ [[package]] name = "futures" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12aa0eb539080d55c3f2d45a67c3b58b6b0773c1a3ca2dfec66d58c97fd66ca" +checksum = "28560757fe2bb34e79f907794bb6b22ae8b0e5c669b638a1132f2592b19035b4" dependencies = [ "futures-channel", 
"futures-core", @@ -671,9 +714,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5da6ba8c3bb3c165d3c7319fc1cc8304facf1fb8db99c5de877183c08a273888" +checksum = "ba3dda0b6588335f360afc675d0564c17a77a2bda81ca178a4b6081bd86c7f0b" dependencies = [ "futures-core", "futures-sink", @@ -681,15 +724,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d1c26957f23603395cd326b0ffe64124b818f4449552f960d815cfba83a53d" +checksum = "d0c8ff0461b82559810cdccfde3215c3f373807f5e5232b71479bff7bb2583d7" [[package]] name = "futures-executor" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45025be030969d763025784f7f355043dc6bc74093e4ecc5000ca4dc50d8745c" +checksum = "29d6d2ff5bb10fb95c85b8ce46538a2e5f5e7fdc755623a7d4529ab8a4ed9d2a" dependencies = [ "futures-core", "futures-task", @@ -698,18 +741,16 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "522de2a0fe3e380f1bc577ba0474108faf3f6b18321dbf60b3b9c39a75073377" +checksum = "b1f9d34af5a1aac6fb380f735fe510746c38067c5bf16c7fd250280503c971b2" [[package]] name = "futures-macro" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18e4a4b95cea4b4ccbcf1c5675ca7c4ee4e9e75eb79944d07defde18068f79bb" +checksum = "6dbd947adfffb0efc70599b3ddcf7b5597bb5fa9e245eb99f62b3a5f7bb8bd3c" dependencies = [ - "autocfg", - "proc-macro-hack", "proc-macro2", "quote", "syn", @@ -717,23 +758,22 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36ea153c13024fe480590b3e3d4cad89a0cfacecc24577b68f86c6ced9c2bc11" +checksum = "e3055baccb68d74ff6480350f8d6eb8fcfa3aa11bdc1a1ae3afdd0514617d508" [[package]] name = "futures-task" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d3d00f4eddb73e498a54394f228cd55853bdf059259e8e7bc6e69d408892e99" +checksum = "6ee7c6485c30167ce4dfb83ac568a849fe53274c831081476ee13e0dce1aad72" [[package]] name = "futures-util" -version = "0.3.17" +version = "0.3.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36568465210a3a6ee45e1f165136d68671471a501e632e9a98d96872222b5481" +checksum = "d9b5cf40b47a271f77a8b1bec03ca09044d99d2372c0de244e66430761127164" dependencies = [ - "autocfg", "futures-channel", "futures-core", "futures-io", @@ -743,8 +783,6 @@ dependencies = [ "memchr", "pin-project-lite", "pin-utils", - "proc-macro-hack", - "proc-macro-nested", "slab", ] @@ -772,9 +810,9 @@ dependencies = [ [[package]] name = "generic-array" -version = "0.14.4" +version = "0.14.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" +checksum = "fd48d33ec7f05fbfa152300fdad764757cbded343c1aa1cff2fbaf4134851803" dependencies = [ "typenum", "version_check", @@ -804,9 +842,9 @@ dependencies = [ [[package]] name = "gif" -version = "0.11.2" +version = "0.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5a668f699973d0f573d15749b7002a9ac9e1f9c6b220e7b165601334c173d8de" +checksum = "c3a7187e78088aead22ceedeee99779455b23fc231fe13ec443f99bb71694e5b" dependencies = [ "color_quant", "weezl", @@ -820,9 +858,9 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "h2" -version = "0.3.4" +version = "0.3.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7f3675cfef6a30c8031cf9e6493ebdc3bb3272a3fea3923c4210d1830e6a472" +checksum = "0c9de88456263e249e241fcd211d3954e2c9b0ef7ccfc235a444eb367cae3689" dependencies = [ "bytes", "fnv", @@ -932,20 +970,20 @@ dependencies = [ [[package]] name = "http" -version = "0.2.4" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "527e8c9ac747e28542699a951517aa9a6945af506cd1f2e1b53a576c17b6cc11" +checksum = "31f4c6746584866f0feabcc69893c5b51beef3831656a968ed7ae254cdc4fd03" dependencies = [ "bytes", "fnv", - "itoa 0.4.8", + "itoa 1.0.1", ] [[package]] name = "http-body" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "399c583b2979440c60be0821a6199eca73bc3c8dcd9d070d75ac726e2c6186e5" +checksum = "1ff4f84919677303da5f147645dbea6b1881f368d03ac84e1dc09031ebd7b2c6" dependencies = [ "bytes", "http", @@ -960,15 +998,15 @@ checksum = "acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503" [[package]] name = "httpdate" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6456b8a6c8f33fee7d958fcd1b60d55b11940a79e63ae87013e6d22e26034440" +checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "hyper" -version = "0.14.12" +version = "0.14.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13f67199e765030fa08fe0bd581af683f0d5bc04ea09c2b1102012c5fb90e7fd" +checksum = "b7ec3e62bdc98a2f0393a5048e4c30ef659440ea6e0e572965103e72bd836f55" dependencies = [ "bytes", "futures-channel", @@ -981,7 +1019,7 @@ dependencies = [ "httpdate", "itoa 0.4.8", "pin-project-lite", - "socket2 0.4.1", + "socket2 0.4.2", "tokio", "tower-service", "tracing", @@ -990,17 +1028,15 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.22.1" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f9f7a97316d44c0af9b0301e65010573a853a9fc97046d7331d7f6bc0fd5a64" +checksum = "d87c48c02e0dc5e3b849a2041db3029fd066650f8f717c07bf8ed78ccb895cac" dependencies = [ - "futures-util", + "http", "hyper", - "log", - "rustls", + "rustls 0.20.2", "tokio", - "tokio-rustls", - "webpki 0.21.4", + "tokio-rustls 0.23.2", ] [[package]] @@ -1033,9 +1069,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc633605454125dec4b66843673f01c7df2b89479b32e0ed634e43a91cff62a5" +checksum = "282a6247722caba404c065016bbfa522806e51714c34f5dfc3e4a3a46fcb4223" dependencies = [ "autocfg", "hashbrown", @@ -1053,15 +1089,15 @@ dependencies = [ [[package]] name = "inlinable_string" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3094308123a0e9fd59659ce45e22de9f53fc1d2ac6e1feb9fef988e4f76cad77" +checksum = "c8fae54786f62fb2918dcfae3d568594e50eb9b5c25bf04371af6fe7516452fb" [[package]] name = "instant" -version = "0.1.10" +version = "0.1.12" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "bee0328b1209d157ef001c94dd85b4f8f64139adb0eac2659f4b08382b2f474d" +checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" dependencies = [ "cfg-if 1.0.0", ] @@ -1092,18 +1128,9 @@ checksum = "68f2d64f2edebec4ce84ad108148e67e1064789bee435edc5b60ad398714a3a9" [[package]] name = "itertools" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b" -dependencies = [ - "either", -] - -[[package]] -name = "itertools" -version = "0.10.1" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69ddb889f9d0d08a67338271fa9b62996bc788c7796a5c18cf057420aaed5eaf" +checksum = "a9a9d19fa1e79b6215ff29b9d6880b706147f16e9b1dbb1e4e5947b5b02bc5e3" dependencies = [ "either", ] @@ -1137,9 +1164,9 @@ checksum = "229d53d58899083193af11e15917b5640cd40b29ff475a1fe4ef725deb02d0f2" [[package]] name = "js-sys" -version = "0.3.53" +version = "0.3.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4bf49d50e2961077d9c99f4b7997d770a1114f087c3c2e0069b36c13fc2979d" +checksum = "7cc9ffccd38c451a86bf13657df244e9c3f37493cce8e5e21e940963777acc84" dependencies = [ "wasm-bindgen", ] @@ -1173,11 +1200,39 @@ version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +[[package]] +name = "lazycell" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" + [[package]] name = "libc" -version = "0.2.101" +version = "0.2.112" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cb00336871be5ed2c8ed44b60ae9959dc5b9f08539422ed43f09e34ecaeba21" +checksum = "1b03d17f364a3a042d5e5d46b053bbbf82c92c9430c592dd4c064dc6ee997125" + +[[package]] +name = "libloading" +version = "0.7.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "afe203d669ec979b7128619bae5a63b7b42e9203c1b29146079ee05e2f604b52" +dependencies = [ + "cfg-if 1.0.0", + "winapi", +] + +[[package]] +name = "librocksdb-sys" +version = "6.20.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c309a9d2470844aceb9a4a098cf5286154d20596868b75a6b36357d2bb9ca25d" +dependencies = [ + "bindgen", + "cc", + "glob", + "libc", +] [[package]] name = "libsqlite3-sys" @@ -1227,15 +1282,17 @@ dependencies = [ [[package]] name = "loom" -version = "0.5.1" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2111607c723d7857e0d8299d5ce7a0bf4b844d3e44f8de136b13da513eaf8fc4" +checksum = "edc5c7d328e32cc4954e8e01193d7f0ef5ab257b5090b70a964e099a36034309" dependencies = [ "cfg-if 1.0.0", "generator", "scoped-tls", "serde", "serde_json", + "tracing", + "tracing-subscriber 0.3.5", ] [[package]] @@ -1268,6 +1325,15 @@ dependencies = [ "regex-automata", ] +[[package]] +name = "matchers" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +dependencies = [ + "regex-automata", +] + [[package]] name = "matches" version = "0.1.9" @@ -1282,9 +1348,9 @@ checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" [[package]] name = "memoffset" -version = "0.6.4" +version = "0.6.5" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "59accc507f1338036a0477ef61afdae33cde60840f4dfe481319ce3ad116ddf9" +checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" dependencies = [ "autocfg", ] @@ -1295,6 +1361,12 @@ version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + [[package]] name = "miniz_oxide" version = "0.3.7" @@ -1306,9 +1378,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.7.13" +version = "0.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c2bdb6314ec10835cd3293dd268473a835c02b7b352e788be788b3c6ca6bb16" +checksum = "8067b404fe97c70829f082dec8bcf4f71225d7eaea1d8645349cb76fa06205cc" dependencies = [ "libc", "log", @@ -1328,9 +1400,9 @@ dependencies = [ [[package]] name = "multer" -version = "2.0.1" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "408327e2999b839cd1af003fc01b2019a6c10a1361769542203f6fedc5179680" +checksum = "5f8f35e687561d5c1667590911e6698a8cb714a134a7505718a182e7bc9d3836" dependencies = [ "bytes", "encoding_rs", @@ -1338,11 +1410,22 @@ dependencies = [ "http", "httparse", "log", + "memchr", "mime", "spin 0.9.2", "tokio", "tokio-util", - "twoway", + "version_check", +] + +[[package]] +name = "nom" +version = "7.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1b1d11e1ef389c76fe5b81bcaf2ea32cf88b62bc494e19f493d0b30e7a930109" +dependencies = [ + "memchr", + "minimal-lexical", "version_check", ] @@ -1409,9 +1492,9 @@ dependencies = [ [[package]] name = "num_cpus" -version = "1.13.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" +checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" dependencies = [ "hermit-abi", "libc", @@ -1419,9 +1502,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.8.0" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56" +checksum = "da32515d9f6e6e489d7bc9d84c71b060db7247dc035bbe44eac88cf87486d8d5" [[package]] name = "opaque-debug" @@ -1431,9 +1514,9 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl-probe" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "opentelemetry" @@ -1545,6 +1628,12 @@ dependencies = [ "syn", ] +[[package]] +name = "peeking_take_while" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" + [[package]] name = "pem" version = "0.8.3" @@ -1564,18 +1653,18 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "pin-project" -version = "1.0.8" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"576bc800220cc65dac09e99e97b08b358cfab6e17078de8dc5fee223bd2d0c08" +checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.8" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e8fe8163d14ce7f0cdac2e040116f22eac817edabff0be91e8aff7e9accf389" +checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb" dependencies = [ "proc-macro2", "quote", @@ -1584,9 +1673,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.2.7" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d31d11c69a6b52a174b42bdc0c30e5e11670f90788b2c471c31c1d17d449443" +checksum = "e280fbe77cc62c91527259e9442153f4688736748d24660126286329742b4c6c" [[package]] name = "pin-utils" @@ -1596,9 +1685,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkcs8" -version = "0.7.5" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbee84ed13e44dd82689fa18348a49934fa79cc774a344c42fc9b301c71b140a" +checksum = "ee3ef9b64d26bad0536099c816c6734379e45bbd5f14798def6809e5cc350447" dependencies = [ "der", "spki", @@ -1607,9 +1696,9 @@ dependencies = [ [[package]] name = "pkg-config" -version = "0.3.19" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" +checksum = "58893f751c9b0412871a09abd62ecd2a00298c6c83befa223ef98c52aef40cbe" [[package]] name = "png" @@ -1625,15 +1714,15 @@ dependencies = [ [[package]] name = "ppv-lite86" -version = "0.2.10" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" +checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" [[package]] name = "proc-macro-crate" -version = "1.0.0" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41fdbd1df62156fbc5945f4762632564d7d038153091c3fcf1067f6aef7cff92" +checksum = "1ebace6889caf889b4d3f76becee12e90353f2b8c7d875534a71e5742f8f6f83" dependencies = [ "thiserror", "toml", @@ -1645,17 +1734,11 @@ version = "0.5.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" -[[package]] -name = "proc-macro-nested" -version = "0.1.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" - [[package]] name = "proc-macro2" -version = "1.0.29" +version = "1.0.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9f5105d4fdaab20335ca9565e106a5d9b82b6219b5ba735731124ac6711d23d" +checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029" dependencies = [ "unicode-xid", ] @@ -1681,9 +1764,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.9" +version = "1.0.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" +checksum = "47aa80447ce4daf1717500037052af176af5d38cc3e571d9ec1c7353fc10c87d" dependencies = [ "proc-macro2", ] @@ -1845,15 +1928,16 @@ dependencies = [ [[package]] 
name = "reqwest" -version = "0.11.4" +version = "0.11.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "246e9f61b9bb77df069a947682be06e31ac43ea37862e244a69f177694ea6d22" +checksum = "87f242f1488a539a79bac6dbe7c8609ae43b7914b7736210f239a37cccb32525" dependencies = [ "base64 0.13.0", "bytes", "encoding_rs", "futures-core", "futures-util", + "h2", "http", "http-body", "hyper", @@ -1865,12 +1949,14 @@ dependencies = [ "mime", "percent-encoding", "pin-project-lite", - "rustls", - "rustls-native-certs", + "rustls 0.20.2", + "rustls-native-certs 0.6.1", + "rustls-pemfile", "serde", + "serde_json", "serde_urlencoded", "tokio", - "tokio-rustls", + "tokio-rustls 0.23.2", "tokio-socks", "url", "wasm-bindgen", @@ -1983,10 +2069,20 @@ dependencies = [ "state", "time 0.2.27", "tokio", - "tokio-rustls", + "tokio-rustls 0.22.0", "uncased", ] +[[package]] +name = "rocksdb" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c749134fda8bfc90d0de643d59bfc841dcb3ac8a1062e12b6754bd60235c48b3" +dependencies = [ + "libc", + "librocksdb-sys", +] + [[package]] name = "ruma" version = "0.4.0" @@ -2233,7 +2329,7 @@ name = "ruma-state-res" version = "0.4.1" source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" dependencies = [ - "itertools 0.10.1", + "itertools", "js_int", "ruma-common", "ruma-events", @@ -2247,9 +2343,9 @@ dependencies = [ [[package]] name = "rusqlite" -version = "0.25.3" +version = "0.25.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57adcf67c8faaf96f3248c2a7b419a0dbc52ebe36ba83dd57fe83827c1ea4eb3" +checksum = "5c4b1eaf239b47034fb450ee9cdedd7d0226571689d8823030c4b6c2cb407152" dependencies = [ "bitflags", "fallible-iterator", @@ -2269,9 +2365,15 @@ dependencies = [ "base64 0.13.0", "blake2b_simd", "constant_time_eq", - "crossbeam-utils 0.8.5", + "crossbeam-utils 0.8.6", ] +[[package]] +name = "rustc-hash" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" + [[package]] name = "rustc_version" version = "0.2.3" @@ -2290,10 +2392,22 @@ dependencies = [ "base64 0.13.0", "log", "ring", - "sct", + "sct 0.6.1", "webpki 0.21.4", ] +[[package]] +name = "rustls" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d37e5e2290f3e040b594b1a9e04377c2c671f1a1cfd9bfdef82106ac1c113f84" +dependencies = [ + "log", + "ring", + "sct 0.7.0", + "webpki 0.22.0", +] + [[package]] name = "rustls-native-certs" version = "0.5.0" @@ -2301,22 +2415,43 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5a07b7c1885bd8ed3831c289b7870b13ef46fe0e856d288c30d9cc17d75a2092" dependencies = [ "openssl-probe", - "rustls", + "rustls 0.19.1", "schannel", "security-framework", ] +[[package]] +name = "rustls-native-certs" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ca9ebdfa27d3fc180e42879037b5338ab1c040c06affd00d8338598e7800943" +dependencies = [ + "openssl-probe", + "rustls-pemfile", + "schannel", + "security-framework", +] + +[[package]] +name = "rustls-pemfile" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5eebeaeb360c87bfb72e84abdb3447159c0eaececf1bef2aecd65a8be949d1c9" +dependencies = [ + "base64 0.13.0", +] + [[package]] name = "rustversion" -version = "1.0.5" 
+version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61b3909d758bb75c79f23d4736fac9433868679d3ad2ea7a61e3c25cfda9a088" +checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" [[package]] name = "ryu" -version = "1.0.5" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" +checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" [[package]] name = "schannel" @@ -2350,6 +2485,16 @@ dependencies = [ "untrusted", ] +[[package]] +name = "sct" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" +dependencies = [ + "ring", + "untrusted", +] + [[package]] name = "security-framework" version = "2.4.2" @@ -2390,18 +2535,18 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.130" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913" +checksum = "97565067517b60e2d1ea8b268e59ce036de907ac523ad83a0475da04e818989a" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.130" +version = "1.0.133" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7bc1a1ab1961464eae040d96713baa5a724a8152c1222492465b54322ec508b" +checksum = "ed201699328568d8d08208fdd080e3ff594e6c422e438b6705905da01005d537" dependencies = [ "proc-macro2", "quote", @@ -2410,9 +2555,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.73" +version = "1.0.74" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bcbd0344bc6533bc7ec56df11d42fb70f1b912351c0825ccb7211b59d8af7cf5" +checksum = "ee2bb9cd061c5865d345bb02ca49fcef1391741b672b54a0bf7b679badec3142" dependencies = [ "itoa 1.0.1", "ryu", @@ -2433,12 +2578,12 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.8.20" +version = "0.8.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad104641f3c958dab30eb3010e834c2622d1f3f4c530fef1dee20ad9485f3c09" +checksum = "a4a521f2940385c165a24ee286aa8599633d162077a54bdcae2a6fd5a7bfa7a0" dependencies = [ - "dtoa", "indexmap", + "ryu", "serde", "yaml-rust", ] @@ -2464,9 +2609,9 @@ checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" [[package]] name = "sha2" -version = "0.9.6" +version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9204c41a1597a8c5af23c82d1c921cb01ec0a4c59e07a9c7306062829a3903f3" +checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" dependencies = [ "block-buffer", "cfg-if 1.0.0", @@ -2477,13 +2622,19 @@ dependencies = [ [[package]] name = "sharded-slab" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "740223c51853f3145fe7c90360d2d4232f2b62e3449489c207eccde818979982" +checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" dependencies = [ "lazy_static", ] +[[package]] +name = "shlex" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" + [[package]] name = "signal-hook-registry" version = "1.4.0" @@ -2495,9 +2646,9 @@ dependencies 
= [ [[package]] name = "signature" -version = "1.3.1" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c19772be3c4dd2ceaacf03cb41d5885f2a02c4d8804884918e3a258480803335" +checksum = "f054c6c1a6e95179d6f23ed974060dcefb2d9388bb7256900badad682c499de4" [[package]] name = "simple_asn1" @@ -2512,19 +2663,19 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c307a32c1c5c437f38c7fd45d753050587732ba8628319fbdf12a7e289ccc590" +checksum = "9def91fd1e018fe007022791f865d0ccc9b3a0d5001e01aabb8b40e46000afb5" [[package]] name = "sled" -version = "0.34.6" +version = "0.34.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d0132f3e393bcb7390c60bb45769498cf4550bcb7a21d7f95c02b69f6362cdc" +checksum = "7f96b4737c2ce5987354855aed3797279def4ebf734436c6aa4552cf8e169935" dependencies = [ "crc32fast", "crossbeam-epoch", - "crossbeam-utils 0.8.5", + "crossbeam-utils 0.8.6", "fs2", "fxhash", "libc", @@ -2535,9 +2686,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.6.1" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" +checksum = "1ecab6c735a6bb4139c0caafd0cc3635748bbb3acf4550e8138122099251f309" [[package]] name = "socket2" @@ -2552,9 +2703,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "765f090f0e423d2b55843402a07915add955e7d60657db13707a159727326cad" +checksum = "5dc90fe6c7be1a323296982db1836d1ea9e47b6839496dde9a541bc496df3516" dependencies = [ "libc", "winapi", @@ -2574,9 +2725,9 @@ checksum = "511254be0c5bcf062b019a6c89c01a664aa359ded62f78aa72c6fc137c0590e5" [[package]] name = "spki" -version = "0.4.0" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "987637c5ae6b3121aba9d513f869bd2bff11c4cc086c22473befd6649c0bd521" +checksum = "5c01a0c15da1b0b0e1494112e7af814a678fec9bd157881b49beac661e9b6f32" dependencies = [ "der", ] @@ -2665,9 +2816,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.75" +version = "1.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7f58f7e8eaa0009c5fec437aabf511bd9933e4b2d7407bd05273c01a8906ea7" +checksum = "a684ac3dcd8913827e18cd09a68384ee66c1de24157e3c556c9ab16d85695fb7" dependencies = [ "proc-macro2", "quote", @@ -2685,9 +2836,9 @@ dependencies = [ [[package]] name = "synstructure" -version = "0.12.5" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "474aaa926faa1603c40b7885a9eaea29b444d1cb2850cb7c0e37bb1a4182f4fa" +checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ "proc-macro2", "quote", @@ -2697,13 +2848,13 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.2.0" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" +checksum = "5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" dependencies = [ "cfg-if 1.0.0", + "fastrand", "libc", - "rand 0.8.4", "redox_syscall", "remove_dir_all", "winapi", @@ -2711,18 +2862,18 @@ dependencies = [ [[package]] name = "thiserror" -version = 
"1.0.28" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "283d5230e63df9608ac7d9691adc1dfb6e701225436eb64d0b9a7f0a5a04f6ec" +checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.28" +version = "1.0.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa3884228611f5cd3608e2d409bf7dce832e4eb3135e3f11addbd7e41bd68e71" +checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" dependencies = [ "proc-macro2", "quote", @@ -2810,9 +2961,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.3.1" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "848a1e1181b9f6753b5e96a092749e29b11d19ede67dfbbd6c7dc7e0f49b5338" +checksum = "2c1c1d5a42b6245520c249549ec267180beaffcc0615401ac8e31853d4b6d8d2" dependencies = [ "tinyvec_macros", ] @@ -2825,11 +2976,10 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.11.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4efe6fc2395938c8155973d7be49fe8d03a843726e285e100a8a383cc0154ce" +checksum = "fbbf1c778ec206785635ce8ad57fe52b3009ae9e0c9f574a728f3049d3e55838" dependencies = [ - "autocfg", "bytes", "libc", "memchr", @@ -2844,9 +2994,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.3.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54473be61f4ebe4efd09cec9bd5d16fa51d70ea0192213d754d2d500457db110" +checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" dependencies = [ "proc-macro2", "quote", @@ -2859,11 +3009,22 @@ version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" dependencies = [ - "rustls", + "rustls 0.19.1", "tokio", "webpki 0.21.4", ] +[[package]] +name = "tokio-rustls" +version = "0.23.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a27d5f2b839802bd8267fa19b0530f5a08b9c08cd417976be2a65d130fe1c11b" +dependencies = [ + "rustls 0.20.2", + "tokio", + "webpki 0.22.0", +] + [[package]] name = "tokio-socks" version = "0.5.1" @@ -2878,9 +3039,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b2f3f698253f03119ac0102beaa64f67a67e08074d03a22d18784104543727f" +checksum = "50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3" dependencies = [ "futures-core", "pin-project-lite", @@ -2889,9 +3050,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.6.7" +version = "0.6.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1caa0b0c8d94a049db56b5acf8cba99dc0623aab1b26d5b5f5e2d945846b3592" +checksum = "9e99e1983e5d376cd8eb4b66604d2e99e79f5bd988c3055891dcd8c9e2604cc0" dependencies = [ "bytes", "futures-core", @@ -2918,9 +3079,9 @@ checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" -version = "0.1.26" +version = "0.1.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09adeb8c97449311ccd28a427f96fb563e7fd31aabf994189879d9da2394b89d" +checksum = 
"375a639232caf30edfc78e8d89b2d4c375515393e7af7e16f01cd96917fb2105" dependencies = [ "cfg-if 1.0.0", "pin-project-lite", @@ -2930,9 +3091,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.15" +version = "0.1.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c42e6fa53307c8a17e4ccd4dc81cf5ec38db9209f59b222210375b54ee40d1e2" +checksum = "f4f480b8f81512e825f337ad51e94c1eb5d3bbdf2b363dcd01e2b19a9ffe3f8e" dependencies = [ "proc-macro2", "quote", @@ -2941,9 +3102,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.19" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ca517f43f0fb96e0c3072ed5c275fe5eece87e8cb52f4a77b69226d3b1c9df8" +checksum = "1f4ed65637b8390770814083d20756f87bfa2c21bf2f110babdc5438351746e4" dependencies = [ "lazy_static", ] @@ -2956,7 +3117,7 @@ checksum = "bd520fe41c667b437952383f3a1ec14f1fa45d653f719a77eedd6e6a02d8fa54" dependencies = [ "lazy_static", "tracing", - "tracing-subscriber", + "tracing-subscriber 0.2.25", ] [[package]] @@ -2982,14 +3143,14 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.2.20" +version = "0.2.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9cbe87a2fa7e35900ce5de20220a582a9483a7063811defce79d7cbd59d4cfe" +checksum = "0e0d2eaa99c3c2e41547cfa109e910a68ea03823cccad4a0525dcbc9b01e8c71" dependencies = [ "ansi_term", "chrono", "lazy_static", - "matchers", + "matchers 0.0.1", "regex", "serde", "serde_json", @@ -3002,6 +3163,24 @@ dependencies = [ "tracing-serde", ] +[[package]] +name = "tracing-subscriber" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d81bfa81424cc98cb034b837c985b7a290f592e5b4322f353f94a0ab0f9f594" +dependencies = [ + "ansi_term", + "lazy_static", + "matchers 0.1.0", + "regex", + "sharded-slab", + "smallvec", + "thread_local", + "tracing", + "tracing-core", + "tracing-log", +] + [[package]] name = "trust-dns-proto" version = "0.20.3" @@ -3053,21 +3232,11 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" -[[package]] -name = "twoway" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c57ffb460d7c24cd6eda43694110189030a3d1dfe418416d9468fd1c1d290b47" -dependencies = [ - "memchr", - "unchecked-index", -] - [[package]] name = "typenum" -version = "1.14.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b63708a265f51345575b27fe43f9500ad611579e764c79edbc2037b1121959ec" +checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" [[package]] name = "ubyte" @@ -3088,17 +3257,11 @@ dependencies = [ "version_check", ] -[[package]] -name = "unchecked-index" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eeba86d422ce181a719445e51872fa30f1f7413b62becb52e95ec91aa262d85c" - [[package]] name = "unicode-bidi" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "246f4c42e67e7a4e3c6106ff716a5d067d4132a642840b242e357e468a2a0085" +checksum = "1a01404663e3db436ed2746d9fefef640d868edae3cceb81c3b8d5732fda678f" [[package]] name = "unicode-normalization" @@ -3153,9 +3316,9 @@ checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "version_check" 
-version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "want" @@ -3181,21 +3344,19 @@ checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "wasm-bindgen" -version = "0.2.76" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce9b1b516211d33767048e5d47fa2a381ed8b76fc48d2ce4aa39877f9f183e0" +checksum = "632f73e236b219150ea279196e54e610f5dbafa5d61786303d4da54f84e47fce" dependencies = [ "cfg-if 1.0.0", - "serde", - "serde_json", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.76" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfe8dc78e2326ba5f845f4b5bf548401604fa20b1dd1d365fb73b6c1d6364041" +checksum = "a317bf8f9fba2476b4b2c85ef4c4af8ff39c3c7f0cdfeed4f82c34a880aa837b" dependencies = [ "bumpalo", "lazy_static", @@ -3208,9 +3369,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.26" +version = "0.4.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95fded345a6559c2cfee778d562300c581f7d4ff3edb9b0d230d69800d213972" +checksum = "8e8d7523cb1f2a4c96c1317ca690031b714a51cc14e05f712446691f413f5d39" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -3220,9 +3381,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.76" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44468aa53335841d9d6b6c023eaab07c0cd4bddbcfdee3e2bb1e8d2cb8069fef" +checksum = "d56146e7c495528bf6587663bea13a8eb588d39b36b679d83972e1a2dbbdacf9" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3230,9 +3391,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.76" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0195807922713af1e67dc66132c7328206ed9766af3858164fb583eedc25fbad" +checksum = "7803e0eea25835f8abdc585cd3021b3deb11543c6fe226dcd30b228857c5c5ab" dependencies = [ "proc-macro2", "quote", @@ -3243,15 +3404,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.76" +version = "0.2.78" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acdb075a845574a1fa5f09fd77e43f7747599301ea3417a9fbffdeedfc1f4a29" +checksum = "0237232789cf037d5480773fe568aac745bfe2afbc11a863e97901780a6b47cc" [[package]] name = "web-sys" -version = "0.3.53" +version = "0.3.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "224b2f6b67919060055ef1a67807367c2066ed520c3862cc013d26cf893a783c" +checksum = "38eb105f1c59d9eaa6b5cdc92b859d85b926e82cb2e0945cd0c9259faa6fe9fb" dependencies = [ "js-sys", "wasm-bindgen", @@ -3352,18 +3513,18 @@ checksum = "9fc79f4a1e39857fc00c3f662cbf2651c771f00e9c15fe2abc341806bd46bd71" [[package]] name = "zeroize" -version = "1.4.1" +version = "1.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "377db0846015f7ae377174787dd452e1c5f5a9050bc6f954911d01f116daa0cd" +checksum = "d68d9dcec5f9b43a30d38c49f91dfedfaac384cb8f085faca366c26207dd1619" dependencies = [ "zeroize_derive", ] [[package]] name = "zeroize_derive" -version = "1.1.0" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"a2c1e130bebaeab2f23886bf9acbaca14b092408c452543c857f66399cd6dab1" +checksum = "65f1a51723ec88c66d5d1fe80c841f17f63587d6691901d66be9bec6c3b51f73" dependencies = [ "proc-macro2", "quote", @@ -3373,18 +3534,18 @@ dependencies = [ [[package]] name = "zstd" -version = "0.5.4+zstd.1.4.7" +version = "0.9.2+zstd.1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69996ebdb1ba8b1517f61387a883857818a66c8a295f487b1ffd8fd9d2c82910" +checksum = "2390ea1bf6c038c39674f22d95f0564725fc06034a47129179810b2fc58caa54" dependencies = [ "zstd-safe", ] [[package]] name = "zstd-safe" -version = "2.0.6+zstd.1.4.7" +version = "4.1.3+zstd.1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98aa931fb69ecee256d44589d19754e61851ae4769bf963b385119b1cc37a49e" +checksum = "e99d81b99fb3c2c2c794e3fe56c305c63d5173a16a46b5850b07c935ffc7db79" dependencies = [ "libc", "zstd-sys", @@ -3392,12 +3553,10 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "1.4.18+zstd.1.4.7" +version = "1.6.2+zstd.1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1e6e8778706838f43f771d80d37787cb2fe06dafe89dd3aebaf6721b9eaec81" +checksum = "2daf2f248d9ea44454bfcb2516534e8b8ad2fc91bf818a1885495fc42bc8ac9f" dependencies = [ "cc", - "glob", - "itertools 0.9.0", "libc", ] diff --git a/Cargo.toml b/Cargo.toml index 5e09dee0..5cc6a83c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -78,6 +78,7 @@ crossbeam = { version = "0.8.1", optional = true } num_cpus = "1.13.0" threadpool = "1.8.1" heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true } +rocksdb = { version = "0.16.0", features = ["multi-threaded-cf"], optional = true } thread_local = "1.1.3" # used for TURN server authentication hmac = "0.11.0" @@ -87,7 +88,8 @@ sha-1 = "0.9.8" default = ["conduit_bin", "backend_sqlite"] backend_sled = ["sled"] backend_sqlite = ["sqlite"] -backend_heed = ["heed", "crossbeam", "parking_lot"] +backend_heed = ["heed", "crossbeam"] +backend_rocksdb = ["rocksdb"] sqlite = ["rusqlite", "parking_lot", "crossbeam", "tokio/signal"] conduit_bin = [] # TODO: add rocket to this when it is optional diff --git a/src/database.rs b/src/database.rs index 8b29b221..4c377f06 100644 --- a/src/database.rs +++ b/src/database.rs @@ -154,6 +154,9 @@ pub type Engine = abstraction::sqlite::Engine; #[cfg(feature = "heed")] pub type Engine = abstraction::heed::Engine; +#[cfg(feature = "rocksdb")] +pub type Engine = abstraction::rocksdb::Engine; + pub struct Database { _db: Arc, pub globals: globals::Globals, @@ -314,10 +317,10 @@ impl Database { .expect("pdu cache capacity fits into usize"), )), auth_chain_cache: Mutex::new(LruCache::new(1_000_000)), - shorteventid_cache: Mutex::new(LruCache::new(1_000_000)), - eventidshort_cache: Mutex::new(LruCache::new(1_000_000)), - shortstatekey_cache: Mutex::new(LruCache::new(1_000_000)), - statekeyshort_cache: Mutex::new(LruCache::new(1_000_000)), + shorteventid_cache: Mutex::new(LruCache::new(100_000_000)), + eventidshort_cache: Mutex::new(LruCache::new(100_000_000)), + shortstatekey_cache: Mutex::new(LruCache::new(100_000_000)), + statekeyshort_cache: Mutex::new(LruCache::new(100_000_000)), our_real_users_cache: RwLock::new(HashMap::new()), appservice_in_room_cache: RwLock::new(HashMap::new()), stateinfo_cache: Mutex::new(LruCache::new(1000)), diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs index 67b80d1a..a347f831 100644 --- 
a/src/database/abstraction.rs +++ b/src/database/abstraction.rs @@ -12,7 +12,10 @@ pub mod sqlite; #[cfg(feature = "heed")] pub mod heed; -#[cfg(any(feature = "sqlite", feature = "heed"))] +#[cfg(feature = "rocksdb")] +pub mod rocksdb; + +#[cfg(any(feature = "sqlite", feature = "rocksdb", feature = "heed"))] pub mod watchers; pub trait DatabaseEngine: Sized { diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs new file mode 100644 index 00000000..3ff6ab86 --- /dev/null +++ b/src/database/abstraction/rocksdb.rs @@ -0,0 +1,183 @@ +use super::super::Config; +use crate::{utils, Result}; + +use std::{future::Future, pin::Pin, sync::Arc}; + +use super::{DatabaseEngine, Tree}; + +use std::{collections::HashMap, sync::RwLock}; + +pub struct Engine { + rocks: rocksdb::DBWithThreadMode, + old_cfs: Vec, +} + +pub struct RocksDbEngineTree<'a> { + db: Arc, + name: &'a str, + watchers: Watchers, +} + +impl DatabaseEngine for Engine { + fn open(config: &Config) -> Result> { + let mut db_opts = rocksdb::Options::default(); + db_opts.create_if_missing(true); + db_opts.set_max_open_files(16); + db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level); + db_opts.set_compression_type(rocksdb::DBCompressionType::Snappy); + db_opts.set_target_file_size_base(256 << 20); + db_opts.set_write_buffer_size(256 << 20); + + let mut block_based_options = rocksdb::BlockBasedOptions::default(); + block_based_options.set_block_size(512 << 10); + db_opts.set_block_based_table_factory(&block_based_options); + + let cfs = rocksdb::DBWithThreadMode::::list_cf( + &db_opts, + &config.database_path, + ) + .unwrap_or_default(); + + let db = rocksdb::DBWithThreadMode::::open_cf_descriptors( + &db_opts, + &config.database_path, + cfs.iter().map(|name| { + let mut options = rocksdb::Options::default(); + let prefix_extractor = rocksdb::SliceTransform::create_fixed_prefix(1); + options.set_prefix_extractor(prefix_extractor); + options.set_merge_operator_associative("increment", utils::increment_rocksdb); + + rocksdb::ColumnFamilyDescriptor::new(name, options) + }), + )?; + + Ok(Arc::new(Engine { + rocks: db, + old_cfs: cfs, + })) + } + + fn open_tree(self: &Arc, name: &'static str) -> Result> { + if !self.old_cfs.contains(&name.to_owned()) { + // Create if it didn't exist + let mut options = rocksdb::Options::default(); + let prefix_extractor = rocksdb::SliceTransform::create_fixed_prefix(1); + options.set_prefix_extractor(prefix_extractor); + options.set_merge_operator_associative("increment", utils::increment_rocksdb); + + let _ = self.rocks.create_cf(name, &options); + println!("created cf"); + } + + Ok(Arc::new(RocksDbEngineTree { + name, + db: Arc::clone(self), + watchers: Watchers::default(), + })) + } + + fn flush(self: &Arc) -> Result<()> { + // TODO? + Ok(()) + } +} + +impl RocksDbEngineTree<'_> { + fn cf(&self) -> rocksdb::BoundColumnFamily<'_> { + self.db.rocks.cf_handle(self.name).unwrap() + } +} + +impl Tree for RocksDbEngineTree<'_> { + fn get(&self, key: &[u8]) -> Result>> { + Ok(self.db.rocks.get_cf(self.cf(), key)?) + } + + fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { + self.db.rocks.put_cf(self.cf(), key, value)?; + self.watchers.wake(key); + Ok(()) + } + + fn insert_batch<'a>(&self, iter: &mut dyn Iterator, Vec)>) -> Result<()> { + for (key, value) in iter { + self.db.rocks.put_cf(self.cf(), key, value)?; + } + + Ok(()) + } + + fn remove(&self, key: &[u8]) -> Result<()> { + Ok(self.db.rocks.delete_cf(self.cf(), key)?) 
+ } + + fn iter<'a>(&'a self) -> Box, Vec)> + 'a> { + Box::new( + self.db + .rocks + .iterator_cf(self.cf(), rocksdb::IteratorMode::Start) + .map(|(k, v)| (Vec::from(k), Vec::from(v))), + ) + } + + fn iter_from<'a>( + &'a self, + from: &[u8], + backwards: bool, + ) -> Box, Vec)> + 'a> { + Box::new( + self.db + .rocks + .iterator_cf( + self.cf(), + rocksdb::IteratorMode::From( + from, + if backwards { + rocksdb::Direction::Reverse + } else { + rocksdb::Direction::Forward + }, + ), + ) + .map(|(k, v)| (Vec::from(k), Vec::from(v))), + ) + } + + fn increment(&self, key: &[u8]) -> Result> { + // TODO: make atomic + let old = self.db.rocks.get_cf(self.cf(), &key)?; + let new = utils::increment(old.as_deref()).unwrap(); + self.db.rocks.put_cf(self.cf(), key, &new)?; + Ok(new) + } + + fn increment_batch<'a>(&self, iter: &mut dyn Iterator>) -> Result<()> { + for key in iter { + let old = self.db.rocks.get_cf(self.cf(), &key)?; + let new = utils::increment(old.as_deref()).unwrap(); + self.db.rocks.put_cf(self.cf(), key, new)?; + } + + Ok(()) + } + + fn scan_prefix<'a>( + &'a self, + prefix: Vec, + ) -> Box, Vec)> + 'a> { + Box::new( + self.db + .rocks + .iterator_cf( + self.cf(), + rocksdb::IteratorMode::From(&prefix, rocksdb::Direction::Forward), + ) + .map(|(k, v)| (Vec::from(k), Vec::from(v))) + .take_while(move |(k, _)| k.starts_with(&prefix)), + ) + } + + fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { + self.watchers.watch(prefix) + } +} diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index 1e6a2d89..31875667 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -132,7 +132,7 @@ type TupleOfBytes = (Vec, Vec); impl SqliteTable { #[tracing::instrument(skip(self, guard, key))] fn get_with_guard(&self, guard: &Connection, key: &[u8]) -> Result>> { - //dbg!(&self.name); + dbg!(&self.name); Ok(guard .prepare(format!("SELECT value FROM {} WHERE key = ?", self.name).as_str())? .query_row([key], |row| row.get(0)) @@ -141,7 +141,7 @@ impl SqliteTable { #[tracing::instrument(skip(self, guard, key, value))] fn insert_with_guard(&self, guard: &Connection, key: &[u8], value: &[u8]) -> Result<()> { - //dbg!(&self.name); + dbg!(&self.name); guard.execute( format!( "INSERT OR REPLACE INTO {} (key, value) VALUES (?, ?)", @@ -168,14 +168,14 @@ impl SqliteTable { let statement_ref = NonAliasingBox(statement); - //let name = self.name.clone(); + let name = self.name.clone(); let iterator = Box::new( statement .query_map([], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) .unwrap() .map(move |r| { - //dbg!(&name); + dbg!(&name); r.unwrap() }), ); @@ -263,7 +263,7 @@ impl Tree for SqliteTable { let guard = self.engine.read_lock_iterator(); let from = from.to_vec(); // TODO change interface? 
- //let name = self.name.clone(); + let name = self.name.clone(); if backwards { let statement = Box::leak(Box::new( @@ -282,7 +282,7 @@ impl Tree for SqliteTable { .query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) .unwrap() .map(move |r| { - //dbg!(&name); + dbg!(&name); r.unwrap() }), ); @@ -307,7 +307,7 @@ impl Tree for SqliteTable { .query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) .unwrap() .map(move |r| { - //dbg!(&name); + dbg!(&name); r.unwrap() }), ); diff --git a/src/error.rs b/src/error.rs index 7faddc91..4d427da4 100644 --- a/src/error.rs +++ b/src/error.rs @@ -39,6 +39,12 @@ pub enum Error { #[cfg(feature = "heed")] #[error("There was a problem with the connection to the heed database: {error}")] HeedError { error: String }, + #[cfg(feature = "rocksdb")] + #[error("There was a problem with the connection to the rocksdb database: {source}")] + RocksDbError { + #[from] + source: rocksdb::Error, + }, #[error("Could not generate an image.")] ImageError { #[from] diff --git a/src/utils.rs b/src/utils.rs index 26d71a8c..4702d051 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -29,6 +29,17 @@ pub fn increment(old: Option<&[u8]>) -> Option> { Some(number.to_be_bytes().to_vec()) } +#[cfg(feature = "rocksdb")] +pub fn increment_rocksdb( + _new_key: &[u8], + old: Option<&[u8]>, + _operands: &mut rocksdb::MergeOperands, +) -> Option> { + dbg!(_new_key); + dbg!(old); + increment(old) +} + pub fn generate_keypair() -> Vec { let mut value = random_string(8).as_bytes().to_vec(); value.push(0xff); From a30b588ede6135642946afd575a2411c6d0d21e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 10 Dec 2021 21:34:45 +0100 Subject: [PATCH 049/445] rocksdb as default --- Cargo.toml | 2 +- src/database.rs | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 5cc6a83c..0a2b4459 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -85,7 +85,7 @@ hmac = "0.11.0" sha-1 = "0.9.8" [features] -default = ["conduit_bin", "backend_sqlite"] +default = ["conduit_bin", "backend_rocksdb"] backend_sled = ["sled"] backend_sqlite = ["sqlite"] backend_heed = ["heed", "crossbeam"] diff --git a/src/database.rs b/src/database.rs index 4c377f06..af6136b3 100644 --- a/src/database.rs +++ b/src/database.rs @@ -317,10 +317,10 @@ impl Database { .expect("pdu cache capacity fits into usize"), )), auth_chain_cache: Mutex::new(LruCache::new(1_000_000)), - shorteventid_cache: Mutex::new(LruCache::new(100_000_000)), - eventidshort_cache: Mutex::new(LruCache::new(100_000_000)), - shortstatekey_cache: Mutex::new(LruCache::new(100_000_000)), - statekeyshort_cache: Mutex::new(LruCache::new(100_000_000)), + shorteventid_cache: Mutex::new(LruCache::new(1_000_000)), + eventidshort_cache: Mutex::new(LruCache::new(1_000_000)), + shortstatekey_cache: Mutex::new(LruCache::new(1_000_000)), + statekeyshort_cache: Mutex::new(LruCache::new(1_000_000)), our_real_users_cache: RwLock::new(HashMap::new()), appservice_in_room_cache: RwLock::new(HashMap::new()), stateinfo_cache: Mutex::new(LruCache::new(1000)), From c9c99746412155fcdce6a6430bd5ef9c567cc3fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 16 Dec 2021 14:52:19 +0100 Subject: [PATCH 050/445] fix: stack overflows when fetching auth events --- src/database/abstraction/rocksdb.rs | 18 ++-- src/server_server.rs | 145 +++++++++++++++------------- 2 files changed, 88 insertions(+), 75 deletions(-) diff --git a/src/database/abstraction/rocksdb.rs 
b/src/database/abstraction/rocksdb.rs index 3ff6ab86..825c02e0 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -22,14 +22,20 @@ impl DatabaseEngine for Engine { fn open(config: &Config) -> Result> { let mut db_opts = rocksdb::Options::default(); db_opts.create_if_missing(true); - db_opts.set_max_open_files(16); + db_opts.set_max_open_files(512); db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level); - db_opts.set_compression_type(rocksdb::DBCompressionType::Snappy); - db_opts.set_target_file_size_base(256 << 20); - db_opts.set_write_buffer_size(256 << 20); + db_opts.set_compression_type(rocksdb::DBCompressionType::Zstd); + db_opts.set_target_file_size_base(2 << 22); + db_opts.set_max_bytes_for_level_base(2 << 24); + db_opts.set_max_bytes_for_level_multiplier(2.0); + db_opts.set_num_levels(8); + db_opts.set_write_buffer_size(2 << 27); + + let rocksdb_cache = rocksdb::Cache::new_lru_cache((config.db_cache_capacity_mb * 1024.0 * 1024.0) as usize).unwrap(); let mut block_based_options = rocksdb::BlockBasedOptions::default(); - block_based_options.set_block_size(512 << 10); + block_based_options.set_block_size(2 << 19); + block_based_options.set_block_cache(&rocksdb_cache); db_opts.set_block_based_table_factory(&block_based_options); let cfs = rocksdb::DBWithThreadMode::::list_cf( @@ -45,7 +51,6 @@ impl DatabaseEngine for Engine { let mut options = rocksdb::Options::default(); let prefix_extractor = rocksdb::SliceTransform::create_fixed_prefix(1); options.set_prefix_extractor(prefix_extractor); - options.set_merge_operator_associative("increment", utils::increment_rocksdb); rocksdb::ColumnFamilyDescriptor::new(name, options) }), @@ -63,7 +68,6 @@ impl DatabaseEngine for Engine { let mut options = rocksdb::Options::default(); let prefix_extractor = rocksdb::SliceTransform::create_fixed_prefix(1); options.set_prefix_extractor(prefix_extractor); - options.set_merge_operator_associative("increment", utils::increment_rocksdb); let _ = self.rocks.create_cf(name, &options); println!("created cf"); diff --git a/src/server_server.rs b/src/server_server.rs index 594152ae..d6bc9b91 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1392,12 +1392,11 @@ async fn upgrade_outlier_to_timeline_pdu( let mut starting_events = Vec::with_capacity(leaf_state.len()); for (k, id) in leaf_state { - let k = db - .rooms - .get_statekey_from_short(k) - .map_err(|_| "Failed to get_statekey_from_short.".to_owned())?; - - state.insert(k, id.clone()); + if let Ok(k) = db.rooms.get_statekey_from_short(k) { + state.insert(k, id.clone()); + } else { + warn!("Failed to get_statekey_from_short."); + } starting_events.push(id); } @@ -1755,11 +1754,16 @@ async fn upgrade_outlier_to_timeline_pdu( .into_iter() .map(|map| { map.into_iter() - .map(|(k, id)| db.rooms.get_statekey_from_short(k).map(|k| (k, id))) - .collect::>>() + .filter_map(|(k, id)| { + db.rooms + .get_statekey_from_short(k) + .map(|k| (k, id)) + .map_err(|e| warn!("Failed to get_statekey_from_short: {}", e)) + .ok() + }) + .collect::>() }) - .collect::>() - .map_err(|_| "Failed to get_statekey_from_short.".to_owned())?; + .collect(); let state = match state_res::resolve( room_version_id, @@ -1871,73 +1875,78 @@ pub(crate) fn fetch_and_handle_outliers<'a>( // a. Look in the main timeline (pduid_pdu tree) // b. 
Look at outlier pdu tree // (get_pdu_json checks both) - let local_pdu = db.rooms.get_pdu(id); - let pdu = match local_pdu { - Ok(Some(pdu)) => { + if let Ok(Some(local_pdu)) = db.rooms.get_pdu(id) { + trace!("Found {} in db", id); + pdus.push((local_pdu, None)); + } + + // c. Ask origin server over federation + // We also handle its auth chain here so we don't get a stack overflow in + // handle_outlier_pdu. + let mut todo_auth_events = vec![id]; + let mut events_in_reverse_order = Vec::new(); + while let Some(next_id) = todo_auth_events.pop() { + if let Ok(Some(_)) = db.rooms.get_pdu(next_id) { trace!("Found {} in db", id); - (pdu, None) + continue; } - Ok(None) => { - // c. Ask origin server over federation - warn!("Fetching {} over federation.", id); - match db - .sending - .send_federation_request( - &db.globals, - origin, - get_event::v1::Request { event_id: id }, - ) - .await - { - Ok(res) => { - warn!("Got {} over federation", id); - let (calculated_event_id, value) = - match crate::pdu::gen_event_id_canonical_json(&res.pdu) { - Ok(t) => t, - Err(_) => { - back_off((**id).to_owned()); - continue; - } - }; - if calculated_event_id != **id { - warn!("Server didn't return event id we requested: requested: {}, we got {}. Event: {:?}", - id, calculated_event_id, &res.pdu); - } - - // This will also fetch the auth chain - match handle_outlier_pdu( - origin, - create_event, - id, - room_id, - value.clone(), - db, - pub_key_map, - ) - .await - { - Ok((pdu, json)) => (pdu, Some(json)), - Err(e) => { - warn!("Authentication of event {} failed: {:?}", id, e); - back_off((**id).to_owned()); + warn!("Fetching {} over federation.", next_id); + match db + .sending + .send_federation_request( + &db.globals, + origin, + get_event::v1::Request { event_id: next_id }, + ) + .await + { + Ok(res) => { + warn!("Got {} over federation", next_id); + let (calculated_event_id, value) = + match crate::pdu::gen_event_id_canonical_json(&res.pdu) { + Ok(t) => t, + Err(_) => { + back_off((**next_id).to_owned()); continue; } - } - } - Err(_) => { - warn!("Failed to fetch event: {}", id); - back_off((**id).to_owned()); - continue; + }; + + if calculated_event_id != **next_id { + warn!("Server didn't return event id we requested: requested: {}, we got {}. 
Event: {:?}", + next_id, calculated_event_id, &res.pdu); } + + events_in_reverse_order.push((next_id, value)); + } + Err(_) => { + warn!("Failed to fetch event: {}", next_id); + back_off((**next_id).to_owned()); } } - Err(e) => { - warn!("Error loading {}: {}", id, e); - continue; + } + + while let Some((next_id, value)) = events_in_reverse_order.pop() { + match handle_outlier_pdu( + origin, + create_event, + next_id, + room_id, + value.clone(), + db, + pub_key_map, + ) + .await + { + Ok((pdu, json)) => { + pdus.push((pdu, Some(json))); + } + Err(e) => { + warn!("Authentication of event {} failed: {:?}", next_id, e); + back_off((**next_id).to_owned()); + } } - }; - pdus.push(pdu); + } } pdus }) From 4b4afea2abb4289d6fa31e02bd2be2799f51e0ae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 16 Dec 2021 15:54:42 +0100 Subject: [PATCH 051/445] fix auth event fetching --- src/server_server.rs | 31 +++++++++++++++++++++++-------- 1 file changed, 23 insertions(+), 8 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index d6bc9b91..28c3ea07 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1878,15 +1878,16 @@ pub(crate) fn fetch_and_handle_outliers<'a>( if let Ok(Some(local_pdu)) = db.rooms.get_pdu(id) { trace!("Found {} in db", id); pdus.push((local_pdu, None)); + continue; } // c. Ask origin server over federation // We also handle its auth chain here so we don't get a stack overflow in // handle_outlier_pdu. - let mut todo_auth_events = vec![id]; + let mut todo_auth_events = vec![Arc::clone(id)]; let mut events_in_reverse_order = Vec::new(); while let Some(next_id) = todo_auth_events.pop() { - if let Ok(Some(_)) = db.rooms.get_pdu(next_id) { + if let Ok(Some(_)) = db.rooms.get_pdu(&next_id) { trace!("Found {} in db", id); continue; } @@ -1897,7 +1898,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( .send_federation_request( &db.globals, origin, - get_event::v1::Request { event_id: next_id }, + get_event::v1::Request { event_id: &next_id }, ) .await { @@ -1907,21 +1908,35 @@ pub(crate) fn fetch_and_handle_outliers<'a>( match crate::pdu::gen_event_id_canonical_json(&res.pdu) { Ok(t) => t, Err(_) => { - back_off((**next_id).to_owned()); + back_off((*next_id).to_owned()); continue; } }; - if calculated_event_id != **next_id { + if calculated_event_id != *next_id { warn!("Server didn't return event id we requested: requested: {}, we got {}. 
Event: {:?}", next_id, calculated_event_id, &res.pdu); } + + if let Some(auth_events) = value.get("auth_events").and_then(|c| c.as_array()) { + for auth_event in auth_events { + if let Some(Ok(auth_event)) = auth_event.as_str() + .map(|e| serde_json::from_str(e)) { + todo_auth_events.push(auth_event); + } else { + warn!("Auth event id is not valid"); + } + } + } else { + warn!("Auth event list invalid"); + } + events_in_reverse_order.push((next_id, value)); } Err(_) => { warn!("Failed to fetch event: {}", next_id); - back_off((**next_id).to_owned()); + back_off((*next_id).to_owned()); } } } @@ -1930,7 +1945,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( match handle_outlier_pdu( origin, create_event, - next_id, + &next_id, room_id, value.clone(), db, @@ -1943,7 +1958,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( } Err(e) => { warn!("Authentication of event {} failed: {:?}", next_id, e); - back_off((**next_id).to_owned()); + back_off((*next_id).to_owned()); } } } From 74951cb239b5ec7ef41ba080729bc93df046fb66 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 16 Dec 2021 21:42:53 +0100 Subject: [PATCH 052/445] dbg --- src/server_server.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index 28c3ea07..b6bea0c5 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1922,8 +1922,9 @@ pub(crate) fn fetch_and_handle_outliers<'a>( if let Some(auth_events) = value.get("auth_events").and_then(|c| c.as_array()) { for auth_event in auth_events { if let Some(Ok(auth_event)) = auth_event.as_str() - .map(|e| serde_json::from_str(e)) { - todo_auth_events.push(auth_event); + .map(|e| {let ev: std::result::Result, _> = dbg!(serde_json::from_str(dbg!(e))); ev}) { + let a: Arc = auth_event; + todo_auth_events.push(a); } else { warn!("Auth event id is not valid"); } From 83a9095cdc3febd617d9bfd2d8cacf0fe3e89990 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 16 Dec 2021 22:25:24 +0100 Subject: [PATCH 053/445] fix? 
--- src/server_server.rs | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index b6bea0c5..8c5c09f2 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1921,8 +1921,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( if let Some(auth_events) = value.get("auth_events").and_then(|c| c.as_array()) { for auth_event in auth_events { - if let Some(Ok(auth_event)) = auth_event.as_str() - .map(|e| {let ev: std::result::Result, _> = dbg!(serde_json::from_str(dbg!(e))); ev}) { + if let Ok(auth_event) = serde_json::from_value(auth_event.clone().into()) { let a: Arc = auth_event; todo_auth_events.push(a); } else { From ee3d2db8e061bcdac43674aa050bcd3aad79d4a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 19 Dec 2021 10:48:28 +0100 Subject: [PATCH 054/445] improvement, maybe not safe --- src/server_server.rs | 25 ++++--------------------- 1 file changed, 4 insertions(+), 21 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index 8c5c09f2..57f55867 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1686,25 +1686,6 @@ async fn upgrade_outlier_to_timeline_pdu( // We do this by adding the current state to the list of fork states extremity_sstatehashes.remove(¤t_sstatehash); fork_states.push(current_state_ids); - dbg!(&extremity_sstatehashes); - - for (sstatehash, leaf_pdu) in extremity_sstatehashes { - let mut leaf_state = db - .rooms - .state_full_ids(sstatehash) - .map_err(|_| "Failed to ask db for room state.".to_owned())?; - - if let Some(state_key) = &leaf_pdu.state_key { - let shortstatekey = db - .rooms - .get_or_create_shortstatekey(&leaf_pdu.kind, state_key, &db.globals) - .map_err(|_| "Failed to create shortstatekey.".to_owned())?; - leaf_state.insert(shortstatekey, Arc::from(&*leaf_pdu.event_id)); - // Now it's the state after the pdu - } - - fork_states.push(leaf_state); - } // We also add state after incoming event to the fork states let mut state_after = state_at_incoming_event.clone(); @@ -1941,7 +1922,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( } } - while let Some((next_id, value)) = events_in_reverse_order.pop() { + for (next_id, value) in events_in_reverse_order { match handle_outlier_pdu( origin, create_event, @@ -1954,7 +1935,9 @@ pub(crate) fn fetch_and_handle_outliers<'a>( .await { Ok((pdu, json)) => { - pdus.push((pdu, Some(json))); + if next_id == *id { + pdus.push((pdu, Some(json))); + } } Err(e) => { warn!("Authentication of event {} failed: {:?}", next_id, e); From b1d9ec3efccafaf887da1b54e4b3ef2bfa4d84a3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 20 Dec 2021 10:16:22 +0100 Subject: [PATCH 055/445] fix: atomic increment --- Cargo.toml | 2 +- src/database/abstraction/rocksdb.rs | 24 ++++++++++++++++-------- src/database/abstraction/watchers.rs | 8 ++++---- 3 files changed, 21 insertions(+), 13 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 0a2b4459..6241b6a8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -90,7 +90,7 @@ backend_sled = ["sled"] backend_sqlite = ["sqlite"] backend_heed = ["heed", "crossbeam"] backend_rocksdb = ["rocksdb"] -sqlite = ["rusqlite", "parking_lot", "crossbeam", "tokio/signal"] +sqlite = ["rusqlite", "parking_lot", "tokio/signal"] conduit_bin = [] # TODO: add rocket to this when it is optional [[bin]] diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index 825c02e0..b2142dfe 100644 --- a/src/database/abstraction/rocksdb.rs +++ 
b/src/database/abstraction/rocksdb.rs @@ -1,11 +1,6 @@ -use super::super::Config; +use super::{super::Config, watchers::Watchers, DatabaseEngine, Tree}; use crate::{utils, Result}; - -use std::{future::Future, pin::Pin, sync::Arc}; - -use super::{DatabaseEngine, Tree}; - -use std::{collections::HashMap, sync::RwLock}; +use std::{future::Future, pin::Pin, sync::Arc, collections::HashMap, sync::RwLock}; pub struct Engine { rocks: rocksdb::DBWithThreadMode, @@ -16,6 +11,7 @@ pub struct RocksDbEngineTree<'a> { db: Arc, name: &'a str, watchers: Watchers, + write_lock: RwLock<()> } impl DatabaseEngine for Engine { @@ -77,6 +73,7 @@ impl DatabaseEngine for Engine { name, db: Arc::clone(self), watchers: Watchers::default(), + write_lock: RwLock::new(()), })) } @@ -98,8 +95,12 @@ impl Tree for RocksDbEngineTree<'_> { } fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { + let lock = self.write_lock.read().unwrap(); self.db.rocks.put_cf(self.cf(), key, value)?; + drop(lock); + self.watchers.wake(key); + Ok(()) } @@ -148,20 +149,27 @@ impl Tree for RocksDbEngineTree<'_> { } fn increment(&self, key: &[u8]) -> Result> { - // TODO: make atomic + let lock = self.write_lock.write().unwrap(); + let old = self.db.rocks.get_cf(self.cf(), &key)?; let new = utils::increment(old.as_deref()).unwrap(); self.db.rocks.put_cf(self.cf(), key, &new)?; + + drop(lock); Ok(new) } fn increment_batch<'a>(&self, iter: &mut dyn Iterator>) -> Result<()> { + let lock = self.write_lock.write().unwrap(); + for key in iter { let old = self.db.rocks.get_cf(self.cf(), &key)?; let new = utils::increment(old.as_deref()).unwrap(); self.db.rocks.put_cf(self.cf(), key, new)?; } + drop(lock); + Ok(()) } diff --git a/src/database/abstraction/watchers.rs b/src/database/abstraction/watchers.rs index 404f3f06..fec1f27a 100644 --- a/src/database/abstraction/watchers.rs +++ b/src/database/abstraction/watchers.rs @@ -1,6 +1,6 @@ -use parking_lot::RwLock; use std::{ collections::{hash_map, HashMap}, + sync::RwLock, future::Future, pin::Pin, }; @@ -16,7 +16,7 @@ impl Watchers { &'a self, prefix: &[u8], ) -> Pin + Send + 'a>> { - let mut rx = match self.watchers.write().entry(prefix.to_vec()) { + let mut rx = match self.watchers.write().unwrap().entry(prefix.to_vec()) { hash_map::Entry::Occupied(o) => o.get().1.clone(), hash_map::Entry::Vacant(v) => { let (tx, rx) = tokio::sync::watch::channel(()); @@ -31,7 +31,7 @@ impl Watchers { }) } pub(super) fn wake(&self, key: &[u8]) { - let watchers = self.watchers.read(); + let watchers = self.watchers.read().unwrap(); let mut triggered = Vec::new(); for length in 0..=key.len() { @@ -43,7 +43,7 @@ impl Watchers { drop(watchers); if !triggered.is_empty() { - let mut watchers = self.watchers.write(); + let mut watchers = self.watchers.write().unwrap(); for prefix in triggered { if let Some(tx) = watchers.remove(prefix) { let _ = tx.0.send(()); From 54f4d39e3ed92106ec3a902de22d2366cfd8e8be Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 21 Dec 2021 16:02:12 +0100 Subject: [PATCH 056/445] improvement: don't fetch event multiple times --- src/database/abstraction/rocksdb.rs | 4 +++- src/server_server.rs | 17 +++++++++++++---- 2 files changed, 16 insertions(+), 5 deletions(-) diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index b2142dfe..397047bd 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -27,7 +27,9 @@ impl DatabaseEngine for Engine { db_opts.set_num_levels(8); 
db_opts.set_write_buffer_size(2 << 27); - let rocksdb_cache = rocksdb::Cache::new_lru_cache((config.db_cache_capacity_mb * 1024.0 * 1024.0) as usize).unwrap(); + let rocksdb_cache = + rocksdb::Cache::new_lru_cache((config.db_cache_capacity_mb * 1024.0 * 1024.0) as usize) + .unwrap(); let mut block_based_options = rocksdb::BlockBasedOptions::default(); block_based_options.set_block_size(2 << 19); diff --git a/src/server_server.rs b/src/server_server.rs index 57f55867..6e8ebf38 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1867,7 +1867,12 @@ pub(crate) fn fetch_and_handle_outliers<'a>( // handle_outlier_pdu. let mut todo_auth_events = vec![Arc::clone(id)]; let mut events_in_reverse_order = Vec::new(); + let mut events_all = HashSet::new(); while let Some(next_id) = todo_auth_events.pop() { + if events_all.contains(&next_id) { + continue; + } + if let Ok(Some(_)) = db.rooms.get_pdu(&next_id) { trace!("Found {} in db", id); continue; @@ -1899,10 +1904,13 @@ pub(crate) fn fetch_and_handle_outliers<'a>( next_id, calculated_event_id, &res.pdu); } - - if let Some(auth_events) = value.get("auth_events").and_then(|c| c.as_array()) { + if let Some(auth_events) = + value.get("auth_events").and_then(|c| c.as_array()) + { for auth_event in auth_events { - if let Ok(auth_event) = serde_json::from_value(auth_event.clone().into()) { + if let Ok(auth_event) = + serde_json::from_value(auth_event.clone().into()) + { let a: Arc = auth_event; todo_auth_events.push(a); } else { @@ -1913,7 +1921,8 @@ pub(crate) fn fetch_and_handle_outliers<'a>( warn!("Auth event list invalid"); } - events_in_reverse_order.push((next_id, value)); + events_in_reverse_order.push((next_id.clone(), value)); + events_all.insert(next_id); } Err(_) => { warn!("Failed to fetch event: {}", next_id); From 5bcc1324ed3936444ba189c399e906482cc67d3a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 21 Dec 2021 22:10:31 +0100 Subject: [PATCH 057/445] fix: auth event fetch order --- src/server_server.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index 6e8ebf38..c76afd34 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1931,7 +1931,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( } } - for (next_id, value) in events_in_reverse_order { + for (next_id, value) in events_in_reverse_order.iter().rev() { match handle_outlier_pdu( origin, create_event, @@ -1944,13 +1944,13 @@ pub(crate) fn fetch_and_handle_outliers<'a>( .await { Ok((pdu, json)) => { - if next_id == *id { + if next_id == id { pdus.push((pdu, Some(json))); } } Err(e) => { warn!("Authentication of event {} failed: {:?}", next_id, e); - back_off((*next_id).to_owned()); + back_off((**next_id).to_owned()); } } } From 68e910bb77f7bbc93269dd1dfd0f70a26f1e8ef0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 4 Jan 2022 14:30:13 +0100 Subject: [PATCH 058/445] feat: lazy loading --- src/client_server/context.rs | 62 ++++++++++++++++--- src/client_server/message.rs | 83 +++++++++++++++++++------ src/client_server/sync.rs | 117 +++++++++++++++++++++++++++++++---- src/database.rs | 3 + src/database/rooms.rs | 96 +++++++++++++++++++++++++++- 5 files changed, 320 insertions(+), 41 deletions(-) diff --git a/src/client_server/context.rs b/src/client_server/context.rs index 9bfec9e1..94a44e39 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -1,5 +1,9 @@ use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; 
-use ruma::api::client::{error::ErrorKind, r0::context::get_context}; +use ruma::{ + api::client::{error::ErrorKind, r0::context::get_context}, + events::EventType, +}; +use std::collections::HashSet; use std::convert::TryFrom; #[cfg(feature = "conduit_bin")] @@ -21,6 +25,7 @@ pub async fn get_context_route( body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_device = body.sender_device.as_ref().expect("user is authenticated"); if !db.rooms.is_joined(sender_user, &body.room_id)? { return Err(Error::BadRequest( @@ -29,6 +34,8 @@ pub async fn get_context_route( )); } + let mut lazy_loaded = HashSet::new(); + let base_pdu_id = db .rooms .get_pdu_id(&body.event_id)? @@ -45,8 +52,18 @@ pub async fn get_context_route( .ok_or(Error::BadRequest( ErrorKind::NotFound, "Base event not found.", - ))? - .to_room_event(); + ))?; + + if !db.rooms.lazy_load_was_sent_before( + &sender_user, + &sender_device, + &body.room_id, + &base_event.sender, + )? { + lazy_loaded.insert(base_event.sender.clone()); + } + + let base_event = base_event.to_room_event(); let events_before: Vec<_> = db .rooms @@ -60,6 +77,17 @@ pub async fn get_context_route( .filter_map(|r| r.ok()) // Remove buggy events .collect(); + for (_, event) in &events_before { + if !db.rooms.lazy_load_was_sent_before( + &sender_user, + &sender_device, + &body.room_id, + &event.sender, + )? { + lazy_loaded.insert(event.sender.clone()); + } + } + let start_token = events_before .last() .and_then(|(pdu_id, _)| db.rooms.pdu_count(pdu_id).ok()) @@ -82,6 +110,17 @@ pub async fn get_context_route( .filter_map(|r| r.ok()) // Remove buggy events .collect(); + for (_, event) in &events_after { + if !db.rooms.lazy_load_was_sent_before( + &sender_user, + &sender_device, + &body.room_id, + &event.sender, + )? { + lazy_loaded.insert(event.sender.clone()); + } + } + let end_token = events_after .last() .and_then(|(pdu_id, _)| db.rooms.pdu_count(pdu_id).ok()) @@ -92,18 +131,23 @@ pub async fn get_context_route( .map(|(_, pdu)| pdu.to_room_event()) .collect(); + let mut state = Vec::new(); + for ll_id in &lazy_loaded { + if let Some(member_event) = + db.rooms + .room_state_get(&body.room_id, &EventType::RoomMember, ll_id.as_str())? + { + state.push(member_event.to_state_event()); + } + } + let resp = get_context::Response { start: start_token, end: end_token, events_before, event: Some(base_event), events_after, - state: db // TODO: State at event - .rooms - .room_state_full(&body.room_id)? - .values() - .map(|pdu| pdu.to_state_event()) - .collect(), + state, }; Ok(resp.into()) diff --git a/src/client_server/message.rs b/src/client_server/message.rs index cbce019e..48ca4ae8 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -6,7 +6,11 @@ use ruma::{ }, events::EventType, }; -use std::{collections::BTreeMap, convert::TryInto, sync::Arc}; +use std::{ + collections::{BTreeMap, HashSet}, + convert::TryInto, + sync::Arc, +}; #[cfg(feature = "conduit_bin")] use rocket::{get, put}; @@ -117,6 +121,7 @@ pub async fn get_message_events_route( body: Ruma>, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_device = body.sender_device.as_ref().expect("user is authenticated"); if !db.rooms.is_joined(sender_user, &body.room_id)? 
{ return Err(Error::BadRequest( @@ -136,6 +141,12 @@ pub async fn get_message_events_route( // Use limit or else 10 let limit = body.limit.try_into().map_or(10_usize, |l: u32| l as usize); + let next_token; + + let mut resp = get_message_events::Response::new(); + + let mut lazy_loaded = HashSet::new(); + match body.dir { get_message_events::Direction::Forward => { let events_after: Vec<_> = db @@ -152,21 +163,27 @@ pub async fn get_message_events_route( .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to` .collect(); - let end_token = events_after.last().map(|(count, _)| count.to_string()); + for (_, event) in &events_after { + if !db.rooms.lazy_load_was_sent_before( + &sender_user, + &sender_device, + &body.room_id, + &event.sender, + )? { + lazy_loaded.insert(event.sender.clone()); + } + } + + next_token = events_after.last().map(|(count, _)| count).copied(); let events_after: Vec<_> = events_after .into_iter() .map(|(_, pdu)| pdu.to_room_event()) .collect(); - let resp = get_message_events::Response { - start: body.from.to_owned(), - end: end_token, - chunk: events_after, - state: Vec::new(), - }; - - Ok(resp.into()) + resp.start = body.from.to_owned(); + resp.end = next_token.map(|count| count.to_string()); + resp.chunk = events_after; } get_message_events::Direction::Backward => { let events_before: Vec<_> = db @@ -183,21 +200,51 @@ pub async fn get_message_events_route( .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to` .collect(); - let start_token = events_before.last().map(|(count, _)| count.to_string()); + for (_, event) in &events_before { + if !db.rooms.lazy_load_was_sent_before( + &sender_user, + &sender_device, + &body.room_id, + &event.sender, + )? { + lazy_loaded.insert(event.sender.clone()); + } + } + + next_token = events_before.last().map(|(count, _)| count).copied(); let events_before: Vec<_> = events_before .into_iter() .map(|(_, pdu)| pdu.to_room_event()) .collect(); - let resp = get_message_events::Response { - start: body.from.to_owned(), - end: start_token, - chunk: events_before, - state: Vec::new(), - }; + resp.start = body.from.to_owned(); + resp.end = next_token.map(|count| count.to_string()); + resp.chunk = events_before; + } + } - Ok(resp.into()) + db.rooms + .lazy_load_confirm_delivery(&sender_user, &sender_device, &body.room_id, from)?; + resp.state = Vec::new(); + for ll_id in &lazy_loaded { + if let Some(member_event) = + db.rooms + .room_state_get(&body.room_id, &EventType::RoomMember, ll_id.as_str())? 
+ { + resp.state.push(member_event.to_state_event()); } } + + if let Some(next_token) = next_token { + db.rooms.lazy_load_mark_sent( + &sender_user, + &sender_device, + &body.room_id, + lazy_loaded.into_iter().collect(), + next_token, + ); + } + + Ok(resp.into()) } diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 64588a2c..88bf8614 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -264,6 +264,14 @@ async fn sync_helper( // limited unless there are events in non_timeline_pdus let limited = non_timeline_pdus.next().is_some(); + let mut timeline_users = HashSet::new(); + for (_, event) in &timeline_pdus { + timeline_users.insert(event.sender.as_str().to_owned()); + } + + db.rooms + .lazy_load_confirm_delivery(&sender_user, &sender_device, &room_id, since)?; + // Database queries: let current_shortstatehash = db @@ -344,14 +352,51 @@ async fn sync_helper( state_events, ) = if since_shortstatehash.is_none() { // Probably since = 0, we will do an initial sync + let (joined_member_count, invited_member_count, heroes) = calculate_counts()?; let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?; - let state_events: Vec<_> = current_state_ids - .iter() - .map(|(_, id)| db.rooms.get_pdu(id)) - .filter_map(|r| r.ok().flatten()) - .collect(); + + let mut state_events = Vec::new(); + let mut lazy_loaded = Vec::new(); + + for (_, id) in current_state_ids { + let pdu = match db.rooms.get_pdu(&id)? { + Some(pdu) => pdu, + None => { + error!("Pdu in state not found: {}", id); + continue; + } + }; + let state_key = pdu + .state_key + .as_ref() + .expect("state events have state keys"); + if pdu.kind != EventType::RoomMember { + state_events.push(pdu); + } else if full_state || timeline_users.contains(state_key) { + // TODO: check filter: is ll enabled? + lazy_loaded.push( + UserId::parse(state_key.as_ref()) + .expect("they are in timeline_users, so they should be correct"), + ); + state_events.push(pdu); + } + } + + // Reset lazy loading because this is an initial sync + db.rooms + .lazy_load_reset(&sender_user, &sender_device, &room_id)?; + + // The state_events above should contain all timeline_users, let's mark them as lazy + // loaded. + db.rooms.lazy_load_mark_sent( + &sender_user, + &sender_device, + &room_id, + lazy_loaded, + next_batch, + ); ( heroes, @@ -387,20 +432,66 @@ async fn sync_helper( let since_state_ids = db.rooms.state_full_ids(since_shortstatehash)?; - let state_events = if joined_since_last_sync { + /* + let state_events = if joined_since_last_sync || full_state { current_state_ids .iter() .map(|(_, id)| db.rooms.get_pdu(id)) .filter_map(|r| r.ok().flatten()) .collect::>() } else { - current_state_ids - .iter() - .filter(|(key, id)| since_state_ids.get(key) != Some(id)) - .map(|(_, id)| db.rooms.get_pdu(id)) - .filter_map(|r| r.ok().flatten()) - .collect() - }; + */ + let mut state_events = Vec::new(); + let mut lazy_loaded = Vec::new(); + + for (key, id) in current_state_ids { + let pdu = match db.rooms.get_pdu(&id)? 
{ + Some(pdu) => pdu, + None => { + error!("Pdu in state not found: {}", id); + continue; + } + }; + + let state_key = pdu + .state_key + .as_ref() + .expect("state events have state keys"); + + if pdu.kind != EventType::RoomMember { + if full_state || since_state_ids.get(&key) != Some(&id) { + state_events.push(pdu); + } + continue; + } + + // Pdu has to be a member event + let state_key_userid = UserId::parse(state_key.as_ref()) + .expect("they are in timeline_users, so they should be correct"); + + if full_state || since_state_ids.get(&key) != Some(&id) { + lazy_loaded.push(state_key_userid); + state_events.push(pdu); + } else if timeline_users.contains(state_key) + && !db.rooms.lazy_load_was_sent_before( + &sender_user, + &sender_device, + &room_id, + &state_key_userid, + )? + { + lazy_loaded.push(state_key_userid); + state_events.push(pdu); + } + } + + db.rooms.lazy_load_mark_sent( + &sender_user, + &sender_device, + &room_id, + lazy_loaded, + next_batch, + ); let encrypted_room = db .rooms diff --git a/src/database.rs b/src/database.rs index af6136b3..9e020198 100644 --- a/src/database.rs +++ b/src/database.rs @@ -288,6 +288,8 @@ impl Database { userroomid_leftstate: builder.open_tree("userroomid_leftstate")?, roomuserid_leftcount: builder.open_tree("roomuserid_leftcount")?, + lazyloadedids: builder.open_tree("lazyloadedids")?, + userroomid_notificationcount: builder.open_tree("userroomid_notificationcount")?, userroomid_highlightcount: builder.open_tree("userroomid_highlightcount")?, @@ -323,6 +325,7 @@ impl Database { statekeyshort_cache: Mutex::new(LruCache::new(1_000_000)), our_real_users_cache: RwLock::new(HashMap::new()), appservice_in_room_cache: RwLock::new(HashMap::new()), + lazy_load_waiting: Mutex::new(HashMap::new()), stateinfo_cache: Mutex::new(LruCache::new(1000)), }, account_data: account_data::AccountData { diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 775e2f8d..b957b55d 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -28,7 +28,7 @@ use ruma::{ push::{Action, Ruleset, Tweak}, serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, state_res::{self, RoomVersion, StateMap}, - uint, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, + uint, DeviceId, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; use serde::Deserialize; use serde_json::value::to_raw_value; @@ -79,6 +79,8 @@ pub struct Rooms { pub(super) userroomid_leftstate: Arc, pub(super) roomuserid_leftcount: Arc, + pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId + pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 @@ -117,6 +119,8 @@ pub struct Rooms { pub(super) shortstatekey_cache: Mutex>, pub(super) our_real_users_cache: RwLock, Arc>>>>, pub(super) appservice_in_room_cache: RwLock, HashMap>>, + pub(super) lazy_load_waiting: + Mutex, Box, Box, u64), Vec>>>, pub(super) stateinfo_cache: Mutex< LruCache< u64, @@ -3466,4 +3470,94 @@ impl Rooms { Ok(()) } + + #[tracing::instrument(skip(self))] + pub fn lazy_load_was_sent_before( + &self, + user_id: &UserId, + device_id: &DeviceId, + room_id: &RoomId, + ll_user: &UserId, + ) -> Result { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(&device_id.as_bytes()); + key.push(0xff); + key.extend_from_slice(&room_id.as_bytes()); + key.push(0xff); + key.extend_from_slice(&ll_user.as_bytes()); + Ok(self.lazyloadedids.get(&key)?.is_some()) + } + 
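    // Illustrative sketch (not part of the original patch): how the lazy-loading
    // helpers above and below fit together in the request handlers of this series.
    // The identifiers used here (`user_id`, `device_id`, `room_id`, `sender`,
    // `lazy_loaded`, `from`, `next_token`) are placeholders, not code from the diff.
    //
    //     // 1. On a request carrying a `since`/`from` token, persist whatever was
    //     //    handed out together with that token in the previous response:
    //     db.rooms.lazy_load_confirm_delivery(&user_id, &device_id, &room_id, from)?;
    //
    //     // 2. While building the response, only lazy-load a membership event that
    //     //    has not already been sent to this device:
    //     if !db.rooms.lazy_load_was_sent_before(&user_id, &device_id, &room_id, &sender)? {
    //         lazy_loaded.insert(sender.clone());
    //     }
    //
    //     // 3. Remember what this response contained, keyed by the token the client
    //     //    will echo back as `since`/`from` on its next request:
    //     db.rooms.lazy_load_mark_sent(&user_id, &device_id, &room_id, lazy_loaded, next_token);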
+ #[tracing::instrument(skip(self))] + pub fn lazy_load_mark_sent( + &self, + user_id: &UserId, + device_id: &DeviceId, + room_id: &RoomId, + lazy_load: Vec>, + count: u64, + ) { + self.lazy_load_waiting.lock().unwrap().insert( + ( + user_id.to_owned(), + device_id.to_owned(), + room_id.to_owned(), + count, + ), + lazy_load, + ); + } + + #[tracing::instrument(skip(self))] + pub fn lazy_load_confirm_delivery( + &self, + user_id: &UserId, + device_id: &DeviceId, + room_id: &RoomId, + since: u64, + ) -> Result<()> { + if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( + user_id.to_owned(), + device_id.to_owned(), + room_id.to_owned(), + since, + )) { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xff); + prefix.extend_from_slice(&device_id.as_bytes()); + prefix.push(0xff); + prefix.extend_from_slice(&room_id.as_bytes()); + prefix.push(0xff); + + for ll_id in user_ids { + let mut key = prefix.clone(); + key.extend_from_slice(&ll_id.as_bytes()); + self.lazyloadedids.insert(&key, &[])?; + } + } + + Ok(()) + } + + #[tracing::instrument(skip(self))] + pub fn lazy_load_reset( + &self, + user_id: &Box, + device_id: &Box, + room_id: &Box, + ) -> Result<()> { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xff); + prefix.extend_from_slice(&device_id.as_bytes()); + prefix.push(0xff); + prefix.extend_from_slice(&room_id.as_bytes()); + prefix.push(0xff); + + for (key, _) in self.lazyloadedids.scan_prefix(prefix) { + self.lazyloadedids.remove(&key)?; + } + + Ok(()) + } } From 1bd9fd74b31383105526ea27b6df8d461aacc223 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 5 Jan 2022 18:15:00 +0100 Subject: [PATCH 059/445] feat: partially support sync filters --- src/client_server/filter.rs | 57 ++++++++++++++++--------- src/client_server/message.rs | 5 ++- src/client_server/sync.rs | 83 ++++++++++++++++++++---------------- src/database.rs | 1 + src/database/users.rs | 48 ++++++++++++++++++++- 5 files changed, 133 insertions(+), 61 deletions(-) diff --git a/src/client_server/filter.rs b/src/client_server/filter.rs index dfb53770..f8845f1e 100644 --- a/src/client_server/filter.rs +++ b/src/client_server/filter.rs @@ -1,32 +1,47 @@ -use crate::{utils, ConduitResult}; -use ruma::api::client::r0::filter::{self, create_filter, get_filter}; +use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; +use ruma::api::client::{ + error::ErrorKind, + r0::filter::{create_filter, get_filter}, +}; #[cfg(feature = "conduit_bin")] use rocket::{get, post}; /// # `GET /_matrix/client/r0/user/{userId}/filter/{filterId}` /// -/// TODO: Loads a filter that was previously created. -#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/user/<_>/filter/<_>"))] -#[tracing::instrument] -pub async fn get_filter_route() -> ConduitResult { - // TODO - Ok(get_filter::Response::new(filter::IncomingFilterDefinition { - event_fields: None, - event_format: filter::EventFormat::default(), - account_data: filter::IncomingFilter::default(), - room: filter::IncomingRoomFilter::default(), - presence: filter::IncomingFilter::default(), - }) - .into()) +/// Loads a filter that was previously created. 
+/// +/// - A user can only access their own filters +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/client/r0/user/<_>/filter/<_>", data = "") +)] +#[tracing::instrument(skip(db, body))] +pub async fn get_filter_route( + db: DatabaseGuard, + body: Ruma>, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let filter = match db.users.get_filter(sender_user, &body.filter_id)? { + Some(filter) => filter, + None => return Err(Error::BadRequest(ErrorKind::NotFound, "Filter not found.")), + }; + + Ok(get_filter::Response::new(filter).into()) } /// # `PUT /_matrix/client/r0/user/{userId}/filter` /// -/// TODO: Creates a new filter to be used by other endpoints. -#[cfg_attr(feature = "conduit_bin", post("/_matrix/client/r0/user/<_>/filter"))] -#[tracing::instrument] -pub async fn create_filter_route() -> ConduitResult { - // TODO - Ok(create_filter::Response::new(utils::random_string(10)).into()) +/// Creates a new filter to be used by other endpoints. +#[cfg_attr( + feature = "conduit_bin", + post("/_matrix/client/r0/user/<_>/filter", data = "") +)] +#[tracing::instrument(skip(db, body))] +pub async fn create_filter_route( + db: DatabaseGuard, + body: Ruma>, +) -> ConduitResult { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + Ok(create_filter::Response::new(db.users.create_filter(sender_user, &body.filter)?).into()) } diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 48ca4ae8..899c45a2 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -138,6 +138,9 @@ pub async fn get_message_events_route( let to = body.to.as_ref().map(|t| t.parse()); + db.rooms + .lazy_load_confirm_delivery(&sender_user, &sender_device, &body.room_id, from)?; + // Use limit or else 10 let limit = body.limit.try_into().map_or(10_usize, |l: u32| l as usize); @@ -224,8 +227,6 @@ pub async fn get_message_events_route( } } - db.rooms - .lazy_load_confirm_delivery(&sender_user, &sender_device, &body.room_id, from)?; resp.state = Vec::new(); for ll_id in &lazy_loaded { if let Some(member_event) = diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 88bf8614..6d8ac28d 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -1,6 +1,10 @@ use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Result, Ruma, RumaResponse}; use ruma::{ - api::client::r0::{sync::sync_events, uiaa::UiaaResponse}, + api::client::r0::{ + filter::{IncomingFilterDefinition, LazyLoadOptions}, + sync::sync_events, + uiaa::UiaaResponse, + }, events::{ room::member::{MembershipState, RoomMemberEventContent}, AnySyncEphemeralRoomEvent, EventType, @@ -77,34 +81,32 @@ pub async fn sync_events_route( Entry::Vacant(v) => { let (tx, rx) = tokio::sync::watch::channel(None); + v.insert((body.since.clone(), rx.clone())); + tokio::spawn(sync_helper_wrapper( Arc::clone(&arc_db), sender_user.clone(), sender_device.clone(), - body.since.clone(), - body.full_state, - body.timeout, + body, tx, )); - v.insert((body.since.clone(), rx)).1.clone() + rx } Entry::Occupied(mut o) => { if o.get().0 != body.since { let (tx, rx) = tokio::sync::watch::channel(None); + o.insert((body.since.clone(), rx.clone())); + tokio::spawn(sync_helper_wrapper( Arc::clone(&arc_db), sender_user.clone(), sender_device.clone(), - body.since.clone(), - body.full_state, - body.timeout, + body, tx, )); - o.insert((body.since.clone(), rx.clone())); - rx } else { o.get().1.clone() @@ -135,18 +137,16 @@ async fn 
sync_helper_wrapper( db: Arc, sender_user: Box, sender_device: Box, - since: Option, - full_state: bool, - timeout: Option, + body: sync_events::IncomingRequest, tx: Sender>>, ) { + let since = body.since.clone(); + let r = sync_helper( Arc::clone(&db), sender_user.clone(), sender_device.clone(), - since.clone(), - full_state, - timeout, + body, ) .await; @@ -179,9 +179,7 @@ async fn sync_helper( db: Arc, sender_user: Box, sender_device: Box, - since: Option, - full_state: bool, - timeout: Option, + body: sync_events::IncomingRequest, // bool = caching allowed ) -> Result<(sync_events::Response, bool), Error> { // TODO: match body.set_presence { @@ -193,8 +191,26 @@ async fn sync_helper( let next_batch = db.globals.current_count()?; let next_batch_string = next_batch.to_string(); + // Load filter + let filter = match body.filter { + None => IncomingFilterDefinition::default(), + Some(sync_events::IncomingFilter::FilterDefinition(filter)) => filter, + Some(sync_events::IncomingFilter::FilterId(filter_id)) => db + .users + .get_filter(&sender_user, &filter_id)? + .unwrap_or_default(), + }; + + let (lazy_load_enabled, lazy_load_send_redundant) = match filter.room.state.lazy_load_options { + LazyLoadOptions::Enabled { + include_redundant_members: redundant, + } => (true, redundant), + _ => (false, false), + }; + let mut joined_rooms = BTreeMap::new(); - let since = since + let since = body + .since .clone() .and_then(|string| string.parse().ok()) .unwrap_or(0); @@ -374,8 +390,10 @@ async fn sync_helper( .expect("state events have state keys"); if pdu.kind != EventType::RoomMember { state_events.push(pdu); - } else if full_state || timeline_users.contains(state_key) { - // TODO: check filter: is ll enabled? + } else if !lazy_load_enabled + || body.full_state + || timeline_users.contains(state_key) + { lazy_loaded.push( UserId::parse(state_key.as_ref()) .expect("they are in timeline_users, so they should be correct"), @@ -432,15 +450,6 @@ async fn sync_helper( let since_state_ids = db.rooms.state_full_ids(since_shortstatehash)?; - /* - let state_events = if joined_since_last_sync || full_state { - current_state_ids - .iter() - .map(|(_, id)| db.rooms.get_pdu(id)) - .filter_map(|r| r.ok().flatten()) - .collect::>() - } else { - */ let mut state_events = Vec::new(); let mut lazy_loaded = Vec::new(); @@ -459,7 +468,7 @@ async fn sync_helper( .expect("state events have state keys"); if pdu.kind != EventType::RoomMember { - if full_state || since_state_ids.get(&key) != Some(&id) { + if body.full_state || since_state_ids.get(&key) != Some(&id) { state_events.push(pdu); } continue; @@ -469,16 +478,16 @@ async fn sync_helper( let state_key_userid = UserId::parse(state_key.as_ref()) .expect("they are in timeline_users, so they should be correct"); - if full_state || since_state_ids.get(&key) != Some(&id) { + if body.full_state || since_state_ids.get(&key) != Some(&id) { lazy_loaded.push(state_key_userid); state_events.push(pdu); } else if timeline_users.contains(state_key) - && !db.rooms.lazy_load_was_sent_before( + && (!db.rooms.lazy_load_was_sent_before( &sender_user, &sender_device, &room_id, &state_key_userid, - )? + )? 
|| lazy_load_send_redundant) { lazy_loaded.push(state_key_userid); state_events.push(pdu); @@ -858,7 +867,7 @@ async fn sync_helper( }; // TODO: Retry the endpoint instead of returning (waiting for #118) - if !full_state + if !body.full_state && response.rooms.is_empty() && response.presence.is_empty() && response.account_data.is_empty() @@ -867,7 +876,7 @@ async fn sync_helper( { // Hang a few seconds so requests are not spammed // Stop hanging if new info arrives - let mut duration = timeout.unwrap_or_default(); + let mut duration = body.timeout.unwrap_or_default(); if duration.as_secs() > 30 { duration = Duration::from_secs(30); } diff --git a/src/database.rs b/src/database.rs index 9e020198..ddf701bb 100644 --- a/src/database.rs +++ b/src/database.rs @@ -249,6 +249,7 @@ impl Database { userid_masterkeyid: builder.open_tree("userid_masterkeyid")?, userid_selfsigningkeyid: builder.open_tree("userid_selfsigningkeyid")?, userid_usersigningkeyid: builder.open_tree("userid_usersigningkeyid")?, + userfilterid_filter: builder.open_tree("userfilterid_filter")?, todeviceid_events: builder.open_tree("todeviceid_events")?, }, uiaa: uiaa::Uiaa { diff --git a/src/database/users.rs b/src/database/users.rs index 63a63f00..c4fcee3d 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -1,6 +1,9 @@ use crate::{utils, Error, Result}; use ruma::{ - api::client::{error::ErrorKind, r0::device::Device}, + api::client::{ + error::ErrorKind, + r0::{device::Device, filter::IncomingFilterDefinition}, + }, encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::{AnyToDeviceEvent, EventType}, identifiers::MxcUri, @@ -36,6 +39,8 @@ pub struct Users { pub(super) userid_selfsigningkeyid: Arc, pub(super) userid_usersigningkeyid: Arc, + pub(super) userfilterid_filter: Arc, // UserFilterId = UserId + FilterId + pub(super) todeviceid_events: Arc, // ToDeviceId = UserId + DeviceId + Count } @@ -996,6 +1001,47 @@ impl Users { // TODO: Unhook 3PID Ok(()) } + + /// Creates a new sync filter. Returns the filter id. 
+ #[tracing::instrument(skip(self))] + pub fn create_filter( + &self, + user_id: &UserId, + filter: &IncomingFilterDefinition, + ) -> Result { + let filter_id = utils::random_string(4); + + let mut key = user_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(filter_id.as_bytes()); + + self.userfilterid_filter.insert( + &key, + &serde_json::to_vec(&filter).expect("filter is valid json"), + )?; + + Ok(filter_id) + } + + #[tracing::instrument(skip(self))] + pub fn get_filter( + &self, + user_id: &UserId, + filter_id: &str, + ) -> Result> { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(filter_id.as_bytes()); + + let raw = self.userfilterid_filter.get(&key)?; + + if let Some(raw) = raw { + serde_json::from_slice(&raw) + .map_err(|_| Error::bad_database("Invalid filter event in db.")) + } else { + Ok(None) + } + } } /// Ensure that a user only sees signatures from themselves and the target user From 93d225fd1ec186d1957c670b0e6f7f161888dd21 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 5 Jan 2022 20:31:20 +0100 Subject: [PATCH 060/445] improvement: faster way to load required state --- src/client_server/sync.rs | 33 +++++++++++++++++++-------------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 6d8ac28d..a41e728e 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -376,24 +376,29 @@ async fn sync_helper( let mut state_events = Vec::new(); let mut lazy_loaded = Vec::new(); - for (_, id) in current_state_ids { - let pdu = match db.rooms.get_pdu(&id)? { - Some(pdu) => pdu, - None => { - error!("Pdu in state not found: {}", id); - continue; - } - }; - let state_key = pdu - .state_key - .as_ref() - .expect("state events have state keys"); - if pdu.kind != EventType::RoomMember { + for (shortstatekey, id) in current_state_ids { + let (event_type, state_key) = db.rooms.get_statekey_from_short(shortstatekey)?; + + if event_type != EventType::RoomMember { + let pdu = match db.rooms.get_pdu(&id)? { + Some(pdu) => pdu, + None => { + error!("Pdu in state not found: {}", id); + continue; + } + }; state_events.push(pdu); } else if !lazy_load_enabled || body.full_state - || timeline_users.contains(state_key) + || timeline_users.contains(&state_key) { + let pdu = match db.rooms.get_pdu(&id)? 
{ + Some(pdu) => pdu, + None => { + error!("Pdu in state not found: {}", id); + continue; + } + }; lazy_loaded.push( UserId::parse(state_key.as_ref()) .expect("they are in timeline_users, so they should be correct"), From f285c89006e48b0644f421ae399f1a8eb47e37f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 6 Jan 2022 00:15:34 +0100 Subject: [PATCH 061/445] fix: make incremental sync efficient again --- src/client_server/message.rs | 2 +- src/client_server/sync.rs | 77 ++++++++++++++++++++---------------- src/database/rooms.rs | 4 +- 3 files changed, 47 insertions(+), 36 deletions(-) diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 899c45a2..9705e4c0 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -242,7 +242,7 @@ pub async fn get_message_events_route( &sender_user, &sender_device, &body.room_id, - lazy_loaded.into_iter().collect(), + lazy_loaded, next_token, ); } diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index a41e728e..c2014403 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -374,7 +374,7 @@ async fn sync_helper( let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?; let mut state_events = Vec::new(); - let mut lazy_loaded = Vec::new(); + let mut lazy_loaded = HashSet::new(); for (shortstatekey, id) in current_state_ids { let (event_type, state_key) = db.rooms.get_statekey_from_short(shortstatekey)?; @@ -399,7 +399,7 @@ async fn sync_helper( continue; } }; - lazy_loaded.push( + lazy_loaded.insert( UserId::parse(state_key.as_ref()) .expect("they are in timeline_users, so they should be correct"), ); @@ -456,46 +456,57 @@ async fn sync_helper( let since_state_ids = db.rooms.state_full_ids(since_shortstatehash)?; let mut state_events = Vec::new(); - let mut lazy_loaded = Vec::new(); + let mut lazy_loaded = HashSet::new(); for (key, id) in current_state_ids { - let pdu = match db.rooms.get_pdu(&id)? { - Some(pdu) => pdu, - None => { - error!("Pdu in state not found: {}", id); - continue; - } - }; - - let state_key = pdu - .state_key - .as_ref() - .expect("state events have state keys"); + if body.full_state || since_state_ids.get(&key) != Some(&id) { + let pdu = match db.rooms.get_pdu(&id)? { + Some(pdu) => pdu, + None => { + error!("Pdu in state not found: {}", id); + continue; + } + }; - if pdu.kind != EventType::RoomMember { - if body.full_state || since_state_ids.get(&key) != Some(&id) { - state_events.push(pdu); + if pdu.kind == EventType::RoomMember { + match UserId::parse( + pdu.state_key + .as_ref() + .expect("State event has state key") + .clone(), + ) { + Ok(state_key_userid) => { + lazy_loaded.insert(state_key_userid); + } + Err(e) => error!("Invalid state key for member event: {}", e), + } } - continue; - } - - // Pdu has to be a member event - let state_key_userid = UserId::parse(state_key.as_ref()) - .expect("they are in timeline_users, so they should be correct"); - if body.full_state || since_state_ids.get(&key) != Some(&id) { - lazy_loaded.push(state_key_userid); state_events.push(pdu); - } else if timeline_users.contains(state_key) - && (!db.rooms.lazy_load_was_sent_before( + } + for (_, event) in &timeline_pdus { + if lazy_loaded.contains(&event.sender) { + continue; + } + + if !db.rooms.lazy_load_was_sent_before( &sender_user, &sender_device, &room_id, - &state_key_userid, - )? || lazy_load_send_redundant) - { - lazy_loaded.push(state_key_userid); - state_events.push(pdu); + &event.sender, + )? 
|| lazy_load_send_redundant + { + let pdu = match db.rooms.get_pdu(&id)? { + Some(pdu) => pdu, + None => { + error!("Pdu in state not found: {}", id); + continue; + } + }; + + lazy_loaded.insert(event.sender.clone()); + state_events.push(pdu); + } } } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index b957b55d..600f46df 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -120,7 +120,7 @@ pub struct Rooms { pub(super) our_real_users_cache: RwLock, Arc>>>>, pub(super) appservice_in_room_cache: RwLock, HashMap>>, pub(super) lazy_load_waiting: - Mutex, Box, Box, u64), Vec>>>, + Mutex, Box, Box, u64), HashSet>>>, pub(super) stateinfo_cache: Mutex< LruCache< u64, @@ -3495,7 +3495,7 @@ impl Rooms { user_id: &UserId, device_id: &DeviceId, room_id: &RoomId, - lazy_load: Vec>, + lazy_load: HashSet>, count: u64, ) { self.lazy_load_waiting.lock().unwrap().insert( From c6d88359d7aae985f9688dddb321d07ef2043708 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 7 Jan 2022 09:56:09 +0100 Subject: [PATCH 062/445] fix: incremental lazy loading --- src/client_server/sync.rs | 37 ++++++++++++++++++------------------- 1 file changed, 18 insertions(+), 19 deletions(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index c2014403..a6122893 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -484,28 +484,27 @@ async fn sync_helper( state_events.push(pdu); } - for (_, event) in &timeline_pdus { - if lazy_loaded.contains(&event.sender) { - continue; - } + } - if !db.rooms.lazy_load_was_sent_before( - &sender_user, - &sender_device, - &room_id, - &event.sender, - )? || lazy_load_send_redundant - { - let pdu = match db.rooms.get_pdu(&id)? { - Some(pdu) => pdu, - None => { - error!("Pdu in state not found: {}", id); - continue; - } - }; + for (_, event) in &timeline_pdus { + if lazy_loaded.contains(&event.sender) { + continue; + } + if !db.rooms.lazy_load_was_sent_before( + &sender_user, + &sender_device, + &room_id, + &event.sender, + )? || lazy_load_send_redundant + { + if let Some(member_event) = db.rooms.room_state_get( + &room_id, + &EventType::RoomMember, + event.sender.as_str(), + )? 
{ lazy_loaded.insert(event.sender.clone()); - state_events.push(pdu); + state_events.push(member_event); } } } From 4f39d36e980d8f4e6fcc7ae7c9a292db52d915e7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 9 Jan 2022 13:42:25 +0100 Subject: [PATCH 063/445] docs: lazy loading --- src/client_server/sync.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index a6122893..bd2f48a3 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -40,13 +40,15 @@ use rocket::{get, tokio}; /// Calling this endpoint with a `since` parameter from a previous `next_batch` returns: /// For joined rooms: /// - Some of the most recent events of each timeline that happened after since -/// - If user joined the room after since: All state events and device list updates in that room +/// - If user joined the room after since: All state events (unless lazy loading is activated) and +/// all device list updates in that room /// - If the user was already in the room: A list of all events that are in the state now, but were /// not in the state at `since` /// - If the state we send contains a member event: Joined and invited member counts, heroes /// - Device list updates that happened after `since` /// - If there are events in the timeline we send or the user send updated his read mark: Notification counts /// - EDUs that are active now (read receipts, typing updates, presence) +/// - TODO: Allow multiple sync streams to support Pantalaimon /// /// For invited rooms: /// - If the user was invited after `since`: A subset of the state of the room at the point of the invite From fa6d7f7ccd14426f1fc2d802fff021b06f39bf02 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 9 Jan 2022 16:44:44 +0100 Subject: [PATCH 064/445] feat: database backend selection at runtime --- Cargo.toml | 2 +- conduit-example.toml | 15 ++- src/database.rs | 142 ++++++++++++++++------------ src/database/abstraction.rs | 13 ++- src/database/abstraction/rocksdb.rs | 9 +- src/database/abstraction/sqlite.rs | 14 ++- src/utils.rs | 11 --- 7 files changed, 117 insertions(+), 89 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 6241b6a8..c898d4d6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -85,7 +85,7 @@ hmac = "0.11.0" sha-1 = "0.9.8" [features] -default = ["conduit_bin", "backend_rocksdb"] +default = ["conduit_bin", "backend_sqlite", "backend_rocksdb"] backend_sled = ["sled"] backend_sqlite = ["sqlite"] backend_heed = ["heed", "crossbeam"] diff --git a/conduit-example.toml b/conduit-example.toml index 4275f528..c0274a4d 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -1,11 +1,15 @@ [global] -# The server_name is the name of this server. It is used as a suffix for user +# The server_name is the pretty name of this server. It is used as a suffix for user # and room ids. Examples: matrix.org, conduit.rs -# The Conduit server needs to be reachable at https://your.server.name/ on port -# 443 (client-server) and 8448 (federation) OR you can create /.well-known -# files to redirect requests. See + +# The Conduit server needs all /_matrix/ requests to be reachable at +# https://your.server.name/ on port 443 (client-server) and 8448 (federation). + +# If that's not possible for you, you can create /.well-known files to redirect +# requests. 
See # https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client -# and https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server +# and +# https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server # for more information # YOU NEED TO EDIT THIS @@ -13,6 +17,7 @@ # This is the only directory where Conduit will save its data database_path = "/var/lib/conduit/" +database_backend = "rocksdb" # The port Conduit will be running on. You need to set up a reverse proxy in # your web server (e.g. apache or nginx), so all requests to /_matrix on port diff --git a/src/database.rs b/src/database.rs index ddf701bb..c2b3e2b9 100644 --- a/src/database.rs +++ b/src/database.rs @@ -44,13 +44,15 @@ use self::proxy::ProxyConfig; #[derive(Clone, Debug, Deserialize)] pub struct Config { server_name: Box, + #[serde(default = "default_database_backend")] + database_backend: String, database_path: String, #[serde(default = "default_db_cache_capacity_mb")] db_cache_capacity_mb: f64, #[serde(default = "default_pdu_cache_capacity")] pdu_cache_capacity: u32, - #[serde(default = "default_sqlite_wal_clean_second_interval")] - sqlite_wal_clean_second_interval: u32, + #[serde(default = "default_cleanup_second_interval")] + cleanup_second_interval: u32, #[serde(default = "default_max_request_size")] max_request_size: u32, #[serde(default = "default_max_concurrent_requests")] @@ -117,6 +119,10 @@ fn true_fn() -> bool { true } +fn default_database_backend() -> String { + "sqlite".to_owned() +} + fn default_db_cache_capacity_mb() -> f64 { 200.0 } @@ -125,7 +131,7 @@ fn default_pdu_cache_capacity() -> u32 { 100_000 } -fn default_sqlite_wal_clean_second_interval() -> u32 { +fn default_cleanup_second_interval() -> u32 { 1 * 60 // every minute } @@ -145,20 +151,8 @@ fn default_turn_ttl() -> u64 { 60 * 60 * 24 } -#[cfg(feature = "sled")] -pub type Engine = abstraction::sled::Engine; - -#[cfg(feature = "sqlite")] -pub type Engine = abstraction::sqlite::Engine; - -#[cfg(feature = "heed")] -pub type Engine = abstraction::heed::Engine; - -#[cfg(feature = "rocksdb")] -pub type Engine = abstraction::rocksdb::Engine; - pub struct Database { - _db: Arc, + _db: Arc, pub globals: globals::Globals, pub users: users::Users, pub uiaa: uiaa::Uiaa, @@ -186,27 +180,53 @@ impl Database { Ok(()) } - fn check_sled_or_sqlite_db(config: &Config) -> Result<()> { - #[cfg(feature = "backend_sqlite")] - { - let path = Path::new(&config.database_path); - - let sled_exists = path.join("db").exists(); - let sqlite_exists = path.join("conduit.db").exists(); - if sled_exists { - if sqlite_exists { - // most likely an in-place directory, only warn - warn!("Both sled and sqlite databases are detected in database directory"); - warn!("Currently running from the sqlite database, but consider removing sled database files to free up space") - } else { - error!( - "Sled database detected, conduit now uses sqlite for database operations" - ); - error!("This database must be converted to sqlite, go to https://github.com/ShadowJonathan/conduit_toolbox#conduit_sled_to_sqlite"); - return Err(Error::bad_config( - "sled database detected, migrate to sqlite", - )); - } + fn check_db_setup(config: &Config) -> Result<()> { + let path = Path::new(&config.database_path); + + let sled_exists = path.join("db").exists(); + let sqlite_exists = path.join("conduit.db").exists(); + let rocksdb_exists = path.join("IDENTITY").exists(); + + let mut count = 0; + + if sled_exists { + count += 1; + } + + if sqlite_exists { + count 
+= 1; + } + + if rocksdb_exists { + count += 1; + } + + if count > 1 { + warn!("Multiple databases at database_path detected"); + return Ok(()); + } + + if sled_exists { + if config.database_backend != "sled" { + return Err(Error::bad_config( + "Found sled at database_path, but is not specified in config.", + )); + } + } + + if sqlite_exists { + if config.database_backend != "sqlite" { + return Err(Error::bad_config( + "Found sqlite at database_path, but is not specified in config.", + )); + } + } + + if rocksdb_exists { + if config.database_backend != "rocksdb" { + return Err(Error::bad_config( + "Found rocksdb at database_path, but is not specified in config.", + )); } } @@ -215,14 +235,30 @@ impl Database { /// Load an existing database or create a new one. pub async fn load_or_create(config: &Config) -> Result>> { - Self::check_sled_or_sqlite_db(config)?; + Self::check_db_setup(config)?; if !Path::new(&config.database_path).exists() { std::fs::create_dir_all(&config.database_path) .map_err(|_| Error::BadConfig("Database folder doesn't exists and couldn't be created (e.g. due to missing permissions). Please create the database folder yourself."))?; } - let builder = Engine::open(config)?; + let builder: Arc = match &*config.database_backend { + "sqlite" => { + #[cfg(not(feature = "sqlite"))] + return Err(Error::BadConfig("Database backend not found.")); + #[cfg(feature = "sqlite")] + Arc::new(Arc::::open(config)?) + } + "rocksdb" => { + #[cfg(not(feature = "rocksdb"))] + return Err(Error::BadConfig("Database backend not found.")); + #[cfg(feature = "rocksdb")] + Arc::new(Arc::::open(config)?) + } + _ => { + return Err(Error::BadConfig("Database backend not found.")); + } + }; if config.max_request_size < 1024 { eprintln!("ERROR: Max request size is less than 1KB. Please increase it."); @@ -784,10 +820,7 @@ impl Database { drop(guard); - #[cfg(feature = "sqlite")] - { - Self::start_wal_clean_task(Arc::clone(&db), config).await; - } + Self::start_cleanup_task(Arc::clone(&db), config).await; Ok(db) } @@ -925,15 +958,8 @@ impl Database { res } - #[cfg(feature = "sqlite")] - #[tracing::instrument(skip(self))] - pub fn flush_wal(&self) -> Result<()> { - self._db.flush_wal() - } - - #[cfg(feature = "sqlite")] #[tracing::instrument(skip(db, config))] - pub async fn start_wal_clean_task(db: Arc>, config: &Config) { + pub async fn start_cleanup_task(db: Arc>, config: &Config) { use tokio::time::interval; #[cfg(unix)] @@ -942,7 +968,7 @@ impl Database { use std::time::{Duration, Instant}; - let timer_interval = Duration::from_secs(config.sqlite_wal_clean_second_interval as u64); + let timer_interval = Duration::from_secs(config.cleanup_second_interval as u64); tokio::spawn(async move { let mut i = interval(timer_interval); @@ -953,23 +979,23 @@ impl Database { #[cfg(unix)] tokio::select! 
{ _ = i.tick() => { - info!("wal-trunc: Timer ticked"); + info!("cleanup: Timer ticked"); } _ = s.recv() => { - info!("wal-trunc: Received SIGHUP"); + info!("cleanup: Received SIGHUP"); } }; #[cfg(not(unix))] { i.tick().await; - info!("wal-trunc: Timer ticked") + info!("cleanup: Timer ticked") } let start = Instant::now(); - if let Err(e) = db.read().await.flush_wal() { - error!("wal-trunc: Errored: {}", e); + if let Err(e) = db.read().await._db.cleanup() { + error!("cleanup: Errored: {}", e); } else { - info!("wal-trunc: Flushed in {:?}", start.elapsed()); + info!("cleanup: Finished in {:?}", start.elapsed()); } } }); diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs index a347f831..45627bbc 100644 --- a/src/database/abstraction.rs +++ b/src/database/abstraction.rs @@ -18,10 +18,15 @@ pub mod rocksdb; #[cfg(any(feature = "sqlite", feature = "rocksdb", feature = "heed"))] pub mod watchers; -pub trait DatabaseEngine: Sized { - fn open(config: &Config) -> Result>; - fn open_tree(self: &Arc, name: &'static str) -> Result>; - fn flush(self: &Arc) -> Result<()>; +pub trait DatabaseEngine: Send + Sync { + fn open(config: &Config) -> Result + where + Self: Sized; + fn open_tree(&self, name: &'static str) -> Result>; + fn flush(self: &Self) -> Result<()>; + fn cleanup(self: &Self) -> Result<()> { + Ok(()) + } } pub trait Tree: Send + Sync { diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index 397047bd..a41ed1fb 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -14,8 +14,8 @@ pub struct RocksDbEngineTree<'a> { write_lock: RwLock<()> } -impl DatabaseEngine for Engine { - fn open(config: &Config) -> Result> { +impl DatabaseEngine for Arc { + fn open(config: &Config) -> Result { let mut db_opts = rocksdb::Options::default(); db_opts.create_if_missing(true); db_opts.set_max_open_files(512); @@ -60,7 +60,7 @@ impl DatabaseEngine for Engine { })) } - fn open_tree(self: &Arc, name: &'static str) -> Result> { + fn open_tree(&self, name: &'static str) -> Result> { if !self.old_cfs.contains(&name.to_owned()) { // Create if it didn't exist let mut options = rocksdb::Options::default(); @@ -68,7 +68,6 @@ impl DatabaseEngine for Engine { options.set_prefix_extractor(prefix_extractor); let _ = self.rocks.create_cf(name, &options); - println!("created cf"); } Ok(Arc::new(RocksDbEngineTree { @@ -79,7 +78,7 @@ impl DatabaseEngine for Engine { })) } - fn flush(self: &Arc) -> Result<()> { + fn flush(&self) -> Result<()> { // TODO? 
Ok(()) } diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index 31875667..d4fd0bdd 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -80,8 +80,8 @@ impl Engine { } } -impl DatabaseEngine for Engine { - fn open(config: &Config) -> Result> { +impl DatabaseEngine for Arc { + fn open(config: &Config) -> Result { let path = Path::new(&config.database_path).join("conduit.db"); // calculates cache-size per permanent connection @@ -92,7 +92,7 @@ impl DatabaseEngine for Engine { / ((num_cpus::get().max(1) * 2) + 1) as f64) as u32; - let writer = Mutex::new(Self::prepare_conn(&path, cache_size_per_thread)?); + let writer = Mutex::new(Engine::prepare_conn(&path, cache_size_per_thread)?); let arc = Arc::new(Engine { writer, @@ -105,7 +105,7 @@ impl DatabaseEngine for Engine { Ok(arc) } - fn open_tree(self: &Arc, name: &str) -> Result> { + fn open_tree(&self, name: &str) -> Result> { self.write_lock().execute(&format!("CREATE TABLE IF NOT EXISTS {} ( \"key\" BLOB PRIMARY KEY, \"value\" BLOB NOT NULL )", name), [])?; Ok(Arc::new(SqliteTable { @@ -115,10 +115,14 @@ impl DatabaseEngine for Engine { })) } - fn flush(self: &Arc) -> Result<()> { + fn flush(&self) -> Result<()> { // we enabled PRAGMA synchronous=normal, so this should not be necessary Ok(()) } + + fn cleanup(&self) -> Result<()> { + self.flush_wal() + } } pub struct SqliteTable { diff --git a/src/utils.rs b/src/utils.rs index 4702d051..26d71a8c 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -29,17 +29,6 @@ pub fn increment(old: Option<&[u8]>) -> Option> { Some(number.to_be_bytes().to_vec()) } -#[cfg(feature = "rocksdb")] -pub fn increment_rocksdb( - _new_key: &[u8], - old: Option<&[u8]>, - _operands: &mut rocksdb::MergeOperands, -) -> Option> { - dbg!(_new_key); - dbg!(old); - increment(old) -} - pub fn generate_keypair() -> Vec { let mut value = random_string(8).as_bytes().to_vec(); value.push(0xff); From 71431f330aadb1ee92cd63a36351af834aa65215 Mon Sep 17 00:00:00 2001 From: Andrej Kacian Date: Sun, 9 Jan 2022 20:07:03 +0100 Subject: [PATCH 065/445] Add memory_usage() to DatabaseEngine trait --- src/database/abstraction.rs | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs index 45627bbc..17bd971f 100644 --- a/src/database/abstraction.rs +++ b/src/database/abstraction.rs @@ -27,6 +27,9 @@ pub trait DatabaseEngine: Send + Sync { fn cleanup(self: &Self) -> Result<()> { Ok(()) } + fn memory_usage(self: &Self) -> Result { + Ok("Current database engine does not support memory usage reporting.".to_string()) + } } pub trait Tree: Send + Sync { From ff243870f850c07907a6944151fd909c234da662 Mon Sep 17 00:00:00 2001 From: Andrej Kacian Date: Sun, 9 Jan 2022 20:07:50 +0100 Subject: [PATCH 066/445] Add "database_memory_usage" AdminCommand --- src/database/admin.rs | 8 ++++++++ src/database/rooms.rs | 3 +++ 2 files changed, 11 insertions(+) diff --git a/src/database/admin.rs b/src/database/admin.rs index 0702bcdd..c3083309 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -14,6 +14,7 @@ pub enum AdminCommand { RegisterAppservice(serde_yaml::Value), UnregisterAppservice(String), ListAppservices, + ShowMemoryUsage, SendMessage(RoomMessageEventContent), } @@ -113,6 +114,13 @@ impl Admin { send_message(RoomMessageEventContent::text_plain("Failed to get appservices."), guard, &state_lock); } } + AdminCommand::ShowMemoryUsage => { + if let Ok(response) = guard._db.memory_usage() { + 
send_message(RoomMessageEventContent::text_plain(response), guard, &state_lock); + } else { + send_message(RoomMessageEventContent::text_plain("Failed to get database memory usage".to_string()), guard, &state_lock); + } + } AdminCommand::SendMessage(message) => { send_message(message, guard, &state_lock); } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 600f46df..0ba6c9ba 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1693,6 +1693,9 @@ impl Rooms { )); } } + "database_memory_usage" => { + db.admin.send(AdminCommand::ShowMemoryUsage); + } _ => { db.admin.send(AdminCommand::SendMessage( RoomMessageEventContent::text_plain(format!( From 68ee1a5408595804625a6dd0ebab5f333e7f0fe6 Mon Sep 17 00:00:00 2001 From: Andrej Kacian Date: Sun, 9 Jan 2022 20:08:15 +0100 Subject: [PATCH 067/445] Add rocksdb implementation of memory_usage() --- src/database/abstraction/rocksdb.rs | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index a41ed1fb..f0affd32 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -82,6 +82,19 @@ impl DatabaseEngine for Arc { // TODO? Ok(()) } + + fn memory_usage(&self) -> Result { + let stats = rocksdb::perf::get_memory_usage_stats(Some(&[&self.rocks]), None)?; + Ok(format!("Approximate memory usage of all the mem-tables: {:.3} MB\n\ + Approximate memory usage of un-flushed mem-tables: {:.3} MB\n\ + Approximate memory usage of all the table readers: {:.3} MB\n\ + Approximate memory usage by cache: {:.3} MB", + stats.mem_table_total as f64 / 1024.0 / 1024.0, + stats.mem_table_unflushed as f64 / 1024.0 / 1024.0, + stats.mem_table_readers_total as f64 / 1024.0 / 1024.0, + stats.cache_total as f64 / 1024.0 / 1024.0 + )) + } } impl RocksDbEngineTree<'_> { From 077e9ad4380715688a8ad5a2f40afd7331157bd5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 10 Jan 2022 15:53:28 +0100 Subject: [PATCH 068/445] improvement: memory usage for caches --- Cargo.lock | 4 +-- Cargo.toml | 2 +- src/database/abstraction/rocksdb.rs | 40 ++++++++++++++++------------- src/database/admin.rs | 2 +- 4 files changed, 26 insertions(+), 22 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 794445f9..d297102c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2075,9 +2075,9 @@ dependencies = [ [[package]] name = "rocksdb" -version = "0.16.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c749134fda8bfc90d0de643d59bfc841dcb3ac8a1062e12b6754bd60235c48b3" +checksum = "7a62eca5cacf2c8261128631bed9f045598d40bfbe4b29f5163f0f802f8f44a7" dependencies = [ "libc", "librocksdb-sys", diff --git a/Cargo.toml b/Cargo.toml index c898d4d6..c87d949c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -78,7 +78,7 @@ crossbeam = { version = "0.8.1", optional = true } num_cpus = "1.13.0" threadpool = "1.8.1" heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true } -rocksdb = { version = "0.16.0", features = ["multi-threaded-cf"], optional = true } +rocksdb = { version = "0.17.0", features = ["multi-threaded-cf"], optional = true } thread_local = "1.1.3" # used for TURN server authentication hmac = "0.11.0" diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index f0affd32..a7dd6e16 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -4,6 +4,7 @@ use 
std::{future::Future, pin::Pin, sync::Arc, collections::HashMap, sync::RwLoc pub struct Engine { rocks: rocksdb::DBWithThreadMode, + cache: rocksdb::Cache, old_cfs: Vec, } @@ -56,6 +57,7 @@ impl DatabaseEngine for Arc { Ok(Arc::new(Engine { rocks: db, + cache: rocksdb_cache, old_cfs: cfs, })) } @@ -84,33 +86,35 @@ impl DatabaseEngine for Arc { } fn memory_usage(&self) -> Result { - let stats = rocksdb::perf::get_memory_usage_stats(Some(&[&self.rocks]), None)?; - Ok(format!("Approximate memory usage of all the mem-tables: {:.3} MB\n\ + let stats = + rocksdb::perf::get_memory_usage_stats(Some(&[&self.rocks]), Some(&[&self.cache]))?; + Ok(format!( + "Approximate memory usage of all the mem-tables: {:.3} MB\n\ Approximate memory usage of un-flushed mem-tables: {:.3} MB\n\ Approximate memory usage of all the table readers: {:.3} MB\n\ Approximate memory usage by cache: {:.3} MB", - stats.mem_table_total as f64 / 1024.0 / 1024.0, - stats.mem_table_unflushed as f64 / 1024.0 / 1024.0, - stats.mem_table_readers_total as f64 / 1024.0 / 1024.0, - stats.cache_total as f64 / 1024.0 / 1024.0 + stats.mem_table_total as f64 / 1024.0 / 1024.0, + stats.mem_table_unflushed as f64 / 1024.0 / 1024.0, + stats.mem_table_readers_total as f64 / 1024.0 / 1024.0, + stats.cache_total as f64 / 1024.0 / 1024.0 )) } } impl RocksDbEngineTree<'_> { - fn cf(&self) -> rocksdb::BoundColumnFamily<'_> { + fn cf(&self) -> Arc> { self.db.rocks.cf_handle(self.name).unwrap() } } impl Tree for RocksDbEngineTree<'_> { fn get(&self, key: &[u8]) -> Result>> { - Ok(self.db.rocks.get_cf(self.cf(), key)?) + Ok(self.db.rocks.get_cf(&self.cf(), key)?) } fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { let lock = self.write_lock.read().unwrap(); - self.db.rocks.put_cf(self.cf(), key, value)?; + self.db.rocks.put_cf(&self.cf(), key, value)?; drop(lock); self.watchers.wake(key); @@ -120,21 +124,21 @@ impl Tree for RocksDbEngineTree<'_> { fn insert_batch<'a>(&self, iter: &mut dyn Iterator, Vec)>) -> Result<()> { for (key, value) in iter { - self.db.rocks.put_cf(self.cf(), key, value)?; + self.db.rocks.put_cf(&self.cf(), key, value)?; } Ok(()) } fn remove(&self, key: &[u8]) -> Result<()> { - Ok(self.db.rocks.delete_cf(self.cf(), key)?) + Ok(self.db.rocks.delete_cf(&self.cf(), key)?) 
} fn iter<'a>(&'a self) -> Box, Vec)> + 'a> { Box::new( self.db .rocks - .iterator_cf(self.cf(), rocksdb::IteratorMode::Start) + .iterator_cf(&self.cf(), rocksdb::IteratorMode::Start) .map(|(k, v)| (Vec::from(k), Vec::from(v))), ) } @@ -148,7 +152,7 @@ impl Tree for RocksDbEngineTree<'_> { self.db .rocks .iterator_cf( - self.cf(), + &self.cf(), rocksdb::IteratorMode::From( from, if backwards { @@ -165,9 +169,9 @@ impl Tree for RocksDbEngineTree<'_> { fn increment(&self, key: &[u8]) -> Result> { let lock = self.write_lock.write().unwrap(); - let old = self.db.rocks.get_cf(self.cf(), &key)?; + let old = self.db.rocks.get_cf(&self.cf(), &key)?; let new = utils::increment(old.as_deref()).unwrap(); - self.db.rocks.put_cf(self.cf(), key, &new)?; + self.db.rocks.put_cf(&self.cf(), key, &new)?; drop(lock); Ok(new) @@ -177,9 +181,9 @@ impl Tree for RocksDbEngineTree<'_> { let lock = self.write_lock.write().unwrap(); for key in iter { - let old = self.db.rocks.get_cf(self.cf(), &key)?; + let old = self.db.rocks.get_cf(&self.cf(), &key)?; let new = utils::increment(old.as_deref()).unwrap(); - self.db.rocks.put_cf(self.cf(), key, new)?; + self.db.rocks.put_cf(&self.cf(), key, new)?; } drop(lock); @@ -195,7 +199,7 @@ impl Tree for RocksDbEngineTree<'_> { self.db .rocks .iterator_cf( - self.cf(), + &self.cf(), rocksdb::IteratorMode::From(&prefix, rocksdb::Direction::Forward), ) .map(|(k, v)| (Vec::from(k), Vec::from(v))) diff --git a/src/database/admin.rs b/src/database/admin.rs index c3083309..7d2301d9 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -118,7 +118,7 @@ impl Admin { if let Ok(response) = guard._db.memory_usage() { send_message(RoomMessageEventContent::text_plain(response), guard, &state_lock); } else { - send_message(RoomMessageEventContent::text_plain("Failed to get database memory usage".to_string()), guard, &state_lock); + send_message(RoomMessageEventContent::text_plain("Failed to get database memory usage.".to_string()), guard, &state_lock); } } AdminCommand::SendMessage(message) => { From 0bb7d76dec4b3f54b1cbb37e57ddbe54e1dbd38f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 10 Jan 2022 20:20:45 +0100 Subject: [PATCH 069/445] improvement: rocksdb configuration --- src/database/abstraction/rocksdb.rs | 33 +++++++++++++++-------------- 1 file changed, 17 insertions(+), 16 deletions(-) diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index a7dd6e16..3f1793a2 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -17,25 +17,21 @@ pub struct RocksDbEngineTree<'a> { impl DatabaseEngine for Arc { fn open(config: &Config) -> Result { - let mut db_opts = rocksdb::Options::default(); - db_opts.create_if_missing(true); - db_opts.set_max_open_files(512); - db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level); - db_opts.set_compression_type(rocksdb::DBCompressionType::Zstd); - db_opts.set_target_file_size_base(2 << 22); - db_opts.set_max_bytes_for_level_base(2 << 24); - db_opts.set_max_bytes_for_level_multiplier(2.0); - db_opts.set_num_levels(8); - db_opts.set_write_buffer_size(2 << 27); - let rocksdb_cache = rocksdb::Cache::new_lru_cache((config.db_cache_capacity_mb * 1024.0 * 1024.0) as usize) .unwrap(); let mut block_based_options = rocksdb::BlockBasedOptions::default(); - block_based_options.set_block_size(2 << 19); block_based_options.set_block_cache(&rocksdb_cache); + + let mut db_opts = rocksdb::Options::default(); 
db_opts.set_block_based_table_factory(&block_based_options); + db_opts.create_if_missing(true); + db_opts.increase_parallelism(num_cpus::get() as i32); + db_opts.set_max_open_files(512); + db_opts.set_compression_type(rocksdb::DBCompressionType::Zstd); + db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level); + db_opts.optimize_level_style_compaction((config.db_cache_capacity_mb * 1024.0 * 1024.0) as usize); let cfs = rocksdb::DBWithThreadMode::::list_cf( &db_opts, @@ -90,13 +86,18 @@ impl DatabaseEngine for Arc { rocksdb::perf::get_memory_usage_stats(Some(&[&self.rocks]), Some(&[&self.cache]))?; Ok(format!( "Approximate memory usage of all the mem-tables: {:.3} MB\n\ - Approximate memory usage of un-flushed mem-tables: {:.3} MB\n\ - Approximate memory usage of all the table readers: {:.3} MB\n\ - Approximate memory usage by cache: {:.3} MB", + Approximate memory usage of un-flushed mem-tables: {:.3} MB\n\ + Approximate memory usage of all the table readers: {:.3} MB\n\ + Approximate memory usage by cache: {:.3} MB\n\ + self.cache.get_usage(): {:.3} MB\n\ + self.cache.get_pinned_usage(): {:.3} MB\n\ + ", stats.mem_table_total as f64 / 1024.0 / 1024.0, stats.mem_table_unflushed as f64 / 1024.0 / 1024.0, stats.mem_table_readers_total as f64 / 1024.0 / 1024.0, - stats.cache_total as f64 / 1024.0 / 1024.0 + stats.cache_total as f64 / 1024.0 / 1024.0, + self.cache.get_usage() as f64 / 1024.0 / 1024.0, + self.cache.get_pinned_usage() as f64 / 1024.0 / 1024.0, )) } } From b96822b6174de4d404bf0b9013a39f8fd2a06f87 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 10 Jan 2022 21:20:29 +0100 Subject: [PATCH 070/445] fix: use db options for column families too --- src/database/abstraction/rocksdb.rs | 57 ++++++++++++++++------------- 1 file changed, 32 insertions(+), 25 deletions(-) diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index 3f1793a2..c82e4bc8 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -4,6 +4,7 @@ use std::{future::Future, pin::Pin, sync::Arc, collections::HashMap, sync::RwLoc pub struct Engine { rocks: rocksdb::DBWithThreadMode, + cache_capacity_bytes: usize, cache: rocksdb::Cache, old_cfs: Vec, } @@ -15,23 +16,31 @@ pub struct RocksDbEngineTree<'a> { write_lock: RwLock<()> } +fn db_options(cache_capacity_bytes: usize, rocksdb_cache: &rocksdb::Cache) -> rocksdb::Options { + let mut block_based_options = rocksdb::BlockBasedOptions::default(); + block_based_options.set_block_cache(rocksdb_cache); + + let mut db_opts = rocksdb::Options::default(); + db_opts.set_block_based_table_factory(&block_based_options); + db_opts.create_if_missing(true); + db_opts.increase_parallelism(num_cpus::get() as i32); + db_opts.set_max_open_files(512); + db_opts.set_compression_type(rocksdb::DBCompressionType::Zstd); + db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level); + db_opts.optimize_level_style_compaction(cache_capacity_bytes); + + let prefix_extractor = rocksdb::SliceTransform::create_fixed_prefix(1); + db_opts.set_prefix_extractor(prefix_extractor); + + db_opts +} + impl DatabaseEngine for Arc { fn open(config: &Config) -> Result { - let rocksdb_cache = - rocksdb::Cache::new_lru_cache((config.db_cache_capacity_mb * 1024.0 * 1024.0) as usize) - .unwrap(); - - let mut block_based_options = rocksdb::BlockBasedOptions::default(); - block_based_options.set_block_cache(&rocksdb_cache); - - let mut db_opts = rocksdb::Options::default(); - 
db_opts.set_block_based_table_factory(&block_based_options); - db_opts.create_if_missing(true); - db_opts.increase_parallelism(num_cpus::get() as i32); - db_opts.set_max_open_files(512); - db_opts.set_compression_type(rocksdb::DBCompressionType::Zstd); - db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level); - db_opts.optimize_level_style_compaction((config.db_cache_capacity_mb * 1024.0 * 1024.0) as usize); + let cache_capacity_bytes = (config.db_cache_capacity_mb * 1024.0 * 1024.0) as usize; + let rocksdb_cache = rocksdb::Cache::new_lru_cache(cache_capacity_bytes).unwrap(); + + let db_opts = db_options(cache_capacity_bytes, &rocksdb_cache); let cfs = rocksdb::DBWithThreadMode::::list_cf( &db_opts, @@ -43,16 +52,16 @@ impl DatabaseEngine for Arc { &db_opts, &config.database_path, cfs.iter().map(|name| { - let mut options = rocksdb::Options::default(); - let prefix_extractor = rocksdb::SliceTransform::create_fixed_prefix(1); - options.set_prefix_extractor(prefix_extractor); - - rocksdb::ColumnFamilyDescriptor::new(name, options) + rocksdb::ColumnFamilyDescriptor::new( + name, + db_options(cache_capacity_bytes, &rocksdb_cache), + ) }), )?; Ok(Arc::new(Engine { rocks: db, + cache_capacity_bytes, cache: rocksdb_cache, old_cfs: cfs, })) @@ -61,11 +70,9 @@ impl DatabaseEngine for Arc { fn open_tree(&self, name: &'static str) -> Result> { if !self.old_cfs.contains(&name.to_owned()) { // Create if it didn't exist - let mut options = rocksdb::Options::default(); - let prefix_extractor = rocksdb::SliceTransform::create_fixed_prefix(1); - options.set_prefix_extractor(prefix_extractor); - - let _ = self.rocks.create_cf(name, &options); + let _ = self + .rocks + .create_cf(name, &db_options(self.cache_capacity_bytes, &self.cache)); } Ok(Arc::new(RocksDbEngineTree { From 7f27af032b7d0cb79248607decd1bb5f2a818507 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 12 Jan 2022 10:07:10 +0100 Subject: [PATCH 071/445] improvement: optimize rocksdb for spinning disks --- src/database/abstraction/rocksdb.rs | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index c82e4bc8..32095566 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -20,8 +20,20 @@ fn db_options(cache_capacity_bytes: usize, rocksdb_cache: &rocksdb::Cache) -> ro let mut block_based_options = rocksdb::BlockBasedOptions::default(); block_based_options.set_block_cache(rocksdb_cache); + // "Difference of spinning disk" + // https://zhangyuchi.gitbooks.io/rocksdbbook/content/RocksDB-Tuning-Guide.html + block_based_options.set_block_size(64 * 1024); + block_based_options.set_cache_index_and_filter_blocks(true); + let mut db_opts = rocksdb::Options::default(); db_opts.set_block_based_table_factory(&block_based_options); + db_opts.set_optimize_filters_for_hits(true); + db_opts.set_skip_stats_update_on_db_open(true); + db_opts.set_level_compaction_dynamic_level_bytes(true); + db_opts.set_target_file_size_base(256 * 1024 * 1024); + db_opts.set_compaction_readahead_size(2 * 1024 * 1024); + db_opts.set_use_direct_reads(true); + db_opts.set_use_direct_io_for_flush_and_compaction(true); db_opts.create_if_missing(true); db_opts.increase_parallelism(num_cpus::get() as i32); db_opts.set_max_open_files(512); From 9e77f7617cfcdc8d1c0e1b3146cbef6566ed0dc8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 12 Jan 2022 12:27:02 +0100 Subject: [PATCH 072/445] fix: disable direct IO 
again --- src/database/abstraction/rocksdb.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index 32095566..b7f6d3b6 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -22,7 +22,7 @@ fn db_options(cache_capacity_bytes: usize, rocksdb_cache: &rocksdb::Cache) -> ro // "Difference of spinning disk" // https://zhangyuchi.gitbooks.io/rocksdbbook/content/RocksDB-Tuning-Guide.html - block_based_options.set_block_size(64 * 1024); + block_based_options.set_block_size(4 * 1024); block_based_options.set_cache_index_and_filter_blocks(true); let mut db_opts = rocksdb::Options::default(); @@ -31,9 +31,9 @@ fn db_options(cache_capacity_bytes: usize, rocksdb_cache: &rocksdb::Cache) -> ro db_opts.set_skip_stats_update_on_db_open(true); db_opts.set_level_compaction_dynamic_level_bytes(true); db_opts.set_target_file_size_base(256 * 1024 * 1024); - db_opts.set_compaction_readahead_size(2 * 1024 * 1024); - db_opts.set_use_direct_reads(true); - db_opts.set_use_direct_io_for_flush_and_compaction(true); + //db_opts.set_compaction_readahead_size(2 * 1024 * 1024); + //db_opts.set_use_direct_reads(true); + //db_opts.set_use_direct_io_for_flush_and_compaction(true); db_opts.create_if_missing(true); db_opts.increase_parallelism(num_cpus::get() as i32); db_opts.set_max_open_files(512); From 447639054e21523aed76e408667c5263ccde85ab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 13 Jan 2022 21:03:53 +0100 Subject: [PATCH 073/445] improvement: higher default pdu capacity --- src/client_server/device.rs | 2 +- src/database.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client_server/device.rs b/src/client_server/device.rs index 03a3004b..f240f2e7 100644 --- a/src/client_server/device.rs +++ b/src/client_server/device.rs @@ -85,7 +85,7 @@ pub async fn update_device_route( Ok(update_device::Response {}.into()) } -/// # `PUT /_matrix/client/r0/devices/{deviceId}` +/// # `DELETE /_matrix/client/r0/devices/{deviceId}` /// /// Deletes the given device. 
/// diff --git a/src/database.rs b/src/database.rs index c2b3e2b9..9a71e737 100644 --- a/src/database.rs +++ b/src/database.rs @@ -128,7 +128,7 @@ fn default_db_cache_capacity_mb() -> f64 { } fn default_pdu_cache_capacity() -> u32 { - 100_000 + 1_000_000 } fn default_cleanup_second_interval() -> u32 { From a336027b0e45b512c55e4c0b68e095d40ebd01ae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 13 Jan 2022 21:11:45 +0100 Subject: [PATCH 074/445] fix: better memory usage message --- src/database/abstraction/rocksdb.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index b7f6d3b6..d1706d45 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -108,14 +108,12 @@ impl DatabaseEngine for Arc { Approximate memory usage of un-flushed mem-tables: {:.3} MB\n\ Approximate memory usage of all the table readers: {:.3} MB\n\ Approximate memory usage by cache: {:.3} MB\n\ - self.cache.get_usage(): {:.3} MB\n\ - self.cache.get_pinned_usage(): {:.3} MB\n\ + Approximate memory usage by cache pinned: {:.3} MB\n\ ", stats.mem_table_total as f64 / 1024.0 / 1024.0, stats.mem_table_unflushed as f64 / 1024.0 / 1024.0, stats.mem_table_readers_total as f64 / 1024.0 / 1024.0, stats.cache_total as f64 / 1024.0 / 1024.0, - self.cache.get_usage() as f64 / 1024.0 / 1024.0, self.cache.get_pinned_usage() as f64 / 1024.0 / 1024.0, )) } From 6fa01aa9826c2a4f7643289e0b86aee40efc59d2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 13 Jan 2022 21:46:20 +0100 Subject: [PATCH 075/445] fix: remove dbg --- src/database/abstraction/sqlite.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index d4fd0bdd..f80f50e4 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -136,7 +136,7 @@ type TupleOfBytes = (Vec, Vec); impl SqliteTable { #[tracing::instrument(skip(self, guard, key))] fn get_with_guard(&self, guard: &Connection, key: &[u8]) -> Result>> { - dbg!(&self.name); + //dbg!(&self.name); Ok(guard .prepare(format!("SELECT value FROM {} WHERE key = ?", self.name).as_str())? 
.query_row([key], |row| row.get(0)) @@ -145,7 +145,7 @@ impl SqliteTable { #[tracing::instrument(skip(self, guard, key, value))] fn insert_with_guard(&self, guard: &Connection, key: &[u8], value: &[u8]) -> Result<()> { - dbg!(&self.name); + //dbg!(&self.name); guard.execute( format!( "INSERT OR REPLACE INTO {} (key, value) VALUES (?, ?)", @@ -179,7 +179,7 @@ impl SqliteTable { .query_map([], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) .unwrap() .map(move |r| { - dbg!(&name); + //dbg!(&name); r.unwrap() }), ); @@ -286,7 +286,7 @@ impl Tree for SqliteTable { .query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) .unwrap() .map(move |r| { - dbg!(&name); + //dbg!(&name); r.unwrap() }), ); @@ -311,7 +311,7 @@ impl Tree for SqliteTable { .query_map([from], |row| Ok((row.get_unwrap(0), row.get_unwrap(1)))) .unwrap() .map(move |r| { - dbg!(&name); + //dbg!(&name); r.unwrap() }), ); From 16f826773bc26dd388f04e3e862bef7d1be9cdeb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 13 Jan 2022 22:47:30 +0100 Subject: [PATCH 076/445] refactor: fix warnings --- src/database/abstraction/rocksdb.rs | 4 ++-- src/database/abstraction/sqlite.rs | 4 ++-- src/database/abstraction/watchers.rs | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index d1706d45..79a3d82a 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -1,6 +1,6 @@ use super::{super::Config, watchers::Watchers, DatabaseEngine, Tree}; use crate::{utils, Result}; -use std::{future::Future, pin::Pin, sync::Arc, collections::HashMap, sync::RwLock}; +use std::{future::Future, pin::Pin, sync::Arc, sync::RwLock}; pub struct Engine { rocks: rocksdb::DBWithThreadMode, @@ -13,7 +13,7 @@ pub struct RocksDbEngineTree<'a> { db: Arc, name: &'a str, watchers: Watchers, - write_lock: RwLock<()> + write_lock: RwLock<()>, } fn db_options(cache_capacity_bytes: usize, rocksdb_cache: &rocksdb::Cache) -> rocksdb::Options { diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index f80f50e4..d4aab7dd 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -172,7 +172,7 @@ impl SqliteTable { let statement_ref = NonAliasingBox(statement); - let name = self.name.clone(); + //let name = self.name.clone(); let iterator = Box::new( statement @@ -267,7 +267,7 @@ impl Tree for SqliteTable { let guard = self.engine.read_lock_iterator(); let from = from.to_vec(); // TODO change interface? 
- let name = self.name.clone(); + //let name = self.name.clone(); if backwards { let statement = Box::leak(Box::new( diff --git a/src/database/abstraction/watchers.rs b/src/database/abstraction/watchers.rs index fec1f27a..55cb60b3 100644 --- a/src/database/abstraction/watchers.rs +++ b/src/database/abstraction/watchers.rs @@ -1,8 +1,8 @@ use std::{ collections::{hash_map, HashMap}, - sync::RwLock, future::Future, pin::Pin, + sync::RwLock, }; use tokio::sync::watch; From f67785caaf6a4be5c7d330df0f7a89781aa21f91 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Thu, 13 Jan 2022 22:24:47 +0000 Subject: [PATCH 077/445] Fix(ci): Disable CARGO_HOME caching --- .gitlab-ci.yml | 13 +------------ 1 file changed, 1 insertion(+), 12 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 1dedd8ff..f47327b8 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -23,18 +23,12 @@ variables: interruptible: true image: "rust:latest" tags: ["docker"] - cache: - paths: - - cargohome - key: "build_cache--$TARGET--$CI_COMMIT_BRANCH" variables: CARGO_PROFILE_RELEASE_LTO: "true" CARGO_PROFILE_RELEASE_CODEGEN_UNITS: "1" CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow - CARGO_HOME: $CI_PROJECT_DIR/cargohome before_script: - 'echo "Building for target $TARGET"' - - "mkdir -p $CARGO_HOME" - "rustc --version && cargo --version && rustup show" # Print version info for debugging - "rustup target add $TARGET" # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: @@ -219,15 +213,10 @@ test:cargo: image: "rust:latest" tags: ["docker"] variables: - CARGO_HOME: "$CI_PROJECT_DIR/cargohome" CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow - cache: - paths: - - cargohome - key: "test_cache--$CI_COMMIT_BRANCH" interruptible: true before_script: - - mkdir -p $CARGO_HOME + # - mkdir -p $CARGO_HOME - apt-get update -yqq - apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config - rustup component add clippy rustfmt From 80e51986c42ea449a3f1d7860c16722431f4fcaf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 14 Jan 2022 11:08:31 +0100 Subject: [PATCH 078/445] improvement: better default cache capacity --- src/database.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/database.rs b/src/database.rs index 9a71e737..d688ff9f 100644 --- a/src/database.rs +++ b/src/database.rs @@ -124,7 +124,7 @@ fn default_database_backend() -> String { } fn default_db_cache_capacity_mb() -> f64 { - 200.0 + 10.0 } fn default_pdu_cache_capacity() -> u32 { From d434dfb3a56afde239023685ca0a8d191355314b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 14 Jan 2022 11:40:49 +0100 Subject: [PATCH 079/445] feat: config option for rocksdb max open files --- src/database.rs | 6 ++++++ src/database/abstraction/rocksdb.rs | 29 ++++++++++++++++++++++------- 2 files changed, 28 insertions(+), 7 deletions(-) diff --git a/src/database.rs b/src/database.rs index d688ff9f..fd7a1451 100644 --- a/src/database.rs +++ b/src/database.rs @@ -49,6 +49,8 @@ pub struct Config { database_path: String, #[serde(default = "default_db_cache_capacity_mb")] db_cache_capacity_mb: f64, + #[serde(default = "default_rocksdb_max_open_files")] + rocksdb_max_open_files: i32, #[serde(default = "default_pdu_cache_capacity")] pdu_cache_capacity: u32, #[serde(default = "default_cleanup_second_interval")] @@ -127,6 +129,10 @@ fn 
default_db_cache_capacity_mb() -> f64 { 10.0 } +fn default_rocksdb_max_open_files() -> i32 { + 512 +} + fn default_pdu_cache_capacity() -> u32 { 1_000_000 } diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index 79a3d82a..adda6787 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -5,6 +5,7 @@ use std::{future::Future, pin::Pin, sync::Arc, sync::RwLock}; pub struct Engine { rocks: rocksdb::DBWithThreadMode, cache_capacity_bytes: usize, + max_open_files: i32, cache: rocksdb::Cache, old_cfs: Vec, } @@ -16,7 +17,11 @@ pub struct RocksDbEngineTree<'a> { write_lock: RwLock<()>, } -fn db_options(cache_capacity_bytes: usize, rocksdb_cache: &rocksdb::Cache) -> rocksdb::Options { +fn db_options( + cache_capacity_bytes: usize, + max_open_files: i32, + rocksdb_cache: &rocksdb::Cache, +) -> rocksdb::Options { let mut block_based_options = rocksdb::BlockBasedOptions::default(); block_based_options.set_block_cache(rocksdb_cache); @@ -36,7 +41,7 @@ fn db_options(cache_capacity_bytes: usize, rocksdb_cache: &rocksdb::Cache) -> ro //db_opts.set_use_direct_io_for_flush_and_compaction(true); db_opts.create_if_missing(true); db_opts.increase_parallelism(num_cpus::get() as i32); - db_opts.set_max_open_files(512); + db_opts.set_max_open_files(max_open_files); db_opts.set_compression_type(rocksdb::DBCompressionType::Zstd); db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level); db_opts.optimize_level_style_compaction(cache_capacity_bytes); @@ -52,7 +57,11 @@ impl DatabaseEngine for Arc { let cache_capacity_bytes = (config.db_cache_capacity_mb * 1024.0 * 1024.0) as usize; let rocksdb_cache = rocksdb::Cache::new_lru_cache(cache_capacity_bytes).unwrap(); - let db_opts = db_options(cache_capacity_bytes, &rocksdb_cache); + let db_opts = db_options( + cache_capacity_bytes, + config.rocksdb_max_open_files, + &rocksdb_cache, + ); let cfs = rocksdb::DBWithThreadMode::::list_cf( &db_opts, @@ -66,7 +75,11 @@ impl DatabaseEngine for Arc { cfs.iter().map(|name| { rocksdb::ColumnFamilyDescriptor::new( name, - db_options(cache_capacity_bytes, &rocksdb_cache), + db_options( + cache_capacity_bytes, + config.rocksdb_max_open_files, + &rocksdb_cache, + ), ) }), )?; @@ -74,6 +87,7 @@ impl DatabaseEngine for Arc { Ok(Arc::new(Engine { rocks: db, cache_capacity_bytes, + max_open_files: config.rocksdb_max_open_files, cache: rocksdb_cache, old_cfs: cfs, })) @@ -82,9 +96,10 @@ impl DatabaseEngine for Arc { fn open_tree(&self, name: &'static str) -> Result> { if !self.old_cfs.contains(&name.to_owned()) { // Create if it didn't exist - let _ = self - .rocks - .create_cf(name, &db_options(self.cache_capacity_bytes, &self.cache)); + let _ = self.rocks.create_cf( + name, + &db_options(self.cache_capacity_bytes, self.max_open_files, &self.cache), + ); } Ok(Arc::new(RocksDbEngineTree { From ab15ec6c32f2e5463369fe7a29f5ea2e6d9c4f2d Mon Sep 17 00:00:00 2001 From: Tglman Date: Fri, 18 Jun 2021 00:38:32 +0100 Subject: [PATCH 080/445] feat: Integration with persy using background ops --- Cargo.toml | 5 + src/database.rs | 6 + src/database/abstraction.rs | 5 +- src/database/abstraction/persy.rs | 245 ++++++++++++++++++++++++++++++ src/error.rs | 15 ++ 5 files changed, 275 insertions(+), 1 deletion(-) create mode 100644 src/database/abstraction/persy.rs diff --git a/Cargo.toml b/Cargo.toml index c87d949c..2dbd3fd3 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,6 +28,10 @@ tokio = "1.11.0" # Used for storing data permanently sled = { version = "0.34.6", features 
= ["compression", "no_metrics"], optional = true } #sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] } +persy = { git = "https://gitlab.com/tglman/persy.git", branch="master" , optional = true, features=["background_ops"] } +# Used by the persy write cache for background flush +timer = "0.2" +chrono = "0.4" # Used for the http request / response body type for Ruma endpoints used with reqwest bytes = "1.1.0" @@ -87,6 +91,7 @@ sha-1 = "0.9.8" [features] default = ["conduit_bin", "backend_sqlite", "backend_rocksdb"] backend_sled = ["sled"] +backend_persy = ["persy"] backend_sqlite = ["sqlite"] backend_heed = ["heed", "crossbeam"] backend_rocksdb = ["rocksdb"] diff --git a/src/database.rs b/src/database.rs index d688ff9f..c2cd9f29 100644 --- a/src/database.rs +++ b/src/database.rs @@ -255,6 +255,12 @@ impl Database { #[cfg(feature = "rocksdb")] Arc::new(Arc::::open(config)?) } + "persy" => { + #[cfg(not(feature = "persy"))] + return Err(Error::BadConfig("Database backend not found.")); + #[cfg(feature = "persy")] + Arc::new(Arc::::open(config)?) + } _ => { return Err(Error::BadConfig("Database backend not found.")); } diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs index 17bd971f..9a3771f3 100644 --- a/src/database/abstraction.rs +++ b/src/database/abstraction.rs @@ -15,7 +15,10 @@ pub mod heed; #[cfg(feature = "rocksdb")] pub mod rocksdb; -#[cfg(any(feature = "sqlite", feature = "rocksdb", feature = "heed"))] +#[cfg(feature = "persy")] +pub mod persy; + +#[cfg(any(feature = "sqlite", feature = "rocksdb", feature = "heed", feature="persy"))] pub mod watchers; pub trait DatabaseEngine: Send + Sync { diff --git a/src/database/abstraction/persy.rs b/src/database/abstraction/persy.rs new file mode 100644 index 00000000..5d633ab4 --- /dev/null +++ b/src/database/abstraction/persy.rs @@ -0,0 +1,245 @@ +use crate::{ + database::{ + abstraction::{DatabaseEngine, Tree}, + Config, + }, + Result, +}; +use persy::{ByteVec, OpenOptions, Persy, Transaction, TransactionConfig, ValueMode}; + +use std::{ + collections::HashMap, + future::Future, + pin::Pin, + sync::{Arc, RwLock}, +}; + +use tokio::sync::oneshot::Sender; +use tracing::warn; + +pub struct PersyEngine { + persy: Persy, +} + +impl DatabaseEngine for PersyEngine { + fn open(config: &Config) -> Result> { + let mut cfg = persy::Config::new(); + cfg.change_cache_size((config.db_cache_capacity_mb * 1024.0 * 1024.0) as u64); + + let persy = OpenOptions::new() + .create(true) + .config(cfg) + .open(&format!("{}/db.persy", config.database_path))?; + Ok(Arc::new(PersyEngine { persy })) + } + + fn open_tree(self: &Arc, name: &'static str) -> Result> { + // Create if it doesn't exist + if !self.persy.exists_index(name)? { + let mut tx = self.persy.begin()?; + tx.create_index::(name, ValueMode::Replace)?; + tx.prepare()?.commit()?; + } + + Ok(Arc::new(PersyTree { + persy: self.persy.clone(), + name: name.to_owned(), + watchers: RwLock::new(HashMap::new()), + })) + } + + fn flush(self: &Arc) -> Result<()> { + Ok(()) + } +} + +pub struct PersyTree { + persy: Persy, + name: String, + watchers: RwLock, Vec>>>, +} + +impl PersyTree { + fn begin(&self) -> Result { + Ok(self + .persy + .begin_with(TransactionConfig::new().set_background_sync(true))?) + } +} + +impl Tree for PersyTree { + #[tracing::instrument(skip(self, key))] + fn get(&self, key: &[u8]) -> Result>> { + let result = self + .persy + .get::(&self.name, &ByteVec::from(key))? 
+ .next() + .map(|v| (*v).to_owned()); + Ok(result) + } + + #[tracing::instrument(skip(self, key, value))] + fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { + self.insert_batch(&mut Some((key.to_owned(), value.to_owned())).into_iter())?; + let watchers = self.watchers.read().unwrap(); + let mut triggered = Vec::new(); + + for length in 0..=key.len() { + if watchers.contains_key(&key[..length]) { + triggered.push(&key[..length]); + } + } + + drop(watchers); + + if !triggered.is_empty() { + let mut watchers = self.watchers.write().unwrap(); + for prefix in triggered { + if let Some(txs) = watchers.remove(prefix) { + for tx in txs { + let _ = tx.send(()); + } + } + } + } + Ok(()) + } + + #[tracing::instrument(skip(self, iter))] + fn insert_batch<'a>(&self, iter: &mut dyn Iterator, Vec)>) -> Result<()> { + let mut tx = self.begin()?; + for (key, value) in iter { + tx.put::( + &self.name, + ByteVec::from(key.clone()), + ByteVec::from(value), + )?; + } + tx.prepare()?.commit()?; + Ok(()) + } + + #[tracing::instrument(skip(self, iter))] + fn increment_batch<'a>(&self, iter: &mut dyn Iterator>) -> Result<()> { + let mut tx = self.begin()?; + for key in iter { + let old = tx + .get::(&self.name, &ByteVec::from(key.clone()))? + .next() + .map(|v| (*v).to_owned()); + let new = crate::utils::increment(old.as_deref()).unwrap(); + tx.put::(&self.name, ByteVec::from(key), ByteVec::from(new))?; + } + tx.prepare()?.commit()?; + Ok(()) + } + + #[tracing::instrument(skip(self, key))] + fn remove(&self, key: &[u8]) -> Result<()> { + let mut tx = self.begin()?; + tx.remove::(&self.name, ByteVec::from(key), None)?; + tx.prepare()?.commit()?; + Ok(()) + } + + #[tracing::instrument(skip(self))] + fn iter<'a>(&'a self) -> Box, Vec)> + 'a> { + let iter = self.persy.range::(&self.name, ..); + match iter { + Ok(iter) => Box::new(iter.filter_map(|(k, v)| { + v.into_iter() + .map(|val| ((*k).to_owned().into(), (*val).to_owned().into())) + .next() + })), + Err(e) => { + warn!("error iterating {:?}", e); + Box::new(std::iter::empty()) + } + } + } + + #[tracing::instrument(skip(self, from, backwards))] + fn iter_from<'a>( + &'a self, + from: &[u8], + backwards: bool, + ) -> Box, Vec)> + 'a> { + let range = if backwards { + self.persy + .range::(&self.name, ..=ByteVec::from(from)) + } else { + self.persy + .range::(&self.name, ByteVec::from(from)..) 
+ }; + match range { + Ok(iter) => { + let map = iter.filter_map(|(k, v)| { + v.into_iter() + .map(|val| ((*k).to_owned().into(), (*val).to_owned().into())) + .next() + }); + if backwards { + Box::new(map.rev()) + } else { + Box::new(map) + } + } + Err(e) => { + warn!("error iterating with prefix {:?}", e); + Box::new(std::iter::empty()) + } + } + } + + #[tracing::instrument(skip(self, key))] + fn increment(&self, key: &[u8]) -> Result> { + self.increment_batch(&mut Some(key.to_owned()).into_iter())?; + Ok(self.get(key)?.unwrap()) + } + + #[tracing::instrument(skip(self, prefix))] + fn scan_prefix<'a>( + &'a self, + prefix: Vec, + ) -> Box, Vec)> + 'a> { + let range_prefix = ByteVec::from(prefix.clone()); + let range = self + .persy + .range::(&self.name, range_prefix..); + + match range { + Ok(iter) => { + let owned_prefix = prefix.clone(); + Box::new( + iter.take_while(move |(k, _)| (*k).starts_with(&owned_prefix)) + .filter_map(|(k, v)| { + v.into_iter() + .map(|val| ((*k).to_owned().into(), (*val).to_owned().into())) + .next() + }), + ) + } + Err(e) => { + warn!("error scanning prefix {:?}", e); + Box::new(std::iter::empty()) + } + } + } + + #[tracing::instrument(skip(self, prefix))] + fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { + let (tx, rx) = tokio::sync::oneshot::channel(); + + self.watchers + .write() + .unwrap() + .entry(prefix.to_vec()) + .or_default() + .push(tx); + + Box::pin(async move { + // Tx is never destroyed + rx.await.unwrap(); + }) + } +} diff --git a/src/error.rs b/src/error.rs index 4d427da4..5ffe48c9 100644 --- a/src/error.rs +++ b/src/error.rs @@ -8,6 +8,9 @@ use ruma::{ use thiserror::Error; use tracing::warn; +#[cfg(feature = "persy")] +use persy::PersyError; + #[cfg(feature = "conduit_bin")] use { crate::RumaResponse, @@ -36,6 +39,9 @@ pub enum Error { #[from] source: rusqlite::Error, }, + #[cfg(feature = "persy")] + #[error("There was a problem with the connection to the persy database.")] + PersyError { source: PersyError }, #[cfg(feature = "heed")] #[error("There was a problem with the connection to the heed database: {error}")] HeedError { error: String }, @@ -142,3 +148,12 @@ where self.to_response().respond_to(r) } } + +#[cfg(feature = "persy")] +impl> From> for Error { + fn from(err: persy::PE) -> Self { + Error::PersyError { + source: err.error().into(), + } + } +} From 1cc41937bd9ae7679aa48c10074ac1041d3a94b5 Mon Sep 17 00:00:00 2001 From: Tglman Date: Thu, 23 Dec 2021 22:59:17 +0000 Subject: [PATCH 081/445] refactor:use generic watcher in persy implementation --- Cargo.lock | 46 ++++++++++++++++++++++++++++ Cargo.toml | 5 +--- src/database/abstraction/persy.rs | 50 ++++--------------------------- 3 files changed, 53 insertions(+), 48 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d297102c..df37fd58 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -293,6 +293,7 @@ dependencies = [ "opentelemetry", "opentelemetry-jaeger", "parking_lot", + "persy", "rand 0.8.4", "regex", "reqwest", @@ -374,6 +375,21 @@ dependencies = [ "libc", ] +[[package]] +name = "crc" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49fc9a695bca7f35f5f4c15cddc84415f66a74ea78eef08e90c5024f2b540e23" +dependencies = [ + "crc-catalog", +] + +[[package]] +name = "crc-catalog" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccaeedb56da03b09f598226e25e80088cb4cd25f316e6e4df7d695f0feeb1403" + [[package]] name = "crc32fast" version = "1.3.0" @@ -1651,6 +1667,21 @@ 
version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +[[package]] +name = "persy" +version = "1.2.0" +source = "git+https://gitlab.com/tglman/persy.git?branch=master#ff102d6edeaf14d30a846c2e2376a814685d09e7" +dependencies = [ + "crc", + "data-encoding", + "fs2", + "linked-hash-map", + "rand 0.8.4", + "thiserror", + "unsigned-varint", + "zigzag", +] + [[package]] name = "pin-project" version = "1.0.10" @@ -3290,6 +3321,12 @@ version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f14ee04d9415b52b3aeab06258a3f07093182b88ba0f9b8d203f211a7a7d41c7" +[[package]] +name = "unsigned-varint" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d86a8dc7f45e4c1b0d30e43038c38f274e77af056aa5f74b93c2cf9eb3c1c836" + [[package]] name = "untrusted" version = "0.7.1" @@ -3532,6 +3569,15 @@ dependencies = [ "synstructure", ] +[[package]] +name = "zigzag" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70b40401a28d86ce16a330b863b86fd7dbee4d7c940587ab09ab8c019f9e3fdf" +dependencies = [ + "num-traits", +] + [[package]] name = "zstd" version = "0.9.2+zstd.1.5.1" diff --git a/Cargo.toml b/Cargo.toml index 2dbd3fd3..7c94a693 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -29,9 +29,6 @@ tokio = "1.11.0" sled = { version = "0.34.6", features = ["compression", "no_metrics"], optional = true } #sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] } persy = { git = "https://gitlab.com/tglman/persy.git", branch="master" , optional = true, features=["background_ops"] } -# Used by the persy write cache for background flush -timer = "0.2" -chrono = "0.4" # Used for the http request / response body type for Ruma endpoints used with reqwest bytes = "1.1.0" @@ -91,7 +88,7 @@ sha-1 = "0.9.8" [features] default = ["conduit_bin", "backend_sqlite", "backend_rocksdb"] backend_sled = ["sled"] -backend_persy = ["persy"] +backend_persy = ["persy", "parking_lot"] backend_sqlite = ["sqlite"] backend_heed = ["heed", "crossbeam"] backend_rocksdb = ["rocksdb"] diff --git a/src/database/abstraction/persy.rs b/src/database/abstraction/persy.rs index 5d633ab4..71efed3b 100644 --- a/src/database/abstraction/persy.rs +++ b/src/database/abstraction/persy.rs @@ -1,20 +1,14 @@ use crate::{ database::{ - abstraction::{DatabaseEngine, Tree}, + abstraction::{watchers::Watchers, DatabaseEngine, Tree}, Config, }, Result, }; use persy::{ByteVec, OpenOptions, Persy, Transaction, TransactionConfig, ValueMode}; -use std::{ - collections::HashMap, - future::Future, - pin::Pin, - sync::{Arc, RwLock}, -}; +use std::{future::Future, pin::Pin, sync::Arc}; -use tokio::sync::oneshot::Sender; use tracing::warn; pub struct PersyEngine { @@ -44,7 +38,7 @@ impl DatabaseEngine for PersyEngine { Ok(Arc::new(PersyTree { persy: self.persy.clone(), name: name.to_owned(), - watchers: RwLock::new(HashMap::new()), + watchers: Watchers::default(), })) } @@ -56,7 +50,7 @@ impl DatabaseEngine for PersyEngine { pub struct PersyTree { persy: Persy, name: String, - watchers: RwLock, Vec>>>, + watchers: Watchers, } impl PersyTree { @@ -81,27 +75,7 @@ impl Tree for PersyTree { #[tracing::instrument(skip(self, key, value))] fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { self.insert_batch(&mut Some((key.to_owned(), value.to_owned())).into_iter())?; - let 
watchers = self.watchers.read().unwrap(); - let mut triggered = Vec::new(); - - for length in 0..=key.len() { - if watchers.contains_key(&key[..length]) { - triggered.push(&key[..length]); - } - } - - drop(watchers); - - if !triggered.is_empty() { - let mut watchers = self.watchers.write().unwrap(); - for prefix in triggered { - if let Some(txs) = watchers.remove(prefix) { - for tx in txs { - let _ = tx.send(()); - } - } - } - } + self.watchers.wake(key); Ok(()) } @@ -228,18 +202,6 @@ impl Tree for PersyTree { #[tracing::instrument(skip(self, prefix))] fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { - let (tx, rx) = tokio::sync::oneshot::channel(); - - self.watchers - .write() - .unwrap() - .entry(prefix.to_vec()) - .or_default() - .push(tx); - - Box::pin(async move { - // Tx is never destroyed - rx.await.unwrap(); - }) + self.watchers.watch(prefix) } } From f9977ca64f84768c0a71535f6038f4a6487ddc17 Mon Sep 17 00:00:00 2001 From: Tglman Date: Thu, 13 Jan 2022 22:37:19 +0000 Subject: [PATCH 082/445] fix: changes to update to the last database engine trait definition --- src/database/abstraction.rs | 7 ++++++- src/database/abstraction/persy.rs | 12 ++++++------ 2 files changed, 12 insertions(+), 7 deletions(-) diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs index 9a3771f3..09081826 100644 --- a/src/database/abstraction.rs +++ b/src/database/abstraction.rs @@ -18,7 +18,12 @@ pub mod rocksdb; #[cfg(feature = "persy")] pub mod persy; -#[cfg(any(feature = "sqlite", feature = "rocksdb", feature = "heed", feature="persy"))] +#[cfg(any( + feature = "sqlite", + feature = "rocksdb", + feature = "heed", + feature = "persy" +))] pub mod watchers; pub trait DatabaseEngine: Send + Sync { diff --git a/src/database/abstraction/persy.rs b/src/database/abstraction/persy.rs index 71efed3b..628cf32b 100644 --- a/src/database/abstraction/persy.rs +++ b/src/database/abstraction/persy.rs @@ -11,12 +11,12 @@ use std::{future::Future, pin::Pin, sync::Arc}; use tracing::warn; -pub struct PersyEngine { +pub struct Engine { persy: Persy, } -impl DatabaseEngine for PersyEngine { - fn open(config: &Config) -> Result> { +impl DatabaseEngine for Arc { + fn open(config: &Config) -> Result { let mut cfg = persy::Config::new(); cfg.change_cache_size((config.db_cache_capacity_mb * 1024.0 * 1024.0) as u64); @@ -24,10 +24,10 @@ impl DatabaseEngine for PersyEngine { .create(true) .config(cfg) .open(&format!("{}/db.persy", config.database_path))?; - Ok(Arc::new(PersyEngine { persy })) + Ok(Arc::new(Engine { persy })) } - fn open_tree(self: &Arc, name: &'static str) -> Result> { + fn open_tree(&self, name: &'static str) -> Result> { // Create if it doesn't exist if !self.persy.exists_index(name)? 
{ let mut tx = self.persy.begin()?; @@ -42,7 +42,7 @@ impl DatabaseEngine for PersyEngine { })) } - fn flush(self: &Arc) -> Result<()> { + fn flush(&self) -> Result<()> { Ok(()) } } From c1cd4b5e26c68d1c5e91f85df2a65591f774d13c Mon Sep 17 00:00:00 2001 From: Tglman Date: Fri, 14 Jan 2022 21:00:13 +0000 Subject: [PATCH 083/445] chore: set the released version of persy in Cargo.toml --- Cargo.lock | 3 ++- Cargo.toml | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index df37fd58..469c5663 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1670,7 +1670,8 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "persy" version = "1.2.0" -source = "git+https://gitlab.com/tglman/persy.git?branch=master#ff102d6edeaf14d30a846c2e2376a814685d09e7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "29c6aa7d7f093620a28b74fcf5f5da73ba17a9e52fcbbdbb4ecc89e61cb2d673" dependencies = [ "crc", "data-encoding", diff --git a/Cargo.toml b/Cargo.toml index 7c94a693..df879c36 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,7 +28,7 @@ tokio = "1.11.0" # Used for storing data permanently sled = { version = "0.34.6", features = ["compression", "no_metrics"], optional = true } #sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] } -persy = { git = "https://gitlab.com/tglman/persy.git", branch="master" , optional = true, features=["background_ops"] } +persy = { version = "1.2" , optional = true, features=["background_ops"] } # Used for the http request / response body type for Ruma endpoints used with reqwest bytes = "1.1.0" From fb19114bd9bbfc9bcc50caaab72074247bbe726b Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Sat, 15 Jan 2022 15:52:47 +0100 Subject: [PATCH 084/445] rename iter_locals to get_local_users; make get_local_users skip on parse errors; remove deprecated function count_local_users --- src/database/admin.rs | 5 +---- src/database/users.rs | 22 +++++++++------------- 2 files changed, 10 insertions(+), 17 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index 5418f53a..859977e9 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -95,14 +95,11 @@ impl Admin { match event { AdminCommand::ListLocalUsers => { - // collect local users only - let users = guard.users.iter_locals(); + let users = guard.users.get_local_users(); let mut msg: String = format!("Found {} local user account(s):\n", users.len()); msg += &users.join("\n"); - // send number of local users as plain text: - // TODO: send as Markdown send_message(RoomMessageEventContent::text_plain(&msg), guard, &state_lock); } AdminCommand::RegisterAppservice(yaml) => { diff --git a/src/database/users.rs b/src/database/users.rs index d3e1fe43..021c710b 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -84,16 +84,6 @@ impl Users { Ok(self.userid_password.iter().count()) } - /// The method is DEPRECATED and was replaced by iter_locals() - /// - /// This method will only count those local user accounts with - /// a password thus returning only real accounts on this instance. - #[tracing::instrument(skip(self))] - pub fn count_local_users(&self) -> Result { - let n = self.userid_password.iter().filter(|(_, bytes)| bytes.len() > 0).count(); - Ok(n) - } - /// Find out which user an access token belongs to. 
#[tracing::instrument(skip(self, token))] @@ -134,13 +124,19 @@ impl Users { }) } - /// Returns a vector of local usernames + /// Returns a list of local usernames, that is, a parseable username + /// with a password of length greater then zero bytes. + /// If utils::string_from_bytes returns an error that username will be skipped + /// and the function will log the error #[tracing::instrument(skip(self))] - pub fn iter_locals(&self) -> Vec { + pub fn get_local_users(&self) -> Vec { self.userid_password.iter().filter(|(_, pw)| pw.len() > 0).map(|(username, _)| { match utils::string_from_bytes(&username) { Ok(s) => s, - Err(e) => e.to_string() + Err(e) => { + Error::bad_database(format!("Failed to parse username: {}", e.to_string())); + None + } } }).collect::>() } From 91eb6c4d08c5293f8af6436489923871fb2477a9 Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Sat, 15 Jan 2022 17:10:23 +0100 Subject: [PATCH 085/445] Return a Result instead of a vector --- src/database/admin.rs | 16 ++++++++++------ src/database/users.rs | 18 +++++++++++------- 2 files changed, 21 insertions(+), 13 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index 859977e9..7799ffa2 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -95,12 +95,16 @@ impl Admin { match event { AdminCommand::ListLocalUsers => { - let users = guard.users.get_local_users(); - - let mut msg: String = format!("Found {} local user account(s):\n", users.len()); - msg += &users.join("\n"); - - send_message(RoomMessageEventContent::text_plain(&msg), guard, &state_lock); + match guard.users.get_local_users() { + Ok(users) => { + let mut msg: String = format!("Found {} local user account(s):\n", users.len()); + msg += &users.join("\n"); + send_message(RoomMessageEventContent::text_plain(&msg), guard, &state_lock); + } + Err(e) => { + send_message(RoomMessageEventContent::text_plain(e.to_string()), guard, &state_lock); + } + } } AdminCommand::RegisterAppservice(yaml) => { guard.appservice.register_appservice(yaml).unwrap(); // TODO handle error diff --git a/src/database/users.rs b/src/database/users.rs index 021c710b..7d14b3e4 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -84,7 +84,6 @@ impl Users { Ok(self.userid_password.iter().count()) } - /// Find out which user an access token belongs to. #[tracing::instrument(skip(self, token))] pub fn find_from_token(&self, token: &str) -> Result, String)>> { @@ -129,16 +128,21 @@ impl Users { /// If utils::string_from_bytes returns an error that username will be skipped /// and the function will log the error #[tracing::instrument(skip(self))] - pub fn get_local_users(&self) -> Vec { - self.userid_password.iter().filter(|(_, pw)| pw.len() > 0).map(|(username, _)| { - match utils::string_from_bytes(&username) { + pub fn get_local_users(&self) -> Result> { + self.userid_password + .iter() + .filter(|(_, pw)| pw.len() > 0) + .map(|(username, _)| match utils::string_from_bytes(&username) { Ok(s) => s, Err(e) => { - Error::bad_database(format!("Failed to parse username: {}", e.to_string())); + Error::bad_database(format!( + "Failed to parse username while calling get_local_users(): {}", + e.to_string() + )); None } - } - }).collect::>() + }) + .collect::>>() } /// Returns the password hash for the given user. 
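
The listing logic that the last two patches settle on boils down to one rule: an account counts as local when a non-empty password hash is stored for it, and usernames that fail UTF-8 parsing are skipped instead of failing the whole listing. Below is a minimal, self-contained sketch of that rule; it uses plain byte pairs instead of Conduit's actual `Tree` abstraction, so the function name and signature are illustrative only.

```rust
// Sketch only: the slice stands in for iterating the `userid_password` tree.
fn list_local_users(userid_password: &[(Vec<u8>, Vec<u8>)]) -> Vec<String> {
    userid_password
        .iter()
        // A non-empty password hash marks a "real" local account.
        .filter(|(_, password)| !password.is_empty())
        // Skip (rather than fail on) usernames that are not valid UTF-8.
        .filter_map(|(username, _)| String::from_utf8(username.clone()).ok())
        .collect()
}

fn main() {
    let db = vec![
        (b"@alice:example.com".to_vec(), b"$argon2id$...".to_vec()), // local account
        (b"@bridged:example.com".to_vec(), Vec::new()),              // empty password: skipped
    ];
    assert_eq!(list_local_users(&db), vec!["@alice:example.com".to_string()]);
}
```
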
From 217e3789929b7a1b227058b3b88664ee5f74ca75 Mon Sep 17 00:00:00 2001 From: Julius de Bruijn Date: Sat, 15 Jan 2022 17:34:13 +0000 Subject: [PATCH 086/445] Add mautrix-signal to tested appservices --- APPSERVICES.md | 36 +----------------------------------- 1 file changed, 1 insertion(+), 35 deletions(-) diff --git a/APPSERVICES.md b/APPSERVICES.md index 894bc6f4..f23918b4 100644 --- a/APPSERVICES.md +++ b/APPSERVICES.md @@ -57,38 +57,4 @@ These appservices have been tested and work with Conduit without any extra steps - [matrix-appservice-discord](https://github.com/Half-Shot/matrix-appservice-discord) - [mautrix-hangouts](https://github.com/mautrix/hangouts/) - [mautrix-telegram](https://github.com/mautrix/telegram/) - -### [mautrix-signal](https://github.com/mautrix/signal) - -There are a few things you need to do, in order for the Signal bridge (at least -up to version `0.2.0`) to work. How you do this depends on whether you use -Docker or `virtualenv` to run it. In either case you need to modify -[portal.py](https://github.com/mautrix/signal/blob/master/mautrix_signal/portal.py). -Do this **before** following the bridge installation guide. - -1. **Create a copy of `portal.py`**. Go to - [portal.py](https://github.com/mautrix/signal/blob/master/mautrix_signal/portal.py) -at [mautrix-signal](https://github.com/mautrix/signal) (make sure you change to -the correct commit/version of mautrix-signal you're using) and copy its -content. Create a new `portal.py` on your system and paste the content in. -2. **Patch the copy**. Exact line numbers may be slightly different, look nearby if they don't match: - - [Line 1020](https://github.com/mautrix/signal/blob/4ea831536f154aba6419d13292479eb383ea3308/mautrix_signal/portal.py#L1020) - ```diff - --- levels.users[self.main_intent.mxid] = 9001 if is_initial else 100 - +++ levels.users[self.main_intent.mxid] = 100 if is_initial else 100 - ``` - - [Between lines 1041 and 1042](https://github.com/mautrix/signal/blob/4ea831536f154aba6419d13292479eb383ea3308/mautrix_signal/portal.py#L1041-L1042) add a new line: - ```diff - "type": str(EventType.ROOM_POWER_LEVELS), - +++ "state_key": "", - "content": power_levels.serialize(), - ``` -3. **Deploy the patch**. This is different depending on how you have `mautrix-signal` deployed: - - [*If using virtualenv*] Copy your patched `portal.py` to `./lib/python3.7/site-packages/mautrix_signal/portal.py` (the exact version of Python may be different on your system). - - [*If using Docker*] Map the patched `portal.py` into the `mautrix-signal` container: - - ```yaml - volumes: - - ./your/path/on/host/portal.py:/usr/lib/python3.9/site-packages/mautrix_signal/portal.py - ``` -4. Now continue with the [bridge installation instructions ](https://docs.mau.fi/bridges/index.html) and the general bridge notes above. +- [mautrix-signal](https://github.com/mautrix/signal/) from version `0.2.2` forward. 
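
For context on what the bridges listed above (and the `register_appservice` admin command) consume: a standard Matrix appservice registration file, pasted into the admin room inside a code block. The snippet below is only a generic sketch; the id, URL, tokens and namespaces are placeholders, and the real file is generated by the bridge itself.

```yaml
id: signal                        # any unique name for this bridge
url: http://localhost:29328       # where the homeserver can reach the appservice
as_token: "long-random-secret-1"  # token the bridge presents to the homeserver
hs_token: "long-random-secret-2"  # token the homeserver presents to the bridge
sender_localpart: signalbot
rate_limited: false
namespaces:
  users:
    - exclusive: true
      regex: '@signal_.*:your\.server\.name'
  aliases: []
  rooms: []
```
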
From c03bf6ef11bd88459d6dc1eed75d23879ae2fa1a Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Sat, 15 Jan 2022 22:20:51 +0100 Subject: [PATCH 087/445] name the function after its purpose: iter_locals -> get_local_users --- src/database/users.rs | 37 ++++++++++++++++++++++++++----------- 1 file changed, 26 insertions(+), 11 deletions(-) diff --git a/src/database/users.rs b/src/database/users.rs index 7d14b3e4..6f51e1f9 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -129,20 +129,35 @@ impl Users { /// and the function will log the error #[tracing::instrument(skip(self))] pub fn get_local_users(&self) -> Result> { - self.userid_password + let users: Vec = self + .userid_password .iter() - .filter(|(_, pw)| pw.len() > 0) - .map(|(username, _)| match utils::string_from_bytes(&username) { - Ok(s) => s, - Err(e) => { - Error::bad_database(format!( - "Failed to parse username while calling get_local_users(): {}", - e.to_string() - )); + .filter_map(|(username, pw)| self.get_username_on_valid_password(&username, &pw)) + .collect(); + Ok(users) + } + + /// A private helper to avoid double filtering the iterator + fn get_username_on_valid_password(&self, username: &[u8], password: &[u8]) -> Option { + // A valid password is not empty + if password.len() > 0 { + match utils::string_from_bytes(username) { + Ok(u) => Some(u), + Err(_) => { + // TODO: add error cause! + // let msg: String = format!( + // "Failed to parse username while calling get_local_users(): {}", + // e.to_string() + // ); + Error::bad_database( + "Failed to parse username while calling get_username_on_valid_password", + ); None } - }) - .collect::>>() + } + } else { + None + } } /// Returns the password hash for the given user. From 9205c070485a234463119182bd976a0d45c1ace0 Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Sat, 15 Jan 2022 22:37:39 +0100 Subject: [PATCH 088/445] Update get_local_users description --- src/database/users.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/database/users.rs b/src/database/users.rs index 6f51e1f9..e510140d 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -123,10 +123,11 @@ impl Users { }) } - /// Returns a list of local usernames, that is, a parseable username - /// with a password of length greater then zero bytes. + /// Returns a list of local users as list of usernames. + /// + /// A user account is considered `local` if the length of it's password + /// is greater then zero. 
/// If utils::string_from_bytes returns an error that username will be skipped - /// and the function will log the error #[tracing::instrument(skip(self))] pub fn get_local_users(&self) -> Result> { let users: Vec = self @@ -138,6 +139,7 @@ impl Users { } /// A private helper to avoid double filtering the iterator + #[tracing::instrument(skip(self))] fn get_username_on_valid_password(&self, username: &[u8], password: &[u8]) -> Option { // A valid password is not empty if password.len() > 0 { From 13ae036ca04b4ebd427444252ef9856b3028b7ac Mon Sep 17 00:00:00 2001 From: Andrei Vasiliu Date: Sun, 16 Jan 2022 13:52:23 +0200 Subject: [PATCH 089/445] Move and refactor admin commands into admin module --- src/database/admin.rs | 239 +++++++++++++++++++++++++++++++++++++++++- src/database/rooms.rs | 220 +------------------------------------- 2 files changed, 240 insertions(+), 219 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index 7d2301d9..518d7587 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -1,10 +1,17 @@ -use std::{convert::TryInto, sync::Arc}; +use std::{convert::TryFrom, convert::TryInto, sync::Arc, time::Instant}; -use crate::{pdu::PduBuilder, Database}; -use rocket::futures::{channel::mpsc, stream::StreamExt}; +use crate::{ + error::{Error, Result}, + pdu::PduBuilder, + server_server, Database, PduEvent, +}; +use rocket::{ + futures::{channel::mpsc, stream::StreamExt}, + http::RawStr, +}; use ruma::{ events::{room::message::RoomMessageEventContent, EventType}, - UserId, + EventId, RoomId, RoomVersionId, UserId, }; use serde_json::value::to_raw_value; use tokio::sync::{MutexGuard, RwLock, RwLockReadGuard}; @@ -137,3 +144,227 @@ impl Admin { self.sender.unbounded_send(command).unwrap(); } } + +pub fn parse_admin_command(db: &Database, command_line: &str, body: Vec<&str>) -> AdminCommand { + let mut parts = command_line.split_whitespace().skip(1); + + let command_name = match parts.next() { + Some(command) => command, + None => { + let message = "No command given. Use help for a list of commands."; + return AdminCommand::SendMessage(RoomMessageEventContent::text_html( + html_to_markdown(message), + message, + )); + } + }; + + let args: Vec<_> = parts.collect(); + + match try_parse_admin_command(db, command_name, args, body) { + Ok(admin_command) => admin_command, + Err(error) => { + let message = format!( + "Encountered error while handling {} command:\n\ +
<pre><code>{}</code></pre>",
+                command_name, error,
+            );
+
+            AdminCommand::SendMessage(RoomMessageEventContent::text_html(
+                html_to_markdown(&message),
+                message,
+            ))
+        }
+    }
+}
+
+// Helper for `RoomMessageEventContent::text_html`, which needs the content as
+// both markdown and HTML.
+fn html_to_markdown(text: &str) -> String {
+    text.replace("<p>", "")
+        .replace("</p>", "\n")
+        .replace("<pre><code>", "```\n")
+        .replace("</code></pre>", "\n```")
+        .replace("<code>", "`")
+        .replace("</code>", "`")
+        .replace("<li>", "* ")
+        .replace("</li>", "")
+        .replace("<ul>\n", "")
+        .replace("</ul>\n", "")
+}
+
+const HELP_TEXT: &'static str = r#"
+<p>The following commands are available:</p>
+<ul>
+<li><code>register_appservice</code>: Register a bridge using its registration YAML</li>
+<li><code>unregister_appservice</code>: Unregister a bridge using its ID</li>
+<li><code>list_appservices</code>: List all the currently registered bridges</li>
+<li><code>get_auth_chain</code>: Get the `auth_chain` of a PDU</li>
+<li><code>parse_pdu</code>: Parse and print a PDU from a JSON</li>
+<li><code>get_pdu</code>: Retrieve and print a PDU by ID from the Conduit database</li>
+<li><code>database_memory_usage</code>: Print database memory usage statistics</li>
+</ul>
          +"#; + +pub fn try_parse_admin_command( + db: &Database, + command: &str, + args: Vec<&str>, + body: Vec<&str>, +) -> Result { + let command = match command { + "help" => AdminCommand::SendMessage(RoomMessageEventContent::text_html( + html_to_markdown(HELP_TEXT), + HELP_TEXT, + )), + "register_appservice" => { + if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" { + let appservice_config = body[1..body.len() - 1].join("\n"); + let parsed_config = serde_yaml::from_str::(&appservice_config); + match parsed_config { + Ok(yaml) => AdminCommand::RegisterAppservice(yaml), + Err(e) => AdminCommand::SendMessage(RoomMessageEventContent::text_plain( + format!("Could not parse appservice config: {}", e), + )), + } + } else { + AdminCommand::SendMessage(RoomMessageEventContent::text_plain( + "Expected code block in command body.", + )) + } + } + "unregister_appservice" => { + if args.len() == 1 { + AdminCommand::UnregisterAppservice(args[0].to_owned()) + } else { + AdminCommand::SendMessage(RoomMessageEventContent::text_plain( + "Missing appservice identifier", + )) + } + } + "list_appservices" => AdminCommand::ListAppservices, + "get_auth_chain" => { + if args.len() == 1 { + if let Ok(event_id) = EventId::parse_arc(args[0]) { + if let Some(event) = db.rooms.get_pdu_json(&event_id)? { + let room_id_str = event + .get("room_id") + .and_then(|val| val.as_str()) + .ok_or_else(|| Error::bad_database("Invalid event in database"))?; + + let room_id = <&RoomId>::try_from(room_id_str).map_err(|_| { + Error::bad_database("Invalid room id field in event in database") + })?; + let start = Instant::now(); + let count = + server_server::get_auth_chain(room_id, vec![event_id], db)?.count(); + let elapsed = start.elapsed(); + return Ok(AdminCommand::SendMessage( + RoomMessageEventContent::text_plain(format!( + "Loaded auth chain with length {} in {:?}", + count, elapsed + )), + )); + } + } + } + + AdminCommand::SendMessage(RoomMessageEventContent::text_plain( + "Usage: get_auth_chain ", + )) + } + "parse_pdu" => { + if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" { + let string = body[1..body.len() - 1].join("\n"); + match serde_json::from_str(&string) { + Ok(value) => { + let event_id = EventId::parse(format!( + "${}", + // Anything higher than version3 behaves the same + ruma::signatures::reference_hash(&value, &RoomVersionId::V6) + .expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are valid event ids"); + + match serde_json::from_value::( + serde_json::to_value(value).expect("value is json"), + ) { + Ok(pdu) => { + AdminCommand::SendMessage(RoomMessageEventContent::text_plain( + format!("EventId: {:?}\n{:#?}", event_id, pdu), + )) + } + Err(e) => AdminCommand::SendMessage( + RoomMessageEventContent::text_plain(format!( + "EventId: {:?}\nCould not parse event: {}", + event_id, e + )), + ), + } + } + Err(e) => AdminCommand::SendMessage(RoomMessageEventContent::text_plain( + format!("Invalid json in command body: {}", e), + )), + } + } else { + AdminCommand::SendMessage(RoomMessageEventContent::text_plain( + "Expected code block in command body.", + )) + } + } + "get_pdu" => { + if args.len() == 1 { + if let Ok(event_id) = EventId::parse(args[0]) { + let mut outlier = false; + let mut pdu_json = db.rooms.get_non_outlier_pdu_json(&event_id)?; + if pdu_json.is_none() { + outlier = true; + pdu_json = db.rooms.get_pdu_json(&event_id)?; + } + match pdu_json { + Some(json) => { + let json_text = 
serde_json::to_string_pretty(&json) + .expect("canonical json is valid json"); + AdminCommand::SendMessage( + RoomMessageEventContent::text_html( + format!("{}\n```json\n{}\n```", + if outlier { + "PDU is outlier" + } else { "PDU was accepted"}, json_text), + format!("

<p>{}</p>\n<pre>{}\n</pre>
          \n", + if outlier { + "PDU is outlier" + } else { "PDU was accepted"}, RawStr::new(&json_text).html_escape()) + ), + ) + } + None => AdminCommand::SendMessage(RoomMessageEventContent::text_plain( + "PDU not found.", + )), + } + } else { + AdminCommand::SendMessage(RoomMessageEventContent::text_plain( + "Event ID could not be parsed.", + )) + } + } else { + AdminCommand::SendMessage(RoomMessageEventContent::text_plain( + "Usage: get_pdu ", + )) + } + } + "database_memory_usage" => AdminCommand::ShowMemoryUsage, + _ => { + let message = format!( + "Unrecognized command {}, try help for a list of commands.", + command, + ); + AdminCommand::SendMessage(RoomMessageEventContent::text_html( + html_to_markdown(&message), + message, + )) + } + }; + + Ok(command) +} diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 0ba6c9ba..14df8f50 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -3,13 +3,13 @@ mod edus; pub use edus::RoomEdus; use crate::{ + database::admin::parse_admin_command, pdu::{EventHash, PduBuilder}, - server_server, utils, Database, Error, PduEvent, Result, + utils, Database, Error, PduEvent, Result, }; use lru_cache::LruCache; use regex::Regex; use ring::digest; -use rocket::http::RawStr; use ruma::{ api::{client::error::ErrorKind, federation}, events::{ @@ -19,7 +19,6 @@ use ruma::{ room::{ create::RoomCreateEventContent, member::{MembershipState, RoomMemberEventContent}, - message::RoomMessageEventContent, power_levels::RoomPowerLevelsEventContent, }, tag::TagEvent, @@ -40,12 +39,11 @@ use std::{ iter, mem::size_of, sync::{Arc, Mutex, RwLock}, - time::Instant, }; use tokio::sync::MutexGuard; use tracing::{error, warn}; -use super::{abstraction::Tree, admin::AdminCommand, pusher}; +use super::{abstraction::Tree, pusher}; /// The unique identifier of each state group. /// @@ -1496,216 +1494,8 @@ impl Rooms { let command_line = lines.next().expect("each string has at least one line"); let body: Vec<_> = lines.collect(); - let mut parts = command_line.split_whitespace().skip(1); - if let Some(command) = parts.next() { - let args: Vec<_> = parts.collect(); - - match command { - "register_appservice" => { - if body.len() > 2 - && body[0].trim() == "```" - && body.last().unwrap().trim() == "```" - { - let appservice_config = body[1..body.len() - 1].join("\n"); - let parsed_config = serde_yaml::from_str::( - &appservice_config, - ); - match parsed_config { - Ok(yaml) => { - db.admin - .send(AdminCommand::RegisterAppservice(yaml)); - } - Err(e) => { - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain(format!( - "Could not parse appservice config: {}", - e - )), - )); - } - } - } else { - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain( - "Expected code block in command body.", - ), - )); - } - } - "unregister_appservice" => { - if args.len() == 1 { - db.admin.send(AdminCommand::UnregisterAppservice( - args[0].to_owned(), - )); - } else { - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain( - "Missing appservice identifier", - ), - )); - } - } - "list_appservices" => { - db.admin.send(AdminCommand::ListAppservices); - } - "get_auth_chain" => { - if args.len() == 1 { - if let Ok(event_id) = EventId::parse_arc(args[0]) { - if let Some(event) = db.rooms.get_pdu_json(&event_id)? 
{ - let room_id_str = event - .get("room_id") - .and_then(|val| val.as_str()) - .ok_or_else(|| { - Error::bad_database( - "Invalid event in database", - ) - })?; - - let room_id = <&RoomId>::try_from(room_id_str) - .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - let start = Instant::now(); - let count = server_server::get_auth_chain( - room_id, - vec![event_id], - db, - )? - .count(); - let elapsed = start.elapsed(); - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain(format!( - "Loaded auth chain with length {} in {:?}", - count, elapsed - )), - )); - } - } - } - } - "parse_pdu" => { - if body.len() > 2 - && body[0].trim() == "```" - && body.last().unwrap().trim() == "```" - { - let string = body[1..body.len() - 1].join("\n"); - match serde_json::from_str(&string) { - Ok(value) => { - let event_id = EventId::parse(format!( - "${}", - // Anything higher than version3 behaves the same - ruma::signatures::reference_hash( - &value, - &RoomVersionId::V6 - ) - .expect("ruma can calculate reference hashes") - )) - .expect( - "ruma's reference hashes are valid event ids", - ); - - match serde_json::from_value::( - serde_json::to_value(value) - .expect("value is json"), - ) { - Ok(pdu) => { - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain( - format!( - "EventId: {:?}\n{:#?}", - event_id, pdu - ), - ), - )); - } - Err(e) => { - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain( - format!("EventId: {:?}\nCould not parse event: {}", event_id, e), - ), - )); - } - } - } - Err(e) => { - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain(format!( - "Invalid json in command body: {}", - e - )), - )); - } - } - } else { - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain( - "Expected code block in command body.", - ), - )); - } - } - "get_pdu" => { - if args.len() == 1 { - if let Ok(event_id) = EventId::parse(args[0]) { - let mut outlier = false; - let mut pdu_json = - db.rooms.get_non_outlier_pdu_json(&event_id)?; - if pdu_json.is_none() { - outlier = true; - pdu_json = db.rooms.get_pdu_json(&event_id)?; - } - match pdu_json { - Some(json) => { - let json_text = - serde_json::to_string_pretty(&json) - .expect("canonical json is valid json"); - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_html( - format!("{}\n```json\n{}\n```", - if outlier { - "PDU is outlier" - } else { "PDU was accepted"}, json_text), - format!("

<p>{}</p>\n<pre>{}\n</pre>
          \n", - if outlier { - "PDU is outlier" - } else { "PDU was accepted"}, RawStr::new(&json_text).html_escape()) - ), - )); - } - None => { - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain( - "PDU not found.", - ), - )); - } - } - } else { - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain( - "Event ID could not be parsed.", - ), - )); - } - } else { - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain( - "Usage: get_pdu ", - ), - )); - } - } - "database_memory_usage" => { - db.admin.send(AdminCommand::ShowMemoryUsage); - } - _ => { - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain(format!( - "Unrecognized command: {}", - command - )), - )); - } - } - } + let command = parse_admin_command(db, command_line, body); + db.admin.send(command); } } } From 3e79d154957211b98343f18077282b6ab5e6d36e Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Sun, 16 Jan 2022 20:15:53 +0100 Subject: [PATCH 090/445] Updated function documentation --- src/database/users.rs | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/src/database/users.rs b/src/database/users.rs index e510140d..9fe4a4ee 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -125,28 +125,27 @@ impl Users { /// Returns a list of local users as list of usernames. /// - /// A user account is considered `local` if the length of it's password - /// is greater then zero. - /// If utils::string_from_bytes returns an error that username will be skipped + /// A user account is considered `local` if the length of it's password is greater then zero. #[tracing::instrument(skip(self))] pub fn get_local_users(&self) -> Result> { let users: Vec = self .userid_password .iter() - .filter_map(|(username, pw)| self.get_username_on_valid_password(&username, &pw)) + .filter_map(|(username, pw)| self.get_username_with_valid_password(&username, &pw)) .collect(); Ok(users) } - /// A private helper to avoid double filtering the iterator + /// A private helper to avoid double filtering the iterator in get_local_users(). + /// If utils::string_from_bytes(...) returns an error that username will be skipped + /// and the error will be logged. TODO: add error cause. #[tracing::instrument(skip(self))] - fn get_username_on_valid_password(&self, username: &[u8], password: &[u8]) -> Option { + fn get_username_with_valid_password(&self, username: &[u8], password: &[u8]) -> Option { // A valid password is not empty if password.len() > 0 { match utils::string_from_bytes(username) { Ok(u) => Some(u), Err(_) => { - // TODO: add error cause! 
// let msg: String = format!( // "Failed to parse username while calling get_local_users(): {}", // e.to_string() From 52284ef9e2de88798408913b73d6b783768e13f3 Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Sun, 16 Jan 2022 20:25:16 +0100 Subject: [PATCH 091/445] Add some debug/info if user was found --- src/database/users.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/database/users.rs b/src/database/users.rs index 9fe4a4ee..f73c1c8b 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -10,6 +10,7 @@ use ruma::{ }; use std::{collections::BTreeMap, convert::TryInto, mem, sync::Arc}; use tracing::warn; +use tracing::info; use super::abstraction::Tree; @@ -144,7 +145,10 @@ impl Users { // A valid password is not empty if password.len() > 0 { match utils::string_from_bytes(username) { - Ok(u) => Some(u), + Ok(u) => { + info!("list_local_users_test: found user {}", u); + Some(u) + }, Err(_) => { // let msg: String = format!( // "Failed to parse username while calling get_local_users(): {}", From 50430cf4ab8c742b3942b2735f0d264b17be936e Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Sun, 16 Jan 2022 21:22:57 +0100 Subject: [PATCH 092/445] Name function after command: list_local_users --- src/database/admin.rs | 2 +- src/database/users.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index 7799ffa2..3b347b10 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -95,7 +95,7 @@ impl Admin { match event { AdminCommand::ListLocalUsers => { - match guard.users.get_local_users() { + match guard.users.list_local_users() { Ok(users) => { let mut msg: String = format!("Found {} local user account(s):\n", users.len()); msg += &users.join("\n"); diff --git a/src/database/users.rs b/src/database/users.rs index 9fe4a4ee..645e54c0 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -127,7 +127,7 @@ impl Users { /// /// A user account is considered `local` if the length of it's password is greater then zero. 
#[tracing::instrument(skip(self))] - pub fn get_local_users(&self) -> Result> { + pub fn list_local_users(&self) -> Result> { let users: Vec = self .userid_password .iter() From 10f1da12bfa17c05ae219913c411fd3c27dc3a29 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Sun, 16 Jan 2022 20:57:23 +0000 Subject: [PATCH 093/445] CI: Fix cargo-test --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index f47327b8..73a1a928 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -218,7 +218,7 @@ test:cargo: before_script: # - mkdir -p $CARGO_HOME - apt-get update -yqq - - apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config + - apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config libclang-dev - rustup component add clippy rustfmt - curl "https://faulty-storage.de/gitlab-report" --output ./gitlab-report && chmod +x ./gitlab-report # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: From ee8e72f7a809cfbe58697ad69aff437d35e1404f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 17 Jan 2022 14:35:38 +0100 Subject: [PATCH 094/445] feat: implement server ACLs --- Cargo.lock | 48 +++++++---- Cargo.toml | 2 +- src/client_server/membership.rs | 4 +- src/client_server/message.rs | 4 +- src/client_server/state.rs | 4 +- src/client_server/to_device.rs | 4 +- src/database/abstraction/rocksdb.rs | 2 +- src/database/sending.rs | 8 +- src/database/transaction_ids.rs | 6 +- src/server_server.rs | 126 +++++++++++++++++++++++----- 10 files changed, 150 insertions(+), 58 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index d297102c..5be10f14 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2086,7 +2086,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ "assign", "js_int", @@ -2107,7 +2107,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ "bytes", "http", @@ -2123,7 +2123,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2134,7 +2134,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ "ruma-api", "ruma-common", @@ -2148,7 +2148,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.12.3" -source = 
"git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ "assign", "bytes", @@ -2168,7 +2168,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ "indexmap", "js_int", @@ -2183,7 +2183,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ "indoc", "js_int", @@ -2194,12 +2194,13 @@ dependencies = [ "serde", "serde_json", "thiserror", + "wildmatch", ] [[package]] name = "ruma-events-macros" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2210,7 +2211,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ "js_int", "ruma-api", @@ -2225,7 +2226,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ "percent-encoding", "rand 0.8.4", @@ -2234,12 +2235,13 @@ dependencies = [ "ruma-serde", "ruma-serde-macros", "serde", + "uuid", ] [[package]] name = "ruma-identifiers-macros" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2249,7 +2251,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ "thiserror", ] @@ -2257,7 +2259,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = 
"git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ "js_int", "ruma-api", @@ -2270,7 +2272,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ "js_int", "ruma-api", @@ -2285,8 +2287,9 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ + "base64 0.13.0", "bytes", "form_urlencoded", "itoa 0.4.8", @@ -2299,7 +2302,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2310,7 +2313,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2327,7 +2330,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.4.1" -source = "git+https://github.com/ruma/ruma?rev=f8ba7f795765bf4aeb4db06849f9fdde9c162ac3#f8ba7f795765bf4aeb4db06849f9fdde9c162ac3" +source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" dependencies = [ "itertools", "js_int", @@ -3308,6 +3311,15 @@ dependencies = [ "percent-encoding", ] +[[package]] +name = "uuid" +version = "0.8.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" +dependencies = [ + "getrandom 0.2.3", +] + [[package]] name = "vcpkg" version = "0.2.15" diff --git a/Cargo.toml b/Cargo.toml index c87d949c..29a090c7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -19,7 +19,7 @@ rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle request # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "f8ba7f795765bf4aeb4db06849f9fdde9c162ac3", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "08d60b3d376b63462f769d4b9bd3bbfb560d501a", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = 
"https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index cede51f0..70352784 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -23,7 +23,7 @@ use ruma::{ }, EventType, }, - serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue}, + serde::{to_canonical_value, Base64, CanonicalJsonObject, CanonicalJsonValue}, state_res::{self, RoomVersion}, uint, EventId, RoomId, RoomVersionId, ServerName, UserId, }; @@ -787,7 +787,7 @@ async fn join_room_by_id_helper( fn validate_and_add_event_id( pdu: &RawJsonValue, room_version: &RoomVersionId, - pub_key_map: &RwLock>>, + pub_key_map: &RwLock>>, db: &Database, ) -> Result<(Box, CanonicalJsonObject)> { let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 9705e4c0..36653fab 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -74,11 +74,11 @@ pub async fn send_message_event_route( } let mut unsigned = BTreeMap::new(); - unsigned.insert("transaction_id".to_owned(), body.txn_id.clone().into()); + unsigned.insert("transaction_id".to_owned(), body.txn_id.to_string().into()); let event_id = db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::from(&body.event_type), + event_type: EventType::from(&*body.event_type), content: serde_json::from_str(body.body.body.json().get()) .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?, unsigned: Some(unsigned), diff --git a/src/client_server/state.rs b/src/client_server/state.rs index e42694ae..c07d4825 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -44,7 +44,7 @@ pub async fn send_state_event_for_key_route( &db, sender_user, &body.room_id, - EventType::from(&body.event_type), + EventType::from(&*body.event_type), &body.body.body, // Yes, I hate it too body.state_key.to_owned(), ) @@ -86,7 +86,7 @@ pub async fn send_state_event_for_empty_key_route( &db, sender_user, &body.room_id, - EventType::from(&body.event_type), + EventType::from(&*body.event_type), &body.body.body, body.state_key.to_owned(), ) diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index 177b1234..6e764deb 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -53,8 +53,8 @@ pub async fn send_event_to_device_route( serde_json::to_vec(&federation::transactions::edu::Edu::DirectToDevice( DirectDeviceContent { sender: sender_user.clone(), - ev_type: EventType::from(&body.event_type), - message_id: body.txn_id.clone(), + ev_type: EventType::from(&*body.event_type), + message_id: body.txn_id.to_string(), messages, }, )) diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index adda6787..15ea9f73 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -44,7 +44,7 @@ fn db_options( db_opts.set_max_open_files(max_open_files); db_opts.set_compression_type(rocksdb::DBCompressionType::Zstd); 
db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level); - db_opts.optimize_level_style_compaction(cache_capacity_bytes); + db_opts.optimize_level_style_compaction(10 * 1024 * 1024); let prefix_extractor = rocksdb::SliceTransform::create_fixed_prefix(1); db_opts.set_prefix_extractor(prefix_extractor); diff --git a/src/database/sending.rs b/src/database/sending.rs index 1e180d43..65284a4f 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -524,7 +524,7 @@ impl Sending { .unwrap(), // TODO: handle error appservice::event::push_events::v1::Request { events: &pdu_jsons, - txn_id: &base64::encode_config( + txn_id: (&*base64::encode_config( Self::calculate_hash( &events .iter() @@ -534,7 +534,7 @@ impl Sending { .collect::>(), ), base64::URL_SAFE_NO_PAD, - ), + )).into(), }, ) .await @@ -682,7 +682,7 @@ impl Sending { pdus: &pdu_jsons, edus: &edu_jsons, origin_server_ts: MilliSecondsSinceUnixEpoch::now(), - transaction_id: &base64::encode_config( + transaction_id: (&*base64::encode_config( Self::calculate_hash( &events .iter() @@ -692,7 +692,7 @@ impl Sending { .collect::>(), ), base64::URL_SAFE_NO_PAD, - ), + )).into(), }, ) .await diff --git a/src/database/transaction_ids.rs b/src/database/transaction_ids.rs index f3467572..d576083a 100644 --- a/src/database/transaction_ids.rs +++ b/src/database/transaction_ids.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use crate::Result; -use ruma::{DeviceId, UserId}; +use ruma::{DeviceId, UserId, identifiers::TransactionId}; use super::abstraction::Tree; @@ -14,7 +14,7 @@ impl TransactionIds { &self, user_id: &UserId, device_id: Option<&DeviceId>, - txn_id: &str, + txn_id: &TransactionId, data: &[u8], ) -> Result<()> { let mut key = user_id.as_bytes().to_vec(); @@ -32,7 +32,7 @@ impl TransactionIds { &self, user_id: &UserId, device_id: Option<&DeviceId>, - txn_id: &str, + txn_id: &TransactionId, ) -> Result>> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); diff --git a/src/server_server.rs b/src/server_server.rs index c76afd34..5cd43d81 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -42,6 +42,7 @@ use ruma::{ events::{ receipt::{ReceiptEvent, ReceiptEventContent}, room::{ + server_acl::RoomServerAclEventContent, create::RoomCreateEventContent, member::{MembershipState, RoomMemberEventContent}, }, @@ -49,7 +50,7 @@ use ruma::{ }, int, receipt::ReceiptType, - serde::JsonObject, + serde::{Base64, JsonObject}, signatures::{CanonicalJsonObject, CanonicalJsonValue}, state_res::{self, RoomVersion, StateMap}, to_device::DeviceIdOrAllDevices, @@ -551,7 +552,7 @@ pub fn get_server_keys_route(db: DatabaseGuard) -> Json { .try_into() .expect("found invalid server signing keys in DB"), VerifyKey { - key: base64::encode_config(db.globals.keypair().public_key(), base64::STANDARD_NO_PAD), + key: Base64::new(db.globals.keypair().public_key().to_vec()), }, ); let mut response = serde_json::from_slice( @@ -740,6 +741,8 @@ pub async fn send_transaction_message_route( } }; + acl_check(&body.origin, &room_id, &db)?; + let mutex = Arc::clone( db.globals .roomid_mutex_federation @@ -854,7 +857,7 @@ pub async fn send_transaction_message_route( // Check if this is a new transaction id if db .transaction_ids - .existing_txnid(&sender, None, &message_id)? + .existing_txnid(&sender, None, (&*message_id).into())? 
.is_some() { continue; @@ -902,7 +905,7 @@ pub async fn send_transaction_message_route( // Save transaction id with empty data db.transaction_ids - .add_txnid(&sender, None, &message_id, &[])?; + .add_txnid(&sender, None, (&*message_id).into(), &[])?; } Edu::_Custom(_) => {} } @@ -948,7 +951,7 @@ pub(crate) async fn handle_incoming_pdu<'a>( value: BTreeMap, is_timeline_event: bool, db: &'a Database, - pub_key_map: &'a RwLock>>, + pub_key_map: &'a RwLock>>, ) -> Result>, String> { match db.rooms.exists(room_id) { Ok(true) => {} @@ -1123,7 +1126,7 @@ fn handle_outlier_pdu<'a>( room_id: &'a RoomId, value: BTreeMap, db: &'a Database, - pub_key_map: &'a RwLock>>, + pub_key_map: &'a RwLock>>, ) -> AsyncRecursiveType<'a, Result<(Arc, BTreeMap), String>> { Box::pin(async move { // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json @@ -1285,7 +1288,7 @@ async fn upgrade_outlier_to_timeline_pdu( origin: &ServerName, db: &Database, room_id: &RoomId, - pub_key_map: &RwLock>>, + pub_key_map: &RwLock>>, ) -> Result>, String> { if let Ok(Some(pduid)) = db.rooms.get_pdu_id(&incoming_pdu.event_id) { return Ok(Some(pduid)); @@ -1827,7 +1830,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( events: &'a [Arc], create_event: &'a PduEvent, room_id: &'a RoomId, - pub_key_map: &'a RwLock>>, + pub_key_map: &'a RwLock>>, ) -> AsyncRecursiveType<'a, Vec<(Arc, Option>)>> { Box::pin(async move { let back_off = |id| match db.globals.bad_event_ratelimiter.write().unwrap().entry(id) { @@ -1966,9 +1969,9 @@ pub(crate) async fn fetch_signing_keys( db: &Database, origin: &ServerName, signature_ids: Vec, -) -> Result> { +) -> Result> { let contains_all_ids = - |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); + |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); let permit = db .globals @@ -2355,8 +2358,11 @@ pub fn get_event_route( let room_id = <&RoomId>::try_from(room_id_str) .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - if !db.rooms.server_in_room(sender_servername, room_id)? { - return Err(Error::BadRequest(ErrorKind::NotFound, "Event not found.")); + if !db.rooms.server_in_room(sender_servername, &room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Server is not in room", + )); } Ok(get_event::v1::Response { @@ -2395,6 +2401,8 @@ pub fn get_missing_events_route( )); } + acl_check(sender_servername, &body.room_id, &db)?; + let mut queued_events = body.latest_events.clone(); let mut events = Vec::new(); @@ -2464,6 +2472,15 @@ pub fn get_event_authorization_route( .as_ref() .expect("server is authenticated"); + if !db.rooms.server_in_room(sender_servername, &body.room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Server is not in room.", + )); + } + + acl_check(sender_servername, &body.room_id, &db)?; + let event = db .rooms .get_pdu_json(&body.event_id)? @@ -2477,10 +2494,6 @@ pub fn get_event_authorization_route( let room_id = <&RoomId>::try_from(room_id_str) .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - if !db.rooms.server_in_room(sender_servername, room_id)? 
{ - return Err(Error::BadRequest(ErrorKind::NotFound, "Event not found.")); - } - let auth_chain_ids = get_auth_chain(room_id, vec![Arc::from(&*body.event_id)], &db)?; Ok(get_event_authorization::v1::Response { @@ -2520,6 +2533,8 @@ pub fn get_room_state_route( )); } + acl_check(sender_servername, &body.room_id, &db)?; + let shortstatehash = db .rooms .pdu_shortstatehash(&body.event_id)? @@ -2583,6 +2598,8 @@ pub fn get_room_state_ids_route( )); } + acl_check(sender_servername, &body.room_id, &db)?; + let shortstatehash = db .rooms .pdu_shortstatehash(&body.event_id)? @@ -2626,10 +2643,17 @@ pub fn create_join_event_template_route( if !db.rooms.exists(&body.room_id)? { return Err(Error::BadRequest( ErrorKind::NotFound, - "Server is not in room.", + "Room is unknown to this server.", )); } + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + acl_check(sender_servername, &body.room_id, &db)?; + let prev_events: Vec<_> = db .rooms .get_pdu_leaves(&body.room_id)? @@ -2782,6 +2806,7 @@ pub fn create_join_event_template_route( async fn create_join_event( db: &DatabaseGuard, + sender_servername: &ServerName, room_id: &RoomId, pdu: &RawJsonValue, ) -> Result { @@ -2789,6 +2814,15 @@ async fn create_join_event( return Err(Error::bad_config("Federation is disabled.")); } + if !db.rooms.exists(room_id)? { + return Err(Error::BadRequest( + ErrorKind::NotFound, + "Room is unknown to this server.", + )); + } + + acl_check(sender_servername, room_id, &db)?; + // We need to return the state prior to joining, let's keep a reference to that here let shortstatehash = db .rooms @@ -2888,7 +2922,12 @@ pub async fn create_join_event_v1_route( db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { - let room_state = create_join_event(&db, &body.room_id, &body.pdu).await?; + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + let room_state = create_join_event(&db, sender_servername, &body.room_id, &body.pdu).await?; Ok(create_join_event::v1::Response { room_state }.into()) } @@ -2905,7 +2944,12 @@ pub async fn create_join_event_v2_route( db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { - let room_state = create_join_event(&db, &body.room_id, &body.pdu).await?; + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + let room_state = create_join_event(&db, sender_servername, &body.room_id, &body.pdu).await?; Ok(create_join_event::v2::Response { room_state }.into()) } @@ -2926,6 +2970,13 @@ pub async fn create_invite_route( return Err(Error::bad_config("Federation is disabled.")); } + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + acl_check(sender_servername, &body.room_id, &db)?; + if body.room_version != RoomVersionId::V5 && body.room_version != RoomVersionId::V6 { return Err(Error::BadRequest( ErrorKind::IncompatibleRoomVersion { @@ -3199,7 +3250,7 @@ pub async fn claim_keys_route( #[tracing::instrument(skip(event, pub_key_map, db))] pub(crate) async fn fetch_required_signing_keys( event: &BTreeMap, - pub_key_map: &RwLock>>, + pub_key_map: &RwLock>>, db: &Database, ) -> Result<()> { let signatures = event @@ -3253,7 +3304,7 @@ fn get_server_keys_from_cache( pdu: &RawJsonValue, servers: &mut BTreeMap, BTreeMap, QueryCriteria>>, room_version: &RoomVersionId, - pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap>>, + pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap>>, db: &Database, ) -> Result<()> { let 
value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { @@ -3306,7 +3357,7 @@ fn get_server_keys_from_cache( let signature_ids = signature_object.keys().cloned().collect::>(); let contains_all_ids = - |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); + |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); let origin = <&ServerName>::try_from(signature_server.as_str()).map_err(|_| { Error::BadServerResponse("Invalid servername in signatures of server response pdu.") @@ -3339,7 +3390,7 @@ fn get_server_keys_from_cache( pub(crate) async fn fetch_join_signing_keys( event: &create_join_event::v2::Response, room_version: &RoomVersionId, - pub_key_map: &RwLock>>, + pub_key_map: &RwLock>>, db: &Database, ) -> Result<()> { let mut servers: BTreeMap, BTreeMap, QueryCriteria>> = @@ -3439,6 +3490,35 @@ pub(crate) async fn fetch_join_signing_keys( Ok(()) } +/// Returns Ok if the acl allows the server +fn acl_check( + server_name: &ServerName, + room_id: &RoomId, + db: &Database, +) -> Result<()> { + let acl_event = match db + .rooms + .room_state_get(room_id, &EventType::RoomServerAcl, "")? { + Some(acl) => acl, + None => return Ok(()), + }; + + let acl_event_content: RoomServerAclEventContent = match + serde_json::from_str(acl_event.content.get()) { + Ok(content) => content, + Err(_) => { + warn!("Invalid ACL event"); + return Ok(()); + } + }; + + if acl_event_content.is_allowed(server_name) { + Ok(()) + } else { + Err(Error::BadRequest(ErrorKind::Forbidden, "Server was denied by ACL")) + } +} + #[cfg(test)] mod tests { use super::{add_port_to_hostname, get_ip_with_port, FedDest}; From 8c90e7adfb0d06164d17921e6e686cdaab0d8f1c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 17 Jan 2022 14:39:37 +0100 Subject: [PATCH 095/445] refactor: fix warnings --- src/database/abstraction/rocksdb.rs | 27 ++++++--------------------- src/database/sending.rs | 6 ++++-- src/database/transaction_ids.rs | 2 +- src/server_server.rs | 26 +++++++++++++------------- 4 files changed, 24 insertions(+), 37 deletions(-) diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index 15ea9f73..d6157135 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -4,7 +4,6 @@ use std::{future::Future, pin::Pin, sync::Arc, sync::RwLock}; pub struct Engine { rocks: rocksdb::DBWithThreadMode, - cache_capacity_bytes: usize, max_open_files: i32, cache: rocksdb::Cache, old_cfs: Vec, @@ -17,11 +16,7 @@ pub struct RocksDbEngineTree<'a> { write_lock: RwLock<()>, } -fn db_options( - cache_capacity_bytes: usize, - max_open_files: i32, - rocksdb_cache: &rocksdb::Cache, -) -> rocksdb::Options { +fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> rocksdb::Options { let mut block_based_options = rocksdb::BlockBasedOptions::default(); block_based_options.set_block_cache(rocksdb_cache); @@ -57,11 +52,7 @@ impl DatabaseEngine for Arc { let cache_capacity_bytes = (config.db_cache_capacity_mb * 1024.0 * 1024.0) as usize; let rocksdb_cache = rocksdb::Cache::new_lru_cache(cache_capacity_bytes).unwrap(); - let db_opts = db_options( - cache_capacity_bytes, - config.rocksdb_max_open_files, - &rocksdb_cache, - ); + let db_opts = db_options(config.rocksdb_max_open_files, &rocksdb_cache); let cfs = rocksdb::DBWithThreadMode::::list_cf( &db_opts, @@ -75,18 +66,13 @@ impl DatabaseEngine for Arc { cfs.iter().map(|name| { rocksdb::ColumnFamilyDescriptor::new( name, - db_options( - 
cache_capacity_bytes, - config.rocksdb_max_open_files, - &rocksdb_cache, - ), + db_options(config.rocksdb_max_open_files, &rocksdb_cache), ) }), )?; Ok(Arc::new(Engine { rocks: db, - cache_capacity_bytes, max_open_files: config.rocksdb_max_open_files, cache: rocksdb_cache, old_cfs: cfs, @@ -96,10 +82,9 @@ impl DatabaseEngine for Arc { fn open_tree(&self, name: &'static str) -> Result> { if !self.old_cfs.contains(&name.to_owned()) { // Create if it didn't exist - let _ = self.rocks.create_cf( - name, - &db_options(self.cache_capacity_bytes, self.max_open_files, &self.cache), - ); + let _ = self + .rocks + .create_cf(name, &db_options(self.max_open_files, &self.cache)); } Ok(Arc::new(RocksDbEngineTree { diff --git a/src/database/sending.rs b/src/database/sending.rs index 65284a4f..69f7c444 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -534,7 +534,8 @@ impl Sending { .collect::>(), ), base64::URL_SAFE_NO_PAD, - )).into(), + )) + .into(), }, ) .await @@ -692,7 +693,8 @@ impl Sending { .collect::>(), ), base64::URL_SAFE_NO_PAD, - )).into(), + )) + .into(), }, ) .await diff --git a/src/database/transaction_ids.rs b/src/database/transaction_ids.rs index d576083a..12b838ba 100644 --- a/src/database/transaction_ids.rs +++ b/src/database/transaction_ids.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use crate::Result; -use ruma::{DeviceId, UserId, identifiers::TransactionId}; +use ruma::{identifiers::TransactionId, DeviceId, UserId}; use super::abstraction::Tree; diff --git a/src/server_server.rs b/src/server_server.rs index 5cd43d81..54ae0251 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -42,9 +42,9 @@ use ruma::{ events::{ receipt::{ReceiptEvent, ReceiptEventContent}, room::{ - server_acl::RoomServerAclEventContent, create::RoomCreateEventContent, member::{MembershipState, RoomMemberEventContent}, + server_acl::RoomServerAclEventContent, }, AnyEphemeralRoomEvent, EventType, }, @@ -3491,20 +3491,17 @@ pub(crate) async fn fetch_join_signing_keys( } /// Returns Ok if the acl allows the server -fn acl_check( - server_name: &ServerName, - room_id: &RoomId, - db: &Database, -) -> Result<()> { +fn acl_check(server_name: &ServerName, room_id: &RoomId, db: &Database) -> Result<()> { let acl_event = match db .rooms - .room_state_get(room_id, &EventType::RoomServerAcl, "")? { - Some(acl) => acl, - None => return Ok(()), - }; + .room_state_get(room_id, &EventType::RoomServerAcl, "")? 
+ { + Some(acl) => acl, + None => return Ok(()), + }; - let acl_event_content: RoomServerAclEventContent = match - serde_json::from_str(acl_event.content.get()) { + let acl_event_content: RoomServerAclEventContent = + match serde_json::from_str(acl_event.content.get()) { Ok(content) => content, Err(_) => { warn!("Invalid ACL event"); @@ -3515,7 +3512,10 @@ fn acl_check( if acl_event_content.is_allowed(server_name) { Ok(()) } else { - Err(Error::BadRequest(ErrorKind::Forbidden, "Server was denied by ACL")) + Err(Error::BadRequest( + ErrorKind::Forbidden, + "Server was denied by ACL", + )) } } From 03b174335cfc472c3ecaba7068ead74f0e2268be Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 17 Jan 2022 14:46:53 +0100 Subject: [PATCH 096/445] improvement: lower default pdu cache capacity --- src/database.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/database.rs b/src/database.rs index fd7a1451..1997dc0a 100644 --- a/src/database.rs +++ b/src/database.rs @@ -134,7 +134,7 @@ fn default_rocksdb_max_open_files() -> i32 { } fn default_pdu_cache_capacity() -> u32 { - 1_000_000 + 150_000 } fn default_cleanup_second_interval() -> u32 { From fc39b3447c5add8b8c8d2b188751969d752d1ee1 Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Mon, 17 Jan 2022 19:43:45 +0100 Subject: [PATCH 097/445] Little bit of refactoring --- src/database/users.rs | 27 +++++++++------------------ 1 file changed, 9 insertions(+), 18 deletions(-) diff --git a/src/database/users.rs b/src/database/users.rs index 83c1520e..e608673a 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -10,7 +10,6 @@ use ruma::{ }; use std::{collections::BTreeMap, convert::TryInto, mem, sync::Arc}; use tracing::warn; -use tracing::info; use super::abstraction::Tree; @@ -137,31 +136,23 @@ impl Users { Ok(users) } - /// A private helper to avoid double filtering the iterator in get_local_users(). + /// Will only return with Some(username) if the password was not empty and the + /// username could be successfully parsed. /// If utils::string_from_bytes(...) returns an error that username will be skipped - /// and the error will be logged. TODO: add error cause. + /// and the error will be logged. #[tracing::instrument(skip(self))] fn get_username_with_valid_password(&self, username: &[u8], password: &[u8]) -> Option { // A valid password is not empty - if password.len() > 0 { + if password.is_empty() { + None + } else { match utils::string_from_bytes(username) { - Ok(u) => { - info!("list_local_users_test: found user {}", u); - Some(u) - }, - Err(_) => { - // let msg: String = format!( - // "Failed to parse username while calling get_local_users(): {}", - // e.to_string() - // ); - Error::bad_database( - "Failed to parse username while calling get_username_on_valid_password", - ); + Ok(u) => Some(u), + Err(e) => { + warn!("Failed to parse username while calling get_local_users(): {}", e.to_string()); None } } - } else { - None } } From fd6427a83fef08b7f8f6f74d3a4c88af3171aa77 Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Mon, 17 Jan 2022 22:34:34 +0100 Subject: [PATCH 098/445] Update/Revert code comment --- src/database/users.rs | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/database/users.rs b/src/database/users.rs index e608673a..9b986d4e 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -77,8 +77,6 @@ impl Users { } /// Returns the number of users registered on this server. 
- /// It really returns all users, not only real ones with a - /// password to login but also bridge puppets... #[tracing::instrument(skip(self))] pub fn count(&self) -> Result { Ok(self.userid_password.iter().count()) From 53de3509087f46b6a45ca20d27e8fa2884269535 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 17 Jan 2022 23:24:27 +0100 Subject: [PATCH 099/445] fix: less load when lazy loading --- src/client_server/sync.rs | 53 ++++++++++++++++++++------------------- 1 file changed, 27 insertions(+), 26 deletions(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index bd2f48a3..14aac3a1 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -453,38 +453,39 @@ async fn sync_helper( let joined_since_last_sync = since_sender_member .map_or(true, |member| member.membership != MembershipState::Join); - let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?; - - let since_state_ids = db.rooms.state_full_ids(since_shortstatehash)?; - let mut state_events = Vec::new(); let mut lazy_loaded = HashSet::new(); - for (key, id) in current_state_ids { - if body.full_state || since_state_ids.get(&key) != Some(&id) { - let pdu = match db.rooms.get_pdu(&id)? { - Some(pdu) => pdu, - None => { - error!("Pdu in state not found: {}", id); - continue; - } - }; - - if pdu.kind == EventType::RoomMember { - match UserId::parse( - pdu.state_key - .as_ref() - .expect("State event has state key") - .clone(), - ) { - Ok(state_key_userid) => { - lazy_loaded.insert(state_key_userid); + if since_shortstatehash != current_shortstatehash { + let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?; + let since_state_ids = db.rooms.state_full_ids(since_shortstatehash)?; + + for (key, id) in current_state_ids { + if body.full_state || since_state_ids.get(&key) != Some(&id) { + let pdu = match db.rooms.get_pdu(&id)? 
{ + Some(pdu) => pdu, + None => { + error!("Pdu in state not found: {}", id); + continue; + } + }; + + if pdu.kind == EventType::RoomMember { + match UserId::parse( + pdu.state_key + .as_ref() + .expect("State event has state key") + .clone(), + ) { + Ok(state_key_userid) => { + lazy_loaded.insert(state_key_userid); + } + Err(e) => error!("Invalid state key for member event: {}", e), } - Err(e) => error!("Invalid state key for member event: {}", e), } - } - state_events.push(pdu); + state_events.push(pdu); + } } } From 13a48c45776de19912ecd040a6434c75152802f7 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Tue, 18 Jan 2022 21:04:44 +0100 Subject: [PATCH 100/445] Clean up mod and use statements in lib.rs and main.rs --- src/lib.rs | 10 ++++++---- src/main.rs | 22 ++++------------------ 2 files changed, 10 insertions(+), 22 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 82b8f340..745eb394 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -7,21 +7,23 @@ #![allow(clippy::suspicious_else_formatting)] #![deny(clippy::dbg_macro)] -pub mod appservice_server; -pub mod client_server; +use std::ops::Deref; + mod database; mod error; mod pdu; mod ruma_wrapper; -pub mod server_server; mod utils; +pub mod appservice_server; +pub mod client_server; +pub mod server_server; + pub use database::{Config, Database}; pub use error::{Error, Result}; pub use pdu::PduEvent; pub use rocket::Config as RocketConfig; pub use ruma_wrapper::{ConduitResult, Ruma, RumaResponse}; -use std::ops::Deref; pub struct State<'r, T: Send + Sync + 'static>(pub &'r T); diff --git a/src/main.rs b/src/main.rs index 56faa3e7..d9bbc240 100644 --- a/src/main.rs +++ b/src/main.rs @@ -7,27 +7,9 @@ #![allow(clippy::suspicious_else_formatting)] #![deny(clippy::dbg_macro)] -pub mod appservice_server; -pub mod client_server; -pub mod server_server; - -mod database; -mod error; -mod pdu; -mod ruma_wrapper; -mod utils; - use std::sync::Arc; -use database::Config; -pub use database::Database; -pub use error::{Error, Result}; use opentelemetry::trace::{FutureExt, Tracer}; -pub use pdu::PduEvent; -pub use rocket::State; -use ruma::api::client::error::ErrorKind; -pub use ruma_wrapper::{ConduitResult, Ruma, RumaResponse}; - use rocket::{ catch, catchers, figment::{ @@ -36,9 +18,13 @@ use rocket::{ }, routes, Request, }; +use ruma::api::client::error::ErrorKind; use tokio::sync::RwLock; use tracing_subscriber::{prelude::*, EnvFilter}; +pub use conduit::*; // Re-export everything from the library crate +pub use rocket::State; + fn setup_rocket(config: Figment, data: Arc>) -> rocket::Rocket { rocket::custom(config) .manage(data) From c6277c72a1f75d889b47708769adf376cac9d1ea Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Tue, 18 Jan 2022 21:05:40 +0100 Subject: [PATCH 101/445] Fix warnings in database::abstraction --- src/database/abstraction.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs index 17bd971f..321b064f 100644 --- a/src/database/abstraction.rs +++ b/src/database/abstraction.rs @@ -23,12 +23,12 @@ pub trait DatabaseEngine: Send + Sync { where Self: Sized; fn open_tree(&self, name: &'static str) -> Result>; - fn flush(self: &Self) -> Result<()>; - fn cleanup(self: &Self) -> Result<()> { + fn flush(&self) -> Result<()>; + fn cleanup(&self) -> Result<()> { Ok(()) } - fn memory_usage(self: &Self) -> Result { - Ok("Current database engine does not support memory usage reporting.".to_string()) + fn memory_usage(&self) -> Result { + Ok("Current database 
engine does not support memory usage reporting.".to_owned()) } } From d4eb3e3295ee1b0947b66d1d45ef10bb4d152839 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 19 Jan 2022 07:09:25 +0100 Subject: [PATCH 102/445] fix: rocksdb does not use zstd compression unless we disable everything else --- Cargo.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 29a090c7..32233305 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -78,7 +78,8 @@ crossbeam = { version = "0.8.1", optional = true } num_cpus = "1.13.0" threadpool = "1.8.1" heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true } -rocksdb = { version = "0.17.0", features = ["multi-threaded-cf"], optional = true } +rocksdb = { version = "0.17.0", default-features = false, features = ["multi-threaded-cf", "zstd"], optional = true } + thread_local = "1.1.3" # used for TURN server authentication hmac = "0.11.0" From a0fc5eba72a7b364cfe91d5b188b136fa555b7e1 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Wed, 19 Jan 2022 23:56:55 +0100 Subject: [PATCH 103/445] Remove unnecessary Result --- src/database/uiaa.rs | 7 +++---- src/ruma_wrapper.rs | 13 +++++-------- 2 files changed, 8 insertions(+), 12 deletions(-) diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index 5e11467e..b0c8d6dd 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -166,13 +166,12 @@ impl Uiaa { user_id: &UserId, device_id: &DeviceId, session: &str, - ) -> Result> { - Ok(self - .userdevicesessionid_uiaarequest + ) -> Option { + self.userdevicesessionid_uiaarequest .read() .unwrap() .get(&(user_id.to_owned(), device_id.to_owned(), session.to_owned())) - .map(|j| j.to_owned())) + .map(|j| j.to_owned()) } fn update_uiaa_session( diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 4b8d5dea..1bd921d9 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -296,14 +296,11 @@ where .and_then(|auth| auth.get("session")) .and_then(|session| session.as_str()) .and_then(|session| { - db.uiaa - .get_uiaa_request( - &user_id, - &sender_device.clone().unwrap_or_else(|| "".into()), - session, - ) - .ok() - .flatten() + db.uiaa.get_uiaa_request( + &user_id, + &sender_device.clone().unwrap_or_else(|| "".into()), + session, + ) }) { for (key, value) in initial_request { From 756a41f22d24c89682eea826e138f8c3896433fb Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 20 Jan 2022 00:10:39 +0100 Subject: [PATCH 104/445] Fix rustc / clippy warnings --- src/client_server/context.rs | 15 +++++++-------- src/client_server/keys.rs | 2 +- src/client_server/message.rs | 14 +++++++------- src/client_server/profile.rs | 4 ++-- src/database.rs | 30 ++++++++++++------------------ src/database/admin.rs | 2 +- src/database/rooms.rs | 27 +++++++++++++-------------- src/server_server.rs | 6 +++--- 8 files changed, 46 insertions(+), 54 deletions(-) diff --git a/src/client_server/context.rs b/src/client_server/context.rs index 94a44e39..e1177661 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -3,8 +3,7 @@ use ruma::{ api::client::{error::ErrorKind, r0::context::get_context}, events::EventType, }; -use std::collections::HashSet; -use std::convert::TryFrom; +use std::{collections::HashSet, convert::TryFrom}; #[cfg(feature = "conduit_bin")] use rocket::get; @@ -55,8 +54,8 @@ pub async fn get_context_route( ))?; if !db.rooms.lazy_load_was_sent_before( - &sender_user, - &sender_device, + sender_user, + 
sender_device, &body.room_id, &base_event.sender, )? { @@ -79,8 +78,8 @@ pub async fn get_context_route( for (_, event) in &events_before { if !db.rooms.lazy_load_was_sent_before( - &sender_user, - &sender_device, + sender_user, + sender_device, &body.room_id, &event.sender, )? { @@ -112,8 +111,8 @@ pub async fn get_context_route( for (_, event) in &events_after { if !db.rooms.lazy_load_was_sent_before( - &sender_user, - &sender_device, + sender_user, + sender_device, &body.room_id, &event.sender, )? { diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index be0675d8..e7aec26b 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -272,7 +272,7 @@ pub async fn get_key_changes_route( device_list_updates.extend( db.users .keys_changed( - &sender_user.to_string(), + sender_user.as_str(), body.from .parse() .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`."))?, diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 36653fab..7d904f90 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -139,7 +139,7 @@ pub async fn get_message_events_route( let to = body.to.as_ref().map(|t| t.parse()); db.rooms - .lazy_load_confirm_delivery(&sender_user, &sender_device, &body.room_id, from)?; + .lazy_load_confirm_delivery(sender_user, sender_device, &body.room_id, from)?; // Use limit or else 10 let limit = body.limit.try_into().map_or(10_usize, |l: u32| l as usize); @@ -168,8 +168,8 @@ pub async fn get_message_events_route( for (_, event) in &events_after { if !db.rooms.lazy_load_was_sent_before( - &sender_user, - &sender_device, + sender_user, + sender_device, &body.room_id, &event.sender, )? { @@ -205,8 +205,8 @@ pub async fn get_message_events_route( for (_, event) in &events_before { if !db.rooms.lazy_load_was_sent_before( - &sender_user, - &sender_device, + sender_user, + sender_device, &body.room_id, &event.sender, )? { @@ -239,8 +239,8 @@ pub async fn get_message_events_route( if let Some(next_token) = next_token { db.rooms.lazy_load_mark_sent( - &sender_user, - &sender_device, + sender_user, + sender_device, &body.room_id, lazy_loaded, next_token, diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 29b1ae87..71e61da3 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -52,7 +52,7 @@ pub async fn set_displayname_route( .room_state_get( &room_id, &EventType::RoomMember, - &sender_user.to_string(), + sender_user.as_str(), )? .ok_or_else(|| { Error::bad_database( @@ -195,7 +195,7 @@ pub async fn set_avatar_url_route( .room_state_get( &room_id, &EventType::RoomMember, - &sender_user.to_string(), + sender_user.as_str(), )? 
.ok_or_else(|| { Error::bad_database( diff --git a/src/database.rs b/src/database.rs index 1997dc0a..7a4ddc66 100644 --- a/src/database.rs +++ b/src/database.rs @@ -212,28 +212,22 @@ impl Database { return Ok(()); } - if sled_exists { - if config.database_backend != "sled" { - return Err(Error::bad_config( - "Found sled at database_path, but is not specified in config.", - )); - } + if sled_exists && config.database_backend != "sled" { + return Err(Error::bad_config( + "Found sled at database_path, but is not specified in config.", + )); } - if sqlite_exists { - if config.database_backend != "sqlite" { - return Err(Error::bad_config( - "Found sqlite at database_path, but is not specified in config.", - )); - } + if sqlite_exists && config.database_backend != "sqlite" { + return Err(Error::bad_config( + "Found sqlite at database_path, but is not specified in config.", + )); } - if rocksdb_exists { - if config.database_backend != "rocksdb" { - return Err(Error::bad_config( - "Found rocksdb at database_path, but is not specified in config.", - )); - } + if rocksdb_exists && config.database_backend != "rocksdb" { + return Err(Error::bad_config( + "Found rocksdb at database_path, but is not specified in config.", + )); } Ok(()) diff --git a/src/database/admin.rs b/src/database/admin.rs index 7d2301d9..bf38bd8c 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -118,7 +118,7 @@ impl Admin { if let Ok(response) = guard._db.memory_usage() { send_message(RoomMessageEventContent::text_plain(response), guard, &state_lock); } else { - send_message(RoomMessageEventContent::text_plain("Failed to get database memory usage.".to_string()), guard, &state_lock); + send_message(RoomMessageEventContent::text_plain("Failed to get database memory usage.".to_owned()), guard, &state_lock); } } AdminCommand::SendMessage(message) => { diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 0ba6c9ba..c9a3c202 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -2727,7 +2727,7 @@ impl Rooms { let state_lock = mutex_state.lock().await; let mut event: RoomMemberEventContent = serde_json::from_str( - self.room_state_get(room_id, &EventType::RoomMember, &user_id.to_string())? + self.room_state_get(room_id, &EventType::RoomMember, user_id.as_str())? 
.ok_or(Error::BadRequest( ErrorKind::BadState, "Cannot leave a room you are not a member of.", @@ -3462,8 +3462,7 @@ impl Rooms { &key[0].to_be_bytes(), &chain .iter() - .map(|s| s.to_be_bytes().to_vec()) - .flatten() + .flat_map(|s| s.to_be_bytes().to_vec()) .collect::>(), )?; } @@ -3484,11 +3483,11 @@ impl Rooms { ) -> Result { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); - key.extend_from_slice(&device_id.as_bytes()); + key.extend_from_slice(device_id.as_bytes()); key.push(0xff); - key.extend_from_slice(&room_id.as_bytes()); + key.extend_from_slice(room_id.as_bytes()); key.push(0xff); - key.extend_from_slice(&ll_user.as_bytes()); + key.extend_from_slice(ll_user.as_bytes()); Ok(self.lazyloadedids.get(&key)?.is_some()) } @@ -3528,14 +3527,14 @@ impl Rooms { )) { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); - prefix.extend_from_slice(&device_id.as_bytes()); + prefix.extend_from_slice(device_id.as_bytes()); prefix.push(0xff); - prefix.extend_from_slice(&room_id.as_bytes()); + prefix.extend_from_slice(room_id.as_bytes()); prefix.push(0xff); for ll_id in user_ids { let mut key = prefix.clone(); - key.extend_from_slice(&ll_id.as_bytes()); + key.extend_from_slice(ll_id.as_bytes()); self.lazyloadedids.insert(&key, &[])?; } } @@ -3546,15 +3545,15 @@ impl Rooms { #[tracing::instrument(skip(self))] pub fn lazy_load_reset( &self, - user_id: &Box, - device_id: &Box, - room_id: &Box, + user_id: &UserId, + device_id: &DeviceId, + room_id: &RoomId, ) -> Result<()> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); - prefix.extend_from_slice(&device_id.as_bytes()); + prefix.extend_from_slice(device_id.as_bytes()); prefix.push(0xff); - prefix.extend_from_slice(&room_id.as_bytes()); + prefix.extend_from_slice(room_id.as_bytes()); prefix.push(0xff); for (key, _) in self.lazyloadedids.scan_prefix(prefix) { diff --git a/src/server_server.rs b/src/server_server.rs index 54ae0251..9129951b 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1938,7 +1938,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( match handle_outlier_pdu( origin, create_event, - &next_id, + next_id, room_id, value.clone(), db, @@ -2358,7 +2358,7 @@ pub fn get_event_route( let room_id = <&RoomId>::try_from(room_id_str) .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - if !db.rooms.server_in_room(sender_servername, &room_id)? { + if !db.rooms.server_in_room(sender_servername, room_id)? 
{ return Err(Error::BadRequest( ErrorKind::Forbidden, "Server is not in room", @@ -2821,7 +2821,7 @@ async fn create_join_event( )); } - acl_check(sender_servername, room_id, &db)?; + acl_check(sender_servername, room_id, db)?; // We need to return the state prior to joining, let's keep a reference to that here let shortstatehash = db From 6e322716caf6f9181bf21444b552bf05d5f5a774 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 20 Jan 2022 12:29:10 +0100 Subject: [PATCH 105/445] Delete rust-toolchain file --- rust-toolchain | 1 - 1 file changed, 1 deletion(-) delete mode 100644 rust-toolchain diff --git a/rust-toolchain b/rust-toolchain deleted file mode 100644 index 74df8b16..00000000 --- a/rust-toolchain +++ /dev/null @@ -1 +0,0 @@ -1.53 From 5afb27a5a9ae887dea042e3ca9f0ecef98feff47 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 20 Jan 2022 12:29:24 +0100 Subject: [PATCH 106/445] Use latest stable for Docker image --- Dockerfile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Dockerfile b/Dockerfile index 5812fdf9..b629690d 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # syntax=docker/dockerfile:1 -FROM docker.io/rust:1.53-alpine AS builder +FROM docker.io/rust:1.58-alpine AS builder WORKDIR /usr/src/conduit # Install required packages to build Conduit and it's dependencies @@ -38,7 +38,7 @@ FROM docker.io/alpine:3.15.0 AS runner # You still need to map the port when using the docker command or docker-compose. EXPOSE 6167 -# Note from @jfowl: I would like to remove this in the future and just have the Docker version be configured with envs. +# Note from @jfowl: I would like to remove this in the future and just have the Docker version be configured with envs. ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" # Conduit needs: @@ -78,4 +78,4 @@ WORKDIR /srv/conduit # Run Conduit and print backtraces on panics ENV RUST_BACKTRACE=1 -ENTRYPOINT [ "/srv/conduit/conduit" ] \ No newline at end of file +ENTRYPOINT [ "/srv/conduit/conduit" ] From ff5fec9e74b4ed12c4dae579344a94f1c1f22f29 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 20 Jan 2022 12:29:52 +0100 Subject: [PATCH 107/445] Raise minimum supported Rust version to 1.56 --- Cargo.toml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 29a090c7..b6a2a2b7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,7 +7,8 @@ homepage = "https://conduit.rs" repository = "https://gitlab.com/famedly/conduit" readme = "README.md" version = "0.2.0" -edition = "2018" +rust-version = "1.56" +edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html From 6bb1081b7127a38cdc85614e4250f52b557753c8 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 20 Jan 2022 13:13:14 +0100 Subject: [PATCH 108/445] Use BTreeMap::into_values Stable under new MSRV. --- src/database/users.rs | 20 +++++++++++--------- 1 file changed, 11 insertions(+), 9 deletions(-) diff --git a/src/database/users.rs b/src/database/users.rs index c4fcee3d..69a277c6 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -531,11 +531,11 @@ impl Users { prefix.push(0xff); // Master key - let master_key_map = master_key + let mut master_key_ids = master_key .deserialize() .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid master key"))? 
- .keys; - let mut master_key_ids = master_key_map.values(); + .keys + .into_values(); let master_key_id = master_key_ids.next().ok_or(Error::BadRequest( ErrorKind::InvalidParam, @@ -560,13 +560,14 @@ impl Users { // Self-signing key if let Some(self_signing_key) = self_signing_key { - let self_signing_key_map = self_signing_key + let mut self_signing_key_ids = self_signing_key .deserialize() .map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "Invalid self signing key") })? - .keys; - let mut self_signing_key_ids = self_signing_key_map.values(); + .keys + .into_values(); + let self_signing_key_id = self_signing_key_ids.next().ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Self signing key contained no key.", @@ -593,13 +594,14 @@ impl Users { // User-signing key if let Some(user_signing_key) = user_signing_key { - let user_signing_key_map = user_signing_key + let mut user_signing_key_ids = user_signing_key .deserialize() .map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "Invalid user signing key") })? - .keys; - let mut user_signing_key_ids = user_signing_key_map.values(); + .keys + .into_values(); + let user_signing_key_id = user_signing_key_ids.next().ok_or(Error::BadRequest( ErrorKind::InvalidParam, "User signing key contained no key.", From 8d81c1c0722ad2f608adea44d7b4ceb1a8f645ae Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 20 Jan 2022 13:23:58 +0100 Subject: [PATCH 109/445] Use MSRV for build CI jobs The test job will use the latest stable so all stable lints are included. --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 73a1a928..cdc1d4cb 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -21,7 +21,7 @@ variables: - if: '$CI_COMMIT_BRANCH == "next"' - if: "$CI_COMMIT_TAG" interruptible: true - image: "rust:latest" + image: "rust:1.56" tags: ["docker"] variables: CARGO_PROFILE_RELEASE_LTO: "true" From e378bc4a2c5590047b42cd4f8e244396125cb428 Mon Sep 17 00:00:00 2001 From: Andrei Vasiliu Date: Tue, 18 Jan 2022 13:53:17 +0200 Subject: [PATCH 110/445] Refactor admin commands to use structopt --- APPSERVICES.md | 8 +- Cargo.toml | 3 + src/database/admin.rs | 302 +++++++++++++++++++++++------------------- 3 files changed, 175 insertions(+), 138 deletions(-) diff --git a/APPSERVICES.md b/APPSERVICES.md index 894bc6f4..257166eb 100644 --- a/APPSERVICES.md +++ b/APPSERVICES.md @@ -18,7 +18,7 @@ First, go into the #admins room of your homeserver. The first person that registered on the homeserver automatically joins it. Then send a message into the room like this: - @conduit:your.server.name: register_appservice + @conduit:your.server.name: register-appservice ``` paste the @@ -31,7 +31,7 @@ the room like this: ``` You can confirm it worked by sending a message like this: -`@conduit:your.server.name: list_appservices` +`@conduit:your.server.name: list-appservices` The @conduit bot should answer with `Appservices (1): your-bridge` @@ -46,9 +46,9 @@ could help. To remove an appservice go to your admin room and execute -```@conduit:your.server.name: unregister_appservice ``` +```@conduit:your.server.name: unregister-appservice ``` -where `` one of the output of `list_appservices`. +where `` one of the output of `list-appservices`. 
### Tested appservices diff --git a/Cargo.toml b/Cargo.toml index c87d949c..08afe1f4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -83,6 +83,9 @@ thread_local = "1.1.3" # used for TURN server authentication hmac = "0.11.0" sha-1 = "0.9.8" +# used for conduit's CLI and admin room command parsing +structopt = { version = "0.3.25", default-features = false } +pulldown-cmark = "0.9.1" [features] default = ["conduit_bin", "backend_sqlite", "backend_rocksdb"] diff --git a/src/database/admin.rs b/src/database/admin.rs index 518d7587..55724db5 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -5,6 +5,7 @@ use crate::{ pdu::PduBuilder, server_server, Database, PduEvent, }; +use regex::Regex; use rocket::{ futures::{channel::mpsc, stream::StreamExt}, http::RawStr, @@ -14,6 +15,7 @@ use ruma::{ EventId, RoomId, RoomVersionId, UserId, }; use serde_json::value::to_raw_value; +use structopt::StructOpt; use tokio::sync::{MutexGuard, RwLock, RwLockReadGuard}; use tracing::warn; @@ -146,78 +148,98 @@ impl Admin { } pub fn parse_admin_command(db: &Database, command_line: &str, body: Vec<&str>) -> AdminCommand { - let mut parts = command_line.split_whitespace().skip(1); + let mut argv: Vec<_> = command_line.split_whitespace().skip(1).collect(); - let command_name = match parts.next() { - Some(command) => command, + let command_name = match argv.get(0) { + Some(command) => *command, None => { - let message = "No command given. Use help for a list of commands."; + let markdown_message = "No command given. Use `help` for a list of commands."; + let html_message = markdown_to_html(&markdown_message); + return AdminCommand::SendMessage(RoomMessageEventContent::text_html( - html_to_markdown(message), - message, + markdown_message, + html_message, )); } }; - let args: Vec<_> = parts.collect(); + // Backwards compatibility with `register_appservice`-style commands + let command_with_dashes; + if command_line.contains("_") { + command_with_dashes = command_name.replace("_", "-"); + argv[0] = &command_with_dashes; + } - match try_parse_admin_command(db, command_name, args, body) { + match try_parse_admin_command(db, argv, body) { Ok(admin_command) => admin_command, Err(error) => { - let message = format!( - "Encountered error while handling {} command:\n\ -
<pre>{}</pre>
          ", + let markdown_message = format!( + "Encountered an error while handling the `{}` command:\n\ + ```\n{}\n```", command_name, error, ); + let html_message = markdown_to_html(&markdown_message); AdminCommand::SendMessage(RoomMessageEventContent::text_html( - html_to_markdown(&message), - message, + markdown_message, + html_message, )) } } } -// Helper for `RoomMessageEventContent::text_html`, which needs the content as -// both markdown and HTML. -fn html_to_markdown(text: &str) -> String { - text.replace("
<p>", "")
-        .replace("</p>", "\n")
-        .replace("<pre>", "```\n")
-        .replace("</pre>", "\n```")
-        .replace("<code>", "`")
-        .replace("</code>", "`")
-        .replace("<li>", "* ")
-        .replace("</li>", "")
-        .replace("<ul>\n", "")
-        .replace("</ul>
          \n", "") +#[derive(StructOpt)] +enum AdminCommands { + #[structopt(verbatim_doc_comment)] + /// Register a bridge using its registration YAML + /// + /// This command needs a YAML generated by an appservice (such as a mautrix + /// bridge), which must be provided in a code-block below the command. + /// + /// Example: + /// ```` + /// @conduit:example.com: register-appservice + /// ``` + /// yaml content here + /// ``` + /// ```` + RegisterAppservice, + /// Unregister a bridge using its ID + UnregisterAppservice { appservice_identifier: String }, + /// List all the currently registered bridges + ListAppservices, + /// Get the auth_chain of a PDU + GetAuthChain { event_id: Box }, + /// Parse and print a PDU from a JSON + ParsePdu, + /// Retrieve and print a PDU by ID from the Conduit database + GetPdu { event_id: Box }, + /// Print database memory usage statistics + DatabaseMemoryUsage, } -const HELP_TEXT: &'static str = r#" -

<p>The following commands are available:</p>
-<ul>
-<li><code>register_appservice</code>: Register a bridge using its registration YAML</li>
-<li><code>unregister_appservice</code>: Unregister a bridge using its ID</li>
-<li><code>list_appservices</code>: List all the currently registered bridges</li>
-<li><code>get_auth_chain</code>: Get the `auth_chain` of a PDU</li>
-<li><code>parse_pdu</code>: Parse and print a PDU from a JSON</li>
-<li><code>get_pdu</code>: Retrieve and print a PDU by ID from the Conduit database</li>
-<li><code>database_memory_usage</code>: Print database memory usage statistics</li>
-</ul>
              -"#; - pub fn try_parse_admin_command( db: &Database, - command: &str, - args: Vec<&str>, + mut argv: Vec<&str>, body: Vec<&str>, ) -> Result { - let command = match command { - "help" => AdminCommand::SendMessage(RoomMessageEventContent::text_html( - html_to_markdown(HELP_TEXT), - HELP_TEXT, - )), - "register_appservice" => { + argv.insert(0, "@conduit:example.com:"); + let command = match AdminCommands::from_iter_safe(argv) { + Ok(command) => command, + Err(error) => { + println!("Before:\n{}\n", error.to_string()); + let markdown_message = usage_to_markdown(&error.to_string()) + .replace("example.com", db.globals.server_name().as_str()); + let html_message = markdown_to_html(&markdown_message); + + return Ok(AdminCommand::SendMessage( + RoomMessageEventContent::text_html(markdown_message, html_message), + )); + } + }; + + let admin_command = match command { + AdminCommands::RegisterAppservice => { if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" { let appservice_config = body[1..body.len() - 1].join("\n"); let parsed_config = serde_yaml::from_str::(&appservice_config); @@ -233,47 +255,35 @@ pub fn try_parse_admin_command( )) } } - "unregister_appservice" => { - if args.len() == 1 { - AdminCommand::UnregisterAppservice(args[0].to_owned()) + AdminCommands::UnregisterAppservice { + appservice_identifier, + } => AdminCommand::UnregisterAppservice(appservice_identifier), + AdminCommands::ListAppservices => AdminCommand::ListAppservices, + AdminCommands::GetAuthChain { event_id } => { + let event_id = Arc::::from(event_id); + if let Some(event) = db.rooms.get_pdu_json(&event_id)? { + let room_id_str = event + .get("room_id") + .and_then(|val| val.as_str()) + .ok_or_else(|| Error::bad_database("Invalid event in database"))?; + + let room_id = <&RoomId>::try_from(room_id_str).map_err(|_| { + Error::bad_database("Invalid room id field in event in database") + })?; + let start = Instant::now(); + let count = server_server::get_auth_chain(room_id, vec![event_id], db)?.count(); + let elapsed = start.elapsed(); + return Ok(AdminCommand::SendMessage( + RoomMessageEventContent::text_plain(format!( + "Loaded auth chain with length {} in {:?}", + count, elapsed + )), + )); } else { - AdminCommand::SendMessage(RoomMessageEventContent::text_plain( - "Missing appservice identifier", - )) + AdminCommand::SendMessage(RoomMessageEventContent::text_plain("Event not found.")) } } - "list_appservices" => AdminCommand::ListAppservices, - "get_auth_chain" => { - if args.len() == 1 { - if let Ok(event_id) = EventId::parse_arc(args[0]) { - if let Some(event) = db.rooms.get_pdu_json(&event_id)? 
{ - let room_id_str = event - .get("room_id") - .and_then(|val| val.as_str()) - .ok_or_else(|| Error::bad_database("Invalid event in database"))?; - - let room_id = <&RoomId>::try_from(room_id_str).map_err(|_| { - Error::bad_database("Invalid room id field in event in database") - })?; - let start = Instant::now(); - let count = - server_server::get_auth_chain(room_id, vec![event_id], db)?.count(); - let elapsed = start.elapsed(); - return Ok(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain(format!( - "Loaded auth chain with length {} in {:?}", - count, elapsed - )), - )); - } - } - } - - AdminCommand::SendMessage(RoomMessageEventContent::text_plain( - "Usage: get_auth_chain ", - )) - } - "parse_pdu" => { + AdminCommands::ParsePdu => { if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" { let string = body[1..body.len() - 1].join("\n"); match serde_json::from_str(&string) { @@ -312,59 +322,83 @@ pub fn try_parse_admin_command( )) } } - "get_pdu" => { - if args.len() == 1 { - if let Ok(event_id) = EventId::parse(args[0]) { - let mut outlier = false; - let mut pdu_json = db.rooms.get_non_outlier_pdu_json(&event_id)?; - if pdu_json.is_none() { - outlier = true; - pdu_json = db.rooms.get_pdu_json(&event_id)?; - } - match pdu_json { - Some(json) => { - let json_text = serde_json::to_string_pretty(&json) - .expect("canonical json is valid json"); - AdminCommand::SendMessage( - RoomMessageEventContent::text_html( - format!("{}\n```json\n{}\n```", - if outlier { - "PDU is outlier" - } else { "PDU was accepted"}, json_text), - format!("

<p>{}</p>\n<pre>{}\n</pre>
              \n", - if outlier { - "PDU is outlier" - } else { "PDU was accepted"}, RawStr::new(&json_text).html_escape()) - ), - ) - } - None => AdminCommand::SendMessage(RoomMessageEventContent::text_plain( - "PDU not found.", - )), - } - } else { - AdminCommand::SendMessage(RoomMessageEventContent::text_plain( - "Event ID could not be parsed.", + AdminCommands::GetPdu { event_id } => { + let mut outlier = false; + let mut pdu_json = db.rooms.get_non_outlier_pdu_json(&event_id)?; + if pdu_json.is_none() { + outlier = true; + pdu_json = db.rooms.get_pdu_json(&event_id)?; + } + match pdu_json { + Some(json) => { + let json_text = + serde_json::to_string_pretty(&json).expect("canonical json is valid json"); + AdminCommand::SendMessage(RoomMessageEventContent::text_html( + format!( + "{}\n```json\n{}\n```", + if outlier { + "PDU is outlier" + } else { + "PDU was accepted" + }, + json_text + ), + format!( + "

<p>{}</p>\n<pre>{}\n</pre>
              \n", + if outlier { + "PDU is outlier" + } else { + "PDU was accepted" + }, + RawStr::new(&json_text).html_escape() + ), )) } - } else { - AdminCommand::SendMessage(RoomMessageEventContent::text_plain( - "Usage: get_pdu ", - )) + None => { + AdminCommand::SendMessage(RoomMessageEventContent::text_plain("PDU not found.")) + } } } - "database_memory_usage" => AdminCommand::ShowMemoryUsage, - _ => { - let message = format!( - "Unrecognized command {}, try help for a list of commands.", - command, - ); - AdminCommand::SendMessage(RoomMessageEventContent::text_html( - html_to_markdown(&message), - message, - )) - } + AdminCommands::DatabaseMemoryUsage => AdminCommand::ShowMemoryUsage, }; - Ok(command) + Ok(admin_command) +} + +fn usage_to_markdown(text: &str) -> String { + // For the conduit admin room, subcommands become main commands + let text = text.replace("SUBCOMMAND", "COMMAND"); + let text = text.replace("subcommand", "command"); + + // Put the first line (command name and version text) on its own paragraph + let re = Regex::new("^(.*?)\n").expect("Regex compilation should not fail"); + let text = re.replace_all(&text, "*$1*\n\n"); + + // Wrap command names in backticks + // (?m) enables multi-line mode for ^ and $ + let re = Regex::new("(?m)^ ([a-z-]+) +(.*)$").expect("Regex compilation should not fail"); + let text = re.replace_all(&text, " `$1`: $2"); + + // Add * to list items + let re = Regex::new("(?m)^ (.*)$").expect("Regex compilation should not fail"); + let text = re.replace_all(&text, "* $1"); + + // Turn section names to headings + let re = Regex::new("(?m)^([A-Z-]+):$").expect("Regex compilation should not fail"); + let text = re.replace_all(&text, "#### $1"); + + text.to_string() +} + +fn markdown_to_html(text: &str) -> String { + // CommonMark's spec allows HTML tags; however, CLI required arguments look + // very much like tags so escape them. + let text = text.replace("<", "<").replace(">", ">"); + + let mut html_output = String::new(); + + let parser = pulldown_cmark::Parser::new(&text); + pulldown_cmark::html::push_html(&mut html_output, parser); + + html_output } From cc3ef1a8be08b9212a16957062304d7bd5da1111 Mon Sep 17 00:00:00 2001 From: Andrei Vasiliu Date: Fri, 21 Jan 2022 11:06:16 +0200 Subject: [PATCH 111/445] Improve help text for admin commands --- src/database/admin.rs | 27 ++++++++++++++++++++++----- 1 file changed, 22 insertions(+), 5 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index f690bdf4..362ef294 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -147,6 +147,7 @@ impl Admin { } } +// Parse chat messages from the admin room into an AdminCommand object pub fn parse_admin_command(db: &Database, command_line: &str, body: Vec<&str>) -> AdminCommand { let mut argv: Vec<_> = command_line.split_whitespace().skip(1).collect(); @@ -191,10 +192,13 @@ pub fn parse_admin_command(db: &Database, command_line: &str, body: Vec<&str>) - #[derive(StructOpt)] enum AdminCommands { #[structopt(verbatim_doc_comment)] - /// Register a bridge using its registration YAML + /// Register an appservice using its registration YAML /// - /// This command needs a YAML generated by an appservice (such as a mautrix - /// bridge), which must be provided in a code-block below the command. + /// This command needs a YAML generated by an appservice (such as a bridge), + /// which must be provided in a Markdown code-block below the command. 
+ /// + /// Registering a new bridge using the ID of an existing bridge will replace + /// the old one. /// /// Example: /// ```` @@ -204,16 +208,27 @@ enum AdminCommands { /// ``` /// ```` RegisterAppservice, - /// Unregister a bridge using its ID + + /// Unregister an appservice using its ID + /// + /// You can find the ID using the `list-appservices` command. UnregisterAppservice { appservice_identifier: String }, - /// List all the currently registered bridges + + /// List all the currently registered appservices ListAppservices, + /// Get the auth_chain of a PDU GetAuthChain { event_id: Box }, + /// Parse and print a PDU from a JSON + /// + /// The PDU event is only checked for validity and is not added to the + /// database. ParsePdu, + /// Retrieve and print a PDU by ID from the Conduit database GetPdu { event_id: Box }, + /// Print database memory usage statistics DatabaseMemoryUsage, } @@ -365,6 +380,7 @@ pub fn try_parse_admin_command( Ok(admin_command) } +// Utility to turn structopt's `--help` text to markdown. fn usage_to_markdown(text: &str) -> String { // For the conduit admin room, subcommands become main commands let text = text.replace("SUBCOMMAND", "COMMAND"); @@ -390,6 +406,7 @@ fn usage_to_markdown(text: &str) -> String { text.to_string() } +// Convert markdown to HTML using the CommonMark flavor fn markdown_to_html(text: &str) -> String { // CommonMark's spec allows HTML tags; however, CLI required arguments look // very much like tags so escape them. From ba6d72f3f93aeb96c9ca98daab3c34e969c76008 Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Fri, 21 Jan 2022 14:28:07 +0100 Subject: [PATCH 112/445] Reformatted --- src/database/users.rs | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/src/database/users.rs b/src/database/users.rs index 9b986d4e..a6b6fabb 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -147,7 +147,10 @@ impl Users { match utils::string_from_bytes(username) { Ok(u) => Some(u), Err(e) => { - warn!("Failed to parse username while calling get_local_users(): {}", e.to_string()); + warn!( + "Failed to parse username while calling get_local_users(): {}", + e.to_string() + ); None } } From 57979da28c0af4bc14787575d94308d5762e7dc6 Mon Sep 17 00:00:00 2001 From: Andrei Vasiliu Date: Fri, 21 Jan 2022 17:34:21 +0200 Subject: [PATCH 113/445] Change structopt to clap, remove markdown dependency --- Cargo.lock | 75 ++++++++++++++++++++++- Cargo.toml | 3 +- src/database/admin.rs | 135 ++++++++++++++++++++++++++---------------- 3 files changed, 158 insertions(+), 55 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5be10f14..ae385fe6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -269,6 +269,33 @@ dependencies = [ "libloading", ] +[[package]] +name = "clap" +version = "3.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a30c3bf9ff12dfe5dae53f0a96e0febcd18420d1c0e7fad77796d9d5c4b5375" +dependencies = [ + "bitflags", + "clap_derive", + "indexmap", + "lazy_static", + "os_str_bytes", + "textwrap", +] + +[[package]] +name = "clap_derive" +version = "3.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "517358c28fcef6607bf6f76108e02afad7e82297d132a6b846dcc1fc3efcd153" +dependencies = [ + "heck 0.4.0", + "proc-macro-error", + "proc-macro2", + "quote", + "syn", +] + [[package]] name = "color_quant" version = "1.1.0" @@ -281,6 +308,7 @@ version = "0.2.0" dependencies = [ "base64 0.13.0", "bytes", + "clap", "crossbeam", "directories", "heed", @@ -630,7 +658,7 @@ 
version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c5f0096a91d210159eceb2ff5e1c4da18388a170e1e3ce948aac9c8fdbbf595" dependencies = [ - "heck", + "heck 0.3.3", "proc-macro2", "quote", "syn", @@ -902,6 +930,12 @@ dependencies = [ "unicode-segmentation", ] +[[package]] +name = "heck" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9" + [[package]] name = "heed" version = "0.10.6" @@ -1570,6 +1604,15 @@ dependencies = [ "num-traits", ] +[[package]] +name = "os_str_bytes" +version = "6.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8e22443d1643a904602595ba1cd8f7d896afe56d26712531c5ff73a15b2fbf64" +dependencies = [ + "memchr", +] + [[package]] name = "page_size" version = "0.4.2" @@ -1728,6 +1771,30 @@ dependencies = [ "toml", ] +[[package]] +name = "proc-macro-error" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn", + "version_check", +] + +[[package]] +name = "proc-macro-error-attr" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", +] + [[package]] name = "proc-macro-hack" version = "0.5.19" @@ -2863,6 +2930,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "textwrap" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0066c8d12af8b5acd21e00547c3797fde4e8677254a7ee429176ccebbe93dd80" + [[package]] name = "thiserror" version = "1.0.30" diff --git a/Cargo.toml b/Cargo.toml index 9a2d2fdb..3f8677d6 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -86,8 +86,7 @@ thread_local = "1.1.3" hmac = "0.11.0" sha-1 = "0.9.8" # used for conduit's CLI and admin room command parsing -structopt = { version = "0.3.25", default-features = false } -pulldown-cmark = "0.9.1" +clap = { version = "3.0.10", default-features = false, features = ["std", "derive"] } [features] default = ["conduit_bin", "backend_sqlite", "backend_rocksdb"] diff --git a/src/database/admin.rs b/src/database/admin.rs index 362ef294..59b8acdf 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -5,6 +5,7 @@ use crate::{ pdu::PduBuilder, server_server, Database, PduEvent, }; +use clap::Parser; use regex::Regex; use rocket::{ futures::{channel::mpsc, stream::StreamExt}, @@ -15,7 +16,6 @@ use ruma::{ EventId, RoomId, RoomVersionId, UserId, }; use serde_json::value::to_raw_value; -use structopt::StructOpt; use tokio::sync::{MutexGuard, RwLock, RwLockReadGuard}; use tracing::warn; @@ -155,7 +155,7 @@ pub fn parse_admin_command(db: &Database, command_line: &str, body: Vec<&str>) - Some(command) => *command, None => { let markdown_message = "No command given. Use `help` for a list of commands."; - let html_message = markdown_to_html(&markdown_message); + let html_message = "No command given. 
Use help for a list of commands."; return AdminCommand::SendMessage(RoomMessageEventContent::text_html( markdown_message, @@ -164,10 +164,17 @@ pub fn parse_admin_command(db: &Database, command_line: &str, body: Vec<&str>) - } }; + // Replace `help command` with `command --help` + // Clap has a help subcommand, but it omits the long help description. + if argv[0] == "help" { + argv.remove(0); + argv.push("--help"); + } + // Backwards compatibility with `register_appservice`-style commands let command_with_dashes; - if command_line.contains("_") { - command_with_dashes = command_name.replace("_", "-"); + if argv[0].contains("_") { + command_with_dashes = argv[0].replace("_", "-"); argv[0] = &command_with_dashes; } @@ -179,7 +186,11 @@ pub fn parse_admin_command(db: &Database, command_line: &str, body: Vec<&str>) - ```\n{}\n```", command_name, error, ); - let html_message = markdown_to_html(&markdown_message); + let html_message = format!( + "Encountered an error while handling the {} command:\n\ +
<pre>\n{}\n</pre>
              ", + command_name, error, + ); AdminCommand::SendMessage(RoomMessageEventContent::text_html( markdown_message, @@ -189,9 +200,10 @@ pub fn parse_admin_command(db: &Database, command_line: &str, body: Vec<&str>) - } } -#[derive(StructOpt)] +#[derive(Parser)] +#[clap(name = "@conduit:example.com", version = env!("CARGO_PKG_VERSION"))] enum AdminCommands { - #[structopt(verbatim_doc_comment)] + #[clap(verbatim_doc_comment)] /// Register an appservice using its registration YAML /// /// This command needs a YAML generated by an appservice (such as a bridge), @@ -200,25 +212,25 @@ enum AdminCommands { /// Registering a new bridge using the ID of an existing bridge will replace /// the old one. /// - /// Example: - /// ```` - /// @conduit:example.com: register-appservice - /// ``` - /// yaml content here - /// ``` - /// ```` + /// [add-yaml-block-to-usage] RegisterAppservice, /// Unregister an appservice using its ID - /// + /// /// You can find the ID using the `list-appservices` command. - UnregisterAppservice { appservice_identifier: String }, + UnregisterAppservice { + /// The appservice to unregister + appservice_identifier: String, + }, /// List all the currently registered appservices ListAppservices, /// Get the auth_chain of a PDU - GetAuthChain { event_id: Box }, + GetAuthChain { + /// An event ID (the $ character followed by the base64 reference hash) + event_id: Box, + }, /// Parse and print a PDU from a JSON /// @@ -227,7 +239,10 @@ enum AdminCommands { ParsePdu, /// Retrieve and print a PDU by ID from the Conduit database - GetPdu { event_id: Box }, + GetPdu { + /// An event ID (a $ followed by the base64 reference hash) + event_id: Box, + }, /// Print database memory usage statistics DatabaseMemoryUsage, @@ -239,16 +254,16 @@ pub fn try_parse_admin_command( body: Vec<&str>, ) -> Result { argv.insert(0, "@conduit:example.com:"); - let command = match AdminCommands::from_iter_safe(argv) { + let command = match AdminCommands::try_parse_from(argv) { Ok(command) => command, Err(error) => { - println!("Before:\n{}\n", error.to_string()); - let markdown_message = usage_to_markdown(&error.to_string()) + let message = error + .to_string() .replace("example.com", db.globals.server_name().as_str()); - let html_message = markdown_to_html(&markdown_message); + let html_message = usage_to_html(&message); return Ok(AdminCommand::SendMessage( - RoomMessageEventContent::text_html(markdown_message, html_message), + RoomMessageEventContent::text_html(message, html_message), )); } }; @@ -380,42 +395,58 @@ pub fn try_parse_admin_command( Ok(admin_command) } -// Utility to turn structopt's `--help` text to markdown. -fn usage_to_markdown(text: &str) -> String { +// Utility to turn clap's `--help` text to HTML. +fn usage_to_html(text: &str) -> String { // For the conduit admin room, subcommands become main commands let text = text.replace("SUBCOMMAND", "COMMAND"); let text = text.replace("subcommand", "command"); - // Put the first line (command name and version text) on its own paragraph - let re = Regex::new("^(.*?)\n").expect("Regex compilation should not fail"); - let text = re.replace_all(&text, "*$1*\n\n"); + // Escape option names (e.g. 
``) since they look like HTML tags + let text = text.replace("<", "<").replace(">", ">"); - // Wrap command names in backticks - // (?m) enables multi-line mode for ^ and $ - let re = Regex::new("(?m)^ ([a-z-]+) +(.*)$").expect("Regex compilation should not fail"); - let text = re.replace_all(&text, " `$1`: $2"); + // Italicize the first line (command name and version text) + let re = Regex::new("^(.*?)\n").expect("Regex compilation should not fail"); + let text = re.replace_all(&text, "$1\n"); - // Add * to list items - let re = Regex::new("(?m)^ (.*)$").expect("Regex compilation should not fail"); - let text = re.replace_all(&text, "* $1"); + // Unmerge wrapped lines + let text = text.replace("\n ", " "); - // Turn section names to headings - let re = Regex::new("(?m)^([A-Z-]+):$").expect("Regex compilation should not fail"); - let text = re.replace_all(&text, "#### $1"); + // Wrap option names in backticks. The lines look like: + // -V, --version Prints version information + // And are converted to: + // -V, --version: Prints version information + // (?m) enables multi-line mode for ^ and $ + let re = Regex::new("(?m)^ (([a-zA-Z_&;-]+(, )?)+) +(.*)$") + .expect("Regex compilation should not fail"); + let text = re.replace_all(&text, "$1: $4"); + + // // Enclose examples in code blocks + // // (?ms) enables multi-line mode and dot-matches-all + // let re = + // Regex::new("(?ms)^Example:\n(.*?)\nUSAGE:$").expect("Regex compilation should not fail"); + // let text = re.replace_all(&text, "EXAMPLE:\n
<pre>$1</pre>\nUSAGE:");
+
+    let has_yaml_block_marker = text.contains("\n[add-yaml-block-to-usage]\n");
+    let text = text.replace("\n[add-yaml-block-to-usage]\n", "");
+
+    // Add HTML line-breaks
+    let text = text.replace("\n", "<br>\n");
+
+    let text = if !has_yaml_block_marker {
+        // Wrap the usage line in code tags
+        let re = Regex::new("(?m)^USAGE:<br>\n    (@conduit:.*)<br>$")
+            .expect("Regex compilation should not fail");
+        re.replace_all(&text, "USAGE:<br>\n<code>$1</code>")
+    } else {
+        // Wrap the usage line in a code block, and add a yaml block example
+        // This makes the usage of e.g. `register-appservice` more accurate
+        let re = Regex::new("(?m)^USAGE:<br>\n    (.*?)<br>\n<br>\n")
+            .expect("Regex compilation should not fail");
+        re.replace_all(
+            &text,
+            "USAGE:<br>\n<pre>$1\n```\nyaml content here\n```</pre>
              ", + ) + }; text.to_string() } - -// Convert markdown to HTML using the CommonMark flavor -fn markdown_to_html(text: &str) -> String { - // CommonMark's spec allows HTML tags; however, CLI required arguments look - // very much like tags so escape them. - let text = text.replace("<", "<").replace(">", ">"); - - let mut html_output = String::new(); - - let parser = pulldown_cmark::Parser::new(&text); - pulldown_cmark::html::push_html(&mut html_output, parser); - - html_output -} From 97d56af5bd48474efc9aa1ac94ed4295e282d8ca Mon Sep 17 00:00:00 2001 From: Reiner Herrmann Date: Sat, 15 Jan 2022 17:23:14 +0000 Subject: [PATCH 114/445] Add heisenbridge to tested appservices --- APPSERVICES.md | 1 + 1 file changed, 1 insertion(+) diff --git a/APPSERVICES.md b/APPSERVICES.md index f23918b4..5ff223ed 100644 --- a/APPSERVICES.md +++ b/APPSERVICES.md @@ -58,3 +58,4 @@ These appservices have been tested and work with Conduit without any extra steps - [mautrix-hangouts](https://github.com/mautrix/hangouts/) - [mautrix-telegram](https://github.com/mautrix/telegram/) - [mautrix-signal](https://github.com/mautrix/signal/) from version `0.2.2` forward. +- [heisenbridge](https://github.com/hifi/heisenbridge/) From d94f3c1e9aad363aff552933b104944094f7ddc2 Mon Sep 17 00:00:00 2001 From: Maxim De Clercq Date: Fri, 21 Jan 2022 17:06:15 +0100 Subject: [PATCH 115/445] fix: make sure cc-rs and bindgen use the correct paths when cross-compiling --- .gitlab-ci.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index cdc1d4cb..d0d4f3e3 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -33,6 +33,12 @@ variables: - "rustup target add $TARGET" # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: - if [ -n "${SCCACHE_BIN_URL}" ]; then curl $SCCACHE_BIN_URL --output /sccache && chmod +x /sccache && export RUSTC_WRAPPER=/sccache; fi + # Make sure that cc-rs links the correct libraries when cross-compiling (required for compiling librocksdb-sys) + # See https://github.com/alexcrichton/cc-rs#external-configuration-via-environment-variables for more information + - 'export TARGET_CFLAGS="-L$TARGET_HOME/lib -latomic"' + # Make sure that rust-bindgen uses the correct include path when cross-compiling (required for compiling librocksdb-sys) + # See https://github.com/rust-lang/rust-bindgen#environment-variables for more information + - 'export BINDGEN_EXTRA_CLANG_ARGS="-I$TARGET_C_INCLUDE_PATH"' script: - time cargo build --target $TARGET --release - 'cp "target/$TARGET/release/conduit" "conduit-$TARGET"' From bfef94f5f4f465156317e5a6d60fffc8e1fd9240 Mon Sep 17 00:00:00 2001 From: Maxim De Clercq Date: Fri, 21 Jan 2022 17:26:25 +0100 Subject: [PATCH 116/445] fix: linking against libatomic is no longer required since the library path is fixed --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index d0d4f3e3..236ce0ae 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -35,7 +35,7 @@ variables: - if [ -n "${SCCACHE_BIN_URL}" ]; then curl $SCCACHE_BIN_URL --output /sccache && chmod +x /sccache && export RUSTC_WRAPPER=/sccache; fi # Make sure that cc-rs links the correct libraries when cross-compiling (required for compiling librocksdb-sys) # See https://github.com/alexcrichton/cc-rs#external-configuration-via-environment-variables for more information - - 'export TARGET_CFLAGS="-L$TARGET_HOME/lib -latomic"' + - 'export 
TARGET_CFLAGS="-L$TARGET_HOME/lib"' # Make sure that rust-bindgen uses the correct include path when cross-compiling (required for compiling librocksdb-sys) # See https://github.com/rust-lang/rust-bindgen#environment-variables for more information - 'export BINDGEN_EXTRA_CLANG_ARGS="-I$TARGET_C_INCLUDE_PATH"' From f88523988e23a09ffc5e1b9ab19e435863be3a9a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 21 Jan 2022 09:19:19 +0100 Subject: [PATCH 117/445] improvement: use jemalloc for lower memory usage --- Cargo.lock | 258 ++++++++++++++++++++++++---------------------------- Cargo.toml | 10 +- src/main.rs | 7 ++ 3 files changed, 132 insertions(+), 143 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8fe767e1..493ac082 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -14,7 +14,7 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.3", + "getrandom 0.2.4", "once_cell", "version_check", ] @@ -303,8 +303,6 @@ dependencies = [ "ruma", "rusqlite", "rust-argon2", - "rustls 0.19.1", - "rustls-native-certs 0.5.0", "serde", "serde_json", "serde_yaml", @@ -313,12 +311,13 @@ dependencies = [ "thiserror", "thread_local", "threadpool", + "tikv-jemalloc-ctl", + "tikv-jemallocator", "tokio", "tracing", "tracing-flame", "tracing-subscriber 0.2.25", "trust-dns-resolver", - "webpki 0.22.0", ] [[package]] @@ -350,22 +349,6 @@ dependencies = [ "version_check", ] -[[package]] -name = "core-foundation" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6888e10551bb93e424d8df1d07f1a8b4fceb0001a3a4b048bfc47554946f47b3" -dependencies = [ - "core-foundation-sys", - "libc", -] - -[[package]] -name = "core-foundation-sys" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" - [[package]] name = "cpufeatures" version = "0.2.1" @@ -392,9 +375,9 @@ checksum = "ccaeedb56da03b09f598226e25e80088cb4cd25f316e6e4df7d695f0feeb1403" [[package]] name = "crc32fast" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "738c290dfaea84fc1ca15ad9c168d083b05a714e1efddd8edaab678dc28d2836" +checksum = "a2209c310e29876f7f0b2721e7e26b84aff178aa3da5d091f9bfbf47669e60e3" dependencies = [ "cfg-if 1.0.0", ] @@ -713,6 +696,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "fs_extra" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2022715d62ab30faffd124d40b76f4134a550a87792276512b18d63272333394" + [[package]] name = "futures" version = "0.3.19" @@ -847,9 +836,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.3" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" +checksum = "418d37c8b1d42553c93648be529cb70f920d3baf8ef469b74b9638df426e0b4c" dependencies = [ "cfg-if 1.0.0", "libc", @@ -1035,7 +1024,7 @@ dependencies = [ "httpdate", "itoa 0.4.8", "pin-project-lite", - "socket2 0.4.2", + "socket2 0.4.3", "tokio", "tower-service", "tracing", @@ -1180,9 +1169,9 @@ checksum = "229d53d58899083193af11e15917b5640cd40b29ff475a1fe4ef725deb02d0f2" [[package]] name = "js-sys" -version = "0.3.55" +version = "0.3.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"7cc9ffccd38c451a86bf13657df244e9c3f37493cce8e5e21e940963777acc84" +checksum = "a38fc24e30fd564ce974c02bf1d337caddff65be6cc4735a1f7eab22a7440f04" dependencies = [ "wasm-bindgen", ] @@ -1224,15 +1213,15 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.112" +version = "0.2.113" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b03d17f364a3a042d5e5d46b053bbbf82c92c9430c592dd4c064dc6ee997125" +checksum = "eef78b64d87775463c549fbd80e19249ef436ea3bf1de2a1eb7e717ec7fab1e9" [[package]] name = "libloading" -version = "0.7.2" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afe203d669ec979b7128619bae5a63b7b42e9203c1b29146079ee05e2f604b52" +checksum = "efbc0f03f9a775e9f6aed295c6a1ba2253c5757a9e03d55c6caa46a681abcddd" dependencies = [ "cfg-if 1.0.0", "winapi", @@ -1308,7 +1297,7 @@ dependencies = [ "serde", "serde_json", "tracing", - "tracing-subscriber 0.3.5", + "tracing-subscriber 0.3.6", ] [[package]] @@ -1528,12 +1517,6 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" -[[package]] -name = "openssl-probe" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" - [[package]] name = "opentelemetry" version = "0.16.0" @@ -1621,6 +1604,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "paste" +version = "1.0.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0744126afe1a6dd7f394cb50a716dbe086cb06e255e53d8d0185d82828358fb5" + [[package]] name = "pear" version = "0.2.3" @@ -1669,9 +1658,9 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "persy" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29c6aa7d7f093620a28b74fcf5f5da73ba17a9e52fcbbdbb4ecc89e61cb2d673" +checksum = "b71907e1dfa6844b657f5ca59e9a076e7d6281efb4885526ba9e235a18e7e3b3" dependencies = [ "crc", "data-encoding", @@ -1863,7 +1852,7 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" dependencies = [ - "getrandom 0.2.3", + "getrandom 0.2.4", ] [[package]] @@ -1899,7 +1888,7 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64" dependencies = [ - "getrandom 0.2.3", + "getrandom 0.2.4", "redox_syscall", ] @@ -1982,7 +1971,6 @@ dependencies = [ "percent-encoding", "pin-project-lite", "rustls 0.20.2", - "rustls-native-certs 0.6.1", "rustls-pemfile", "serde", "serde_json", @@ -1994,6 +1982,7 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", + "webpki-roots", "winreg 0.7.0", ] @@ -2443,30 +2432,6 @@ dependencies = [ "webpki 0.22.0", ] -[[package]] -name = "rustls-native-certs" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a07b7c1885bd8ed3831c289b7870b13ef46fe0e856d288c30d9cc17d75a2092" -dependencies = [ - "openssl-probe", - "rustls 0.19.1", - "schannel", - "security-framework", -] - -[[package]] -name = "rustls-native-certs" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5ca9ebdfa27d3fc180e42879037b5338ab1c040c06affd00d8338598e7800943" -dependencies = [ - "openssl-probe", - "rustls-pemfile", - "schannel", - "security-framework", -] - [[package]] name = "rustls-pemfile" version = "0.2.1" @@ -2488,16 +2453,6 @@ version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" -[[package]] -name = "schannel" -version = "0.1.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" -dependencies = [ - "lazy_static", - "winapi", -] - [[package]] name = "scoped-tls" version = "1.0.0" @@ -2530,29 +2485,6 @@ dependencies = [ "untrusted", ] -[[package]] -name = "security-framework" -version = "2.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "525bc1abfda2e1998d152c45cf13e696f76d0a4972310b22fac1658b05df7c87" -dependencies = [ - "bitflags", - "core-foundation", - "core-foundation-sys", - "libc", - "security-framework-sys", -] - -[[package]] -name = "security-framework-sys" -version = "2.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9dd14d83160b528b7bfd66439110573efcfbe281b17fc2ca9f39f550d619c7e" -dependencies = [ - "core-foundation-sys", - "libc", -] - [[package]] name = "semver" version = "0.9.0" @@ -2570,18 +2502,18 @@ checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" [[package]] name = "serde" -version = "1.0.133" +version = "1.0.134" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97565067517b60e2d1ea8b268e59ce036de907ac523ad83a0475da04e818989a" +checksum = "96b3c34c1690edf8174f5b289a336ab03f568a4460d8c6df75f2f3a692b3bc6a" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.133" +version = "1.0.134" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed201699328568d8d08208fdd080e3ff594e6c422e438b6705905da01005d537" +checksum = "784ed1fbfa13fe191077537b0d70ec8ad1e903cfe04831da608aa36457cb653d" dependencies = [ "proc-macro2", "quote", @@ -2590,9 +2522,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.74" +version = "1.0.75" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee2bb9cd061c5865d345bb02ca49fcef1391741b672b54a0bf7b679badec3142" +checksum = "c059c05b48c5c0067d4b4b2b4f0732dd65feb52daf7e0ea09cd87e7dadc1af79" dependencies = [ "itoa 1.0.1", "ryu", @@ -2601,12 +2533,12 @@ dependencies = [ [[package]] name = "serde_urlencoded" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edfa57a7f8d9c1d260a549e7224100f6c43d43f9103e06dd8b4095a9b2b43ce9" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", - "itoa 0.4.8", + "itoa 1.0.1", "ryu", "serde", ] @@ -2638,9 +2570,18 @@ dependencies = [ [[package]] name = "sha1" -version = "0.6.0" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c1da05c97445caa12d05e848c4a4fcbbea29e748ac28f7e80e9b010392063770" +dependencies = [ + "sha1_smol", +] + +[[package]] +name = "sha1_smol" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" +checksum = "ae1a47186c03a32177042e55dbc5fd5aee900b8e0069a8d70fba96a9375cd012" [[package]] name = "sha2" 
@@ -2721,9 +2662,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ecab6c735a6bb4139c0caafd0cc3635748bbb3acf4550e8138122099251f309" +checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" [[package]] name = "socket2" @@ -2738,9 +2679,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.4.2" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5dc90fe6c7be1a323296982db1836d1ea9e47b6839496dde9a541bc496df3516" +checksum = "0f82496b90c36d70af5fcd482edaa2e0bd16fade569de1330405fecbbdac736b" dependencies = [ "libc", "winapi", @@ -2851,9 +2792,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.85" +version = "1.0.86" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a684ac3dcd8913827e18cd09a68384ee66c1de24157e3c556c9ab16d85695fb7" +checksum = "8a65b3f4ffa0092e9887669db0eae07941f023991ab58ea44da8fe8e2d511c6b" dependencies = [ "proc-macro2", "quote", @@ -2946,6 +2887,38 @@ dependencies = [ "threadpool", ] +[[package]] +name = "tikv-jemalloc-ctl" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb833c46ecbf8b6daeccb347cefcabf9c1beb5c9b0f853e1cec45632d9963e69" +dependencies = [ + "libc", + "paste", + "tikv-jemalloc-sys", +] + +[[package]] +name = "tikv-jemalloc-sys" +version = "0.4.2+5.2.1-patched.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5844e429d797c62945a566f8da4e24c7fe3fbd5d6617fd8bf7a0b7dc1ee0f22e" +dependencies = [ + "cc", + "fs_extra", + "libc", +] + +[[package]] +name = "tikv-jemallocator" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c14a5a604eb8715bc5785018a37d00739b180bcf609916ddf4393d33d49ccdf" +dependencies = [ + "libc", + "tikv-jemalloc-sys", +] + [[package]] name = "time" version = "0.1.43" @@ -3200,9 +3173,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.5" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5d81bfa81424cc98cb034b837c985b7a290f592e5b4322f353f94a0ab0f9f594" +checksum = "77be66445c4eeebb934a7340f227bfe7b338173d3f8c00a60a5a58005c9faecf" dependencies = [ "ansi_term", "lazy_static", @@ -3355,7 +3328,7 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.3", + "getrandom 0.2.4", ] [[package]] @@ -3394,9 +3367,9 @@ checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" [[package]] name = "wasm-bindgen" -version = "0.2.78" +version = "0.2.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "632f73e236b219150ea279196e54e610f5dbafa5d61786303d4da54f84e47fce" +checksum = "25f1af7423d8588a3d840681122e72e6a24ddbcb3f0ec385cac0d12d24256c06" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -3404,9 +3377,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.78" +version = "0.2.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a317bf8f9fba2476b4b2c85ef4c4af8ff39c3c7f0cdfeed4f82c34a880aa837b" +checksum = "8b21c0df030f5a177f3cba22e9bc4322695ec43e7257d865302900290bcdedca" dependencies = [ "bumpalo", "lazy_static", @@ -3419,9 
+3392,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.28" +version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e8d7523cb1f2a4c96c1317ca690031b714a51cc14e05f712446691f413f5d39" +checksum = "2eb6ec270a31b1d3c7e266b999739109abce8b6c87e4b31fcfcd788b65267395" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -3431,9 +3404,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.78" +version = "0.2.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d56146e7c495528bf6587663bea13a8eb588d39b36b679d83972e1a2dbbdacf9" +checksum = "2f4203d69e40a52ee523b2529a773d5ffc1dc0071801c87b3d270b471b80ed01" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3441,9 +3414,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.78" +version = "0.2.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7803e0eea25835f8abdc585cd3021b3deb11543c6fe226dcd30b228857c5c5ab" +checksum = "bfa8a30d46208db204854cadbb5d4baf5fcf8071ba5bf48190c3e59937962ebc" dependencies = [ "proc-macro2", "quote", @@ -3454,15 +3427,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.78" +version = "0.2.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0237232789cf037d5480773fe568aac745bfe2afbc11a863e97901780a6b47cc" +checksum = "3d958d035c4438e28c70e4321a2911302f10135ce78a9c7834c0cab4123d06a2" [[package]] name = "web-sys" -version = "0.3.55" +version = "0.3.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38eb105f1c59d9eaa6b5cdc92b859d85b926e82cb2e0945cd0c9259faa6fe9fb" +checksum = "c060b319f29dd25724f09a2ba1418f142f539b2be99fbf4d2d5a8f7330afb8eb" dependencies = [ "js-sys", "wasm-bindgen", @@ -3488,6 +3461,15 @@ dependencies = [ "untrusted", ] +[[package]] +name = "webpki-roots" +version = "0.22.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "552ceb903e957524388c4d3475725ff2c8b7960922063af6ce53c9a43da07449" +dependencies = [ + "webpki 0.22.0", +] + [[package]] name = "weezl" version = "0.1.5" @@ -3563,18 +3545,18 @@ checksum = "9fc79f4a1e39857fc00c3f662cbf2651c771f00e9c15fe2abc341806bd46bd71" [[package]] name = "zeroize" -version = "1.4.3" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d68d9dcec5f9b43a30d38c49f91dfedfaac384cb8f085faca366c26207dd1619" +checksum = "cc222aec311c323c717f56060324f32b82da1ce1dd81d9a09aa6a9030bfe08db" dependencies = [ "zeroize_derive", ] [[package]] name = "zeroize_derive" -version = "1.2.2" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "65f1a51723ec88c66d5d1fe80c841f17f63587d6691901d66be9bec6c3b51f73" +checksum = "81e8f13fef10b63c06356d65d416b070798ddabcadc10d3ece0c5be9b3c7eddb" dependencies = [ "proc-macro2", "quote", diff --git a/Cargo.toml b/Cargo.toml index e3614ec4..78a4c8ff 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -48,11 +48,7 @@ rand = "0.8.4" # Used to hash passwords rust-argon2 = "0.8.3" # Used to send requests -reqwest = { version = "0.11.4", default-features = false, features = ["rustls-tls-native-roots", "socks"] } -# Custom TLS verifier -rustls = { version = "0.19.1", features = ["dangerous_configuration"] } -rustls-native-certs = "0.5.0" -webpki = "0.22.0" +reqwest = { version = "0.11.4", default-features = false, features = ["rustls-tls", "socks"] } # Used for conduit::Error type thiserror 
= "1.0.28" # Used to generate thumbnails for images @@ -87,6 +83,10 @@ thread_local = "1.1.3" hmac = "0.11.0" sha-1 = "0.9.8" +[target.'cfg(not(target_env = "msvc"))'.dependencies] +tikv-jemalloc-ctl = { version = "0.4.2", features = ['use_std'] } +tikv-jemallocator = { version = "0.4.1", features = ['unprefixed_malloc_on_supported_platforms'] } + [features] default = ["conduit_bin", "backend_sqlite", "backend_rocksdb"] backend_sled = ["sled"] diff --git a/src/main.rs b/src/main.rs index d9bbc240..b18ca803 100644 --- a/src/main.rs +++ b/src/main.rs @@ -25,6 +25,13 @@ use tracing_subscriber::{prelude::*, EnvFilter}; pub use conduit::*; // Re-export everything from the library crate pub use rocket::State; +#[cfg(not(target_env = "msvc"))] +use tikv_jemallocator::Jemalloc; + +#[cfg(not(target_env = "msvc"))] +#[global_allocator] +static GLOBAL: Jemalloc = Jemalloc; + fn setup_rocket(config: Figment, data: Arc>) -> rocket::Rocket { rocket::custom(config) .manage(data) From 3e9abfedb43e6f52ebb3f24adaf8bf0871712181 Mon Sep 17 00:00:00 2001 From: Maxim De Clercq Date: Sat, 22 Jan 2022 00:14:19 +0100 Subject: [PATCH 118/445] fix: make sure libstdc++ is linked statically when cross-compiling --- .gitlab-ci.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 236ce0ae..5cae7439 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -33,10 +33,10 @@ variables: - "rustup target add $TARGET" # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: - if [ -n "${SCCACHE_BIN_URL}" ]; then curl $SCCACHE_BIN_URL --output /sccache && chmod +x /sccache && export RUSTC_WRAPPER=/sccache; fi - # Make sure that cc-rs links the correct libraries when cross-compiling (required for compiling librocksdb-sys) + # Make sure that cc-rs links the correct libraries statically when cross-compiling # See https://github.com/alexcrichton/cc-rs#external-configuration-via-environment-variables for more information - - 'export TARGET_CFLAGS="-L$TARGET_HOME/lib"' - # Make sure that rust-bindgen uses the correct include path when cross-compiling (required for compiling librocksdb-sys) + - 'export CARGO_BUILD_RUSTFLAGS="-L$TARGET_HOME/lib" CXXSTDLIB="static=stdc++"' + # Make sure that rust-bindgen uses the correct include path when cross-compiling # See https://github.com/rust-lang/rust-bindgen#environment-variables for more information - 'export BINDGEN_EXTRA_CLANG_ARGS="-I$TARGET_C_INCLUDE_PATH"' script: From a021680591cf581fccd05a9dbb0914163f69e8ba Mon Sep 17 00:00:00 2001 From: Maxim De Clercq Date: Sat, 22 Jan 2022 01:14:36 +0100 Subject: [PATCH 119/445] fix: make sure libatomic is always linked because it's skipped on arm targets --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 5cae7439..b863de98 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -35,7 +35,7 @@ variables: - if [ -n "${SCCACHE_BIN_URL}" ]; then curl $SCCACHE_BIN_URL --output /sccache && chmod +x /sccache && export RUSTC_WRAPPER=/sccache; fi # Make sure that cc-rs links the correct libraries statically when cross-compiling # See https://github.com/alexcrichton/cc-rs#external-configuration-via-environment-variables for more information - - 'export CARGO_BUILD_RUSTFLAGS="-L$TARGET_HOME/lib" CXXSTDLIB="static=stdc++"' + - 'export CARGO_BUILD_RUSTFLAGS="-L$TARGET_HOME/lib -latomic" CXXSTDLIB="static=stdc++"' # Make sure that rust-bindgen uses the correct include path when 
cross-compiling # See https://github.com/rust-lang/rust-bindgen#environment-variables for more information - 'export BINDGEN_EXTRA_CLANG_ARGS="-I$TARGET_C_INCLUDE_PATH"' From cd9902637ddf3f8e7711f01a5cf044725704e28a Mon Sep 17 00:00:00 2001 From: Maxim De Clercq Date: Sat, 22 Jan 2022 17:34:30 +0100 Subject: [PATCH 120/445] feat: use rustembedded/cross images and use static relocation model to fix cross-compile --- .gitlab-ci.yml | 39 ++++++++++++++++++++++++--------------- CROSS_COMPILE.md | 11 ----------- Cross.toml | 11 +++++++++++ DEPLOY.md | 2 +- cross/README.md | 37 +++++++++++++++++++++++++++++++++++++ cross/build.sh | 31 +++++++++++++++++++++++++++++++ cross/test.sh | 8 ++++++++ 7 files changed, 112 insertions(+), 27 deletions(-) delete mode 100644 CROSS_COMPILE.md create mode 100644 Cross.toml create mode 100644 cross/README.md create mode 100755 cross/build.sh create mode 100755 cross/test.sh diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index b863de98..993145a1 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -21,33 +21,46 @@ variables: - if: '$CI_COMMIT_BRANCH == "next"' - if: "$CI_COMMIT_TAG" interruptible: true - image: "rust:1.56" + image: "rust:1.58" tags: ["docker"] + services: ["docker:dind"] variables: + DOCKER_HOST: tcp://docker:2375/ + DOCKER_TLS_CERTDIR: "" + DOCKER_DRIVER: overlay2 + SHARED_PATH: $CI_PROJECT_DIR/shared/ CARGO_PROFILE_RELEASE_LTO: "true" CARGO_PROFILE_RELEASE_CODEGEN_UNITS: "1" CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow before_script: - 'echo "Building for target $TARGET"' - - "rustc --version && cargo --version && rustup show" # Print version info for debugging - - "rustup target add $TARGET" + - "rustup show && rustc --version && cargo --version" # Print version info for debugging # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: - if [ -n "${SCCACHE_BIN_URL}" ]; then curl $SCCACHE_BIN_URL --output /sccache && chmod +x /sccache && export RUSTC_WRAPPER=/sccache; fi - # Make sure that cc-rs links the correct libraries statically when cross-compiling - # See https://github.com/alexcrichton/cc-rs#external-configuration-via-environment-variables for more information - - 'export CARGO_BUILD_RUSTFLAGS="-L$TARGET_HOME/lib -latomic" CXXSTDLIB="static=stdc++"' - # Make sure that rust-bindgen uses the correct include path when cross-compiling - # See https://github.com/rust-lang/rust-bindgen#environment-variables for more information - - 'export BINDGEN_EXTRA_CLANG_ARGS="-I$TARGET_C_INCLUDE_PATH"' script: - - time cargo build --target $TARGET --release + # install cross-compiling prerequisites + - 'apt-get update && apt-get install -y docker.io && docker version' # install docker + - 'cargo install cross && cross --version' # install cross + # fix cargo and rustup mounts from this container (https://gitlab.com/gitlab-org/gitlab-foss/-/issues/41227) + - 'mkdir -p $SHARED_PATH/cargo' + - 'cp -r $CARGO_HOME/bin $SHARED_PATH/cargo' + - 'cp -r $RUSTUP_HOME $SHARED_PATH' + - 'export CARGO_HOME=$SHARED_PATH/cargo RUSTUP_HOME=$SHARED_PATH/rustup' + # cross-compile conduit for target + - 'time ./cross/build.sh --locked --release' - 'cp "target/$TARGET/release/conduit" "conduit-$TARGET"' + cache: + # https://doc.rust-lang.org/cargo/guide/cargo-home.html#caching-the-cargo-home-in-ci + - key: 'cargo-cache-$TARGET' + paths: + - $SHARED_PATH/cargo/registry/index + - $SHARED_PATH/cargo/registry/cache + - $SHARED_PATH/cargo/git/db artifacts: expire_in: 
never build:release:cargo:x86_64-unknown-linux-musl-with-debug: extends: .build-cargo-shared-settings - image: messense/rust-musl-cross:x86_64-musl variables: CARGO_PROFILE_RELEASE_DEBUG: 2 # Enable debug info for flamegraph profiling TARGET: "x86_64-unknown-linux-musl" @@ -61,7 +74,6 @@ build:release:cargo:x86_64-unknown-linux-musl-with-debug: build:release:cargo:x86_64-unknown-linux-musl: extends: .build-cargo-shared-settings - image: messense/rust-musl-cross:x86_64-musl variables: TARGET: "x86_64-unknown-linux-musl" artifacts: @@ -72,7 +84,6 @@ build:release:cargo:x86_64-unknown-linux-musl: build:release:cargo:arm-unknown-linux-musleabihf: extends: .build-cargo-shared-settings - image: messense/rust-musl-cross:arm-musleabihf variables: TARGET: "arm-unknown-linux-musleabihf" artifacts: @@ -83,7 +94,6 @@ build:release:cargo:arm-unknown-linux-musleabihf: build:release:cargo:armv7-unknown-linux-musleabihf: extends: .build-cargo-shared-settings - image: messense/rust-musl-cross:armv7-musleabihf variables: TARGET: "armv7-unknown-linux-musleabihf" artifacts: @@ -94,7 +104,6 @@ build:release:cargo:armv7-unknown-linux-musleabihf: build:release:cargo:aarch64-unknown-linux-musl: extends: .build-cargo-shared-settings - image: messense/rust-musl-cross:aarch64-musl variables: TARGET: "aarch64-unknown-linux-musl" artifacts: diff --git a/CROSS_COMPILE.md b/CROSS_COMPILE.md deleted file mode 100644 index e38a6ad7..00000000 --- a/CROSS_COMPILE.md +++ /dev/null @@ -1,11 +0,0 @@ -Install docker: - -``` -$ sudo apt install docker -$ sudo usermod -aG docker $USER -$ exec sudo su -l $USER -$ sudo systemctl start docker -$ cargo install cross -$ cross build --release --target armv7-unknown-linux-musleabihf -``` -The cross-compiled binary is at target/armv7-unknown-linux-musleabihf/release/conduit diff --git a/Cross.toml b/Cross.toml new file mode 100644 index 00000000..491efcb7 --- /dev/null +++ b/Cross.toml @@ -0,0 +1,11 @@ +[target.aarch64-unknown-linux-musl] +image = "rust-cross:aarch64-unknown-linux-musl" + +[target.arm-unknown-linux-musleabihf] +image = "rust-cross:arm-unknown-linux-musleabihf" + +[target.armv7-unknown-linux-musleabihf] +image = "rust-cross:armv7-unknown-linux-musleabihf" + +[target.x86_64-unknown-linux-musl] +image = "rust-cross:x86_64-unknown-linux-musl" diff --git a/DEPLOY.md b/DEPLOY.md index 0058b93d..38e1e286 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -37,7 +37,7 @@ $ cargo build --release Note that this currently requires Rust 1.50. -If you want to cross compile Conduit to another architecture, read the [Cross-Compile Guide](CROSS_COMPILE.md). +If you want to cross compile Conduit to another architecture, read the [Cross-Compile Guide](cross/README.md). ## Adding a Conduit user diff --git a/cross/README.md b/cross/README.md new file mode 100644 index 00000000..2829d239 --- /dev/null +++ b/cross/README.md @@ -0,0 +1,37 @@ +## Cross compilation + +The `cross` folder contains a set of convenience scripts (`build.sh` and `test.sh`) for cross-compiling Conduit. + +Currently supported targets are + +- aarch64-unknown-linux-musl +- arm-unknown-linux-musleabihf +- armv7-unknown-linux-musleabihf +- x86\_64-unknown-linux-musl + +### Install prerequisites +#### Docker +[Installation guide](https://docs.docker.com/get-docker/). +```sh +$ sudo apt install docker +$ sudo systemctl start docker +$ sudo usermod -aG docker $USER +$ newgrp docker +``` + +#### Cross +[Installation guide](https://github.com/rust-embedded/cross/#installation). 
+```sh +$ cargo install cross +``` + +### Buiding Conduit +```sh +$ TARGET=armv7-unknown-linux-musleabihf ./cross/build.sh --release +``` +The cross-compiled binary is at `target/armv7-unknown-linux-musleabihf/release/conduit` + +### Testing Conduit +```sh +$ TARGET=armv7-unknown-linux-musleabihf ./cross/test.sh --release +``` diff --git a/cross/build.sh b/cross/build.sh new file mode 100755 index 00000000..4a6d4493 --- /dev/null +++ b/cross/build.sh @@ -0,0 +1,31 @@ +#!/bin/bash +set -ex + +# build custom container with libclang and static compilation +tag="rust-cross:${TARGET:?}" +docker build --tag="$tag" - << EOF +FROM rustembedded/cross:$TARGET + +# Install libclang for generating bindings with rust-bindgen +# The architecture is not relevant here since it's not used for compilation +RUN apt-get update && \ + apt-get install --assume-yes libclang-dev + +# Set the target prefix +ENV TARGET_PREFIX="/usr/local/$(echo "${TARGET:?}" | sed -e 's/armv7/arm/' -e 's/-unknown//')" + +# Make sure that cc-rs links libc/libstdc++ statically when cross-compiling +# See https://github.com/alexcrichton/cc-rs#external-configuration-via-environment-variables for more information +ENV RUSTFLAGS="-L\$TARGET_PREFIX/lib" CXXSTDLIB="static=stdc++" +# Forcefully linking against libatomic, libc and libgcc is required for arm32, otherwise symbols are missing +$([[ $TARGET =~ arm ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -Clink-arg=-lgcc -Clink-arg=-latomic -lstatic=c"') +# Strip symbols while compiling in release mode +$([[ $@ =~ -r ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -Clink-arg=-s"') + +# Make sure that rust-bindgen uses the correct include path when cross-compiling +# See https://github.com/rust-lang/rust-bindgen#environment-variables for more information +ENV BINDGEN_EXTRA_CLANG_ARGS="-I\$TARGET_PREFIX/include" +EOF + +# build conduit for a specific target +cross build --target="$TARGET" $@ diff --git a/cross/test.sh b/cross/test.sh new file mode 100755 index 00000000..0aa0909c --- /dev/null +++ b/cross/test.sh @@ -0,0 +1,8 @@ +#!/usr/bin/env sh +set -ex + +# Build conduit for a specific target +cross/build.sh $@ + +# Test conduit for a specific target +cross test --target="$TARGET" $@ From fd67cd7450e33b97050372bdd13832828fa75458 Mon Sep 17 00:00:00 2001 From: Maxim De Clercq Date: Sun, 23 Jan 2022 15:10:42 +0100 Subject: [PATCH 121/445] feat: support targetting i686 --- .gitlab-ci.yml | 28 +++++++++++++++++++--------- Cross.toml | 3 +++ cross/build.sh | 6 ++++-- 3 files changed, 26 insertions(+), 11 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 993145a1..b5a12f3d 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -37,7 +37,6 @@ variables: - "rustup show && rustc --version && cargo --version" # Print version info for debugging # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: - if [ -n "${SCCACHE_BIN_URL}" ]; then curl $SCCACHE_BIN_URL --output /sccache && chmod +x /sccache && export RUSTC_WRAPPER=/sccache; fi - script: # install cross-compiling prerequisites - 'apt-get update && apt-get install -y docker.io && docker version' # install docker - 'cargo install cross && cross --version' # install cross @@ -46,16 +45,17 @@ variables: - 'cp -r $CARGO_HOME/bin $SHARED_PATH/cargo' - 'cp -r $RUSTUP_HOME $SHARED_PATH' - 'export CARGO_HOME=$SHARED_PATH/cargo RUSTUP_HOME=$SHARED_PATH/rustup' + script: # cross-compile conduit for target - 'time ./cross/build.sh --locked --release' - - 'cp "target/$TARGET/release/conduit" 
"conduit-$TARGET"' + - 'mv "target/$TARGET/release/conduit" "conduit-$TARGET"' cache: # https://doc.rust-lang.org/cargo/guide/cargo-home.html#caching-the-cargo-home-in-ci - - key: 'cargo-cache-$TARGET' - paths: - - $SHARED_PATH/cargo/registry/index - - $SHARED_PATH/cargo/registry/cache - - $SHARED_PATH/cargo/git/db + key: 'cargo-cache-$TARGET' + paths: + - $SHARED_PATH/cargo/registry/index + - $SHARED_PATH/cargo/registry/cache + - $SHARED_PATH/cargo/git/db artifacts: expire_in: never @@ -82,6 +82,16 @@ build:release:cargo:x86_64-unknown-linux-musl: - "conduit-x86_64-unknown-linux-musl" expose_as: "Conduit for x86_64-unknown-linux-musl" +build:release:cargo:i686-unknown-linux-musl: + extends: .build-cargo-shared-settings + variables: + TARGET: "i686-unknown-linux-musl" + artifacts: + name: "conduit-i686-unknown-linux-musl" + paths: + - "conduit-i686-unknown-linux-musl" + expose_as: "Conduit for i686-unknown-linux-musl" + build:release:cargo:arm-unknown-linux-musleabihf: extends: .build-cargo-shared-settings variables: @@ -119,14 +129,14 @@ build:release:cargo:aarch64-unknown-linux-musl: cache: key: "build_cache--$TARGET--$CI_COMMIT_BRANCH--debug" script: - - "time cargo build --target $TARGET" + # cross-compile conduit for target + - 'time ./cross/build.sh --locked' - 'mv "target/$TARGET/debug/conduit" "conduit-debug-$TARGET"' artifacts: expire_in: 4 weeks build:debug:cargo:x86_64-unknown-linux-musl: extends: ".cargo-debug-shared-settings" - image: messense/rust-musl-cross:x86_64-musl variables: TARGET: "x86_64-unknown-linux-musl" artifacts: diff --git a/Cross.toml b/Cross.toml index 491efcb7..22c84b97 100644 --- a/Cross.toml +++ b/Cross.toml @@ -7,5 +7,8 @@ image = "rust-cross:arm-unknown-linux-musleabihf" [target.armv7-unknown-linux-musleabihf] image = "rust-cross:armv7-unknown-linux-musleabihf" +[target.i686-unknown-linux-musl] +image = "rust-cross:i686-unknown-linux-musl" + [target.x86_64-unknown-linux-musl] image = "rust-cross:x86_64-unknown-linux-musl" diff --git a/cross/build.sh b/cross/build.sh index 4a6d4493..24a2224b 100755 --- a/cross/build.sh +++ b/cross/build.sh @@ -17,8 +17,10 @@ ENV TARGET_PREFIX="/usr/local/$(echo "${TARGET:?}" | sed -e 's/armv7/arm/' -e 's # Make sure that cc-rs links libc/libstdc++ statically when cross-compiling # See https://github.com/alexcrichton/cc-rs#external-configuration-via-environment-variables for more information ENV RUSTFLAGS="-L\$TARGET_PREFIX/lib" CXXSTDLIB="static=stdc++" -# Forcefully linking against libatomic, libc and libgcc is required for arm32, otherwise symbols are missing -$([[ $TARGET =~ arm ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -Clink-arg=-lgcc -Clink-arg=-latomic -lstatic=c"') +# Forcefully linking against libatomic and libgcc is required for arm32, otherwise symbols are missing +$([[ $TARGET =~ arm ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -Clink-arg=-lgcc -Clink-arg=-latomic"') +# Forcefully linking against libc is required for 32-bit, otherwise symbols are missing +$([[ $TARGET =~ arm|i686 ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -lstatic=c"') # Strip symbols while compiling in release mode $([[ $@ =~ -r ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -Clink-arg=-s"') From 219dfbabd58ff6008f8a85c291f8cf4f6da1318a Mon Sep 17 00:00:00 2001 From: Maxim De Clercq Date: Sun, 23 Jan 2022 17:31:12 +0100 Subject: [PATCH 122/445] fix: pass RUSTC_WRAPPER to the cross container and enforce static builds --- .gitlab-ci.yml | 18 ++++++------------ Cross.toml | 3 --- cross/build.sh | 9 +++++---- 3 files changed, 11 insertions(+), 19 deletions(-) diff --git 
a/.gitlab-ci.yml b/.gitlab-ci.yml index b5a12f3d..fac678cf 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -35,8 +35,6 @@ variables: before_script: - 'echo "Building for target $TARGET"' - "rustup show && rustc --version && cargo --version" # Print version info for debugging - # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: - - if [ -n "${SCCACHE_BIN_URL}" ]; then curl $SCCACHE_BIN_URL --output /sccache && chmod +x /sccache && export RUSTC_WRAPPER=/sccache; fi # install cross-compiling prerequisites - 'apt-get update && apt-get install -y docker.io && docker version' # install docker - 'cargo install cross && cross --version' # install cross @@ -45,10 +43,14 @@ variables: - 'cp -r $CARGO_HOME/bin $SHARED_PATH/cargo' - 'cp -r $RUSTUP_HOME $SHARED_PATH' - 'export CARGO_HOME=$SHARED_PATH/cargo RUSTUP_HOME=$SHARED_PATH/rustup' + # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: + - if [ -n "${SCCACHE_BIN_URL}" ]; then export RUSTC_WRAPPER=$SHARED_PATH/cargo/bin/sccache && curl $SCCACHE_BIN_URL --output $RUSTC_WRAPPER && chmod +x $RUSTC_WRAPPER; fi script: # cross-compile conduit for target - 'time ./cross/build.sh --locked --release' - 'mv "target/$TARGET/release/conduit" "conduit-$TARGET"' + # assert that the binary is statically linked + - 'file conduit-$TARGET | grep "static\(-pie\|ally\) linked"' cache: # https://doc.rust-lang.org/cargo/guide/cargo-home.html#caching-the-cargo-home-in-ci key: 'cargo-cache-$TARGET' @@ -82,16 +84,6 @@ build:release:cargo:x86_64-unknown-linux-musl: - "conduit-x86_64-unknown-linux-musl" expose_as: "Conduit for x86_64-unknown-linux-musl" -build:release:cargo:i686-unknown-linux-musl: - extends: .build-cargo-shared-settings - variables: - TARGET: "i686-unknown-linux-musl" - artifacts: - name: "conduit-i686-unknown-linux-musl" - paths: - - "conduit-i686-unknown-linux-musl" - expose_as: "Conduit for i686-unknown-linux-musl" - build:release:cargo:arm-unknown-linux-musleabihf: extends: .build-cargo-shared-settings variables: @@ -132,6 +124,8 @@ build:release:cargo:aarch64-unknown-linux-musl: # cross-compile conduit for target - 'time ./cross/build.sh --locked' - 'mv "target/$TARGET/debug/conduit" "conduit-debug-$TARGET"' + # assert that the binary is statically linked + - 'file conduit-debug-$TARGET | grep "static\(-pie\|ally\) linked"' artifacts: expire_in: 4 weeks diff --git a/Cross.toml b/Cross.toml index 22c84b97..491efcb7 100644 --- a/Cross.toml +++ b/Cross.toml @@ -7,8 +7,5 @@ image = "rust-cross:arm-unknown-linux-musleabihf" [target.armv7-unknown-linux-musleabihf] image = "rust-cross:armv7-unknown-linux-musleabihf" -[target.i686-unknown-linux-musl] -image = "rust-cross:i686-unknown-linux-musl" - [target.x86_64-unknown-linux-musl] image = "rust-cross:x86_64-unknown-linux-musl" diff --git a/cross/build.sh b/cross/build.sh index 24a2224b..34082606 100755 --- a/cross/build.sh +++ b/cross/build.sh @@ -17,13 +17,14 @@ ENV TARGET_PREFIX="/usr/local/$(echo "${TARGET:?}" | sed -e 's/armv7/arm/' -e 's # Make sure that cc-rs links libc/libstdc++ statically when cross-compiling # See https://github.com/alexcrichton/cc-rs#external-configuration-via-environment-variables for more information ENV RUSTFLAGS="-L\$TARGET_PREFIX/lib" CXXSTDLIB="static=stdc++" -# Forcefully linking against libatomic and libgcc is required for arm32, otherwise symbols are missing -$([[ $TARGET =~ arm ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -Clink-arg=-lgcc 
-Clink-arg=-latomic"') -# Forcefully linking against libc is required for 32-bit, otherwise symbols are missing -$([[ $TARGET =~ arm|i686 ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -lstatic=c"') +# Forcefully linking against libatomic, libc and libgcc is required for arm32, otherwise symbols are missing +$([[ $TARGET =~ arm ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -Clink-arg=-lgcc -Clink-arg=-latomic -lstatic=c"') # Strip symbols while compiling in release mode $([[ $@ =~ -r ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -Clink-arg=-s"') +# Support a rustc wrapper like sccache when cross-compiling +ENV RUSTC_WRAPPER="$RUSTC_WRAPPER" + # Make sure that rust-bindgen uses the correct include path when cross-compiling # See https://github.com/rust-lang/rust-bindgen#environment-variables for more information ENV BINDGEN_EXTRA_CLANG_ARGS="-I\$TARGET_PREFIX/include" From c2ad2b3dd747e7a8baa5ff2f9ade8edb92204aa6 Mon Sep 17 00:00:00 2001 From: Maxim De Clercq Date: Sun, 23 Jan 2022 17:38:13 +0100 Subject: [PATCH 123/445] fix: pass sccache variables to cross container with build.env.passthrough --- Cross.toml | 12 ++++++++++++ cross/build.sh | 3 --- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/Cross.toml b/Cross.toml index 491efcb7..a989a98f 100644 --- a/Cross.toml +++ b/Cross.toml @@ -1,3 +1,15 @@ +[build.env] +# CI uses an S3 endpoint to store sccache artifacts, so their config needs to +# be available in the cross container as well +passthrough = [ + "RUSTC_WRAPPER", + "AWS_ACCESS_KEY_ID", + "AWS_SECRET_ACCESS_KEY", + "SCCACHE_BUCKET", + "SCCACHE_ENDPOINT", + "SCCACHE_S3_USE_SSL", +] + [target.aarch64-unknown-linux-musl] image = "rust-cross:aarch64-unknown-linux-musl" diff --git a/cross/build.sh b/cross/build.sh index 34082606..4a6d4493 100755 --- a/cross/build.sh +++ b/cross/build.sh @@ -22,9 +22,6 @@ $([[ $TARGET =~ arm ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -Clink-arg=-lgcc -Clin # Strip symbols while compiling in release mode $([[ $@ =~ -r ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -Clink-arg=-s"') -# Support a rustc wrapper like sccache when cross-compiling -ENV RUSTC_WRAPPER="$RUSTC_WRAPPER" - # Make sure that rust-bindgen uses the correct include path when cross-compiling # See https://github.com/rust-lang/rust-bindgen#environment-variables for more information ENV BINDGEN_EXTRA_CLANG_ARGS="-I\$TARGET_PREFIX/include" From c7560b3502d27a49f935c347458df6421459c485 Mon Sep 17 00:00:00 2001 From: Maxim De Clercq Date: Sun, 23 Jan 2022 18:09:14 +0100 Subject: [PATCH 124/445] fix: remove libgcc dependency in ci builds since the binary is ensured to be statically compiled --- docker/ci-binaries-packaging.Dockerfile | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/docker/ci-binaries-packaging.Dockerfile b/docker/ci-binaries-packaging.Dockerfile index f4603105..a6339be3 100644 --- a/docker/ci-binaries-packaging.Dockerfile +++ b/docker/ci-binaries-packaging.Dockerfile @@ -9,6 +9,7 @@ FROM docker.io/alpine:3.15.0 AS runner + # Standard port on which Conduit launches. # You still need to map the port when using the docker command or docker-compose. EXPOSE 6167 @@ -18,10 +19,8 @@ ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" # Conduit needs: # ca-certificates: for https -# libgcc: Apparently this is needed, even if I (@jfowl) don't know exactly why. But whatever, it's not that big. 
RUN apk add --no-cache \ - ca-certificates \ - libgcc + ca-certificates ARG CREATED From 64c25ea4a15739f75ebe2811e84dc00280ba5fb0 Mon Sep 17 00:00:00 2001 From: Maxim De Clercq Date: Sun, 23 Jan 2022 18:31:40 +0100 Subject: [PATCH 125/445] fix: always print ELF information --- .gitlab-ci.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index fac678cf..defd66ee 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -50,7 +50,8 @@ variables: - 'time ./cross/build.sh --locked --release' - 'mv "target/$TARGET/release/conduit" "conduit-$TARGET"' # assert that the binary is statically linked - - 'file conduit-$TARGET | grep "static\(-pie\|ally\) linked"' + - 'ldd conduit-$TARGET' # print linking information + - 'file conduit-$TARGET | sed -e "/static\(-pie\|ally\) linked/!q1"' # print elf information cache: # https://doc.rust-lang.org/cargo/guide/cargo-home.html#caching-the-cargo-home-in-ci key: 'cargo-cache-$TARGET' @@ -125,7 +126,8 @@ build:release:cargo:aarch64-unknown-linux-musl: - 'time ./cross/build.sh --locked' - 'mv "target/$TARGET/debug/conduit" "conduit-debug-$TARGET"' # assert that the binary is statically linked - - 'file conduit-debug-$TARGET | grep "static\(-pie\|ally\) linked"' + - 'ldd conduit-debug-$TARGET' # print linking information + - 'file conduit-debug-$TARGET | sed -e "/static\(-pie\|ally\) linked/!q1"' # print elf information artifacts: expire_in: 4 weeks From 77ad4cb8f8c69b563c890494b5d203d96195253d Mon Sep 17 00:00:00 2001 From: Maxim De Clercq Date: Sun, 23 Jan 2022 19:24:36 +0100 Subject: [PATCH 126/445] fix: use readelf for checking static compilation --- .gitlab-ci.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index defd66ee..9e584a2a 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -49,9 +49,9 @@ variables: # cross-compile conduit for target - 'time ./cross/build.sh --locked --release' - 'mv "target/$TARGET/release/conduit" "conduit-$TARGET"' - # assert that the binary is statically linked - - 'ldd conduit-$TARGET' # print linking information - - 'file conduit-$TARGET | sed -e "/static\(-pie\|ally\) linked/!q1"' # print elf information + # print information about linking for debugging + - 'file conduit-$TARGET' # print file information + - 'readelf --dynamic conduit-$TARGET | sed -e "/NEEDED/q1"' # ensure statically linked cache: # https://doc.rust-lang.org/cargo/guide/cargo-home.html#caching-the-cargo-home-in-ci key: 'cargo-cache-$TARGET' @@ -125,9 +125,9 @@ build:release:cargo:aarch64-unknown-linux-musl: # cross-compile conduit for target - 'time ./cross/build.sh --locked' - 'mv "target/$TARGET/debug/conduit" "conduit-debug-$TARGET"' - # assert that the binary is statically linked - - 'ldd conduit-debug-$TARGET' # print linking information - - 'file conduit-debug-$TARGET | sed -e "/static\(-pie\|ally\) linked/!q1"' # print elf information + # print information about linking for debugging + - 'file conduit-debug-$TARGET' # print file information + - 'readelf --dynamic conduit-debug-$TARGET | sed -e "/NEEDED/q1"' # ensure statically linked artifacts: expire_in: 4 weeks From 067fcfc0e40ced1c2ed28f32b04940c6e74d6f5a Mon Sep 17 00:00:00 2001 From: Maxim De Clercq Date: Sun, 23 Jan 2022 21:19:19 +0100 Subject: [PATCH 127/445] fix: remove trailing slash from shared path --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 9e584a2a..aac773c2 100644 --- a/.gitlab-ci.yml +++ 
b/.gitlab-ci.yml @@ -28,7 +28,7 @@ variables: DOCKER_HOST: tcp://docker:2375/ DOCKER_TLS_CERTDIR: "" DOCKER_DRIVER: overlay2 - SHARED_PATH: $CI_PROJECT_DIR/shared/ + SHARED_PATH: $CI_PROJECT_DIR/shared CARGO_PROFILE_RELEASE_LTO: "true" CARGO_PROFILE_RELEASE_CODEGEN_UNITS: "1" CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow From acf1585fc35e7851df9c30208f654d5c085267d6 Mon Sep 17 00:00:00 2001 From: Maxim De Clercq Date: Mon, 24 Jan 2022 11:45:07 +0100 Subject: [PATCH 128/445] fix: make sure that libatomic is linked statically --- cross/build.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cross/build.sh b/cross/build.sh index 4a6d4493..8f64ff87 100755 --- a/cross/build.sh +++ b/cross/build.sh @@ -18,7 +18,7 @@ ENV TARGET_PREFIX="/usr/local/$(echo "${TARGET:?}" | sed -e 's/armv7/arm/' -e 's # See https://github.com/alexcrichton/cc-rs#external-configuration-via-environment-variables for more information ENV RUSTFLAGS="-L\$TARGET_PREFIX/lib" CXXSTDLIB="static=stdc++" # Forcefully linking against libatomic, libc and libgcc is required for arm32, otherwise symbols are missing -$([[ $TARGET =~ arm ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -Clink-arg=-lgcc -Clink-arg=-latomic -lstatic=c"') +$([[ $TARGET =~ arm ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -Clink-arg=-static-libgcc -Clink-arg=-lgcc -lstatic=atomic -lstatic=c"') # Strip symbols while compiling in release mode $([[ $@ =~ -r ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -Clink-arg=-s"') From ff167299766d41c361398243db6bf97e9d45fa65 Mon Sep 17 00:00:00 2001 From: Maxim De Clercq Date: Tue, 25 Jan 2022 22:36:51 +0100 Subject: [PATCH 129/445] fix: correct RUSTC_WRAPPER path in cross container --- .gitlab-ci.yml | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index aac773c2..741b5327 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -8,6 +8,10 @@ variables: GIT_SUBMODULE_STRATEGY: recursive FF_USE_FASTZIP: 1 CACHE_COMPRESSION_LEVEL: fastest + # Docker in Docker + DOCKER_HOST: tcp://docker:2375/ + DOCKER_TLS_CERTDIR: "" + DOCKER_DRIVER: overlay2 # --------------------------------------------------------------------- # # Cargo: Compiling for different architectures # @@ -25,9 +29,6 @@ variables: tags: ["docker"] services: ["docker:dind"] variables: - DOCKER_HOST: tcp://docker:2375/ - DOCKER_TLS_CERTDIR: "" - DOCKER_DRIVER: overlay2 SHARED_PATH: $CI_PROJECT_DIR/shared CARGO_PROFILE_RELEASE_LTO: "true" CARGO_PROFILE_RELEASE_CODEGEN_UNITS: "1" @@ -43,8 +44,9 @@ variables: - 'cp -r $CARGO_HOME/bin $SHARED_PATH/cargo' - 'cp -r $RUSTUP_HOME $SHARED_PATH' - 'export CARGO_HOME=$SHARED_PATH/cargo RUSTUP_HOME=$SHARED_PATH/rustup' - # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: - - if [ -n "${SCCACHE_BIN_URL}" ]; then export RUSTC_WRAPPER=$SHARED_PATH/cargo/bin/sccache && curl $SCCACHE_BIN_URL --output $RUSTC_WRAPPER && chmod +x $RUSTC_WRAPPER; fi + # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results. + # The sccache binary is stored in the sysroot of the rustc installation since that directory is added to the path of the cross container. 
+ - if [ -n "${SCCACHE_BIN_URL}" ]; then RUSTC_SYSROOT=$(rustc --print sysroot) && curl $SCCACHE_BIN_URL --output $RUSTC_SYSROOT/bin/sccache && chmod +x $RUSTC_SYSROOT/bin/sccache && export RUSTC_WRAPPER=sccache; fi script: # cross-compile conduit for target - 'time ./cross/build.sh --locked --release' @@ -157,9 +159,6 @@ build:debug:cargo:x86_64-unknown-linux-musl: - "build:release:cargo:armv7-unknown-linux-musleabihf" - "build:release:cargo:aarch64-unknown-linux-musl" variables: - DOCKER_HOST: tcp://docker:2375/ - DOCKER_TLS_CERTDIR: "" - DOCKER_DRIVER: overlay2 PLATFORMS: "linux/arm/v6,linux/arm/v7,linux/arm64,linux/amd64" DOCKER_FILE: "docker/ci-binaries-packaging.Dockerfile" cache: From 8472eff277faf55808dc794a8eb9023fafb75763 Mon Sep 17 00:00:00 2001 From: Andrej Kacian Date: Thu, 27 Jan 2022 00:25:20 +0100 Subject: [PATCH 130/445] Implement media download with custom filename --- src/client_server/media.rs | 64 +++++++++++++++++++++++++++++++++++++- src/main.rs | 1 + 2 files changed, 64 insertions(+), 1 deletion(-) diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 0a7f4bb5..2e3cf056 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -4,7 +4,10 @@ use crate::{ }; use ruma::api::client::{ error::ErrorKind, - r0::media::{create_content, get_content, get_content_thumbnail, get_media_config}, + r0::media::{ + create_content, get_content, get_content_as_filename, get_content_thumbnail, + get_media_config, + }, }; use std::convert::TryInto; @@ -129,6 +132,65 @@ pub async fn get_content_route( } } +/// # `GET /_matrix/media/r0/download/{serverName}/{mediaId}/{fileName}` +/// +/// Load media from our server or over federation, permitting desired filename. +/// +/// - Only allows federation if `allow_remote` is true +#[cfg_attr( + feature = "conduit_bin", + get("/_matrix/media/r0/download/<_>/<_>/<_>", data = "") +)] +#[tracing::instrument(skip(db, body))] +pub async fn get_content_as_filename_route( + db: DatabaseGuard, + body: Ruma>, +) -> ConduitResult { + let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); + + if let Some(FileMeta { + content_disposition: _, + content_type, + file, + }) = db.media.get(&db.globals, &mxc).await? + { + Ok(get_content_as_filename::Response { + file, + content_type, + content_disposition: Some(format!("inline; filename={}", body.filename)), + } + .into()) + } else if &*body.server_name != db.globals.server_name() && body.allow_remote { + let get_content_response = db + .sending + .send_federation_request( + &db.globals, + &body.server_name, + get_content_as_filename::Request { + allow_remote: false, + server_name: &body.server_name, + media_id: &body.media_id, + filename: &body.filename, + }, + ) + .await?; + + db.media + .create( + mxc, + &db.globals, + &get_content_response.content_disposition.as_deref(), + &get_content_response.content_type.as_deref(), + &get_content_response.file, + ) + .await?; + + Ok(get_content_response.into()) + } else { + Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) + } +} + /// # `POST /_matrix/media/r0/thumbnail/{serverName}/{mediaId}` /// /// Load media thumbnail from our server or over federation. 
diff --git a/src/main.rs b/src/main.rs index 56faa3e7..514c2448 100644 --- a/src/main.rs +++ b/src/main.rs @@ -136,6 +136,7 @@ fn setup_rocket(config: Figment, data: Arc>) -> rocket::Rocket< client_server::send_event_to_device_route, client_server::get_media_config_route, client_server::create_content_route, + client_server::get_content_as_filename_route, client_server::get_content_route, client_server::get_content_thumbnail_route, client_server::get_devices_route, From 52873c88b7e204f5b3c9295448c15780fff8084c Mon Sep 17 00:00:00 2001 From: Andrej Kacian Date: Thu, 27 Jan 2022 00:27:13 +0100 Subject: [PATCH 131/445] Fix incorrect HTTP method in doc comments of two media routes --- src/client_server/media.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 2e3cf056..5b196dff 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -74,7 +74,7 @@ pub async fn create_content_route( .into()) } -/// # `POST /_matrix/media/r0/download/{serverName}/{mediaId}` +/// # `GET /_matrix/media/r0/download/{serverName}/{mediaId}` /// /// Load media from our server or over federation. /// @@ -191,7 +191,7 @@ pub async fn get_content_as_filename_route( } } -/// # `POST /_matrix/media/r0/thumbnail/{serverName}/{mediaId}` +/// # `GET /_matrix/media/r0/thumbnail/{serverName}/{mediaId}` /// /// Load media thumbnail from our server or over federation. /// From 9c2000cb8973894512940b96996a2b5937f5cc8f Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 27 Jan 2022 16:17:55 +0100 Subject: [PATCH 132/445] Upgrade Ruma --- Cargo.lock | 36 ++++++++++++++++++------------------ Cargo.toml | 2 +- src/database/key_backups.rs | 29 ++++++++--------------------- 3 files changed, 27 insertions(+), 40 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 493ac082..794a0257 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2107,7 +2107,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "assign", "js_int", @@ -2128,7 +2128,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "bytes", "http", @@ -2144,7 +2144,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2155,7 +2155,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "ruma-api", "ruma-common", @@ -2169,7 +2169,7 @@ dependencies = [ [[package]] name = 
"ruma-client-api" version = "0.12.3" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "assign", "bytes", @@ -2189,7 +2189,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "indexmap", "js_int", @@ -2204,7 +2204,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "indoc", "js_int", @@ -2221,7 +2221,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2232,7 +2232,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "js_int", "ruma-api", @@ -2247,7 +2247,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "percent-encoding", "rand 0.8.4", @@ -2262,7 +2262,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2272,7 +2272,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "thiserror", ] @@ -2280,7 +2280,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "js_int", 
"ruma-api", @@ -2293,7 +2293,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "js_int", "ruma-api", @@ -2308,7 +2308,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "base64 0.13.0", "bytes", @@ -2323,7 +2323,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2334,7 +2334,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2351,7 +2351,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.4.1" -source = "git+https://github.com/ruma/ruma?rev=08d60b3d376b63462f769d4b9bd3bbfb560d501a#08d60b3d376b63462f769d4b9bd3bbfb560d501a" +source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" dependencies = [ "itertools", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 78a4c8ff..9ba1ac05 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,7 +20,7 @@ rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle request # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "08d60b3d376b63462f769d4b9bd3bbfb560d501a", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "82becb86c837570224964425929d1b5305784435", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/database/key_backups.rs 
b/src/database/key_backups.rs index b74bc408..2eefe481 100644 --- a/src/database/key_backups.rs +++ b/src/database/key_backups.rs @@ -7,7 +7,6 @@ use ruma::{ serde::Raw, RoomId, UserId, }; -use serde_json::json; use std::{collections::BTreeMap, sync::Arc}; use super::abstraction::Tree; @@ -212,13 +211,13 @@ impl KeyBackups { &self, user_id: &UserId, version: &str, - ) -> Result, Raw>> { + ) -> Result, RoomKeyBackup>> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(version.as_bytes()); prefix.push(0xff); - let mut rooms = BTreeMap::, Raw>::new(); + let mut rooms = BTreeMap::, RoomKeyBackup>::new(); for result in self .backupkeyid_backup @@ -244,7 +243,7 @@ impl KeyBackups { Error::bad_database("backupkeyid_backup room_id is invalid room id.") })?; - let key_data: serde_json::Value = serde_json::from_slice(&value).map_err(|_| { + let key_data = serde_json::from_slice(&value).map_err(|_| { Error::bad_database("KeyBackupData in backupkeyid_backup is invalid.") })?; @@ -252,25 +251,13 @@ impl KeyBackups { }) { let (room_id, session_id, key_data) = result?; - let room_key_backup = rooms.entry(room_id).or_insert_with(|| { - Raw::new(&RoomKeyBackup { + rooms + .entry(room_id) + .or_insert_with(|| RoomKeyBackup { sessions: BTreeMap::new(), }) - .expect("RoomKeyBackup serialization") - }); - - let mut object = room_key_backup - .deserialize_as::>() - .map_err(|_| Error::bad_database("RoomKeyBackup is not an object"))?; - - let sessions = object.entry("session").or_insert_with(|| json!({})); - if let serde_json::Value::Object(unsigned_object) = sessions { - unsigned_object.insert(session_id, key_data); - } - - *room_key_backup = Raw::from_json( - serde_json::value::to_raw_value(&object).expect("Value => RawValue serialization"), - ); + .sessions + .insert(session_id, key_data); } Ok(rooms) From c4317a7a962fcc4b41b1abfc273034d9827a6563 Mon Sep 17 00:00:00 2001 From: Andrej Kacian Date: Thu, 27 Jan 2022 16:12:39 +0100 Subject: [PATCH 133/445] Reduce code duplication in media download route handlers --- src/client_server/media.rs | 99 ++++++++++++++++++++------------------ 1 file changed, 51 insertions(+), 48 deletions(-) diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 5b196dff..bd73cff5 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -74,6 +74,38 @@ pub async fn create_content_route( .into()) } +pub async fn get_remote_content( + db: &DatabaseGuard, + mxc: &str, + server_name: &ruma::ServerName, + media_id: &str +) -> ConduitResult { + let content_response = db + .sending + .send_federation_request( + &db.globals, + server_name, + get_content::Request { + allow_remote: false, + server_name, + media_id + }, + ) + .await?; + + db.media + .create( + mxc.to_string(), + &db.globals, + &content_response.content_disposition.as_deref(), + &content_response.content_type.as_deref(), + &content_response.file, + ) + .await?; + + Ok(content_response.into()) +} + /// # `GET /_matrix/media/r0/download/{serverName}/{mediaId}` /// /// Load media from our server or over federation. 
@@ -103,30 +135,13 @@ pub async fn get_content_route( } .into()) } else if &*body.server_name != db.globals.server_name() && body.allow_remote { - let get_content_response = db - .sending - .send_federation_request( - &db.globals, - &body.server_name, - get_content::Request { - allow_remote: false, - server_name: &body.server_name, - media_id: &body.media_id, - }, - ) - .await?; - - db.media - .create( - mxc, - &db.globals, - &get_content_response.content_disposition.as_deref(), - &get_content_response.content_type.as_deref(), - &get_content_response.file, - ) - .await?; - - Ok(get_content_response.into()) + let remote_content_response = get_remote_content( + &db, + &mxc, + &body.server_name, + &body.media_id + ).await?; + Ok(remote_content_response) } else { Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) } @@ -161,31 +176,19 @@ pub async fn get_content_as_filename_route( } .into()) } else if &*body.server_name != db.globals.server_name() && body.allow_remote { - let get_content_response = db - .sending - .send_federation_request( - &db.globals, - &body.server_name, - get_content_as_filename::Request { - allow_remote: false, - server_name: &body.server_name, - media_id: &body.media_id, - filename: &body.filename, - }, - ) - .await?; - - db.media - .create( - mxc, - &db.globals, - &get_content_response.content_disposition.as_deref(), - &get_content_response.content_type.as_deref(), - &get_content_response.file, - ) - .await?; + let remote_content_response = get_remote_content( + &db, + &mxc, + &body.server_name, + &body.media_id + ).await?; - Ok(get_content_response.into()) + Ok(get_content_as_filename::Response { + content_disposition: Some(format!("inline: filename={}", body.filename)), + content_type: remote_content_response.0.content_type, + file: remote_content_response.0.file + } + .into()) } else { Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) } From ccfc243c2c1fec9b859ab6accec1246fa63aef94 Mon Sep 17 00:00:00 2001 From: Andrej Kacian Date: Thu, 27 Jan 2022 17:00:08 +0100 Subject: [PATCH 134/445] Make get_remote_content() return Result instead of ConduitResult --- src/client_server/media.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/client_server/media.rs b/src/client_server/media.rs index bd73cff5..dd8e7b0f 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -79,7 +79,7 @@ pub async fn get_remote_content( mxc: &str, server_name: &ruma::ServerName, media_id: &str -) -> ConduitResult { +) -> Result { let content_response = db .sending .send_federation_request( @@ -103,7 +103,7 @@ pub async fn get_remote_content( ) .await?; - Ok(content_response.into()) + Ok(content_response) } /// # `GET /_matrix/media/r0/download/{serverName}/{mediaId}` @@ -141,7 +141,7 @@ pub async fn get_content_route( &body.server_name, &body.media_id ).await?; - Ok(remote_content_response) + Ok(remote_content_response.into()) } else { Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) } @@ -185,8 +185,8 @@ pub async fn get_content_as_filename_route( Ok(get_content_as_filename::Response { content_disposition: Some(format!("inline: filename={}", body.filename)), - content_type: remote_content_response.0.content_type, - file: remote_content_response.0.file + content_type: remote_content_response.content_type, + file: remote_content_response.file } .into()) } else { From 0f6d232cb1117d959a1984551ee2872558172767 Mon Sep 17 00:00:00 2001 From: Andrej Kacian Date: Thu, 27 Jan 2022 17:08:04 +0100 Subject: [PATCH 
135/445] Style fixes from 'cargo fmt' --- src/client_server/media.rs | 22 +++++++--------------- 1 file changed, 7 insertions(+), 15 deletions(-) diff --git a/src/client_server/media.rs b/src/client_server/media.rs index dd8e7b0f..a827d64f 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -78,7 +78,7 @@ pub async fn get_remote_content( db: &DatabaseGuard, mxc: &str, server_name: &ruma::ServerName, - media_id: &str + media_id: &str, ) -> Result { let content_response = db .sending @@ -88,7 +88,7 @@ pub async fn get_remote_content( get_content::Request { allow_remote: false, server_name, - media_id + media_id, }, ) .await?; @@ -135,12 +135,8 @@ pub async fn get_content_route( } .into()) } else if &*body.server_name != db.globals.server_name() && body.allow_remote { - let remote_content_response = get_remote_content( - &db, - &mxc, - &body.server_name, - &body.media_id - ).await?; + let remote_content_response = + get_remote_content(&db, &mxc, &body.server_name, &body.media_id).await?; Ok(remote_content_response.into()) } else { Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) @@ -176,17 +172,13 @@ pub async fn get_content_as_filename_route( } .into()) } else if &*body.server_name != db.globals.server_name() && body.allow_remote { - let remote_content_response = get_remote_content( - &db, - &mxc, - &body.server_name, - &body.media_id - ).await?; + let remote_content_response = + get_remote_content(&db, &mxc, &body.server_name, &body.media_id).await?; Ok(get_content_as_filename::Response { content_disposition: Some(format!("inline: filename={}", body.filename)), content_type: remote_content_response.content_type, - file: remote_content_response.file + file: remote_content_response.file, } .into()) } else { From f8d1c1a8af122d8955b4b08fa564723badbb3f77 Mon Sep 17 00:00:00 2001 From: "Aode (Lion)" Date: Mon, 24 Jan 2022 18:42:15 -0600 Subject: [PATCH 136/445] Re-use a basic request in all possible cases --- src/appservice_server.rs | 6 +----- src/database/globals.rs | 40 ++++++++++++++++++++++++++++++---------- src/database/pusher.rs | 6 +----- src/server_server.rs | 23 ++++++++++++----------- 4 files changed, 44 insertions(+), 31 deletions(-) diff --git a/src/appservice_server.rs b/src/appservice_server.rs index ed886d6c..a5d795f6 100644 --- a/src/appservice_server.rs +++ b/src/appservice_server.rs @@ -46,11 +46,7 @@ where *reqwest_request.timeout_mut() = Some(Duration::from_secs(30)); let url = reqwest_request.url().clone(); - let mut response = globals - .reqwest_client()? - .build()? 
- .execute(reqwest_request) - .await?; + let mut response = globals.reqwest_client().execute(reqwest_request).await?; // reqwest::Response -> http::Response conversion let status = response.status(); diff --git a/src/database/globals.rs b/src/database/globals.rs index 098d8197..da91c1fb 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -39,6 +39,7 @@ pub struct Globals { keypair: Arc, dns_resolver: TokioAsyncResolver, jwt_decoding_key: Option>, + basic_client: reqwest::Client, pub(super) server_signingkeys: Arc, pub bad_event_ratelimiter: Arc, RateLimitState>>>, pub bad_signature_ratelimiter: Arc, RateLimitState>>>, @@ -132,6 +133,8 @@ impl Globals { .as_ref() .map(|secret| jsonwebtoken::DecodingKey::from_secret(secret.as_bytes()).into_static()); + let basic_client = reqwest_client_builder(&config, None)?.build()?; + let s = Self { globals, config, @@ -141,6 +144,7 @@ impl Globals { })?, actual_destination_cache: Arc::new(RwLock::new(WellKnownMap::new())), tls_name_override, + basic_client, server_signingkeys, jwt_decoding_key, bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())), @@ -163,17 +167,15 @@ impl Globals { &self.keypair } - /// Returns a reqwest client which can be used to send requests. - pub fn reqwest_client(&self) -> Result { - let mut reqwest_client_builder = reqwest::Client::builder() - .connect_timeout(Duration::from_secs(30)) - .timeout(Duration::from_secs(60 * 3)) - .pool_max_idle_per_host(1); - if let Some(proxy) = self.config.proxy.to_proxy()? { - reqwest_client_builder = reqwest_client_builder.proxy(proxy); - } + /// Returns a reqwest client which can be used to send requests + pub fn reqwest_client(&self) -> reqwest::Client { + // can't return &Client or else we'll hold a lock around the DB across an await + self.basic_client.clone() + } - Ok(reqwest_client_builder) + /// Returns a reqwest client builder which can be customized and used to send requests. + pub fn reqwest_client_builder(&self) -> Result { + reqwest_client_builder(&self.config, Some(1)) } #[tracing::instrument(skip(self))] @@ -340,3 +342,21 @@ impl Globals { r } } + +fn reqwest_client_builder( + config: &Config, + max_idle: Option, +) -> Result { + let mut reqwest_client_builder = reqwest::Client::builder() + .connect_timeout(Duration::from_secs(30)) + .timeout(Duration::from_secs(60 * 3)); + + if let Some(max_idle) = max_idle { + reqwest_client_builder = reqwest_client_builder.pool_max_idle_per_host(max_idle); + } + if let Some(proxy) = config.proxy.to_proxy()? { + reqwest_client_builder = reqwest_client_builder.proxy(proxy); + } + + Ok(reqwest_client_builder) +} diff --git a/src/database/pusher.rs b/src/database/pusher.rs index 97ca85d8..d63db1d7 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -115,11 +115,7 @@ where //*reqwest_request.timeout_mut() = Some(Duration::from_secs(5)); let url = reqwest_request.url().clone(); - let response = globals - .reqwest_client()? - .build()? 
- .execute(reqwest_request) - .await; + let response = globals.reqwest_client().execute(reqwest_request).await; match response { Ok(mut response) => { diff --git a/src/server_server.rs b/src/server_server.rs index 9129951b..205355f9 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -237,21 +237,25 @@ where let url = reqwest_request.url().clone(); - let mut client = globals.reqwest_client()?; - if let Some((override_name, port)) = globals + let client = if let Some((override_name, port)) = globals .tls_name_override .read() .unwrap() .get(&actual_destination.hostname()) { - client = client.resolve( - &actual_destination.hostname(), - SocketAddr::new(override_name[0], *port), - ); + globals + .reqwest_client_builder()? + .resolve( + &actual_destination.hostname(), + SocketAddr::new(override_name[0], *port), + ) + .build()? // port will be ignored - } + } else { + globals.reqwest_client() + }; - let response = client.build()?.execute(reqwest_request).await; + let response = client.execute(reqwest_request).await; match response { Ok(mut response) => { @@ -492,9 +496,6 @@ async fn request_well_known( let body: serde_json::Value = serde_json::from_str( &globals .reqwest_client() - .ok()? - .build() - .ok()? .get(&format!( "https://{}/.well-known/matrix/server", destination From 1059f35fdcf4942fd253748121c883ea38b427a7 Mon Sep 17 00:00:00 2001 From: "Aode (lion)" Date: Thu, 27 Jan 2022 10:19:28 -0600 Subject: [PATCH 137/445] use pre-constructed client for well-known requests also --- Cargo.lock | 3 +-- Cargo.toml | 2 +- src/database/globals.rs | 30 ++++++++++++++++++------------ src/server_server.rs | 20 +------------------- 4 files changed, 21 insertions(+), 34 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 794a0257..21c27700 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1950,8 +1950,7 @@ dependencies = [ [[package]] name = "reqwest" version = "0.11.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87f242f1488a539a79bac6dbe7c8609ae43b7914b7736210f239a37cccb32525" +source = "git+https://github.com/niuhuan/reqwest?branch=dns-resolver-fn#57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" dependencies = [ "base64 0.13.0", "bytes", diff --git a/Cargo.toml b/Cargo.toml index 9ba1ac05..974b4ce8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -48,7 +48,7 @@ rand = "0.8.4" # Used to hash passwords rust-argon2 = "0.8.3" # Used to send requests -reqwest = { version = "0.11.4", default-features = false, features = ["rustls-tls", "socks"] } +reqwest = { version = "0.11.4", default-features = false, features = ["rustls-tls", "socks"], git = "https://github.com/niuhuan/reqwest", branch = "dns-resolver-fn" } # Used for conduit::Error type thiserror = "1.0.28" # Used to generate thumbnails for images diff --git a/src/database/globals.rs b/src/database/globals.rs index da91c1fb..3278b7f6 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -10,7 +10,7 @@ use std::{ collections::{BTreeMap, HashMap}, fs, future::Future, - net::IpAddr, + net::{IpAddr, SocketAddr}, path::PathBuf, sync::{Arc, Mutex, RwLock}, time::{Duration, Instant}, @@ -39,6 +39,7 @@ pub struct Globals { keypair: Arc, dns_resolver: TokioAsyncResolver, jwt_decoding_key: Option>, + well_known_client: reqwest::Client, basic_client: reqwest::Client, pub(super) server_signingkeys: Arc, pub bad_event_ratelimiter: Arc, RateLimitState>>>, @@ -133,7 +134,16 @@ impl Globals { .as_ref() .map(|secret| jsonwebtoken::DecodingKey::from_secret(secret.as_bytes()).into_static()); - let basic_client = 
reqwest_client_builder(&config, None)?.build()?; + let basic_client = reqwest_client_builder(&config)?.build()?; + let name_override = Arc::clone(&tls_name_override); + let well_known_client = reqwest_client_builder(&config)? + .resolve_fn(move |domain| { + let read_guard = name_override.read().unwrap(); + let (override_name, port) = read_guard.get(&domain)?; + let first_name = override_name.get(0)?; + Some(SocketAddr::new(*first_name, *port)) + }) + .build()?; let s = Self { globals, @@ -144,6 +154,7 @@ impl Globals { })?, actual_destination_cache: Arc::new(RwLock::new(WellKnownMap::new())), tls_name_override, + well_known_client, basic_client, server_signingkeys, jwt_decoding_key, @@ -173,9 +184,10 @@ impl Globals { self.basic_client.clone() } - /// Returns a reqwest client builder which can be customized and used to send requests. - pub fn reqwest_client_builder(&self) -> Result { - reqwest_client_builder(&self.config, Some(1)) + /// Returns a client used for resolving .well-knowns + pub fn well_known_client(&self) -> reqwest::Client { + // can't return &Client or else we'll hold a lock around the DB across an await + self.well_known_client.clone() } #[tracing::instrument(skip(self))] @@ -343,17 +355,11 @@ impl Globals { } } -fn reqwest_client_builder( - config: &Config, - max_idle: Option, -) -> Result { +fn reqwest_client_builder(config: &Config) -> Result { let mut reqwest_client_builder = reqwest::Client::builder() .connect_timeout(Duration::from_secs(30)) .timeout(Duration::from_secs(60 * 3)); - if let Some(max_idle) = max_idle { - reqwest_client_builder = reqwest_client_builder.pool_max_idle_per_host(max_idle); - } if let Some(proxy) = config.proxy.to_proxy()? { reqwest_client_builder = reqwest_client_builder.proxy(proxy); } diff --git a/src/server_server.rs b/src/server_server.rs index 205355f9..978eb67f 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -237,25 +237,7 @@ where let url = reqwest_request.url().clone(); - let client = if let Some((override_name, port)) = globals - .tls_name_override - .read() - .unwrap() - .get(&actual_destination.hostname()) - { - globals - .reqwest_client_builder()? - .resolve( - &actual_destination.hostname(), - SocketAddr::new(override_name[0], *port), - ) - .build()? - // port will be ignored - } else { - globals.reqwest_client() - }; - - let response = client.execute(reqwest_request).await; + let response = globals.well_known_client().execute(reqwest_request).await; match response { Ok(mut response) => { From 529e88c7f9d8997a4fbbc84f98120d1b31d2e39e Mon Sep 17 00:00:00 2001 From: Andrej Kacian Date: Thu, 27 Jan 2022 17:47:09 +0100 Subject: [PATCH 138/445] Do not copy mxc string unnecessarily in db.get_thumbnail() --- src/client_server/media.rs | 2 +- src/database/media.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/client_server/media.rs b/src/client_server/media.rs index a827d64f..8524c57e 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -207,7 +207,7 @@ pub async fn get_content_thumbnail_route( }) = db .media .get_thumbnail( - mxc.clone(), + &mxc, &db.globals, body.width .try_into() diff --git a/src/database/media.rs b/src/database/media.rs index 46630131..a4bb4025 100644 --- a/src/database/media.rs +++ b/src/database/media.rs @@ -171,7 +171,7 @@ impl Media { /// For width,height <= 96 the server uses another thumbnailing algorithm which crops the image afterwards. 
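As a usage sketch of the behaviour described in the comment above, a client request for dimensions of 96 or less goes through the cropping thumbnailer (hostname and media ID are placeholders):

```sh
# Hypothetical example: ask for a 96x96 crop via the client thumbnail endpoint.
curl -o thumb.png \
  "https://conduit.example.com/_matrix/media/r0/thumbnail/matrix.org/AQwafuaFswefuhsfAFAgsw?width=96&height=96&method=crop"
```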
pub async fn get_thumbnail( &self, - mxc: String, + mxc: &str, globals: &Globals, width: u32, height: u32, From b39ddf7be9150b8baa6cecabed7730d2ab610a72 Mon Sep 17 00:00:00 2001 From: "Aode (lion)" Date: Fri, 28 Jan 2022 12:42:47 -0600 Subject: [PATCH 139/445] Rename reqwest clients, mention cheap client clones in comment --- src/appservice_server.rs | 2 +- src/database/globals.rs | 24 ++++++++++++------------ src/database/pusher.rs | 2 +- src/server_server.rs | 4 ++-- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/src/appservice_server.rs b/src/appservice_server.rs index a5d795f6..e78fb344 100644 --- a/src/appservice_server.rs +++ b/src/appservice_server.rs @@ -46,7 +46,7 @@ where *reqwest_request.timeout_mut() = Some(Duration::from_secs(30)); let url = reqwest_request.url().clone(); - let mut response = globals.reqwest_client().execute(reqwest_request).await?; + let mut response = globals.default_client().execute(reqwest_request).await?; // reqwest::Response -> http::Response conversion let status = response.status(); diff --git a/src/database/globals.rs b/src/database/globals.rs index 3278b7f6..decd84c3 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -39,8 +39,8 @@ pub struct Globals { keypair: Arc, dns_resolver: TokioAsyncResolver, jwt_decoding_key: Option>, - well_known_client: reqwest::Client, - basic_client: reqwest::Client, + federation_client: reqwest::Client, + default_client: reqwest::Client, pub(super) server_signingkeys: Arc, pub bad_event_ratelimiter: Arc, RateLimitState>>>, pub bad_signature_ratelimiter: Arc, RateLimitState>>>, @@ -134,9 +134,9 @@ impl Globals { .as_ref() .map(|secret| jsonwebtoken::DecodingKey::from_secret(secret.as_bytes()).into_static()); - let basic_client = reqwest_client_builder(&config)?.build()?; + let default_client = reqwest_client_builder(&config)?.build()?; let name_override = Arc::clone(&tls_name_override); - let well_known_client = reqwest_client_builder(&config)? + let federation_client = reqwest_client_builder(&config)? 
.resolve_fn(move |domain| { let read_guard = name_override.read().unwrap(); let (override_name, port) = read_guard.get(&domain)?; @@ -154,8 +154,8 @@ impl Globals { })?, actual_destination_cache: Arc::new(RwLock::new(WellKnownMap::new())), tls_name_override, - well_known_client, - basic_client, + federation_client, + default_client, server_signingkeys, jwt_decoding_key, bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())), @@ -179,15 +179,15 @@ impl Globals { } /// Returns a reqwest client which can be used to send requests - pub fn reqwest_client(&self) -> reqwest::Client { - // can't return &Client or else we'll hold a lock around the DB across an await - self.basic_client.clone() + pub fn default_client(&self) -> reqwest::Client { + // Client is cheap to clone (Arc wrapper) and avoids lifetime issues + self.default_client.clone() } /// Returns a client used for resolving .well-knowns - pub fn well_known_client(&self) -> reqwest::Client { - // can't return &Client or else we'll hold a lock around the DB across an await - self.well_known_client.clone() + pub fn federation_client(&self) -> reqwest::Client { + // Client is cheap to clone (Arc wrapper) and avoids lifetime issues + self.federation_client.clone() } #[tracing::instrument(skip(self))] diff --git a/src/database/pusher.rs b/src/database/pusher.rs index d63db1d7..bbe85a8d 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -115,7 +115,7 @@ where //*reqwest_request.timeout_mut() = Some(Duration::from_secs(5)); let url = reqwest_request.url().clone(); - let response = globals.reqwest_client().execute(reqwest_request).await; + let response = globals.default_client().execute(reqwest_request).await; match response { Ok(mut response) => { diff --git a/src/server_server.rs b/src/server_server.rs index 978eb67f..c5e0b1a9 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -237,7 +237,7 @@ where let url = reqwest_request.url().clone(); - let response = globals.well_known_client().execute(reqwest_request).await; + let response = globals.federation_client().execute(reqwest_request).await; match response { Ok(mut response) => { @@ -477,7 +477,7 @@ async fn request_well_known( ) -> Option { let body: serde_json::Value = serde_json::from_str( &globals - .reqwest_client() + .default_client() .get(&format!( "https://{}/.well-known/matrix/server", destination From 44f7a85077e5c249ec618004b0386f3d66f01911 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Fri, 28 Jan 2022 22:19:19 +0100 Subject: [PATCH 140/445] fix: Use default port for healthcheck as fallback Conduit can start without a specific port being configured. This adjusts the healthcheck script to tolerate that state. Closes https://gitlab.com/famedly/conduit/-/issues/222 --- docker/healthcheck.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/docker/healthcheck.sh b/docker/healthcheck.sh index 7ca04602..efc94917 100644 --- a/docker/healthcheck.sh +++ b/docker/healthcheck.sh @@ -1,10 +1,15 @@ #!/bin/sh # If the port is not specified as env var, take it from the config file -if [ -z ${CONDUIT_PORT} ]; then +if [ -z "${CONDUIT_PORT}" ]; then CONDUIT_PORT=$(grep -m1 -o 'port\s=\s[0-9]*' conduit.toml | grep -m1 -o '[0-9]*') fi +# If the config file also does not contain a default port, just use the default one: 6167. +if [ -z "${CONDUIT_PORT}" ]; then + CONDUIT_PORT=6167 +fi + # The actual health check. # We try to first get a response on HTTP and when that fails on HTTPS and when that fails, we exit with code 1. 
# TODO: Change this to a single wget call. Do we have a config value that we can check for that? From 401b88d16d43f0c58e5a4ccf777815fd8d538ff8 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Fri, 28 Jan 2022 23:23:58 +0100 Subject: [PATCH 141/445] fix: Healtcheck use netstat for port as fallback --- docker/healthcheck.sh | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/docker/healthcheck.sh b/docker/healthcheck.sh index efc94917..df7f18a5 100644 --- a/docker/healthcheck.sh +++ b/docker/healthcheck.sh @@ -1,13 +1,9 @@ #!/bin/sh -# If the port is not specified as env var, take it from the config file +# If the config file does not contain a default port and the CONDUIT_PORT env is not set, create +# try to get port from process list if [ -z "${CONDUIT_PORT}" ]; then - CONDUIT_PORT=$(grep -m1 -o 'port\s=\s[0-9]*' conduit.toml | grep -m1 -o '[0-9]*') -fi - -# If the config file also does not contain a default port, just use the default one: 6167. -if [ -z "${CONDUIT_PORT}" ]; then - CONDUIT_PORT=6167 + CONDUIT_PORT=$(netstat -tlp | grep conduit | grep -m1 -o ':[0-9]*' | grep -m1 -o '[0-9]*') fi # The actual health check. From 8ff95a5a48c055549b3652a33faddc3d89351d91 Mon Sep 17 00:00:00 2001 From: user Date: Fri, 28 Jan 2022 22:26:56 -0800 Subject: [PATCH 142/445] fix: mention dependencies to build from source --- DEPLOY.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/DEPLOY.md b/DEPLOY.md index 38e1e286..d9f91e03 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -29,7 +29,11 @@ $ sudo wget -O /usr/local/bin/matrix-conduit $ sudo chmod +x /usr/local/bin/matrix-conduit ``` -Alternatively, you may compile the binary yourself using +Alternatively, you may compile the binary yourself + +```bash +$ sudo apt install libclang-dev build-essential +``` ```bash $ cargo build --release From 677f044d13985f794afecdb0bbf62fbab3a52dec Mon Sep 17 00:00:00 2001 From: Andrei Vasiliu Date: Sun, 30 Jan 2022 23:15:53 +0200 Subject: [PATCH 143/445] Refactor admin code to always defer command processing --- src/client_server/report.rs | 12 +- src/database/admin.rs | 300 ++++++++++++++++++------------------ src/database/rooms.rs | 8 +- 3 files changed, 153 insertions(+), 167 deletions(-) diff --git a/src/client_server/report.rs b/src/client_server/report.rs index ae069849..032e446c 100644 --- a/src/client_server/report.rs +++ b/src/client_server/report.rs @@ -1,7 +1,4 @@ -use crate::{ - database::{admin::AdminCommand, DatabaseGuard}, - ConduitResult, Error, Ruma, -}; +use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; use ruma::{ api::client::{error::ErrorKind, r0::room::report_content}, events::room::message, @@ -50,8 +47,8 @@ pub async fn report_event_route( )); }; - db.admin.send(AdminCommand::SendMessage( - message::RoomMessageEventContent::text_html( + db.admin + .send_message(message::RoomMessageEventContent::text_html( format!( "Report received from: {}\n\n\ Event ID: {}\n\ @@ -75,8 +72,7 @@ pub async fn report_event_route( body.score, RawStr::new(&body.reason).html_escape() ), - ), - )); + )); db.flush()?; diff --git a/src/database/admin.rs b/src/database/admin.rs index dbd20e44..ea08f65a 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -19,25 +19,21 @@ use serde_json::value::to_raw_value; use tokio::sync::{MutexGuard, RwLock, RwLockReadGuard}; use tracing::warn; -pub enum AdminCommand { - RegisterAppservice(serde_yaml::Value), - UnregisterAppservice(String), - ListAppservices, - ListLocalUsers, - ShowMemoryUsage, +pub enum 
AdminRoomEvent { + ProcessMessage(String), SendMessage(RoomMessageEventContent), } #[derive(Clone)] pub struct Admin { - pub sender: mpsc::UnboundedSender, + pub sender: mpsc::UnboundedSender, } impl Admin { pub fn start_handler( &self, db: Arc>, - mut receiver: mpsc::UnboundedReceiver, + mut receiver: mpsc::UnboundedReceiver, ) { tokio::spawn(async move { // TODO: Use futures when we have long admin commands @@ -56,7 +52,7 @@ impl Admin { .try_into() .expect("#admins:server_name is a valid room alias"), ) - .unwrap(); + .expect("Admin room must exist"); let conduit_room = match conduit_room { None => { @@ -105,46 +101,13 @@ impl Admin { let state_lock = mutex_state.lock().await; match event { - AdminCommand::ListLocalUsers => { - match guard.users.list_local_users() { - Ok(users) => { - let mut msg: String = format!("Found {} local user account(s):\n", users.len()); - msg += &users.join("\n"); - send_message(RoomMessageEventContent::text_plain(&msg), guard, &state_lock); - } - Err(e) => { - send_message(RoomMessageEventContent::text_plain(e.to_string()), guard, &state_lock); - } - } - } - AdminCommand::RegisterAppservice(yaml) => { - guard.appservice.register_appservice(yaml).unwrap(); // TODO handle error - } - AdminCommand::UnregisterAppservice(service_name) => { - guard.appservice.unregister_appservice(&service_name).unwrap(); // TODO: see above + AdminRoomEvent::SendMessage(content) => { + send_message(content, guard, &state_lock); } - AdminCommand::ListAppservices => { - if let Ok(appservices) = guard.appservice.iter_ids().map(|ids| ids.collect::>()) { - let count = appservices.len(); - let output = format!( - "Appservices ({}): {}", - count, - appservices.into_iter().filter_map(|r| r.ok()).collect::>().join(", ") - ); - send_message(RoomMessageEventContent::text_plain(output), guard, &state_lock); - } else { - send_message(RoomMessageEventContent::text_plain("Failed to get appservices."), guard, &state_lock); - } - } - AdminCommand::ShowMemoryUsage => { - if let Ok(response) = guard._db.memory_usage() { - send_message(RoomMessageEventContent::text_plain(response), guard, &state_lock); - } else { - send_message(RoomMessageEventContent::text_plain("Failed to get database memory usage.".to_owned()), guard, &state_lock); - } - } - AdminCommand::SendMessage(message) => { - send_message(message, guard, &state_lock); + AdminRoomEvent::ProcessMessage(room_message) => { + let reply_message = process_admin_message(&*guard, room_message); + + send_message(reply_message, guard, &state_lock); } } @@ -155,67 +118,81 @@ impl Admin { }); } - pub fn send(&self, command: AdminCommand) { - self.sender.unbounded_send(command).unwrap(); + pub fn process_message(&self, room_message: String) { + self.sender + .unbounded_send(AdminRoomEvent::ProcessMessage(room_message)) + .unwrap(); + } + + pub fn send_message(&self, message_content: RoomMessageEventContent) { + self.sender + .unbounded_send(AdminRoomEvent::SendMessage(message_content)) + .unwrap(); } } -// Parse chat messages from the admin room into an AdminCommand object -pub fn parse_admin_command(db: &Database, command_line: &str, body: Vec<&str>) -> AdminCommand { - let mut argv: Vec<_> = command_line.split_whitespace().skip(1).collect(); - - let command_name = match argv.get(0) { - Some(command) => *command, - None => { - let markdown_message = "No command given. Use `help` for a list of commands."; - let html_message = "No command given. 
Use help for a list of commands."; - - return AdminCommand::SendMessage(RoomMessageEventContent::text_html( - markdown_message, - html_message, - )); - } - }; +// Parse and process a message from the admin room +pub fn process_admin_message(db: &Database, room_message: String) -> RoomMessageEventContent { + let mut lines = room_message.lines(); + let command_line = lines.next().expect("each string has at least one line"); + let body: Vec<_> = lines.collect(); - // Replace `help command` with `command --help` - // Clap has a help subcommand, but it omits the long help description. - if argv[0] == "help" { - argv.remove(0); - argv.push("--help"); - } + let admin_command = match parse_admin_command(&command_line) { + Ok(command) => command, + Err(error) => { + let message = error + .to_string() + .replace("example.com", db.globals.server_name().as_str()); + let html_message = usage_to_html(&message); - // Backwards compatibility with `register_appservice`-style commands - let command_with_dashes; - if argv[0].contains("_") { - command_with_dashes = argv[0].replace("_", "-"); - argv[0] = &command_with_dashes; - } + return RoomMessageEventContent::text_html(message, html_message); + } + }; - match try_parse_admin_command(db, argv, body) { - Ok(admin_command) => admin_command, + match process_admin_command(db, admin_command, body) { + Ok(reply_message) => reply_message, Err(error) => { let markdown_message = format!( - "Encountered an error while handling the `{}` command:\n\ + "Encountered an error while handling the command:\n\ ```\n{}\n```", - command_name, error, + error, ); let html_message = format!( - "Encountered an error while handling the {} command:\n\ + "Encountered an error while handling the command:\n\
                 <pre>\n{}\n</pre>
              ", - command_name, error, + error, ); - AdminCommand::SendMessage(RoomMessageEventContent::text_html( - markdown_message, - html_message, - )) + RoomMessageEventContent::text_html(markdown_message, html_message) } } } +// Parse chat messages from the admin room into an AdminCommand object +fn parse_admin_command(command_line: &str) -> std::result::Result { + // Note: argv[0] is `@conduit:servername:`, which is treated as the main command + let mut argv: Vec<_> = command_line.split_whitespace().collect(); + + // Replace `help command` with `command --help` + // Clap has a help subcommand, but it omits the long help description. + if argv.len() > 1 && argv[1] == "help" { + argv.remove(1); + argv.push("--help"); + } + + // Backwards compatibility with `register_appservice`-style commands + let command_with_dashes; + if argv.len() > 1 && argv[1].contains("_") { + command_with_dashes = argv[1].replace("_", "-"); + argv[1] = &command_with_dashes; + } + + AdminCommand::try_parse_from(argv).map_err(|error| error.to_string()) +} + #[derive(Parser)] #[clap(name = "@conduit:example.com", version = env!("CARGO_PKG_VERSION"))] -enum AdminCommands { +enum AdminCommand { #[clap(verbatim_doc_comment)] /// Register an appservice using its registration YAML /// @@ -264,49 +241,70 @@ enum AdminCommands { DatabaseMemoryUsage, } -pub fn try_parse_admin_command( +fn process_admin_command( db: &Database, - mut argv: Vec<&str>, + command: AdminCommand, body: Vec<&str>, -) -> Result { - argv.insert(0, "@conduit:example.com:"); - let command = match AdminCommands::try_parse_from(argv) { - Ok(command) => command, - Err(error) => { - let message = error - .to_string() - .replace("example.com", db.globals.server_name().as_str()); - let html_message = usage_to_html(&message); - - return Ok(AdminCommand::SendMessage( - RoomMessageEventContent::text_html(message, html_message), - )); - } - }; - - let admin_command = match command { - AdminCommands::RegisterAppservice => { +) -> Result { + let reply_message_content = match command { + AdminCommand::RegisterAppservice => { if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" { let appservice_config = body[1..body.len() - 1].join("\n"); let parsed_config = serde_yaml::from_str::(&appservice_config); match parsed_config { - Ok(yaml) => AdminCommand::RegisterAppservice(yaml), - Err(e) => AdminCommand::SendMessage(RoomMessageEventContent::text_plain( - format!("Could not parse appservice config: {}", e), + Ok(yaml) => match db.appservice.register_appservice(yaml) { + Ok(()) => RoomMessageEventContent::text_plain("Appservice registered."), + Err(e) => RoomMessageEventContent::text_plain(format!( + "Failed to register appservice: {}", + e + )), + }, + Err(e) => RoomMessageEventContent::text_plain(format!( + "Could not parse appservice config: {}", + e )), } } else { - AdminCommand::SendMessage(RoomMessageEventContent::text_plain( - "Expected code block in command body.", - )) + RoomMessageEventContent::text_plain( + "Expected code block in command body. 
Add --help for details.", + ) } } - AdminCommands::UnregisterAppservice { + AdminCommand::UnregisterAppservice { appservice_identifier, - } => AdminCommand::UnregisterAppservice(appservice_identifier), - AdminCommands::ListAppservices => AdminCommand::ListAppservices, - AdminCommands::ListLocalUsers => AdminCommand::ListLocalUsers, - AdminCommands::GetAuthChain { event_id } => { + } => match db.appservice.unregister_appservice(&appservice_identifier) { + Ok(()) => RoomMessageEventContent::text_plain("Appservice unregistered."), + Err(e) => RoomMessageEventContent::text_plain(format!( + "Failed to unregister appservice: {}", + e + )), + }, + AdminCommand::ListAppservices => { + if let Ok(appservices) = db.appservice.iter_ids().map(|ids| ids.collect::>()) { + let count = appservices.len(); + let output = format!( + "Appservices ({}): {}", + count, + appservices + .into_iter() + .filter_map(|r| r.ok()) + .collect::>() + .join(", ") + ); + RoomMessageEventContent::text_plain(output) + } else { + RoomMessageEventContent::text_plain("Failed to get appservices.") + } + } + AdminCommand::ListLocalUsers => match db.users.list_local_users() { + Ok(users) => { + let mut msg: String = format!("Found {} local user account(s):\n", users.len()); + msg += &users.join("\n"); + RoomMessageEventContent::text_plain(&msg) + } + Err(e) => RoomMessageEventContent::text_plain(e.to_string()), + }, + AdminCommand::GetAuthChain { event_id } => { let event_id = Arc::::from(event_id); if let Some(event) = db.rooms.get_pdu_json(&event_id)? { let room_id_str = event @@ -320,17 +318,15 @@ pub fn try_parse_admin_command( let start = Instant::now(); let count = server_server::get_auth_chain(room_id, vec![event_id], db)?.count(); let elapsed = start.elapsed(); - return Ok(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain(format!( - "Loaded auth chain with length {} in {:?}", - count, elapsed - )), - )); + RoomMessageEventContent::text_plain(format!( + "Loaded auth chain with length {} in {:?}", + count, elapsed + )) } else { - AdminCommand::SendMessage(RoomMessageEventContent::text_plain("Event not found.")) + RoomMessageEventContent::text_plain("Event not found.") } } - AdminCommands::ParsePdu => { + AdminCommand::ParsePdu => { if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" { let string = body[1..body.len() - 1].join("\n"); match serde_json::from_str(&string) { @@ -346,30 +342,26 @@ pub fn try_parse_admin_command( match serde_json::from_value::( serde_json::to_value(value).expect("value is json"), ) { - Ok(pdu) => { - AdminCommand::SendMessage(RoomMessageEventContent::text_plain( - format!("EventId: {:?}\n{:#?}", event_id, pdu), - )) - } - Err(e) => AdminCommand::SendMessage( - RoomMessageEventContent::text_plain(format!( - "EventId: {:?}\nCould not parse event: {}", - event_id, e - )), - ), + Ok(pdu) => RoomMessageEventContent::text_plain(format!( + "EventId: {:?}\n{:#?}", + event_id, pdu + )), + Err(e) => RoomMessageEventContent::text_plain(format!( + "EventId: {:?}\nCould not parse event: {}", + event_id, e + )), } } - Err(e) => AdminCommand::SendMessage(RoomMessageEventContent::text_plain( - format!("Invalid json in command body: {}", e), + Err(e) => RoomMessageEventContent::text_plain(format!( + "Invalid json in command body: {}", + e )), } } else { - AdminCommand::SendMessage(RoomMessageEventContent::text_plain( - "Expected code block in command body.", - )) + RoomMessageEventContent::text_plain("Expected code block in command body.") } } - AdminCommands::GetPdu { 
event_id } => { + AdminCommand::GetPdu { event_id } => { let mut outlier = false; let mut pdu_json = db.rooms.get_non_outlier_pdu_json(&event_id)?; if pdu_json.is_none() { @@ -380,7 +372,7 @@ pub fn try_parse_admin_command( Some(json) => { let json_text = serde_json::to_string_pretty(&json).expect("canonical json is valid json"); - AdminCommand::SendMessage(RoomMessageEventContent::text_html( + RoomMessageEventContent::text_html( format!( "{}\n```json\n{}\n```", if outlier { @@ -399,17 +391,21 @@ pub fn try_parse_admin_command( }, RawStr::new(&json_text).html_escape() ), - )) - } - None => { - AdminCommand::SendMessage(RoomMessageEventContent::text_plain("PDU not found.")) + ) } + None => RoomMessageEventContent::text_plain("PDU not found."), } } - AdminCommands::DatabaseMemoryUsage => AdminCommand::ShowMemoryUsage, + AdminCommand::DatabaseMemoryUsage => match db._db.memory_usage() { + Ok(response) => RoomMessageEventContent::text_plain(response), + Err(e) => RoomMessageEventContent::text_plain(format!( + "Failed to get database memory usage: {}", + e + )), + }, }; - Ok(admin_command) + Ok(reply_message_content) } // Utility to turn clap's `--help` text to HTML. diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 1f4566fe..2303b0dd 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -3,7 +3,6 @@ mod edus; pub use edus::RoomEdus; use crate::{ - database::admin::parse_admin_command, pdu::{EventHash, PduBuilder}, utils, Database, Error, PduEvent, Result, }; @@ -1490,12 +1489,7 @@ impl Rooms { .as_ref() == Some(&pdu.room_id) { - let mut lines = body.lines(); - let command_line = lines.next().expect("each string has at least one line"); - let body: Vec<_> = lines.collect(); - - let command = parse_admin_command(db, command_line, body); - db.admin.send(command); + db.admin.process_message(body.to_string()); } } } From cc13112592b1666e75b4e5d0d340d6124afe4071 Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Mon, 31 Jan 2022 09:27:31 +0100 Subject: [PATCH 144/445] Cleanup appservice events after removing the appservice --- src/database/admin.rs | 13 ++++++++++++- src/database/sending.rs | 31 +++++++++++++++++++++++++++++-- 2 files changed, 41 insertions(+), 3 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index 81e98393..9895a83b 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -112,7 +112,18 @@ impl Admin { guard.appservice.register_appservice(yaml).unwrap(); // TODO handle error } AdminCommand::UnregisterAppservice(service_name) => { - guard.appservice.unregister_appservice(&service_name).unwrap(); // TODO: see above + if let Ok(_) = guard.appservice.unregister_appservice(&service_name) { + if let Ok(_) = guard.sending.cleanup_events(&service_name) { + let msg: String = format!("OK. Appservice {} removed", service_name); + send_message(RoomMessageEventContent::text_plain(msg), guard, &state_lock); + } else { + let msg: String = format!("WARN: Appservice {} removed, but failed to cleanup events", service_name); + send_message(RoomMessageEventContent::text_plain(msg), guard, &state_lock); + } + } else { + let msg: String = format!("ERR. 
Appservice {} not removed", service_name); + send_message(RoomMessageEventContent::text_plain(msg), guard, &state_lock); + } } AdminCommand::ListAppservices => { if let Ok(appservices) = guard.appservice.iter_ids().map(|ids| ids.collect::>()) { diff --git a/src/database/sending.rs b/src/database/sending.rs index 69f7c444..af4ac676 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -480,6 +480,26 @@ impl Sending { hash.as_ref().to_owned() } + /// Cleanup event data + /// Used for instance after we remove an appservice registration + /// + #[tracing::instrument(skip(self))] + pub fn cleanup_events(&self, key_id: &str) -> Result<()> { + let mut prefix = b"+".to_vec(); + prefix.extend_from_slice(key_id.as_bytes()); + prefix.push(0xff); + + for (key, _) in self.servercurrentevent_data.scan_prefix(prefix.clone()) { + self.servercurrentevent_data.remove(&key).unwrap(); + } + + for (key, _) in self.servernameevent_data.scan_prefix(prefix.clone()) { + self.servernameevent_data.remove(&key).unwrap(); + } + + Ok(()) + } + #[tracing::instrument(skip(db, events, kind))] async fn handle_events( kind: OutgoingKind, @@ -520,8 +540,15 @@ impl Sending { &db.globals, db.appservice .get_registration(server.as_str()) - .unwrap() - .unwrap(), // TODO: handle error + .map_err(|e| (kind.clone(), e))? + .ok_or_else(|| { + ( + kind.clone(), + Error::bad_database( + "[Appservice] Could not load registration from db.", + ), + ) + })?, appservice::event::push_events::v1::Request { events: &pdu_jsons, txn_id: (&*base64::encode_config( From 78502aa6b10d97f3af3fe006fcdbd19b585d3b58 Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Mon, 31 Jan 2022 10:07:49 +0100 Subject: [PATCH 145/445] add error handling for register_appservice too --- src/database/admin.rs | 13 ++++++++++++- src/database/appservice.rs | 4 ++-- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index 9895a83b..eef6ce10 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -109,7 +109,18 @@ impl Admin { } } AdminCommand::RegisterAppservice(yaml) => { - guard.appservice.register_appservice(yaml).unwrap(); // TODO handle error + match guard.appservice.register_appservice(yaml) { + Ok(Some(id)) => { + let msg: String = format!("OK. Appservice {} created", id); + send_message(RoomMessageEventContent::text_plain(msg), guard, &state_lock); + } + Ok(None) => { + send_message(RoomMessageEventContent::text_plain("WARN. Appservice created, but its ID was not returned!"), guard, &state_lock); + } + Err(_) => { + send_message(RoomMessageEventContent::text_plain("ERR: Failed register appservice. 
Check server log"), guard, &state_lock); + } + } } AdminCommand::UnregisterAppservice(service_name) => { if let Ok(_) = guard.appservice.unregister_appservice(&service_name) { diff --git a/src/database/appservice.rs b/src/database/appservice.rs index 88de1f33..8b29aca9 100644 --- a/src/database/appservice.rs +++ b/src/database/appservice.rs @@ -12,7 +12,7 @@ pub struct Appservice { } impl Appservice { - pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result<()> { + pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result> { // TODO: Rumaify let id = yaml.get("id").unwrap().as_str().unwrap(); self.id_appserviceregistrations.insert( @@ -24,7 +24,7 @@ impl Appservice { .unwrap() .insert(id.to_owned(), yaml); - Ok(()) + Ok(Some(id.to_owned())) } /// Remove an appservice registration From 28d3b348d2fc23e6b2b78c468f682018ab472652 Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Mon, 31 Jan 2022 11:52:33 +0100 Subject: [PATCH 146/445] Return the ID of the appservice that was created by register_appservice --- src/database/admin.rs | 5 +---- src/database/appservice.rs | 8 +++++--- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index eef6ce10..a214796b 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -110,13 +110,10 @@ impl Admin { } AdminCommand::RegisterAppservice(yaml) => { match guard.appservice.register_appservice(yaml) { - Ok(Some(id)) => { + Ok(id) => { let msg: String = format!("OK. Appservice {} created", id); send_message(RoomMessageEventContent::text_plain(msg), guard, &state_lock); } - Ok(None) => { - send_message(RoomMessageEventContent::text_plain("WARN. Appservice created, but its ID was not returned!"), guard, &state_lock); - } Err(_) => { send_message(RoomMessageEventContent::text_plain("ERR: Failed register appservice. 
Check server log"), guard, &state_lock); } diff --git a/src/database/appservice.rs b/src/database/appservice.rs index 8b29aca9..edd5009b 100644 --- a/src/database/appservice.rs +++ b/src/database/appservice.rs @@ -12,7 +12,9 @@ pub struct Appservice { } impl Appservice { - pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result> { + /// Registers an appservice and returns the ID to the caller + /// + pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result { // TODO: Rumaify let id = yaml.get("id").unwrap().as_str().unwrap(); self.id_appserviceregistrations.insert( @@ -22,9 +24,9 @@ impl Appservice { self.cached_registrations .write() .unwrap() - .insert(id.to_owned(), yaml); + .insert(id.to_owned(), yaml.to_owned()); - Ok(Some(id.to_owned())) + Ok(id.to_owned()) } /// Remove an appservice registration From e17bbdd42d3245f2fb3730753f1feb51cd452207 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 28 Jan 2022 17:26:43 +0100 Subject: [PATCH 147/445] tests --- Cargo.toml | 2 +- src/database.rs | 14 +++++++------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 78a4c8ff..0089e7f8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -76,7 +76,7 @@ crossbeam = { version = "0.8.1", optional = true } num_cpus = "1.13.0" threadpool = "1.8.1" heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true } -rocksdb = { version = "0.17.0", default-features = false, features = ["multi-threaded-cf", "zstd"], optional = true } +rocksdb = { version = "0.17.0", default-features = true, features = ["multi-threaded-cf", "zstd"], optional = true } thread_local = "1.1.3" # used for TURN server authentication diff --git a/src/database.rs b/src/database.rs index 4f230f32..79b82088 100644 --- a/src/database.rs +++ b/src/database.rs @@ -130,7 +130,7 @@ fn default_db_cache_capacity_mb() -> f64 { } fn default_rocksdb_max_open_files() -> i32 { - 512 + 20 } fn default_pdu_cache_capacity() -> u32 { @@ -361,15 +361,15 @@ impl Database { .try_into() .expect("pdu cache capacity fits into usize"), )), - auth_chain_cache: Mutex::new(LruCache::new(1_000_000)), - shorteventid_cache: Mutex::new(LruCache::new(1_000_000)), - eventidshort_cache: Mutex::new(LruCache::new(1_000_000)), - shortstatekey_cache: Mutex::new(LruCache::new(1_000_000)), - statekeyshort_cache: Mutex::new(LruCache::new(1_000_000)), + auth_chain_cache: Mutex::new(LruCache::new(100_000)), + shorteventid_cache: Mutex::new(LruCache::new(100_000)), + eventidshort_cache: Mutex::new(LruCache::new(100_000)), + shortstatekey_cache: Mutex::new(LruCache::new(100_000)), + statekeyshort_cache: Mutex::new(LruCache::new(100_000)), our_real_users_cache: RwLock::new(HashMap::new()), appservice_in_room_cache: RwLock::new(HashMap::new()), lazy_load_waiting: Mutex::new(HashMap::new()), - stateinfo_cache: Mutex::new(LruCache::new(1000)), + stateinfo_cache: Mutex::new(LruCache::new(100)), }, account_data: account_data::AccountData { roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, From 23aecb78c7c5ba5872a058f806cd722787eefc10 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 31 Jan 2022 15:39:46 +0100 Subject: [PATCH 148/445] fix: use to_lowercase on /register/available username --- src/client_server/account.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index c4e118c9..80c6f702 100644 --- 
a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -60,7 +60,7 @@ pub async fn get_register_available_route( body: Ruma>, ) -> ConduitResult { // Validate user id - let user_id = UserId::parse_with_server_name(body.username.clone(), db.globals.server_name()) + let user_id = UserId::parse_with_server_name(body.username.to_lowercase(), db.globals.server_name()) .ok() .filter(|user_id| { !user_id.is_historical() && user_id.server_name() == db.globals.server_name() From caf9834e50b540fc48bf8cf50dc439c57c503de9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 1 Feb 2022 14:42:13 +0100 Subject: [PATCH 149/445] feat: cache capacity modifier --- src/database.rs | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/src/database.rs b/src/database.rs index 79b82088..449d71bd 100644 --- a/src/database.rs +++ b/src/database.rs @@ -49,6 +49,8 @@ pub struct Config { database_path: String, #[serde(default = "default_db_cache_capacity_mb")] db_cache_capacity_mb: f64, + #[serde(default = "default_conduit_cache_capacity_modifier")] + conduit_cache_capacity_modifier: f64, #[serde(default = "default_rocksdb_max_open_files")] rocksdb_max_open_files: i32, #[serde(default = "default_pdu_cache_capacity")] @@ -129,6 +131,10 @@ fn default_db_cache_capacity_mb() -> f64 { 10.0 } +fn default_conduit_cache_capacity_modifier() -> f64 { + 1.0 +} + fn default_rocksdb_max_open_files() -> i32 { 20 } @@ -361,15 +367,15 @@ impl Database { .try_into() .expect("pdu cache capacity fits into usize"), )), - auth_chain_cache: Mutex::new(LruCache::new(100_000)), - shorteventid_cache: Mutex::new(LruCache::new(100_000)), - eventidshort_cache: Mutex::new(LruCache::new(100_000)), - shortstatekey_cache: Mutex::new(LruCache::new(100_000)), - statekeyshort_cache: Mutex::new(LruCache::new(100_000)), + auth_chain_cache: Mutex::new(LruCache::new((100_000.0 * config.conduit_cache_capacity_modifier) as usize)), + shorteventid_cache: Mutex::new(LruCache::new((100_000.0 * config.conduit_cache_capacity_modifier) as usize)), + eventidshort_cache: Mutex::new(LruCache::new((100_000.0 * config.conduit_cache_capacity_modifier) as usize)), + shortstatekey_cache: Mutex::new(LruCache::new((100_000.0 * config.conduit_cache_capacity_modifier) as usize)), + statekeyshort_cache: Mutex::new(LruCache::new((100_000.0 * config.conduit_cache_capacity_modifier) as usize)), our_real_users_cache: RwLock::new(HashMap::new()), appservice_in_room_cache: RwLock::new(HashMap::new()), lazy_load_waiting: Mutex::new(HashMap::new()), - stateinfo_cache: Mutex::new(LruCache::new(100)), + stateinfo_cache: Mutex::new(LruCache::new((100.0 * config.conduit_cache_capacity_modifier) as usize)), }, account_data: account_data::AccountData { roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, From fa4099b138b3a4cdf6727acb14c47f20ace5f38e Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Tue, 1 Feb 2022 23:51:38 +0000 Subject: [PATCH 150/445] Use prebuilt CI-containers from https://gitlab.com/jfowl/conduit-containers Also run all builds on approved MRs --- .gitlab-ci.yml | 39 ++++++++++++++++----------------------- Cross.toml | 8 ++++---- cross/build.sh | 31 ------------------------------- cross/test.sh | 8 -------- 4 files changed, 20 insertions(+), 66 deletions(-) delete mode 100755 cross/build.sh delete mode 100755 cross/test.sh diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 741b5327..6f1a19f0 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -24,8 +24,9 @@ variables: - if: 
'$CI_COMMIT_BRANCH == "master"' - if: '$CI_COMMIT_BRANCH == "next"' - if: "$CI_COMMIT_TAG" + - if: '($CI_MERGE_REQUEST_APPROVED == "true") || $BUILD_EVERYTHING' # Once MR is approved, test all builds. Or if BUILD_EVERYTHING is set. interruptible: true - image: "rust:1.58" + image: "registry.gitlab.com/jfowl/conduit-containers/rust-with-tools:latest" tags: ["docker"] services: ["docker:dind"] variables: @@ -36,27 +37,23 @@ variables: before_script: - 'echo "Building for target $TARGET"' - "rustup show && rustc --version && cargo --version" # Print version info for debugging - # install cross-compiling prerequisites - - 'apt-get update && apt-get install -y docker.io && docker version' # install docker - - 'cargo install cross && cross --version' # install cross # fix cargo and rustup mounts from this container (https://gitlab.com/gitlab-org/gitlab-foss/-/issues/41227) - - 'mkdir -p $SHARED_PATH/cargo' - - 'cp -r $CARGO_HOME/bin $SHARED_PATH/cargo' - - 'cp -r $RUSTUP_HOME $SHARED_PATH' - - 'export CARGO_HOME=$SHARED_PATH/cargo RUSTUP_HOME=$SHARED_PATH/rustup' + - "mkdir -p $SHARED_PATH/cargo" + - "cp -r $CARGO_HOME/bin $SHARED_PATH/cargo" + - "cp -r $RUSTUP_HOME $SHARED_PATH" + - "export CARGO_HOME=$SHARED_PATH/cargo RUSTUP_HOME=$SHARED_PATH/rustup" # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results. - # The sccache binary is stored in the sysroot of the rustc installation since that directory is added to the path of the cross container. - - if [ -n "${SCCACHE_BIN_URL}" ]; then RUSTC_SYSROOT=$(rustc --print sysroot) && curl $SCCACHE_BIN_URL --output $RUSTC_SYSROOT/bin/sccache && chmod +x $RUSTC_SYSROOT/bin/sccache && export RUSTC_WRAPPER=sccache; fi + - if [ -n "${SCCACHE_ENDPOINT}" ]; then export RUSTC_WRAPPER=/sccache; fi script: # cross-compile conduit for target - - 'time ./cross/build.sh --locked --release' + - 'time cross build --target="$TARGET" --locked --release' - 'mv "target/$TARGET/release/conduit" "conduit-$TARGET"' # print information about linking for debugging - - 'file conduit-$TARGET' # print file information + - "file conduit-$TARGET" # print file information - 'readelf --dynamic conduit-$TARGET | sed -e "/NEEDED/q1"' # ensure statically linked cache: # https://doc.rust-lang.org/cargo/guide/cargo-home.html#caching-the-cargo-home-in-ci - key: 'cargo-cache-$TARGET' + key: "cargo-cache-$TARGET" paths: - $SHARED_PATH/cargo/registry/index - $SHARED_PATH/cargo/registry/cache @@ -125,10 +122,10 @@ build:release:cargo:aarch64-unknown-linux-musl: key: "build_cache--$TARGET--$CI_COMMIT_BRANCH--debug" script: # cross-compile conduit for target - - 'time ./cross/build.sh --locked' + - 'time time cross build --target="$TARGET" --locked' - 'mv "target/$TARGET/debug/conduit" "conduit-debug-$TARGET"' # print information about linking for debugging - - 'file conduit-debug-$TARGET' # print file information + - "file conduit-debug-$TARGET" # print file information - 'readelf --dynamic conduit-debug-$TARGET | sed -e "/NEEDED/q1"' # ensure statically linked artifacts: expire_in: 4 weeks @@ -230,24 +227,20 @@ docker:master:dockerhub: test:cargo: stage: "test" needs: [] - image: "rust:latest" + image: "registry.gitlab.com/jfowl/conduit-containers/rust-with-tools:latest" tags: ["docker"] variables: CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow interruptible: true before_script: - # - mkdir -p $CARGO_HOME - - apt-get update -yqq - - apt-get install -yqq --no-install-recommends 
build-essential libssl-dev pkg-config libclang-dev - rustup component add clippy rustfmt - - curl "https://faulty-storage.de/gitlab-report" --output ./gitlab-report && chmod +x ./gitlab-report # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: - - if [ -n "${SCCACHE_BIN_URL}" ]; then curl $SCCACHE_BIN_URL --output /sccache && chmod +x /sccache && export RUSTC_WRAPPER=/sccache; fi + - if [ -n "${SCCACHE_ENDPOINT}" ]; then export RUSTC_WRAPPER=/usr/local/cargo/bin/sccache; fi script: - rustc --version && cargo --version # Print version info for debugging - cargo fmt --all -- --check - - "cargo test --color always --workspace --verbose --locked --no-fail-fast -- -Z unstable-options --format json | ./gitlab-report -p test > $CI_PROJECT_DIR/report.xml" - - "cargo clippy --color always --verbose --message-format=json | ./gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json" + - "cargo test --color always --workspace --verbose --locked --no-fail-fast -- -Z unstable-options --format json | gitlab-report -p test > $CI_PROJECT_DIR/report.xml" + - "cargo clippy --color always --verbose --message-format=json | gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json" artifacts: when: always reports: diff --git a/Cross.toml b/Cross.toml index a989a98f..a1387b43 100644 --- a/Cross.toml +++ b/Cross.toml @@ -11,13 +11,13 @@ passthrough = [ ] [target.aarch64-unknown-linux-musl] -image = "rust-cross:aarch64-unknown-linux-musl" +image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-aarch64-unknown-linux-musl:latest" [target.arm-unknown-linux-musleabihf] -image = "rust-cross:arm-unknown-linux-musleabihf" +image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-arm-unknown-linux-musleabihf:latest" [target.armv7-unknown-linux-musleabihf] -image = "rust-cross:armv7-unknown-linux-musleabihf" +image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-armv7-unknown-linux-musleabihf:latest" [target.x86_64-unknown-linux-musl] -image = "rust-cross:x86_64-unknown-linux-musl" +image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-x86_64-unknown-linux-musl:latest" diff --git a/cross/build.sh b/cross/build.sh deleted file mode 100755 index 8f64ff87..00000000 --- a/cross/build.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash -set -ex - -# build custom container with libclang and static compilation -tag="rust-cross:${TARGET:?}" -docker build --tag="$tag" - << EOF -FROM rustembedded/cross:$TARGET - -# Install libclang for generating bindings with rust-bindgen -# The architecture is not relevant here since it's not used for compilation -RUN apt-get update && \ - apt-get install --assume-yes libclang-dev - -# Set the target prefix -ENV TARGET_PREFIX="/usr/local/$(echo "${TARGET:?}" | sed -e 's/armv7/arm/' -e 's/-unknown//')" - -# Make sure that cc-rs links libc/libstdc++ statically when cross-compiling -# See https://github.com/alexcrichton/cc-rs#external-configuration-via-environment-variables for more information -ENV RUSTFLAGS="-L\$TARGET_PREFIX/lib" CXXSTDLIB="static=stdc++" -# Forcefully linking against libatomic, libc and libgcc is required for arm32, otherwise symbols are missing -$([[ $TARGET =~ arm ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -Clink-arg=-static-libgcc -Clink-arg=-lgcc -lstatic=atomic -lstatic=c"') -# Strip symbols while compiling in release mode -$([[ $@ =~ -r ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -Clink-arg=-s"') - -# Make sure that rust-bindgen uses the correct include path when 
cross-compiling -# See https://github.com/rust-lang/rust-bindgen#environment-variables for more information -ENV BINDGEN_EXTRA_CLANG_ARGS="-I\$TARGET_PREFIX/include" -EOF - -# build conduit for a specific target -cross build --target="$TARGET" $@ diff --git a/cross/test.sh b/cross/test.sh deleted file mode 100755 index 0aa0909c..00000000 --- a/cross/test.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env sh -set -ex - -# Build conduit for a specific target -cross/build.sh $@ - -# Test conduit for a specific target -cross test --target="$TARGET" $@ From a5f004d7e9c783caf280884d7fd332c7bafa67ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 2 Feb 2022 12:36:55 +0100 Subject: [PATCH 151/445] fix: signature mismatch on odd send_join servers --- Cargo.toml | 2 +- src/client_server/account.rs | 19 ++++++++++--------- src/client_server/membership.rs | 17 +++++++++-------- src/database.rs | 24 ++++++++++++++++++------ 4 files changed, 38 insertions(+), 24 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 0089e7f8..78a4c8ff 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -76,7 +76,7 @@ crossbeam = { version = "0.8.1", optional = true } num_cpus = "1.13.0" threadpool = "1.8.1" heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true } -rocksdb = { version = "0.17.0", default-features = true, features = ["multi-threaded-cf", "zstd"], optional = true } +rocksdb = { version = "0.17.0", default-features = false, features = ["multi-threaded-cf", "zstd"], optional = true } thread_local = "1.1.3" # used for TURN server authentication diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 80c6f702..ff348545 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -60,15 +60,16 @@ pub async fn get_register_available_route( body: Ruma>, ) -> ConduitResult { // Validate user id - let user_id = UserId::parse_with_server_name(body.username.to_lowercase(), db.globals.server_name()) - .ok() - .filter(|user_id| { - !user_id.is_historical() && user_id.server_name() == db.globals.server_name() - }) - .ok_or(Error::BadRequest( - ErrorKind::InvalidUsername, - "Username is invalid.", - ))?; + let user_id = + UserId::parse_with_server_name(body.username.to_lowercase(), db.globals.server_name()) + .ok() + .filter(|user_id| { + !user_id.is_historical() && user_id.server_name() == db.globals.server_name() + }) + .ok_or(Error::BadRequest( + ErrorKind::InvalidUsername, + "Username is invalid.", + ))?; // Check if username is creative enough if db.users.exists(&user_id)? 
{ diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 70352784..216c4c0a 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -655,7 +655,7 @@ async fn join_room_by_id_helper( db.rooms.get_or_create_shortroomid(room_id, &db.globals)?; - let pdu = PduEvent::from_id_val(event_id, join_event.clone()) + let parsed_pdu = PduEvent::from_id_val(event_id, join_event.clone()) .map_err(|_| Error::BadServerResponse("Invalid join event PDU."))?; let mut state = HashMap::new(); @@ -695,14 +695,15 @@ async fn join_room_by_id_helper( } let incoming_shortstatekey = db.rooms.get_or_create_shortstatekey( - &pdu.kind, - pdu.state_key + &parsed_pdu.kind, + parsed_pdu + .state_key .as_ref() .expect("Pdu is a membership state event"), &db.globals, )?; - state.insert(incoming_shortstatekey, pdu.event_id.clone()); + state.insert(incoming_shortstatekey, parsed_pdu.event_id.clone()); let create_shortstatekey = db .rooms @@ -738,12 +739,12 @@ async fn join_room_by_id_helper( // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. - let statehashid = db.rooms.append_to_state(&pdu, &db.globals)?; + let statehashid = db.rooms.append_to_state(&parsed_pdu, &db.globals)?; db.rooms.append_pdu( - &pdu, - utils::to_canonical_object(&pdu).expect("Pdu is valid canonical object"), - iter::once(&*pdu.event_id), + &parsed_pdu, + join_event, + iter::once(&*parsed_pdu.event_id), db, )?; diff --git a/src/database.rs b/src/database.rs index 449d71bd..8d245b7f 100644 --- a/src/database.rs +++ b/src/database.rs @@ -367,15 +367,27 @@ impl Database { .try_into() .expect("pdu cache capacity fits into usize"), )), - auth_chain_cache: Mutex::new(LruCache::new((100_000.0 * config.conduit_cache_capacity_modifier) as usize)), - shorteventid_cache: Mutex::new(LruCache::new((100_000.0 * config.conduit_cache_capacity_modifier) as usize)), - eventidshort_cache: Mutex::new(LruCache::new((100_000.0 * config.conduit_cache_capacity_modifier) as usize)), - shortstatekey_cache: Mutex::new(LruCache::new((100_000.0 * config.conduit_cache_capacity_modifier) as usize)), - statekeyshort_cache: Mutex::new(LruCache::new((100_000.0 * config.conduit_cache_capacity_modifier) as usize)), + auth_chain_cache: Mutex::new(LruCache::new( + (100_000.0 * config.conduit_cache_capacity_modifier) as usize, + )), + shorteventid_cache: Mutex::new(LruCache::new( + (100_000.0 * config.conduit_cache_capacity_modifier) as usize, + )), + eventidshort_cache: Mutex::new(LruCache::new( + (100_000.0 * config.conduit_cache_capacity_modifier) as usize, + )), + shortstatekey_cache: Mutex::new(LruCache::new( + (100_000.0 * config.conduit_cache_capacity_modifier) as usize, + )), + statekeyshort_cache: Mutex::new(LruCache::new( + (100_000.0 * config.conduit_cache_capacity_modifier) as usize, + )), our_real_users_cache: RwLock::new(HashMap::new()), appservice_in_room_cache: RwLock::new(HashMap::new()), lazy_load_waiting: Mutex::new(HashMap::new()), - stateinfo_cache: Mutex::new(LruCache::new((100.0 * config.conduit_cache_capacity_modifier) as usize)), + stateinfo_cache: Mutex::new(LruCache::new( + (100.0 * config.conduit_cache_capacity_modifier) as usize, + )), }, account_data: account_data::AccountData { roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, From bfcf2db497ffab518b946922205fb9a5661d8c27 Mon Sep 17 00:00:00 2001 From: user Date: Fri, 28 Jan 2022 22:26:56 -0800 Subject: [PATCH 
152/445] fix: mention dependencies to build from source --- DEPLOY.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/DEPLOY.md b/DEPLOY.md index 38e1e286..d9f91e03 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -29,7 +29,11 @@ $ sudo wget -O /usr/local/bin/matrix-conduit $ sudo chmod +x /usr/local/bin/matrix-conduit ``` -Alternatively, you may compile the binary yourself using +Alternatively, you may compile the binary yourself + +```bash +$ sudo apt install libclang-dev build-essential +``` ```bash $ cargo build --release From da7b55b39c1ea592c0d5ec86a1988465bedaad0e Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Mon, 31 Jan 2022 09:27:31 +0100 Subject: [PATCH 153/445] Cleanup appservice events after removing the appservice --- src/database/admin.rs | 13 ++++++++++++- src/database/sending.rs | 31 +++++++++++++++++++++++++++++-- 2 files changed, 41 insertions(+), 3 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index 81e98393..9895a83b 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -112,7 +112,18 @@ impl Admin { guard.appservice.register_appservice(yaml).unwrap(); // TODO handle error } AdminCommand::UnregisterAppservice(service_name) => { - guard.appservice.unregister_appservice(&service_name).unwrap(); // TODO: see above + if let Ok(_) = guard.appservice.unregister_appservice(&service_name) { + if let Ok(_) = guard.sending.cleanup_events(&service_name) { + let msg: String = format!("OK. Appservice {} removed", service_name); + send_message(RoomMessageEventContent::text_plain(msg), guard, &state_lock); + } else { + let msg: String = format!("WARN: Appservice {} removed, but failed to cleanup events", service_name); + send_message(RoomMessageEventContent::text_plain(msg), guard, &state_lock); + } + } else { + let msg: String = format!("ERR. Appservice {} not removed", service_name); + send_message(RoomMessageEventContent::text_plain(msg), guard, &state_lock); + } } AdminCommand::ListAppservices => { if let Ok(appservices) = guard.appservice.iter_ids().map(|ids| ids.collect::>()) { diff --git a/src/database/sending.rs b/src/database/sending.rs index 69f7c444..af4ac676 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -480,6 +480,26 @@ impl Sending { hash.as_ref().to_owned() } + /// Cleanup event data + /// Used for instance after we remove an appservice registration + /// + #[tracing::instrument(skip(self))] + pub fn cleanup_events(&self, key_id: &str) -> Result<()> { + let mut prefix = b"+".to_vec(); + prefix.extend_from_slice(key_id.as_bytes()); + prefix.push(0xff); + + for (key, _) in self.servercurrentevent_data.scan_prefix(prefix.clone()) { + self.servercurrentevent_data.remove(&key).unwrap(); + } + + for (key, _) in self.servernameevent_data.scan_prefix(prefix.clone()) { + self.servernameevent_data.remove(&key).unwrap(); + } + + Ok(()) + } + #[tracing::instrument(skip(db, events, kind))] async fn handle_events( kind: OutgoingKind, @@ -520,8 +540,15 @@ impl Sending { &db.globals, db.appservice .get_registration(server.as_str()) - .unwrap() - .unwrap(), // TODO: handle error + .map_err(|e| (kind.clone(), e))? 
+ .ok_or_else(|| { + ( + kind.clone(), + Error::bad_database( + "[Appservice] Could not load registration from db.", + ), + ) + })?, appservice::event::push_events::v1::Request { events: &pdu_jsons, txn_id: (&*base64::encode_config( From 8f69f02e592299dbe3713e238b94b19bfc445ec8 Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Mon, 31 Jan 2022 10:07:49 +0100 Subject: [PATCH 154/445] add error handling for register_appservice too --- src/database/admin.rs | 13 ++++++++++++- src/database/appservice.rs | 4 ++-- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index 9895a83b..eef6ce10 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -109,7 +109,18 @@ impl Admin { } } AdminCommand::RegisterAppservice(yaml) => { - guard.appservice.register_appservice(yaml).unwrap(); // TODO handle error + match guard.appservice.register_appservice(yaml) { + Ok(Some(id)) => { + let msg: String = format!("OK. Appservice {} created", id); + send_message(RoomMessageEventContent::text_plain(msg), guard, &state_lock); + } + Ok(None) => { + send_message(RoomMessageEventContent::text_plain("WARN. Appservice created, but its ID was not returned!"), guard, &state_lock); + } + Err(_) => { + send_message(RoomMessageEventContent::text_plain("ERR: Failed register appservice. Check server log"), guard, &state_lock); + } + } } AdminCommand::UnregisterAppservice(service_name) => { if let Ok(_) = guard.appservice.unregister_appservice(&service_name) { diff --git a/src/database/appservice.rs b/src/database/appservice.rs index 88de1f33..8b29aca9 100644 --- a/src/database/appservice.rs +++ b/src/database/appservice.rs @@ -12,7 +12,7 @@ pub struct Appservice { } impl Appservice { - pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result<()> { + pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result> { // TODO: Rumaify let id = yaml.get("id").unwrap().as_str().unwrap(); self.id_appserviceregistrations.insert( @@ -24,7 +24,7 @@ impl Appservice { .unwrap() .insert(id.to_owned(), yaml); - Ok(()) + Ok(Some(id.to_owned())) } /// Remove an appservice registration From e24d75cffc8f00d526848a93a4e2cfce54bf69a2 Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Mon, 31 Jan 2022 11:52:33 +0100 Subject: [PATCH 155/445] Return the ID of the appservice that was created by register_appservice --- src/database/admin.rs | 5 +---- src/database/appservice.rs | 8 +++++--- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index eef6ce10..a214796b 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -110,13 +110,10 @@ impl Admin { } AdminCommand::RegisterAppservice(yaml) => { match guard.appservice.register_appservice(yaml) { - Ok(Some(id)) => { + Ok(id) => { let msg: String = format!("OK. Appservice {} created", id); send_message(RoomMessageEventContent::text_plain(msg), guard, &state_lock); } - Ok(None) => { - send_message(RoomMessageEventContent::text_plain("WARN. Appservice created, but its ID was not returned!"), guard, &state_lock); - } Err(_) => { send_message(RoomMessageEventContent::text_plain("ERR: Failed register appservice. 
Check server log"), guard, &state_lock); } diff --git a/src/database/appservice.rs b/src/database/appservice.rs index 8b29aca9..edd5009b 100644 --- a/src/database/appservice.rs +++ b/src/database/appservice.rs @@ -12,7 +12,9 @@ pub struct Appservice { } impl Appservice { - pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result> { + /// Registers an appservice and returns the ID to the caller + /// + pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result { // TODO: Rumaify let id = yaml.get("id").unwrap().as_str().unwrap(); self.id_appserviceregistrations.insert( @@ -22,9 +24,9 @@ impl Appservice { self.cached_registrations .write() .unwrap() - .insert(id.to_owned(), yaml); + .insert(id.to_owned(), yaml.to_owned()); - Ok(Some(id.to_owned())) + Ok(id.to_owned()) } /// Remove an appservice registration From 9478c75f9dcd040cb9f03deb5ea809f117985de2 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Tue, 1 Feb 2022 23:51:38 +0000 Subject: [PATCH 156/445] Use prebuilt CI-containers from https://gitlab.com/jfowl/conduit-containers Also run all builds on approved MRs --- .gitlab-ci.yml | 39 ++++++++++--------------- Cross.toml | 8 ++--- cross/build.sh | 31 -------------------- cross/test.sh | 8 ----- docker/ci-binaries-packaging.Dockerfile | 4 ++- docker/healthcheck.sh | 2 +- 6 files changed, 24 insertions(+), 68 deletions(-) delete mode 100755 cross/build.sh delete mode 100755 cross/test.sh diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 741b5327..6f1a19f0 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -24,8 +24,9 @@ variables: - if: '$CI_COMMIT_BRANCH == "master"' - if: '$CI_COMMIT_BRANCH == "next"' - if: "$CI_COMMIT_TAG" + - if: '($CI_MERGE_REQUEST_APPROVED == "true") || $BUILD_EVERYTHING' # Once MR is approved, test all builds. Or if BUILD_EVERYTHING is set. interruptible: true - image: "rust:1.58" + image: "registry.gitlab.com/jfowl/conduit-containers/rust-with-tools:latest" tags: ["docker"] services: ["docker:dind"] variables: @@ -36,27 +37,23 @@ variables: before_script: - 'echo "Building for target $TARGET"' - "rustup show && rustc --version && cargo --version" # Print version info for debugging - # install cross-compiling prerequisites - - 'apt-get update && apt-get install -y docker.io && docker version' # install docker - - 'cargo install cross && cross --version' # install cross # fix cargo and rustup mounts from this container (https://gitlab.com/gitlab-org/gitlab-foss/-/issues/41227) - - 'mkdir -p $SHARED_PATH/cargo' - - 'cp -r $CARGO_HOME/bin $SHARED_PATH/cargo' - - 'cp -r $RUSTUP_HOME $SHARED_PATH' - - 'export CARGO_HOME=$SHARED_PATH/cargo RUSTUP_HOME=$SHARED_PATH/rustup' + - "mkdir -p $SHARED_PATH/cargo" + - "cp -r $CARGO_HOME/bin $SHARED_PATH/cargo" + - "cp -r $RUSTUP_HOME $SHARED_PATH" + - "export CARGO_HOME=$SHARED_PATH/cargo RUSTUP_HOME=$SHARED_PATH/rustup" # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results. - # The sccache binary is stored in the sysroot of the rustc installation since that directory is added to the path of the cross container. 
- - if [ -n "${SCCACHE_BIN_URL}" ]; then RUSTC_SYSROOT=$(rustc --print sysroot) && curl $SCCACHE_BIN_URL --output $RUSTC_SYSROOT/bin/sccache && chmod +x $RUSTC_SYSROOT/bin/sccache && export RUSTC_WRAPPER=sccache; fi + - if [ -n "${SCCACHE_ENDPOINT}" ]; then export RUSTC_WRAPPER=/sccache; fi script: # cross-compile conduit for target - - 'time ./cross/build.sh --locked --release' + - 'time cross build --target="$TARGET" --locked --release' - 'mv "target/$TARGET/release/conduit" "conduit-$TARGET"' # print information about linking for debugging - - 'file conduit-$TARGET' # print file information + - "file conduit-$TARGET" # print file information - 'readelf --dynamic conduit-$TARGET | sed -e "/NEEDED/q1"' # ensure statically linked cache: # https://doc.rust-lang.org/cargo/guide/cargo-home.html#caching-the-cargo-home-in-ci - key: 'cargo-cache-$TARGET' + key: "cargo-cache-$TARGET" paths: - $SHARED_PATH/cargo/registry/index - $SHARED_PATH/cargo/registry/cache @@ -125,10 +122,10 @@ build:release:cargo:aarch64-unknown-linux-musl: key: "build_cache--$TARGET--$CI_COMMIT_BRANCH--debug" script: # cross-compile conduit for target - - 'time ./cross/build.sh --locked' + - 'time time cross build --target="$TARGET" --locked' - 'mv "target/$TARGET/debug/conduit" "conduit-debug-$TARGET"' # print information about linking for debugging - - 'file conduit-debug-$TARGET' # print file information + - "file conduit-debug-$TARGET" # print file information - 'readelf --dynamic conduit-debug-$TARGET | sed -e "/NEEDED/q1"' # ensure statically linked artifacts: expire_in: 4 weeks @@ -230,24 +227,20 @@ docker:master:dockerhub: test:cargo: stage: "test" needs: [] - image: "rust:latest" + image: "registry.gitlab.com/jfowl/conduit-containers/rust-with-tools:latest" tags: ["docker"] variables: CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow interruptible: true before_script: - # - mkdir -p $CARGO_HOME - - apt-get update -yqq - - apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config libclang-dev - rustup component add clippy rustfmt - - curl "https://faulty-storage.de/gitlab-report" --output ./gitlab-report && chmod +x ./gitlab-report # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: - - if [ -n "${SCCACHE_BIN_URL}" ]; then curl $SCCACHE_BIN_URL --output /sccache && chmod +x /sccache && export RUSTC_WRAPPER=/sccache; fi + - if [ -n "${SCCACHE_ENDPOINT}" ]; then export RUSTC_WRAPPER=/usr/local/cargo/bin/sccache; fi script: - rustc --version && cargo --version # Print version info for debugging - cargo fmt --all -- --check - - "cargo test --color always --workspace --verbose --locked --no-fail-fast -- -Z unstable-options --format json | ./gitlab-report -p test > $CI_PROJECT_DIR/report.xml" - - "cargo clippy --color always --verbose --message-format=json | ./gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json" + - "cargo test --color always --workspace --verbose --locked --no-fail-fast -- -Z unstable-options --format json | gitlab-report -p test > $CI_PROJECT_DIR/report.xml" + - "cargo clippy --color always --verbose --message-format=json | gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json" artifacts: when: always reports: diff --git a/Cross.toml b/Cross.toml index a989a98f..a1387b43 100644 --- a/Cross.toml +++ b/Cross.toml @@ -11,13 +11,13 @@ passthrough = [ ] [target.aarch64-unknown-linux-musl] -image = "rust-cross:aarch64-unknown-linux-musl" 
+image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-aarch64-unknown-linux-musl:latest" [target.arm-unknown-linux-musleabihf] -image = "rust-cross:arm-unknown-linux-musleabihf" +image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-arm-unknown-linux-musleabihf:latest" [target.armv7-unknown-linux-musleabihf] -image = "rust-cross:armv7-unknown-linux-musleabihf" +image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-armv7-unknown-linux-musleabihf:latest" [target.x86_64-unknown-linux-musl] -image = "rust-cross:x86_64-unknown-linux-musl" +image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-x86_64-unknown-linux-musl:latest" diff --git a/cross/build.sh b/cross/build.sh deleted file mode 100755 index 8f64ff87..00000000 --- a/cross/build.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/bash -set -ex - -# build custom container with libclang and static compilation -tag="rust-cross:${TARGET:?}" -docker build --tag="$tag" - << EOF -FROM rustembedded/cross:$TARGET - -# Install libclang for generating bindings with rust-bindgen -# The architecture is not relevant here since it's not used for compilation -RUN apt-get update && \ - apt-get install --assume-yes libclang-dev - -# Set the target prefix -ENV TARGET_PREFIX="/usr/local/$(echo "${TARGET:?}" | sed -e 's/armv7/arm/' -e 's/-unknown//')" - -# Make sure that cc-rs links libc/libstdc++ statically when cross-compiling -# See https://github.com/alexcrichton/cc-rs#external-configuration-via-environment-variables for more information -ENV RUSTFLAGS="-L\$TARGET_PREFIX/lib" CXXSTDLIB="static=stdc++" -# Forcefully linking against libatomic, libc and libgcc is required for arm32, otherwise symbols are missing -$([[ $TARGET =~ arm ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -Clink-arg=-static-libgcc -Clink-arg=-lgcc -lstatic=atomic -lstatic=c"') -# Strip symbols while compiling in release mode -$([[ $@ =~ -r ]] && echo 'ENV RUSTFLAGS="$RUSTFLAGS -Clink-arg=-s"') - -# Make sure that rust-bindgen uses the correct include path when cross-compiling -# See https://github.com/rust-lang/rust-bindgen#environment-variables for more information -ENV BINDGEN_EXTRA_CLANG_ARGS="-I\$TARGET_PREFIX/include" -EOF - -# build conduit for a specific target -cross build --target="$TARGET" $@ diff --git a/cross/test.sh b/cross/test.sh deleted file mode 100755 index 0aa0909c..00000000 --- a/cross/test.sh +++ /dev/null @@ -1,8 +0,0 @@ -#!/usr/bin/env sh -set -ex - -# Build conduit for a specific target -cross/build.sh $@ - -# Test conduit for a specific target -cross test --target="$TARGET" $@ diff --git a/docker/ci-binaries-packaging.Dockerfile b/docker/ci-binaries-packaging.Dockerfile index a6339be3..bb67bb22 100644 --- a/docker/ci-binaries-packaging.Dockerfile +++ b/docker/ci-binaries-packaging.Dockerfile @@ -19,8 +19,10 @@ ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" # Conduit needs: # ca-certificates: for https +# iproute2: for `ss` for the healthcheck script RUN apk add --no-cache \ - ca-certificates + ca-certificates \ + iproute2 ARG CREATED diff --git a/docker/healthcheck.sh b/docker/healthcheck.sh index df7f18a5..42b2e103 100644 --- a/docker/healthcheck.sh +++ b/docker/healthcheck.sh @@ -3,7 +3,7 @@ # If the config file does not contain a default port and the CONDUIT_PORT env is not set, create # try to get port from process list if [ -z "${CONDUIT_PORT}" ]; then - CONDUIT_PORT=$(netstat -tlp | grep conduit | grep -m1 -o ':[0-9]*' | grep -m1 -o '[0-9]*') + CONDUIT_PORT=$(ss -tlpn | grep conduit | grep -m1 -o ':[0-9]*' | grep -m1 -o 
'[0-9]*') fi # The actual health check. From e5bac5e4f53fa3e6565cca96b687dc8ff976f7f0 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Wed, 2 Feb 2022 14:07:35 +0100 Subject: [PATCH 157/445] fix: Running in Docker --- Dockerfile | 44 ++++++++++++------------- conduit-example.toml | 1 + docker/ci-binaries-packaging.Dockerfile | 5 +-- 3 files changed, 26 insertions(+), 24 deletions(-) diff --git a/Dockerfile b/Dockerfile index b629690d..0da4aace 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,9 +1,9 @@ # syntax=docker/dockerfile:1 -FROM docker.io/rust:1.58-alpine AS builder +FROM docker.io/rust:1.58-bullseye AS builder WORKDIR /usr/src/conduit # Install required packages to build Conduit and it's dependencies -RUN apk add musl-dev +RUN apt update && apt -y install libclang-11-dev # == Build dependencies without our own code separately for caching == # @@ -26,28 +26,28 @@ COPY src src # Builds conduit and places the binary at /usr/src/conduit/target/release/conduit RUN touch src/main.rs && touch src/lib.rs && cargo build --release - - - # --------------------------------------------------------------------------------------------------------------- # Stuff below this line actually ends up in the resulting docker image # --------------------------------------------------------------------------------------------------------------- -FROM docker.io/alpine:3.15.0 AS runner +FROM docker.io/debian:bullseye-slim AS runner # Standard port on which Conduit launches. # You still need to map the port when using the docker command or docker-compose. EXPOSE 6167 -# Note from @jfowl: I would like to remove this in the future and just have the Docker version be configured with envs. -ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" +# Note from @jfowl: I would like to remove the config file in the future and just have the Docker version be configured with envs. +ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" \ + CONDUIT_PORT=6167 # Conduit needs: # ca-certificates: for https -# libgcc: Apparently this is needed, even if I (@jfowl) don't know exactly why. But whatever, it's not that big. -RUN apk add --no-cache \ +# iproute2 & wget: for the healthcheck script +RUN apt update && apt -y install \ ca-certificates \ - libgcc + iproute2 \ + wget +RUN rm -rf /var/lib/apt/lists/* # Created directory for the database and media files RUN mkdir -p /srv/conduit/.local/share/conduit @@ -59,20 +59,20 @@ HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh # Copy over the actual Conduit binary from the builder stage COPY --from=builder /usr/src/conduit/target/release/conduit /srv/conduit/conduit -# Improve security: Don't run stuff as root, that does not need to run as root: -# Add www-data user and group with UID 82, as used by alpine -# https://git.alpinelinux.org/aports/tree/main/nginx/nginx.pre-install +# Improve security: Don't run stuff as root, that does not need to run as root +# Add 'conduit' user and group (100:82). The UID:GID choice is to be compatible +# with previous, Alpine-based containers, where the user and group were both +# named 'www-data'. 
RUN set -x ; \ - addgroup -Sg 82 www-data 2>/dev/null ; \ - adduser -S -D -H -h /srv/conduit -G www-data -g www-data www-data 2>/dev/null ; \ - addgroup www-data www-data 2>/dev/null && exit 0 ; exit 1 + groupadd -r -g 82 conduit ; \ + useradd -r -M -d /srv/conduit -o -u 100 -g conduit conduit && exit 0 ; exit 1 -# Change ownership of Conduit files to www-data user and group -RUN chown -cR www-data:www-data /srv/conduit -RUN chmod +x /srv/conduit/healthcheck.sh +# Change ownership of Conduit files to conduit user and group and make the healthcheck executable: +RUN chown -cR conduit:conduit /srv/conduit && \ + chmod +x /srv/conduit/healthcheck.sh -# Change user to www-data -USER www-data +# Change user to conduit, no root permissions afterwards: +USER conduit # Set container home directory WORKDIR /srv/conduit diff --git a/conduit-example.toml b/conduit-example.toml index c0274a4d..f1578078 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -22,6 +22,7 @@ database_backend = "rocksdb" # The port Conduit will be running on. You need to set up a reverse proxy in # your web server (e.g. apache or nginx), so all requests to /_matrix on port # 443 and 8448 will be forwarded to the Conduit instance running on this port +# Docker users: Don't change this, you'll need to map an external port to this. port = 6167 # Max size for uploads diff --git a/docker/ci-binaries-packaging.Dockerfile b/docker/ci-binaries-packaging.Dockerfile index bb67bb22..3731bac1 100644 --- a/docker/ci-binaries-packaging.Dockerfile +++ b/docker/ci-binaries-packaging.Dockerfile @@ -14,8 +14,9 @@ FROM docker.io/alpine:3.15.0 AS runner # You still need to map the port when using the docker command or docker-compose. EXPOSE 6167 -# Note from @jfowl: I would like to remove this in the future and just have the Docker version be configured with envs. -ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" +# Note from @jfowl: I would like to remove the config file in the future and just have the Docker version be configured with envs. 
+ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" \ + CONDUIT_PORT=6167 # Conduit needs: # ca-certificates: for https From c4733676cf16267ffbb0b348848e87a7d103cf37 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Wed, 2 Feb 2022 13:35:15 +0000 Subject: [PATCH 158/445] Apply feedback from Ticho --- Dockerfile | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/Dockerfile b/Dockerfile index 0da4aace..b631f297 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,7 +3,7 @@ FROM docker.io/rust:1.58-bullseye AS builder WORKDIR /usr/src/conduit # Install required packages to build Conduit and it's dependencies -RUN apt update && apt -y install libclang-11-dev +RUN apt update && apt -y install libclang-dev # == Build dependencies without our own code separately for caching == # @@ -45,9 +45,8 @@ ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" \ RUN apt update && apt -y install \ ca-certificates \ iproute2 \ - wget - -RUN rm -rf /var/lib/apt/lists/* + wget \ + && rm -rf /var/lib/apt/lists/* # Created directory for the database and media files RUN mkdir -p /srv/conduit/.local/share/conduit From 87225e70c3441c9ddd96d9fe0c4dd4e5a2c1289e Mon Sep 17 00:00:00 2001 From: Andrei Vasiliu Date: Wed, 2 Feb 2022 21:35:57 +0200 Subject: [PATCH 159/445] Parse admin command body templates from doc comments --- src/database/admin.rs | 71 +++++++++++++++++++++++++++++-------------- 1 file changed, 49 insertions(+), 22 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index ea08f65a..c7150493 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -132,7 +132,7 @@ impl Admin { } // Parse and process a message from the admin room -pub fn process_admin_message(db: &Database, room_message: String) -> RoomMessageEventContent { +fn process_admin_message(db: &Database, room_message: String) -> RoomMessageEventContent { let mut lines = room_message.lines(); let command_line = lines.next().expect("each string has at least one line"); let body: Vec<_> = lines.collect(); @@ -202,7 +202,10 @@ enum AdminCommand { /// Registering a new bridge using the ID of an existing bridge will replace /// the old one. /// - /// [add-yaml-block-to-usage] + /// [commandbody] + /// # ``` + /// # yaml content here + /// # ``` RegisterAppservice, /// Unregister an appservice using its ID @@ -225,10 +228,16 @@ enum AdminCommand { event_id: Box, }, + #[clap(verbatim_doc_comment)] /// Parse and print a PDU from a JSON /// /// The PDU event is only checked for validity and is not added to the /// database. + /// + /// [commandbody] + /// # ``` + /// # PDU json content here + /// # ``` ParsePdu, /// Retrieve and print a PDU by ID from the Conduit database @@ -433,33 +442,51 @@ fn usage_to_html(text: &str) -> String { .expect("Regex compilation should not fail"); let text = re.replace_all(&text, "$1: $4"); - // // Enclose examples in code blocks - // // (?ms) enables multi-line mode and dot-matches-all - // let re = - // Regex::new("(?ms)^Example:\n(.*?)\nUSAGE:$").expect("Regex compilation should not fail"); - // let text = re.replace_all(&text, "EXAMPLE:\n
<pre>$1</pre>\nUSAGE:");
-
-    let has_yaml_block_marker = text.contains("\n[add-yaml-block-to-usage]\n");
-    let text = text.replace("\n[add-yaml-block-to-usage]\n", "");
+    // Look for a `[commandbody]` tag. If it exists, use all lines below it that
+    // start with a `#` in the USAGE section.
+    let mut text_lines: Vec<&str> = text.lines().collect();
+    let mut command_body = String::new();
+
+    if let Some(line_index) = text_lines.iter().position(|line| *line == "[commandbody]") {
+        text_lines.remove(line_index);
+
+        while text_lines
+            .get(line_index)
+            .map(|line| line.starts_with("#"))
+            .unwrap_or(false)
+        {
+            command_body += if text_lines[line_index].starts_with("# ") {
+                &text_lines[line_index][2..]
+            } else {
+                &text_lines[line_index][1..]
+            };
+            command_body += "[nobr]\n";
+            text_lines.remove(line_index);
+        }
+    }
 
-    // Add HTML line-breaks
-    let text = text.replace("\n", "<br>\n");
+    let text = text_lines.join("\n");
 
-    let text = if !has_yaml_block_marker {
+    // Improve the usage section
+    let text = if command_body.is_empty() {
         // Wrap the usage line in code tags
-        let re = Regex::new("(?m)^USAGE:<br>\n    (@conduit:.*)<br>$")
+        let re = Regex::new("(?m)^USAGE:\n    (@conduit:.*)$")
             .expect("Regex compilation should not fail");
-        re.replace_all(&text, "USAGE:<br>\n<code>$1</code><br>")
+        re.replace_all(&text, "USAGE:\n<code>$1</code>").to_string()
     } else {
         // Wrap the usage line in a code block, and add a yaml block example
         // This makes the usage of e.g. `register-appservice` more accurate
-        let re = Regex::new("(?m)^USAGE:<br>\n    (.*?)<br>\n<br>\n")
-            .expect("Regex compilation should not fail");
-        re.replace_all(
-            &text,
-            "USAGE:<br>\n<pre>$1\n```\nyaml content here\n```</pre>",
-        )
+        let re =
+            Regex::new("(?m)^USAGE:\n    (.*?)\n\n").expect("Regex compilation should not fail");
+        re.replace_all(&text, "USAGE:\n<pre>$1[nobr]\n[commandbodyblock]</pre>")
+            .replace("[commandbodyblock]", &command_body)
     };
 
-    text.to_string()
+    // Add HTML line-breaks
+    let text = text
+        .replace("\n\n\n", "\n\n")
+        .replace("\n", "<br>\n")
+        .replace("[nobr]<br>
              ", ""); + + text } From 9ef3abacd43571300a7fbd7d35ba05d040816d8d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 2 Feb 2022 18:03:50 +0100 Subject: [PATCH 160/445] fix: initial state deserialize->serialize error --- src/client_server/room.rs | 7 +++++-- src/pdu.rs | 19 ++----------------- 2 files changed, 7 insertions(+), 19 deletions(-) diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 52d25425..a2339639 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -344,10 +344,13 @@ pub async fn create_room_route( // 6. Events listed in initial_state for event in &body.initial_state { - let pdu_builder = PduBuilder::from(event.deserialize().map_err(|e| { + let mut pdu_builder = event.deserialize_as::().map_err(|e| { warn!("Invalid initial state event: {:?}", e); Error::BadRequest(ErrorKind::InvalidParam, "Invalid initial state event.") - })?); + })?; + + // Implicit state key defaults to "" + pdu_builder.state_key.get_or_insert_with(|| "".to_owned()); // Silently skip encryption events if they are not allowed if pdu_builder.event_type == EventType::RoomEncryption && !db.globals.allow_encryption() { diff --git a/src/pdu.rs b/src/pdu.rs index db9375e4..fe004609 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -1,9 +1,8 @@ use crate::Error; use ruma::{ events::{ - room::member::RoomMemberEventContent, AnyEphemeralRoomEvent, AnyInitialStateEvent, - AnyRoomEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, - EventType, StateEvent, + room::member::RoomMemberEventContent, AnyEphemeralRoomEvent, AnyRoomEvent, AnyStateEvent, + AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, EventType, StateEvent, }, serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, state_res, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, UInt, UserId, @@ -361,17 +360,3 @@ pub struct PduBuilder { pub state_key: Option, pub redacts: Option>, } - -/// Direct conversion prevents loss of the empty `state_key` that ruma requires. -impl From for PduBuilder { - fn from(event: AnyInitialStateEvent) -> Self { - Self { - event_type: EventType::from(event.event_type()), - content: to_raw_value(&event.content()) - .expect("AnyStateEventContent came from JSON and can thus turn back into JSON."), - unsigned: None, - state_key: Some(event.state_key().to_owned()), - redacts: None, - } - } -} From abb4b4cf0b0868fe7e5ee21298278b8c3deacb0e Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 3 Feb 2022 13:24:04 +0100 Subject: [PATCH 161/445] Remove TryFrom, TryInto imports They are no longer needed in the 2021 edition. 
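For illustration only (not part of this patch): since `TryFrom` and `TryInto`
are part of the Rust 2021 prelude, a standalone sketch like the one below
compiles without any `use std::convert::{TryFrom, TryInto};` import.

    fn main() {
        // TryFrom is reachable through the 2021 prelude, no import needed
        let byte = u8::try_from(200u32).expect("200 fits into a u8");
        assert_eq!(byte, 200);

        // TryInto is likewise available without an import
        let too_big: Result<u8, _> = 300u32.try_into();
        assert!(too_big.is_err());
    }
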
--- src/appservice_server.rs | 7 +------ src/client_server/account.rs | 2 +- src/client_server/context.rs | 2 +- src/client_server/directory.rs | 2 -- src/client_server/media.rs | 1 - src/client_server/membership.rs | 1 - src/client_server/message.rs | 1 - src/client_server/presence.rs | 2 +- src/client_server/profile.rs | 2 +- src/client_server/room.rs | 2 +- src/client_server/sync.rs | 1 - src/database.rs | 1 - src/database/account_data.rs | 2 +- src/database/admin.rs | 2 +- src/database/pusher.rs | 2 +- src/database/rooms.rs | 1 - src/database/rooms/edus.rs | 1 - src/database/sending.rs | 1 - src/database/users.rs | 7 +------ src/pdu.rs | 2 +- src/server_server.rs | 1 - src/utils.rs | 1 - 22 files changed, 11 insertions(+), 33 deletions(-) diff --git a/src/appservice_server.rs b/src/appservice_server.rs index ed886d6c..0152c386 100644 --- a/src/appservice_server.rs +++ b/src/appservice_server.rs @@ -1,12 +1,7 @@ use crate::{utils, Error, Result}; use bytes::BytesMut; use ruma::api::{IncomingResponse, OutgoingRequest, SendAccessToken}; -use std::{ - convert::{TryFrom, TryInto}, - fmt::Debug, - mem, - time::Duration, -}; +use std::{fmt::Debug, mem, time::Duration}; use tracing::warn; pub(crate) async fn send_request( diff --git a/src/client_server/account.rs b/src/client_server/account.rs index ff348545..47e2a6a4 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -1,4 +1,4 @@ -use std::{collections::BTreeMap, convert::TryInto, sync::Arc}; +use std::{collections::BTreeMap, sync::Arc}; use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma}; diff --git a/src/client_server/context.rs b/src/client_server/context.rs index e1177661..7ded48da 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -3,7 +3,7 @@ use ruma::{ api::client::{error::ErrorKind, r0::context::get_context}, events::EventType, }; -use std::{collections::HashSet, convert::TryFrom}; +use std::collections::HashSet; #[cfg(feature = "conduit_bin")] use rocket::get; diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 5a1bc494..719d9af4 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -1,5 +1,3 @@ -use std::convert::TryInto; - use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Result, Ruma}; use ruma::{ api::{ diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 8524c57e..deea319e 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -9,7 +9,6 @@ use ruma::api::client::{ get_media_config, }, }; -use std::convert::TryInto; #[cfg(feature = "conduit_bin")] use rocket::{get, post}; diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 216c4c0a..e855dba2 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -30,7 +30,6 @@ use ruma::{ use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, - convert::{TryFrom, TryInto}, iter, sync::{Arc, RwLock}, time::{Duration, Instant}, diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 7d904f90..cf4f0cb6 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -8,7 +8,6 @@ use ruma::{ }; use std::{ collections::{BTreeMap, HashSet}, - convert::TryInto, sync::Arc, }; diff --git a/src/client_server/presence.rs 
b/src/client_server/presence.rs index aaa78a92..cdc1e1f5 100644 --- a/src/client_server/presence.rs +++ b/src/client_server/presence.rs @@ -1,6 +1,6 @@ use crate::{database::DatabaseGuard, utils, ConduitResult, Ruma}; use ruma::api::client::r0::presence::{get_presence, set_presence}; -use std::{convert::TryInto, time::Duration}; +use std::time::Duration; #[cfg(feature = "conduit_bin")] use rocket::{get, put}; diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 71e61da3..ef58a980 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -12,7 +12,7 @@ use ruma::{ events::{room::member::RoomMemberEventContent, EventType}, }; use serde_json::value::to_raw_value; -use std::{convert::TryInto, sync::Arc}; +use std::sync::Arc; #[cfg(feature = "conduit_bin")] use rocket::{get, put}; diff --git a/src/client_server/room.rs b/src/client_server/room.rs index a2339639..7ea31d8a 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -27,7 +27,7 @@ use ruma::{ RoomAliasId, RoomId, RoomVersionId, }; use serde_json::{json, value::to_raw_value}; -use std::{cmp::max, collections::BTreeMap, convert::TryInto, sync::Arc}; +use std::{cmp::max, collections::BTreeMap, sync::Arc}; use tracing::{info, warn}; #[cfg(feature = "conduit_bin")] diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 14aac3a1..7cfea5af 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -14,7 +14,6 @@ use ruma::{ }; use std::{ collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, - convert::TryInto, sync::Arc, time::Duration, }; diff --git a/src/database.rs b/src/database.rs index 8d245b7f..c9cbad4d 100644 --- a/src/database.rs +++ b/src/database.rs @@ -28,7 +28,6 @@ use ruma::{DeviceId, EventId, RoomId, ServerName, UserId}; use serde::{de::IgnoredAny, Deserialize}; use std::{ collections::{BTreeMap, HashMap, HashSet}, - convert::{TryFrom, TryInto}, fs::{self, remove_dir_all}, io::Write, mem::size_of, diff --git a/src/database/account_data.rs b/src/database/account_data.rs index 456283bd..ec9d09e8 100644 --- a/src/database/account_data.rs +++ b/src/database/account_data.rs @@ -6,7 +6,7 @@ use ruma::{ RoomId, UserId, }; use serde::{de::DeserializeOwned, Serialize}; -use std::{collections::HashMap, convert::TryFrom, sync::Arc}; +use std::{collections::HashMap, sync::Arc}; use super::abstraction::Tree; diff --git a/src/database/admin.rs b/src/database/admin.rs index a214796b..32972de4 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -1,4 +1,4 @@ -use std::{convert::TryInto, sync::Arc}; +use std::sync::Arc; use crate::{pdu::PduBuilder, Database}; use rocket::futures::{channel::mpsc, stream::StreamExt}; diff --git a/src/database/pusher.rs b/src/database/pusher.rs index 97ca85d8..f401834a 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -19,7 +19,7 @@ use ruma::{ }; use tracing::{error, info, warn}; -use std::{convert::TryFrom, fmt::Debug, mem, sync::Arc}; +use std::{fmt::Debug, mem, sync::Arc}; use super::abstraction::Tree; diff --git a/src/database/rooms.rs b/src/database/rooms.rs index c0cb1ce9..a139853b 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -35,7 +35,6 @@ use serde_json::value::to_raw_value; use std::{ borrow::Cow, collections::{BTreeMap, HashMap, HashSet}, - convert::{TryFrom, TryInto}, fmt::Debug, iter, mem::size_of, diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index eb2d3427..289a00a1 100644 --- a/src/database/rooms/edus.rs +++ 
b/src/database/rooms/edus.rs @@ -11,7 +11,6 @@ use ruma::{ }; use std::{ collections::{HashMap, HashSet}, - convert::TryInto, mem, sync::Arc, }; diff --git a/src/database/sending.rs b/src/database/sending.rs index af4ac676..4a032855 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -1,6 +1,5 @@ use std::{ collections::{BTreeMap, HashMap, HashSet}, - convert::TryInto, fmt::Debug, sync::Arc, time::{Duration, Instant}, diff --git a/src/database/users.rs b/src/database/users.rs index 13f9b151..681ee284 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -11,12 +11,7 @@ use ruma::{ DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, RoomAliasId, UInt, UserId, }; -use std::{ - collections::BTreeMap, - convert::{TryFrom, TryInto}, - mem, - sync::Arc, -}; +use std::{collections::BTreeMap, mem, sync::Arc}; use tracing::warn; use super::abstraction::Tree; diff --git a/src/pdu.rs b/src/pdu.rs index fe004609..ec6c961b 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -12,7 +12,7 @@ use serde_json::{ json, value::{to_raw_value, RawValue as RawJsonValue}, }; -use std::{cmp::Ordering, collections::BTreeMap, convert::TryInto, sync::Arc}; +use std::{cmp::Ordering, collections::BTreeMap, sync::Arc}; use tracing::warn; /// Content hashes of a PDU. diff --git a/src/server_server.rs b/src/server_server.rs index 9129951b..e730210a 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -60,7 +60,6 @@ use ruma::{ use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ collections::{btree_map, hash_map, BTreeMap, BTreeSet, HashMap, HashSet}, - convert::{TryFrom, TryInto}, fmt::Debug, future::Future, mem, diff --git a/src/utils.rs b/src/utils.rs index 26d71a8c..e2d71f4c 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -4,7 +4,6 @@ use rand::prelude::*; use ruma::serde::{try_from_json_map, CanonicalJsonError, CanonicalJsonObject}; use std::{ cmp, - convert::TryInto, str::FromStr, time::{SystemTime, UNIX_EPOCH}, }; From ce60fc6859ea698ed8341beea8321a949d90ad39 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 22 Jan 2022 13:27:54 +0100 Subject: [PATCH 162/445] Stop using set_env to configure tracing-subscriber --- src/main.rs | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/src/main.rs b/src/main.rs index 63b22194..5fda5737 100644 --- a/src/main.rs +++ b/src/main.rs @@ -184,9 +184,6 @@ fn setup_rocket(config: Figment, data: Arc>) -> rocket::Rocket< #[rocket::main] async fn main() { - // Force log level off, so we can use our own logger - std::env::set_var("CONDUIT_LOG_LEVEL", "off"); - let raw_config = Figment::from(default_config()) .merge( @@ -197,8 +194,6 @@ async fn main() { ) .merge(Env::prefixed("CONDUIT_").global()); - std::env::set_var("RUST_LOG", "warn"); - let config = match raw_config.extract::() { Ok(s) => s, Err(e) => { @@ -244,8 +239,6 @@ async fn main() { println!("exporting"); opentelemetry::global::shutdown_tracer_provider(); } else { - std::env::set_var("RUST_LOG", &config.log); - let registry = tracing_subscriber::Registry::default(); if config.tracing_flame { let (flame_layer, _guard) = @@ -259,7 +252,7 @@ async fn main() { start.await; } else { let fmt_layer = tracing_subscriber::fmt::Layer::new(); - let filter_layer = EnvFilter::try_from_default_env() + let filter_layer = EnvFilter::try_new(&config.log) .or_else(|_| EnvFilter::try_new("info")) .unwrap(); From 974c10e739b70c5798450f5e51819e9d2beed5d3 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 3 Feb 2022 13:30:04 +0100 Subject: 
[PATCH 163/445] Move Config out of database module --- src/config.rs | 131 ++++++++++++++++++++++++++++++ src/{database => config}/proxy.rs | 0 src/database.rs | 130 +---------------------------- src/lib.rs | 4 +- 4 files changed, 136 insertions(+), 129 deletions(-) create mode 100644 src/config.rs rename src/{database => config}/proxy.rs (100%) diff --git a/src/config.rs b/src/config.rs new file mode 100644 index 00000000..4c0fcc21 --- /dev/null +++ b/src/config.rs @@ -0,0 +1,131 @@ +use std::collections::BTreeMap; + +use ruma::ServerName; +use serde::{de::IgnoredAny, Deserialize}; +use tracing::warn; + +mod proxy; + +use self::proxy::ProxyConfig; + +#[derive(Clone, Debug, Deserialize)] +pub struct Config { + pub server_name: Box, + #[serde(default = "default_database_backend")] + pub database_backend: String, + pub database_path: String, + #[serde(default = "default_db_cache_capacity_mb")] + pub db_cache_capacity_mb: f64, + #[serde(default = "default_conduit_cache_capacity_modifier")] + pub conduit_cache_capacity_modifier: f64, + #[serde(default = "default_rocksdb_max_open_files")] + pub rocksdb_max_open_files: i32, + #[serde(default = "default_pdu_cache_capacity")] + pub pdu_cache_capacity: u32, + #[serde(default = "default_cleanup_second_interval")] + pub cleanup_second_interval: u32, + #[serde(default = "default_max_request_size")] + pub max_request_size: u32, + #[serde(default = "default_max_concurrent_requests")] + pub max_concurrent_requests: u16, + #[serde(default = "false_fn")] + pub allow_registration: bool, + #[serde(default = "true_fn")] + pub allow_encryption: bool, + #[serde(default = "false_fn")] + pub allow_federation: bool, + #[serde(default = "true_fn")] + pub allow_room_creation: bool, + #[serde(default = "false_fn")] + pub allow_jaeger: bool, + #[serde(default = "false_fn")] + pub tracing_flame: bool, + #[serde(default)] + pub proxy: ProxyConfig, + pub jwt_secret: Option, + #[serde(default = "Vec::new")] + pub trusted_servers: Vec>, + #[serde(default = "default_log")] + pub log: String, + #[serde(default)] + pub turn_username: String, + #[serde(default)] + pub turn_password: String, + #[serde(default = "Vec::new")] + pub turn_uris: Vec, + #[serde(default)] + pub turn_secret: String, + #[serde(default = "default_turn_ttl")] + pub turn_ttl: u64, + + #[serde(flatten)] + pub catchall: BTreeMap, +} + +const DEPRECATED_KEYS: &[&str] = &["cache_capacity"]; + +impl Config { + pub fn warn_deprecated(&self) { + let mut was_deprecated = false; + for key in self + .catchall + .keys() + .filter(|key| DEPRECATED_KEYS.iter().any(|s| s == key)) + { + warn!("Config parameter {} is deprecated", key); + was_deprecated = true; + } + + if was_deprecated { + warn!("Read conduit documentation and check your configuration if any new configuration parameters should be adjusted"); + } + } +} + +fn false_fn() -> bool { + false +} + +fn true_fn() -> bool { + true +} + +fn default_database_backend() -> String { + "sqlite".to_owned() +} + +fn default_db_cache_capacity_mb() -> f64 { + 10.0 +} + +fn default_conduit_cache_capacity_modifier() -> f64 { + 1.0 +} + +fn default_rocksdb_max_open_files() -> i32 { + 20 +} + +fn default_pdu_cache_capacity() -> u32 { + 150_000 +} + +fn default_cleanup_second_interval() -> u32 { + 1 * 60 // every minute +} + +fn default_max_request_size() -> u32 { + 20 * 1024 * 1024 // Default to 20 MB +} + +fn default_max_concurrent_requests() -> u16 { + 100 +} + +fn default_log() -> String { + "info,state_res=warn,rocket=off,_=off,sled=off".to_owned() +} + +fn 
default_turn_ttl() -> u64 { + 60 * 60 * 24 +} diff --git a/src/database/proxy.rs b/src/config/proxy.rs similarity index 100% rename from src/database/proxy.rs rename to src/config/proxy.rs diff --git a/src/database.rs b/src/database.rs index c9cbad4d..5deedcfe 100644 --- a/src/database.rs +++ b/src/database.rs @@ -6,7 +6,6 @@ pub mod appservice; pub mod globals; pub mod key_backups; pub mod media; -pub mod proxy; pub mod pusher; pub mod rooms; pub mod sending; @@ -14,7 +13,7 @@ pub mod transaction_ids; pub mod uiaa; pub mod users; -use crate::{utils, Error, Result}; +use crate::{utils, Config, Error, Result}; use abstraction::DatabaseEngine; use directories::ProjectDirs; use lru_cache::LruCache; @@ -24,8 +23,7 @@ use rocket::{ request::{FromRequest, Request}, Shutdown, State, }; -use ruma::{DeviceId, EventId, RoomId, ServerName, UserId}; -use serde::{de::IgnoredAny, Deserialize}; +use ruma::{DeviceId, EventId, RoomId, UserId}; use std::{ collections::{BTreeMap, HashMap, HashSet}, fs::{self, remove_dir_all}, @@ -38,130 +36,6 @@ use std::{ use tokio::sync::{OwnedRwLockReadGuard, RwLock as TokioRwLock, Semaphore}; use tracing::{debug, error, warn}; -use self::proxy::ProxyConfig; - -#[derive(Clone, Debug, Deserialize)] -pub struct Config { - server_name: Box, - #[serde(default = "default_database_backend")] - database_backend: String, - database_path: String, - #[serde(default = "default_db_cache_capacity_mb")] - db_cache_capacity_mb: f64, - #[serde(default = "default_conduit_cache_capacity_modifier")] - conduit_cache_capacity_modifier: f64, - #[serde(default = "default_rocksdb_max_open_files")] - rocksdb_max_open_files: i32, - #[serde(default = "default_pdu_cache_capacity")] - pdu_cache_capacity: u32, - #[serde(default = "default_cleanup_second_interval")] - cleanup_second_interval: u32, - #[serde(default = "default_max_request_size")] - max_request_size: u32, - #[serde(default = "default_max_concurrent_requests")] - max_concurrent_requests: u16, - #[serde(default = "false_fn")] - allow_registration: bool, - #[serde(default = "true_fn")] - allow_encryption: bool, - #[serde(default = "false_fn")] - allow_federation: bool, - #[serde(default = "true_fn")] - allow_room_creation: bool, - #[serde(default = "false_fn")] - pub allow_jaeger: bool, - #[serde(default = "false_fn")] - pub tracing_flame: bool, - #[serde(default)] - proxy: ProxyConfig, - jwt_secret: Option, - #[serde(default = "Vec::new")] - trusted_servers: Vec>, - #[serde(default = "default_log")] - pub log: String, - #[serde(default)] - turn_username: String, - #[serde(default)] - turn_password: String, - #[serde(default = "Vec::new")] - turn_uris: Vec, - #[serde(default)] - turn_secret: String, - #[serde(default = "default_turn_ttl")] - turn_ttl: u64, - - #[serde(flatten)] - catchall: BTreeMap, -} - -const DEPRECATED_KEYS: &[&str] = &["cache_capacity"]; - -impl Config { - pub fn warn_deprecated(&self) { - let mut was_deprecated = false; - for key in self - .catchall - .keys() - .filter(|key| DEPRECATED_KEYS.iter().any(|s| s == key)) - { - warn!("Config parameter {} is deprecated", key); - was_deprecated = true; - } - - if was_deprecated { - warn!("Read conduit documentation and check your configuration if any new configuration parameters should be adjusted"); - } - } -} - -fn false_fn() -> bool { - false -} - -fn true_fn() -> bool { - true -} - -fn default_database_backend() -> String { - "sqlite".to_owned() -} - -fn default_db_cache_capacity_mb() -> f64 { - 10.0 -} - -fn default_conduit_cache_capacity_modifier() -> f64 { - 1.0 -} - 
-fn default_rocksdb_max_open_files() -> i32 { - 20 -} - -fn default_pdu_cache_capacity() -> u32 { - 150_000 -} - -fn default_cleanup_second_interval() -> u32 { - 1 * 60 // every minute -} - -fn default_max_request_size() -> u32 { - 20 * 1024 * 1024 // Default to 20 MB -} - -fn default_max_concurrent_requests() -> u16 { - 100 -} - -fn default_log() -> String { - "info,state_res=warn,rocket=off,_=off,sled=off".to_owned() -} - -fn default_turn_ttl() -> u64 { - 60 * 60 * 24 -} - pub struct Database { _db: Arc, pub globals: globals::Globals, diff --git a/src/lib.rs b/src/lib.rs index 745eb394..030dfc3a 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -9,6 +9,7 @@ use std::ops::Deref; +mod config; mod database; mod error; mod pdu; @@ -19,7 +20,8 @@ pub mod appservice_server; pub mod client_server; pub mod server_server; -pub use database::{Config, Database}; +pub use config::Config; +pub use database::Database; pub use error::{Error, Result}; pub use pdu::PduEvent; pub use rocket::Config as RocketConfig; From 6399a7fe4e07f9992ac8ca0412dc48c87d4d0456 Mon Sep 17 00:00:00 2001 From: Andrei Vasiliu Date: Thu, 3 Feb 2022 20:21:04 +0200 Subject: [PATCH 164/445] Remove dash from admin command help --- src/database/admin.rs | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index 8f90e4d5..34bef5f5 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -13,7 +13,7 @@ use rocket::{ }; use ruma::{ events::{room::message::RoomMessageEventContent, EventType}, - EventId, RoomId, RoomVersionId, UserId, + EventId, RoomId, RoomVersionId, ServerName, UserId, }; use serde_json::value::to_raw_value; use tokio::sync::{MutexGuard, RwLock, RwLockReadGuard}; @@ -140,10 +140,11 @@ fn process_admin_message(db: &Database, room_message: String) -> RoomMessageEven let admin_command = match parse_admin_command(&command_line) { Ok(command) => command, Err(error) => { + let server_name = db.globals.server_name(); let message = error .to_string() - .replace("example.com", db.globals.server_name().as_str()); - let html_message = usage_to_html(&message); + .replace("server.name", server_name.as_str()); + let html_message = usage_to_html(&message, server_name); return RoomMessageEventContent::text_html(message, html_message); } @@ -191,7 +192,7 @@ fn parse_admin_command(command_line: &str) -> std::result::Result String { +fn usage_to_html(text: &str, server_name: &ServerName) -> String { + // Replace `@conduit:servername:-subcmdname` with `@conduit:servername: subcmdname` + let text = text.replace( + &format!("@conduit:{}:-", server_name), + &format!("@conduit:{}: ", server_name), + ); + // For the conduit admin room, subcommands become main commands let text = text.replace("SUBCOMMAND", "COMMAND"); let text = text.replace("subcommand", "command"); From 92571d961f8ec0ce72c0c40433e2487032643060 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 3 Feb 2022 19:54:29 +0100 Subject: [PATCH 165/445] Remove mutation from default_config and set default log_level to off --- Cargo.lock | 1 + Cargo.toml | 1 + src/main.rs | 37 ++++++++++++++++++------------------- 3 files changed, 20 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 6dbb6586..85487715 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -317,6 +317,7 @@ dependencies = [ "image", "jsonwebtoken", "lru-cache", + "maplit", "num_cpus", "opentelemetry", "opentelemetry-jaeger", diff --git a/Cargo.toml b/Cargo.toml index 05782e7c..fe60f6e9 100644 --- a/Cargo.toml +++ b/Cargo.toml 
@@ -84,6 +84,7 @@ hmac = "0.11.0" sha-1 = "0.9.8" # used for conduit's CLI and admin room command parsing clap = { version = "3.0.10", default-features = false, features = ["std", "derive"] } +maplit = "1.0.2" [target.'cfg(not(target_env = "msvc"))'.dependencies] tikv-jemalloc-ctl = { version = "0.4.2", features = ['use_std'] } diff --git a/src/main.rs b/src/main.rs index 5fda5737..b3e85c95 100644 --- a/src/main.rs +++ b/src/main.rs @@ -9,6 +9,7 @@ use std::sync::Arc; +use maplit::hashset; use opentelemetry::trace::{FutureExt, Tracer}; use rocket::{ catch, catchers, @@ -292,28 +293,26 @@ fn bad_json_catcher() -> Result<()> { } fn default_config() -> rocket::Config { - let mut config = rocket::Config::release_default(); + use rocket::config::{LogLevel, Shutdown, Sig}; - { - let mut shutdown = &mut config.shutdown; + rocket::Config { + // Disable rocket's logging to get only tracing-subscriber's log output + log_level: LogLevel::Off, + shutdown: Shutdown { + // Once shutdown is triggered, this is the amount of seconds before rocket + // will forcefully start shutting down connections, this gives enough time to /sync + // requests and the like (which havent gotten the memo, somehow) to still complete gracefully. + grace: 35, - #[cfg(unix)] - { - use rocket::config::Sig; + // After the grace period, rocket starts shutting down connections, and waits at least this + // many seconds before forcefully shutting all of them down. + mercy: 10, - shutdown.signals.insert(Sig::Term); - shutdown.signals.insert(Sig::Int); - } - - // Once shutdown is triggered, this is the amount of seconds before rocket - // will forcefully start shutting down connections, this gives enough time to /sync - // requests and the like (which havent gotten the memo, somehow) to still complete gracefully. - shutdown.grace = 35; + #[cfg(unix)] + signals: hashset![Sig::Term, Sig::Int], - // After the grace period, rocket starts shutting down connections, and waits at least this - // many seconds before forcefully shutting all of them down. 
- shutdown.mercy = 10; + ..Shutdown::default() + }, + ..rocket::Config::release_default() } - - config } From d23d6fbb371c4aa263d47ff430f6491283d49915 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 3 Feb 2022 20:23:35 +0100 Subject: [PATCH 166/445] Upgrade Ruma --- Cargo.lock | 36 +++++++++++++++++----------------- Cargo.toml | 2 +- src/client_server/to_device.rs | 2 +- 3 files changed, 20 insertions(+), 20 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 85487715..19df999a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2175,7 +2175,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "assign", "js_int", @@ -2196,7 +2196,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "bytes", "http", @@ -2212,7 +2212,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2223,7 +2223,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "ruma-api", "ruma-common", @@ -2237,7 +2237,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.12.3" -source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "assign", "bytes", @@ -2257,7 +2257,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "indexmap", "js_int", @@ -2272,7 +2272,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "indoc", "js_int", @@ -2289,7 +2289,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = 
"git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2300,7 +2300,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "js_int", "ruma-api", @@ -2315,7 +2315,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "percent-encoding", "rand 0.8.4", @@ -2330,7 +2330,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2340,7 +2340,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "thiserror", ] @@ -2348,7 +2348,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "js_int", "ruma-api", @@ -2361,7 +2361,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "js_int", "ruma-api", @@ -2376,7 +2376,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "base64 0.13.0", "bytes", @@ -2391,7 +2391,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2402,7 +2402,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.9.0" -source = 
"git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2419,7 +2419,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.4.1" -source = "git+https://github.com/ruma/ruma?rev=82becb86c837570224964425929d1b5305784435#82becb86c837570224964425929d1b5305784435" +source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" dependencies = [ "itertools", "js_int", diff --git a/Cargo.toml b/Cargo.toml index fe60f6e9..1e1b188f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -20,7 +20,7 @@ rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle request # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "82becb86c837570224964425929d1b5305784435", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "f7a10a7e471b59d3096be2695c2a05d407d80df1", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index 6e764deb..e0aa9e91 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -54,7 +54,7 @@ pub async fn send_event_to_device_route( DirectDeviceContent { sender: sender_user.clone(), ev_type: EventType::from(&*body.event_type), - message_id: body.txn_id.to_string(), + message_id: body.txn_id.clone(), messages, }, )) From e1c0dcb6bb45432c0638f5eced7b719ea2ff1afe Mon Sep 17 00:00:00 2001 From: Andrei Vasiliu Date: Thu, 3 Feb 2022 20:52:41 +0200 Subject: [PATCH 167/445] Create admin room and hide migration messages on first run --- src/client_server/account.rs | 298 ++--------------------------- src/database.rs | 81 +++++--- src/database/admin.rs | 353 +++++++++++++++++++++++++++++++++-- 3 files changed, 417 insertions(+), 315 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 47e2a6a4..a210e8ae 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -1,7 +1,11 @@ -use std::{collections::BTreeMap, sync::Arc}; +use std::sync::Arc; use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; -use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma}; +use crate::{ + database::{admin::make_user_admin, DatabaseGuard}, + pdu::PduBuilder, + utils, ConduitResult, 
Error, Ruma, +}; use ruma::{ api::client::{ error::ErrorKind, @@ -14,25 +18,13 @@ use ruma::{ }, }, events::{ - room::{ - canonical_alias::RoomCanonicalAliasEventContent, - create::RoomCreateEventContent, - guest_access::{GuestAccess, RoomGuestAccessEventContent}, - history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, - join_rules::{JoinRule, RoomJoinRulesEventContent}, - member::{MembershipState, RoomMemberEventContent}, - message::RoomMessageEventContent, - name::RoomNameEventContent, - power_levels::RoomPowerLevelsEventContent, - topic::RoomTopicEventContent, - }, + room::member::{MembershipState, RoomMemberEventContent}, EventType, }, - identifiers::RoomName, - push, RoomAliasId, RoomId, RoomVersionId, UserId, + push, UserId, }; use serde_json::value::to_raw_value; -use tracing::info; +use tracing::{info, warn}; use register::RegistrationKind; #[cfg(feature = "conduit_bin")] @@ -253,276 +245,16 @@ pub async fn register_route( body.initial_device_display_name.clone(), )?; - // If this is the first user on this server, create the admin room - if db.users.count()? == 1 { - // Create a user for the server - let conduit_user = UserId::parse_with_server_name("conduit", db.globals.server_name()) - .expect("@conduit:server_name is valid"); - - db.users.create(&conduit_user, None)?; - - let room_id = RoomId::new(db.globals.server_name()); - - db.rooms.get_or_create_shortroomid(&room_id, &db.globals)?; - - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.clone()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - let mut content = RoomCreateEventContent::new(conduit_user.clone()); - content.federate = true; - content.predecessor = None; - content.room_version = RoomVersionId::V6; - - // 1. The room create event - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomCreate, - content: to_raw_value(&content).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &db, - &state_lock, - )?; - - // 2. Make conduit bot join - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Join, - displayname: None, - avatar_url: None, - is_direct: None, - third_party_invite: None, - blurhash: None, - reason: None, - join_authorized_via_users_server: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(conduit_user.to_string()), - redacts: None, - }, - &conduit_user, - &room_id, - &db, - &state_lock, - )?; - - // 3. 
Power levels - let mut users = BTreeMap::new(); - users.insert(conduit_user.clone(), 100.into()); - users.insert(user_id.clone(), 100.into()); - - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomPowerLevels, - content: to_raw_value(&RoomPowerLevelsEventContent { - users, - ..Default::default() - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &db, - &state_lock, - )?; - - // 4.1 Join Rules - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomJoinRules, - content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite)) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &db, - &state_lock, - )?; - - // 4.2 History Visibility - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomHistoryVisibility, - content: to_raw_value(&RoomHistoryVisibilityEventContent::new( - HistoryVisibility::Shared, - )) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &db, - &state_lock, - )?; - - // 4.3 Guest Access - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomGuestAccess, - content: to_raw_value(&RoomGuestAccessEventContent::new(GuestAccess::Forbidden)) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &db, - &state_lock, - )?; - - // 6. Events implied by name and topic - let room_name = RoomName::parse(format!("{} Admin Room", db.globals.server_name())) - .expect("Room name is valid"); - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomName, - content: to_raw_value(&RoomNameEventContent::new(Some(room_name))) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &db, - &state_lock, - )?; - - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomTopic, - content: to_raw_value(&RoomTopicEventContent { - topic: format!("Manage {}", db.globals.server_name()), - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &db, - &state_lock, - )?; - - // Room alias - let alias: Box = format!("#admins:{}", db.globals.server_name()) - .try_into() - .expect("#admins:server_name is a valid alias name"); - - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomCanonicalAlias, - content: to_raw_value(&RoomCanonicalAliasEventContent { - alias: Some(alias.clone()), - alt_aliases: Vec::new(), - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &db, - &state_lock, - )?; + info!("{} registered on this server", user_id); - db.rooms.set_alias(&alias, Some(&room_id), &db.globals)?; + // If this is the first real user, grant them admin privileges + // Note: the server user, @conduit:servername, is generated first + if db.users.count()? 
== 2 { + make_user_admin(&db, &user_id, displayname).await?; - // Invite and join the real user - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Invite, - displayname: None, - avatar_url: None, - is_direct: None, - third_party_invite: None, - blurhash: None, - reason: None, - join_authorized_via_users_server: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - &conduit_user, - &room_id, - &db, - &state_lock, - )?; - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Join, - displayname: Some(displayname), - avatar_url: None, - is_direct: None, - third_party_invite: None, - blurhash: None, - reason: None, - join_authorized_via_users_server: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - &user_id, - &room_id, - &db, - &state_lock, - )?; - - // Send welcome message - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMessage, - content: to_raw_value(&RoomMessageEventContent::text_html( - "## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`".to_owned(), - "
<h2>Thank you for trying out Conduit!</h2>\n<p>Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Website: https://conduit.rs<br>Git and Documentation: https://gitlab.com/famedly/conduit<br>Report issues: https://gitlab.com/famedly/conduit/-/issues</p>\n</blockquote>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>Conduit room (Ask questions and get notified on updates):<br><code>/join #conduit:fachschaften.org</code></p>\n<p>Conduit lounge (Off-topic, only Conduit users are allowed to join)<br><code>/join #conduit-lounge:conduit.rs</code></p>
              \n".to_owned(), - )) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: None, - redacts: None, - }, - &conduit_user, - &room_id, - &db, - &state_lock, - )?; + warn!("Granting {} admin privileges as the first user", user_id); } - info!("{} registered on this server", user_id); - db.flush()?; Ok(register::Response { diff --git a/src/database.rs b/src/database.rs index 5deedcfe..2b1671cd 100644 --- a/src/database.rs +++ b/src/database.rs @@ -34,7 +34,9 @@ use std::{ sync::{Arc, Mutex, RwLock}, }; use tokio::sync::{OwnedRwLockReadGuard, RwLock as TokioRwLock, Semaphore}; -use tracing::{debug, error, warn}; +use tracing::{debug, error, info, warn}; + +use self::admin::create_admin_room; pub struct Database { _db: Arc, @@ -301,10 +303,32 @@ impl Database { )?, })); - { - let db = db.read().await; + let guard = db.read().await; + + // Matrix resource ownership is based on the server name; changing it + // requires recreating the database from scratch. + if guard.users.count()? > 0 { + let conduit_user = + UserId::parse_with_server_name("conduit", guard.globals.server_name()) + .expect("@conduit:server_name is valid"); + + if !guard.users.exists(&conduit_user)? { + error!( + "The {} server user does not exist, and the database is not new.", + conduit_user + ); + return Err(Error::bad_database( + "Cannot reuse an existing database after changing the server name, please delete the old one first." + )); + } + } + + // If the database has any data, perform data migrations before starting + let latest_database_version = 11; + + if guard.users.count()? > 0 { + let db = &*guard; // MIGRATIONS - // TODO: database versions of new dbs should probably not be 0 if db.globals.database_version()? < 1 { for (roomserverid, _) in db.rooms.roomserverids.iter() { let mut parts = roomserverid.split(|&b| b == 0xff); @@ -325,7 +349,7 @@ impl Database { db.globals.bump_database_version(1)?; - println!("Migration: 0 -> 1 finished"); + warn!("Migration: 0 -> 1 finished"); } if db.globals.database_version()? < 2 { @@ -344,7 +368,7 @@ impl Database { db.globals.bump_database_version(2)?; - println!("Migration: 1 -> 2 finished"); + warn!("Migration: 1 -> 2 finished"); } if db.globals.database_version()? < 3 { @@ -362,7 +386,7 @@ impl Database { db.globals.bump_database_version(3)?; - println!("Migration: 2 -> 3 finished"); + warn!("Migration: 2 -> 3 finished"); } if db.globals.database_version()? < 4 { @@ -385,7 +409,7 @@ impl Database { db.globals.bump_database_version(4)?; - println!("Migration: 3 -> 4 finished"); + warn!("Migration: 3 -> 4 finished"); } if db.globals.database_version()? < 5 { @@ -409,7 +433,7 @@ impl Database { db.globals.bump_database_version(5)?; - println!("Migration: 4 -> 5 finished"); + warn!("Migration: 4 -> 5 finished"); } if db.globals.database_version()? < 6 { @@ -422,7 +446,7 @@ impl Database { db.globals.bump_database_version(6)?; - println!("Migration: 5 -> 6 finished"); + warn!("Migration: 5 -> 6 finished"); } if db.globals.database_version()? < 7 { @@ -549,7 +573,7 @@ impl Database { db.globals.bump_database_version(7)?; - println!("Migration: 6 -> 7 finished"); + warn!("Migration: 6 -> 7 finished"); } if db.globals.database_version()? 
< 8 { @@ -557,7 +581,7 @@ impl Database { for (room_id, _) in db.rooms.roomid_shortstatehash.iter() { let shortroomid = db.globals.next_count()?.to_be_bytes(); db.rooms.roomid_shortroomid.insert(&room_id, &shortroomid)?; - println!("Migration: 8"); + info!("Migration: 8"); } // Update pduids db layout let mut batch = db.rooms.pduid_pdu.iter().filter_map(|(key, v)| { @@ -608,7 +632,7 @@ impl Database { db.globals.bump_database_version(8)?; - println!("Migration: 7 -> 8 finished"); + warn!("Migration: 7 -> 8 finished"); } if db.globals.database_version()? < 9 { @@ -650,7 +674,7 @@ impl Database { println!("smaller batch done"); } - println!("Deleting starts"); + info!("Deleting starts"); let batch2: Vec<_> = db .rooms @@ -673,7 +697,7 @@ impl Database { db.globals.bump_database_version(9)?; - println!("Migration: 8 -> 9 finished"); + warn!("Migration: 8 -> 9 finished"); } if db.globals.database_version()? < 10 { @@ -692,7 +716,7 @@ impl Database { db.globals.bump_database_version(10)?; - println!("Migration: 9 -> 10 finished"); + warn!("Migration: 9 -> 10 finished"); } if db.globals.database_version()? < 11 { @@ -701,11 +725,28 @@ impl Database { .clear()?; db.globals.bump_database_version(11)?; - println!("Migration: 10 -> 11 finished"); + warn!("Migration: 10 -> 11 finished"); } - } - let guard = db.read().await; + assert_eq!(11, latest_database_version); + + info!( + "Loaded {} database with version {}", + config.database_backend, latest_database_version + ); + } else { + guard + .globals + .bump_database_version(latest_database_version)?; + + // Create the admin room and server user on first run + create_admin_room(&guard).await?; + + warn!( + "Created new {} database with version {}", + config.database_backend, latest_database_version + ); + } // This data is probably outdated guard.rooms.edus.presenceid_presence.clear()?; @@ -724,8 +765,6 @@ impl Database { #[cfg(feature = "conduit_bin")] pub async fn start_on_shutdown_tasks(db: Arc>, shutdown: Shutdown) { - use tracing::info; - tokio::spawn(async move { shutdown.await; diff --git a/src/database/admin.rs b/src/database/admin.rs index 34bef5f5..9bbfd4ea 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -1,4 +1,4 @@ -use std::{convert::TryFrom, convert::TryInto, sync::Arc, time::Instant}; +use std::{collections::BTreeMap, convert::TryFrom, convert::TryInto, sync::Arc, time::Instant}; use crate::{ error::{Error, Result}, @@ -12,12 +12,22 @@ use rocket::{ http::RawStr, }; use ruma::{ + events::room::{ + canonical_alias::RoomCanonicalAliasEventContent, + create::RoomCreateEventContent, + guest_access::{GuestAccess, RoomGuestAccessEventContent}, + history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, + join_rules::{JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, RoomMemberEventContent}, + name::RoomNameEventContent, + power_levels::RoomPowerLevelsEventContent, + topic::RoomTopicEventContent, + }, events::{room::message::RoomMessageEventContent, EventType}, - EventId, RoomId, RoomVersionId, ServerName, UserId, + identifiers::{EventId, RoomAliasId, RoomId, RoomName, RoomVersionId, ServerName, UserId}, }; use serde_json::value::to_raw_value; use tokio::sync::{MutexGuard, RwLock, RwLockReadGuard}; -use tracing::warn; pub enum AdminRoomEvent { ProcessMessage(String), @@ -52,16 +62,9 @@ impl Admin { .try_into() .expect("#admins:server_name is a valid room alias"), ) + .expect("Database data for admin room alias must be valid") .expect("Admin room must exist"); - let conduit_room = match 
conduit_room { - None => { - warn!("Conduit instance does not have an #admins room. Logging to that room will not work. Restart Conduit after creating a user to fix this."); - return; - } - Some(r) => r, - }; - drop(guard); let send_message = |message: RoomMessageEventContent, @@ -500,3 +503,331 @@ fn usage_to_html(text: &str, server_name: &ServerName) -> String { text } + +/// Create the admin room. +/// +/// Users in this room are considered admins by conduit, and the room can be +/// used to issue admin commands by talking to the server user inside it. +pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { + let room_id = RoomId::new(db.globals.server_name()); + + db.rooms.get_or_create_shortroomid(&room_id, &db.globals)?; + + let mutex_state = Arc::clone( + db.globals + .roomid_mutex_state + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + // Create a user for the server + let conduit_user = UserId::parse_with_server_name("conduit", db.globals.server_name()) + .expect("@conduit:server_name is valid"); + + db.users.create(&conduit_user, None)?; + + let mut content = RoomCreateEventContent::new(conduit_user.clone()); + content.federate = true; + content.predecessor = None; + content.room_version = RoomVersionId::V6; + + // 1. The room create event + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomCreate, + content: to_raw_value(&content).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &db, + &state_lock, + )?; + + // 2. Make conduit bot join + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Join, + displayname: None, + avatar_url: None, + is_direct: None, + third_party_invite: None, + blurhash: None, + reason: None, + join_authorized_via_users_server: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(conduit_user.to_string()), + redacts: None, + }, + &conduit_user, + &room_id, + &db, + &state_lock, + )?; + + // 3. 
Power levels + let mut users = BTreeMap::new(); + users.insert(conduit_user.clone(), 100.into()); + + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomPowerLevels, + content: to_raw_value(&RoomPowerLevelsEventContent { + users, + ..Default::default() + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &db, + &state_lock, + )?; + + // 4.1 Join Rules + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomJoinRules, + content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite)) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &db, + &state_lock, + )?; + + // 4.2 History Visibility + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomHistoryVisibility, + content: to_raw_value(&RoomHistoryVisibilityEventContent::new( + HistoryVisibility::Shared, + )) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &db, + &state_lock, + )?; + + // 4.3 Guest Access + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomGuestAccess, + content: to_raw_value(&RoomGuestAccessEventContent::new(GuestAccess::Forbidden)) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &db, + &state_lock, + )?; + + // 5. Events implied by name and topic + let room_name = RoomName::parse(format!("{} Admin Room", db.globals.server_name())) + .expect("Room name is valid"); + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomName, + content: to_raw_value(&RoomNameEventContent::new(Some(room_name))) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &db, + &state_lock, + )?; + + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomTopic, + content: to_raw_value(&RoomTopicEventContent { + topic: format!("Manage {}", db.globals.server_name()), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &db, + &state_lock, + )?; + + // 6. Room alias + let alias: Box = format!("#admins:{}", db.globals.server_name()) + .try_into() + .expect("#admins:server_name is a valid alias name"); + + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomCanonicalAlias, + content: to_raw_value(&RoomCanonicalAliasEventContent { + alias: Some(alias.clone()), + alt_aliases: Vec::new(), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &db, + &state_lock, + )?; + + db.rooms.set_alias(&alias, Some(&room_id), &db.globals)?; + + Ok(()) +} + +/// Invite the user to the conduit admin room. +/// +/// In conduit, this is equivalent to granting admin privileges. 
+pub(crate) async fn make_user_admin( + db: &Database, + user_id: &UserId, + displayname: String, +) -> Result<()> { + let admin_room_alias: Box = format!("#admins:{}", db.globals.server_name()) + .try_into() + .expect("#admins:server_name is a valid alias name"); + let room_id = db + .rooms + .id_from_alias(&admin_room_alias)? + .expect("Admin room must exist"); + + let mutex_state = Arc::clone( + db.globals + .roomid_mutex_state + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + // Use the server user to grant the new admin's power level + let conduit_user = UserId::parse_with_server_name("conduit", db.globals.server_name()) + .expect("@conduit:server_name is valid"); + + // Invite and join the real user + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Invite, + displayname: None, + avatar_url: None, + is_direct: None, + third_party_invite: None, + blurhash: None, + reason: None, + join_authorized_via_users_server: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + &conduit_user, + &room_id, + &db, + &state_lock, + )?; + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Join, + displayname: Some(displayname), + avatar_url: None, + is_direct: None, + third_party_invite: None, + blurhash: None, + reason: None, + join_authorized_via_users_server: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + &user_id, + &room_id, + &db, + &state_lock, + )?; + + // Set power level + let mut users = BTreeMap::new(); + users.insert(conduit_user.to_owned(), 100.into()); + users.insert(user_id.to_owned(), 100.into()); + + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomPowerLevels, + content: to_raw_value(&RoomPowerLevelsEventContent { + users, + ..Default::default() + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &db, + &state_lock, + )?; + + // Send welcome message + db.rooms.build_and_append_pdu( + PduBuilder { + event_type: EventType::RoomMessage, + content: to_raw_value(&RoomMessageEventContent::text_html( + "## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`".to_owned(), + "

<h2>Thank you for trying out Conduit!</h2>\n<p>Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Website: https://conduit.rs<br>Git and Documentation: https://gitlab.com/famedly/conduit<br>Report issues: https://gitlab.com/famedly/conduit/-/issues</p>\n</blockquote>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>Conduit room (Ask questions and get notified on updates):<br><code>/join #conduit:fachschaften.org</code></p>\n<p>Conduit lounge (Off-topic, only Conduit users are allowed to join)<br><code>/join #conduit-lounge:conduit.rs</code></p>
              \n".to_owned(), + )) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: None, + redacts: None, + }, + &conduit_user, + &room_id, + &db, + &state_lock, + )?; + + Ok(()) +} From 72cd52e57c7afa1f051b488e6385b59617fffa4e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 4 Feb 2022 13:30:42 +0100 Subject: [PATCH 168/445] fix: lazy loading for /context --- src/client_server/context.rs | 99 +++++++++++++++++++++++++++--------- 1 file changed, 74 insertions(+), 25 deletions(-) diff --git a/src/client_server/context.rs b/src/client_server/context.rs index 7ded48da..02148f41 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -1,9 +1,13 @@ use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; use ruma::{ - api::client::{error::ErrorKind, r0::context::get_context}, + api::client::{ + error::ErrorKind, + r0::{context::get_context, filter::LazyLoadOptions}, + }, events::EventType, }; -use std::collections::HashSet; +use std::{collections::HashSet, convert::TryFrom}; +use tracing::error; #[cfg(feature = "conduit_bin")] use rocket::get; @@ -26,12 +30,15 @@ pub async fn get_context_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); - if !db.rooms.is_joined(sender_user, &body.room_id)? { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "You don't have permission to view this room.", - )); - } + // Load filter + let filter = body.filter.clone().unwrap_or_default(); + + let (lazy_load_enabled, lazy_load_send_redundant) = match filter.lazy_load_options { + LazyLoadOptions::Enabled { + include_redundant_members: redundant, + } => (true, redundant), + _ => (false, false), + }; let mut lazy_loaded = HashSet::new(); @@ -53,20 +60,30 @@ pub async fn get_context_route( "Base event not found.", ))?; + let room_id = base_event.room_id.clone(); + + if !db.rooms.is_joined(sender_user, &room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You don't have permission to view this room.", + )); + } + if !db.rooms.lazy_load_was_sent_before( sender_user, sender_device, - &body.room_id, + &room_id, &base_event.sender, - )? { - lazy_loaded.insert(base_event.sender.clone()); + )? || lazy_load_send_redundant + { + lazy_loaded.insert(base_event.sender.as_str().to_owned()); } let base_event = base_event.to_room_event(); let events_before: Vec<_> = db .rooms - .pdus_until(sender_user, &body.room_id, base_token)? + .pdus_until(sender_user, &room_id, base_token)? .take( u32::try_from(body.limit).map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.") @@ -80,10 +97,11 @@ pub async fn get_context_route( if !db.rooms.lazy_load_was_sent_before( sender_user, sender_device, - &body.room_id, + &room_id, &event.sender, - )? { - lazy_loaded.insert(event.sender.clone()); + )? || lazy_load_send_redundant + { + lazy_loaded.insert(event.sender.as_str().to_owned()); } } @@ -99,7 +117,7 @@ pub async fn get_context_route( let events_after: Vec<_> = db .rooms - .pdus_after(sender_user, &body.room_id, base_token)? + .pdus_after(sender_user, &room_id, base_token)? .take( u32::try_from(body.limit).map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.") @@ -113,13 +131,28 @@ pub async fn get_context_route( if !db.rooms.lazy_load_was_sent_before( sender_user, sender_device, - &body.room_id, + &room_id, &event.sender, - )? 
{ - lazy_loaded.insert(event.sender.clone()); + )? || lazy_load_send_redundant + { + lazy_loaded.insert(event.sender.as_str().to_owned()); } } + let shortstatehash = match db.rooms.pdu_shortstatehash( + events_after + .last() + .map_or(&*body.event_id, |(_, e)| &*e.event_id), + )? { + Some(s) => s, + None => db + .rooms + .current_shortstatehash(&room_id)? + .expect("All rooms have state"), + }; + + let state_ids = db.rooms.state_full_ids(shortstatehash)?; + let end_token = events_after .last() .and_then(|(pdu_id, _)| db.rooms.pdu_count(pdu_id).ok()) @@ -131,12 +164,28 @@ pub async fn get_context_route( .collect(); let mut state = Vec::new(); - for ll_id in &lazy_loaded { - if let Some(member_event) = - db.rooms - .room_state_get(&body.room_id, &EventType::RoomMember, ll_id.as_str())? - { - state.push(member_event.to_state_event()); + + for (shortstatekey, id) in state_ids { + let (event_type, state_key) = db.rooms.get_statekey_from_short(shortstatekey)?; + + if event_type != EventType::RoomMember { + let pdu = match db.rooms.get_pdu(&id)? { + Some(pdu) => pdu, + None => { + error!("Pdu in state not found: {}", id); + continue; + } + }; + state.push(pdu.to_state_event()); + } else if !lazy_load_enabled || lazy_loaded.contains(&state_key) { + let pdu = match db.rooms.get_pdu(&id)? { + Some(pdu) => pdu, + None => { + error!("Pdu in state not found: {}", id); + continue; + } + }; + state.push(pdu.to_state_event()); } } From 8d8edddb2e1074bf36a14b244c7ed9a0eec054dc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 4 Feb 2022 16:59:30 +0100 Subject: [PATCH 169/445] feat: allow disabling jemalloc via feature --- Cargo.toml | 8 ++++---- src/main.rs | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index aac840b5..dd31c849 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -86,17 +86,17 @@ sha-1 = "0.9.8" clap = { version = "3.0.10", default-features = false, features = ["std", "derive"] } maplit = "1.0.2" -[target.'cfg(not(target_env = "msvc"))'.dependencies] -tikv-jemalloc-ctl = { version = "0.4.2", features = ['use_std'] } -tikv-jemallocator = { version = "0.4.1", features = ['unprefixed_malloc_on_supported_platforms'] } +tikv-jemalloc-ctl = { version = "0.4.2", features = ["use_std"], optional = true } +tikv-jemallocator = { version = "0.4.1", features = ["unprefixed_malloc_on_supported_platforms"], optional = true } [features] -default = ["conduit_bin", "backend_sqlite", "backend_rocksdb"] +default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "jemalloc"] backend_sled = ["sled"] backend_persy = ["persy", "parking_lot"] backend_sqlite = ["sqlite"] backend_heed = ["heed", "crossbeam"] backend_rocksdb = ["rocksdb"] +jemalloc = ["tikv-jemalloc-ctl", "tikv-jemallocator"] sqlite = ["rusqlite", "parking_lot", "tokio/signal"] conduit_bin = [] # TODO: add rocket to this when it is optional diff --git a/src/main.rs b/src/main.rs index b3e85c95..ea09dd5b 100644 --- a/src/main.rs +++ b/src/main.rs @@ -26,10 +26,10 @@ use tracing_subscriber::{prelude::*, EnvFilter}; pub use conduit::*; // Re-export everything from the library crate pub use rocket::State; -#[cfg(not(target_env = "msvc"))] +#[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] use tikv_jemallocator::Jemalloc; -#[cfg(not(target_env = "msvc"))] +#[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] #[global_allocator] static GLOBAL: Jemalloc = Jemalloc; From eb0b2c429faf9bfa3c472c198deca8f7f07f46f6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= 
Date: Fri, 4 Feb 2022 17:15:21 +0100 Subject: [PATCH 170/445] fix: crash on empty search --- src/client_server/search.rs | 9 +++++---- src/database/rooms.rs | 29 +++++++++++++++-------------- 2 files changed, 20 insertions(+), 18 deletions(-) diff --git a/src/client_server/search.rs b/src/client_server/search.rs index 59c9480a..f492292c 100644 --- a/src/client_server/search.rs +++ b/src/client_server/search.rs @@ -44,11 +44,12 @@ pub async fn search_events_route( )); } - let search = db + if let Some(search) = db .rooms - .search_pdus(&room_id, &search_criteria.search_term)?; - - searches.push(search.0.peekable()); + .search_pdus(&room_id, &search_criteria.search_term)? + { + searches.push(search.0.peekable()); + } } let skip = match body.next_batch.as_ref().map(|s| s.parse()) { diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 2c271d16..0abd2e79 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -2771,7 +2771,7 @@ impl Rooms { &'a self, room_id: &RoomId, search_string: &str, - ) -> Result<(impl Iterator> + 'a, Vec)> { + ) -> Result> + 'a, Vec)>> { let prefix = self .get_shortroomid(room_id)? .expect("room exists") @@ -2799,19 +2799,20 @@ impl Rooms { .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) }); - Ok(( - utils::common_elements(iterators, |a, b| { - // We compare b with a because we reversed the iterator earlier - b.cmp(a) - }) - .unwrap() - .map(move |id| { - let mut pduid = prefix_clone.clone(); - pduid.extend_from_slice(&id); - pduid - }), - words, - )) + Ok(utils::common_elements(iterators, |a, b| { + // We compare b with a because we reversed the iterator earlier + b.cmp(a) + }) + .map(|iter| { + ( + iter.map(move |id| { + let mut pduid = prefix_clone.clone(); + pduid.extend_from_slice(&id); + pduid + }), + words, + ) + })) } #[tracing::instrument(skip(self))] From dd03608f173b1cc450183eae2b2be46684e868fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 4 Feb 2022 17:24:45 +0100 Subject: [PATCH 171/445] use our own reqwest fork --- Cargo.lock | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 209adea4..859564bd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2018,7 +2018,7 @@ dependencies = [ [[package]] name = "reqwest" version = "0.11.9" -source = "git+https://github.com/niuhuan/reqwest?branch=dns-resolver-fn#57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" +source = "git+https://github.com/timokoesters/reqwest?rev=57b7cf4feb921573dfafad7d34b9ac6e44ead0bd#57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" dependencies = [ "base64 0.13.0", "bytes", diff --git a/Cargo.toml b/Cargo.toml index aac840b5..fb861380 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -48,7 +48,7 @@ rand = "0.8.4" # Used to hash passwords rust-argon2 = "0.8.3" # Used to send requests -reqwest = { version = "0.11.4", default-features = false, features = ["rustls-tls", "socks"], git = "https://github.com/niuhuan/reqwest", branch = "dns-resolver-fn" } +reqwest = { default-features = false, features = ["rustls-tls", "socks"], git = "https://github.com/timokoesters/reqwest", rev = "57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" } # Used for conduit::Error type thiserror = "1.0.28" # Used to generate thumbnails for images From 103dc7e09b4cdcefca6817b448f6b45677988e84 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Fri, 4 Feb 2022 17:57:59 +0100 Subject: [PATCH 172/445] Pre-0.3 doc adjustments --- APPSERVICES.md | 4 ++-- Cargo.lock | 2 +- Cargo.toml | 2 +- DEPLOY.md | 27 ++++++++++++++++----------- README.md | 
18 ++++++------------ docker/README.md | 2 +- 6 files changed, 27 insertions(+), 28 deletions(-) diff --git a/APPSERVICES.md b/APPSERVICES.md index 545772a6..8ca015a0 100644 --- a/APPSERVICES.md +++ b/APPSERVICES.md @@ -2,7 +2,7 @@ ## Getting help -If you run into any problems while setting up an Appservice, write an email to `timo@koesters.xyz`, ask us in `#conduit:matrix.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new). +If you run into any problems while setting up an Appservice, write an email to `timo@koesters.xyz`, ask us in [#conduit:fachschaften.org](https://matrix.to/#/#conduit:fachschaften.org) or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new). ## Set up the appservice - general instructions @@ -46,7 +46,7 @@ could help. To remove an appservice go to your admin room and execute -```@conduit:your.server.name: unregister-appservice ``` +`@conduit:your.server.name: unregister-appservice ` where `` one of the output of `list-appservices`. diff --git a/Cargo.lock b/Cargo.lock index 859564bd..632b4cea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -304,7 +304,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" [[package]] name = "conduit" -version = "0.2.0" +version = "0.3.0" dependencies = [ "base64 0.13.0", "bytes", diff --git a/Cargo.toml b/Cargo.toml index fb861380..587e26bd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,7 +6,7 @@ authors = ["timokoesters "] homepage = "https://conduit.rs" repository = "https://gitlab.com/famedly/conduit" readme = "README.md" -version = "0.2.0" +version = "0.3.0" rust-version = "1.56" edition = "2021" diff --git a/DEPLOY.md b/DEPLOY.md index d9f91e03..c3da6975 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -1,9 +1,9 @@ # Deploying Conduit -## Getting help - -If you run into any problems while setting up Conduit, write an email to `timo@koesters.xyz`, ask us -in `#conduit:matrix.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new). +> ## Getting help +> +> If you run into any problems while setting up Conduit, write an email to `timo@koesters.xyz`, ask us +> in `#conduit:fachschaften.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new). ## Installing Conduit @@ -12,17 +12,21 @@ only offer Linux binaries. You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the right url: -| CPU Architecture | Download stable version | -| ------------------------------------------- | ------------------------------ | -| x84_64 / amd64 (Most servers and computers) | [Download][x84_64-musl-master] | -| armv6 | [Download][armv6-musl-master] | -| armv7 (e.g. Raspberry Pi by default) | [Download][armv7-musl-master] | -| armv8 / aarch64 | [Download][armv8-musl-master] | +| CPU Architecture | Download stable version | Download development version | +| ------------------------------------------- | ------------------------------ | ---------------------------- | +| x84_64 / amd64 (Most servers and computers) | [Download][x84_64-musl-master] | [Download][x84_64-musl-next] | +| armv6 | [Download][armv6-musl-master] | [Download][armv6-musl-next] | +| armv7 (e.g. 
Raspberry Pi by default) | [Download][armv7-musl-master] | [Download][armv7-musl-next] | +| armv8 / aarch64 | [Download][armv8-musl-master] | [Download][armv8-musl-next] | [x84_64-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-musl?job=build:release:cargo:x86_64-unknown-linux-musl [armv6-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-arm-unknown-linux-musleabihf?job=build:release:cargo:arm-unknown-linux-musleabihf [armv7-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-armv7-unknown-linux-musleabihf?job=build:release:cargo:armv7-unknown-linux-musleabihf [armv8-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-aarch64-unknown-linux-musl?job=build:release:cargo:aarch64-unknown-linux-musl +[x84_64-musl-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/conduit-x86_64-unknown-linux-musl?job=build:release:cargo:x86_64-unknown-linux-musl +[armv6-musl-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/conduit-arm-unknown-linux-musleabihf?job=build:release:cargo:arm-unknown-linux-musleabihf +[armv7-musl-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/conduit-armv7-unknown-linux-musleabihf?job=build:release:cargo:armv7-unknown-linux-musleabihf +[armv8-musl-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/conduit-aarch64-unknown-linux-musl?job=build:release:cargo:aarch64-unknown-linux-musl ```bash $ sudo wget -O /usr/local/bin/matrix-conduit @@ -240,4 +244,5 @@ $ curl https://your.server.name/_matrix/client/versions $ curl https://your.server.name:8448/_matrix/client/versions ``` -If you want to set up an appservice, take a look at the [Appservice Guide](APPSERVICES.md). +- To check if your server can talk with other homeservers, you can use the [Matrix Federation Tester](https://federationtester.matrix.org/) +- If you want to set up an appservice, take a look at the [Appservice Guide](APPSERVICES.md). diff --git a/README.md b/README.md index e667d18d..a4f09298 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,5 @@ # Conduit + ### A Matrix homeserver written in Rust #### What is the goal? @@ -7,7 +8,6 @@ An efficient Matrix homeserver that's easy to set up and just works. You can ins it on a mini-computer like the Raspberry Pi to host Matrix for your family, friends or company. - #### Can I try it out? Yes! You can test our Conduit instance by opening a Matrix client ( or Element Android for @@ -17,7 +17,6 @@ It is hosted on a ODROID HC 2 with 2GB RAM and a SAMSUNG Exynos 5422 CPU, which was used in the Samsung Galaxy S5. It joined many big rooms including Matrix HQ. - #### What is the current status? As of 2021-09-01, Conduit is Beta, meaning you can join and participate in most @@ -31,26 +30,23 @@ There are still a few important features missing: Check out the [Conduit 1.0 Release Milestone](https://gitlab.com/famedly/conduit/-/milestones/3). - #### How can I deploy my own? -Simple install (this was tested the most): [DEPLOY.md](DEPLOY.md)\ -Debian package: [debian/README.Debian](debian/README.Debian)\ -Docker: [docker/README.md](docker/README.md) +- Simple install (this was tested the most): [DEPLOY.md](DEPLOY.md) +- Debian package: [debian/README.Debian](debian/README.Debian) +- Docker: [docker/README.md](docker/README.md) If you want to connect an Appservice to Conduit, take a look at [APPSERVICES.md](APPSERVICES.md). - #### How can I contribute? 1. 
Look for an issue you would like to work on and make sure it's not assigned to other users 2. Ask someone to assign the issue to you (comment on the issue or chat in - #conduit:nordgedanken.dev) -3. Fork the repo and work on the issue. #conduit:nordgedanken.dev is happy to help :) + [#conduit:fachschaften.org](https://matrix.to/#/#conduit:fachschaften.org)) +3. Fork the repo and work on the issue.[#conduit:fachschaften.org](https://matrix.to/#/#conduit:fachschaften.org) is happy to help :) 4. Submit a MR - #### Thanks to Thanks to Famedly, Prototype Fund (DLR and German BMBF) and all other individuals for financially supporting this project. @@ -60,13 +56,11 @@ Thanks to the contributors to Conduit and all libraries we use, for example: - Ruma: A clean library for the Matrix Spec in Rust - Rocket: A flexible web framework - #### Donate Liberapay: \ Bitcoin: `bc1qnnykf986tw49ur7wx9rpw2tevpsztvar5x8w4n` - #### Logo Lightning Bolt Logo: https://github.com/mozilla/fxemoji/blob/gh-pages/svgs/nature/u26A1-bolt.svg \ diff --git a/docker/README.md b/docker/README.md index 1f38d66a..d8867385 100644 --- a/docker/README.md +++ b/docker/README.md @@ -35,7 +35,7 @@ or you can skip the build step and pull the image from one of the following regi | GitLab Registry | [registry.gitlab.com/famedly/conduit/matrix-conduit:latest][gl] | ![Image Size][shield] | [dh]: https://hub.docker.com/r/matrixconduit/matrix-conduit -[gl]: https://gitlab.com/famedly/conduit/container_registry/ +[gl]: https://gitlab.com/famedly/conduit/container_registry/2497937 [shield]: https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` config file, an example can be found [here](../conduit-example.toml). From 826b077e218d5d698995d786e1a6457fc3b79dad Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Fri, 4 Feb 2022 18:43:13 +0100 Subject: [PATCH 173/445] fix(ci): Always build debug version for sytest --- .gitlab-ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 6f1a19f0..8d701c2a 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -117,7 +117,7 @@ build:release:cargo:aarch64-unknown-linux-musl: .cargo-debug-shared-settings: extends: ".build-cargo-shared-settings" rules: - - if: '$CI_COMMIT_BRANCH != "master"' + - when: "always" cache: key: "build_cache--$TARGET--$CI_COMMIT_BRANCH--debug" script: From 63a2c6cce5f01cfca8295a2ea5ad7f639bc257b8 Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Fri, 4 Feb 2022 19:11:29 +0100 Subject: [PATCH 174/445] Add new TURN Readme and reference it from DEPLOY.md --- DEPLOY.md | 14 ++++++++++++++ TURN.md | 25 +++++++++++++++++++++++++ 2 files changed, 39 insertions(+) create mode 100644 TURN.md diff --git a/DEPLOY.md b/DEPLOY.md index d9f91e03..4a0d0ab5 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -241,3 +241,17 @@ $ curl https://your.server.name:8448/_matrix/client/versions ``` If you want to set up an appservice, take a look at the [Appservice Guide](APPSERVICES.md). + +# What's next? + +## Audio/Video calls + +For Audio/Video call functionality see the [TURN Guide](TURN.md). 
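The `turn_secret` configured in the TURN guide below is meant for Coturn's shared-secret (`use-auth-secret`) mode: instead of storing static credentials, the homeserver derives short-lived TURN usernames and passwords from the secret on demand. A minimal sketch of that standard derivation, assuming the `hmac`, `sha-1` and `base64` crates already in Conduit's dependency list — the function name, arguments and exact import paths here are illustrative assumptions, not Conduit's actual code:

```rust
use hmac::{Hmac, Mac, NewMac};
use sha1::Sha1; // provided by the `sha-1` crate; the import path may differ by version

type HmacSha1 = Hmac<Sha1>;

/// Hypothetical helper: derive ephemeral TURN credentials from the shared secret.
fn turn_credentials(turn_secret: &str, user: &str, expires_unix: u64) -> (String, String) {
    // Coturn's use-auth-secret scheme: the username is "<expiry>:<user>" and the
    // password is base64(HMAC-SHA1(secret, username)).
    let username = format!("{}:{}", expires_unix, user);
    let mut mac = HmacSha1::new_from_slice(turn_secret.as_bytes())
        .expect("HMAC accepts keys of any length");
    mac.update(username.as_bytes());
    let password = base64::encode(mac.finalize().into_bytes());
    (username, password)
}
```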
+As of 2022, Clients known to support a/v calls are + +* Element/Android +* SchildiChat + +Clients known to not support a/v calls are + +* FluffyChat diff --git a/TURN.md b/TURN.md new file mode 100644 index 00000000..ed962955 --- /dev/null +++ b/TURN.md @@ -0,0 +1,25 @@ +# Setting up TURN/STURN + +## General instructions + +* It is assumed you have a [Coturn server](https://github.com/coturn/coturn) up and running. See [Synapse reference implementation](https://github.com/matrix-org/synapse/blob/develop/docs/turn-howto.md). + +## Edit/Add a few settings to your existing conduit.toml + +``` +# Refer to your Coturn settings. +# `server.name` has to match the REALM setting of your Coturn as well as `transport`. +turn_uris = ["turn:server.name?transport=udp", "turn:server.name?transport=tcp"] + +# static-auth-secret of your turnserver +turn_secret = "ADD SECRET HERE" + +# If you have your TURN server configured to use a username and password +# you can provide these information too. In this case comment out `turn_secret above`! +#turn_username = "" +#turn_password = "" +``` + +## Apply settings + +Restart Conduit. \ No newline at end of file From f110b5710a182192231d5ae40f8374bb5dda332f Mon Sep 17 00:00:00 2001 From: Torsten Flammiger Date: Fri, 4 Feb 2022 21:11:50 +0100 Subject: [PATCH 175/445] Move appservice howto into whats-next; again, rename placeholder TURN url --- DEPLOY.md | 5 ++++- TURN.md | 4 ++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/DEPLOY.md b/DEPLOY.md index 8259b3f0..eecf5136 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -245,10 +245,13 @@ $ curl https://your.server.name:8448/_matrix/client/versions ``` - To check if your server can talk with other homeservers, you can use the [Matrix Federation Tester](https://federationtester.matrix.org/) -- If you want to set up an appservice, take a look at the [Appservice Guide](APPSERVICES.md). # What's next? ## Audio/Video calls For Audio/Video call functionality see the [TURN Guide](TURN.md). + +## Appservices + +If you want to set up an appservice, take a look at the [Appservice Guide](APPSERVICES.md). diff --git a/TURN.md b/TURN.md index ed962955..63c1e99f 100644 --- a/TURN.md +++ b/TURN.md @@ -8,8 +8,8 @@ ``` # Refer to your Coturn settings. -# `server.name` has to match the REALM setting of your Coturn as well as `transport`. -turn_uris = ["turn:server.name?transport=udp", "turn:server.name?transport=tcp"] +# `your.turn.url` has to match the REALM setting of your Coturn as well as `transport`. +turn_uris = ["turn:your.turn.url?transport=udp", "turn:your.turn.url?transport=tcp"] # static-auth-secret of your turnserver turn_secret = "ADD SECRET HERE" From 31918bb9908c3917273a70acb6be9ea1b3b1b6ed Mon Sep 17 00:00:00 2001 From: Andrei Vasiliu Date: Sat, 5 Feb 2022 08:57:15 +0200 Subject: [PATCH 176/445] Fix admin room processing commands from its own messages --- src/database/rooms.rs | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 0abd2e79..aff39dda 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1477,17 +1477,18 @@ impl Rooms { self.tokenids.insert_batch(&mut batch)?; - if body.starts_with(&format!("@conduit:{}: ", db.globals.server_name())) - && self - .id_from_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )? 
- .as_ref() - == Some(&pdu.room_id) - { + let admin_room = self.id_from_alias( + <&RoomAliasId>::try_from( + format!("#admins:{}", db.globals.server_name()).as_str(), + ) + .expect("#admins:server_name is a valid room alias"), + )?; + let server_user = format!("@conduit:{}", db.globals.server_name()); + + let to_conduit = body.starts_with(&format!("{}: ", server_user)); + let from_conduit = pdu.sender == server_user; + + if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { db.admin.process_message(body.to_string()); } } From bfbefb0cd2e90549c41247b407d40ad9e1b128b8 Mon Sep 17 00:00:00 2001 From: Andrej Kacian Date: Mon, 7 Feb 2022 12:55:21 +0100 Subject: [PATCH 177/445] Display actual error message from TokioAsyncResolver, if any --- src/database/globals.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/database/globals.rs b/src/database/globals.rs index decd84c3..f38f32c9 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -149,7 +149,11 @@ impl Globals { globals, config, keypair: Arc::new(keypair), - dns_resolver: TokioAsyncResolver::tokio_from_system_conf().map_err(|_| { + dns_resolver: TokioAsyncResolver::tokio_from_system_conf().map_err(|e| { + error!( + "Failed to set up trust dns resolver with system config: {}", + e + ); Error::bad_config("Failed to set up trust dns resolver with system config.") })?, actual_destination_cache: Arc::new(RwLock::new(WellKnownMap::new())), From f2b8aa28f303c49a57a282d94c1a003cbbc403eb Mon Sep 17 00:00:00 2001 From: M0dEx Date: Fri, 11 Feb 2022 18:26:56 +0100 Subject: [PATCH 178/445] feat: add a line with the help command to the welcome message --- src/database/admin.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index 9bbfd4ea..664aabb5 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -815,8 +815,8 @@ pub(crate) async fn make_user_admin( PduBuilder { event_type: EventType::RoomMessage, content: to_raw_value(&RoomMessageEventContent::text_html( - "## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`".to_owned(), - "

<h2>Thank you for trying out Conduit!</h2>\n<p>Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Website: https://conduit.rs<br>Git and Documentation: https://gitlab.com/famedly/conduit<br>Report issues: https://gitlab.com/famedly/conduit/-/issues</p>\n</blockquote>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>Conduit room (Ask questions and get notified on updates):<br><code>/join #conduit:fachschaften.org</code></p>\n<p>Conduit lounge (Off-topic, only Conduit users are allowed to join)<br><code>/join #conduit-lounge:conduit.rs</code></p>
              \n".to_owned(), + "## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nFor a list of commands, send the following message in this room: `@conduit:: --help`\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`".to_owned(), + "

<h2>Thank you for trying out Conduit!</h2>\n<p>Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Website: https://conduit.rs<br>Git and Documentation: https://gitlab.com/famedly/conduit<br>Report issues: https://gitlab.com/famedly/conduit/-/issues</p>\n</blockquote>\n<p>For a list of commands, send the following message in this room: <code>@conduit:&lt;your_server&gt;: --help</code></p>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>Conduit room (Ask questions and get notified on updates):<br><code>/join #conduit:fachschaften.org</code></p>\n<p>Conduit lounge (Off-topic, only Conduit users are allowed to join)<br><code>/join #conduit-lounge:conduit.rs</code></p>
              \n".to_owned(), )) .expect("event is valid, we just created it"), unsigned: None, From a6976e6d2d3878c93a42799d832fe34016e29860 Mon Sep 17 00:00:00 2001 From: M0dEx Date: Fri, 11 Feb 2022 18:40:51 +0100 Subject: [PATCH 179/445] feat: add 'available' to the help command line in the welcome message --- src/database/admin.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index 664aabb5..eae8aa56 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -815,8 +815,8 @@ pub(crate) async fn make_user_admin( PduBuilder { event_type: EventType::RoomMessage, content: to_raw_value(&RoomMessageEventContent::text_html( - "## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nFor a list of commands, send the following message in this room: `@conduit:: --help`\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`".to_owned(), - "

<h2>Thank you for trying out Conduit!</h2>\n<p>Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Website: https://conduit.rs<br>Git and Documentation: https://gitlab.com/famedly/conduit<br>Report issues: https://gitlab.com/famedly/conduit/-/issues</p>\n</blockquote>\n<p>For a list of commands, send the following message in this room: <code>@conduit:&lt;your_server&gt;: --help</code></p>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>Conduit room (Ask questions and get notified on updates):<br><code>/join #conduit:fachschaften.org</code></p>\n<p>Conduit lounge (Off-topic, only Conduit users are allowed to join)<br><code>/join #conduit-lounge:conduit.rs</code></p>
              \n".to_owned(), + "## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nFor a list of available commands, send the following message in this room: `@conduit:: --help`\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`".to_owned(), + "

<h2>Thank you for trying out Conduit!</h2>\n<p>Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Website: https://conduit.rs<br>Git and Documentation: https://gitlab.com/famedly/conduit<br>Report issues: https://gitlab.com/famedly/conduit/-/issues</p>\n</blockquote>\n<p>For a list of available commands, send the following message in this room: <code>@conduit:&lt;your_server&gt;: --help</code></p>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>Conduit room (Ask questions and get notified on updates):<br><code>/join #conduit:fachschaften.org</code></p>\n<p>Conduit lounge (Off-topic, only Conduit users are allowed to join)<br><code>/join #conduit-lounge:conduit.rs</code></p>
              \n".to_owned(), )) .expect("event is valid, we just created it"), unsigned: None, From f602d32aaa0e4fbc2b5d9eb0d0d89d04bdca21d7 Mon Sep 17 00:00:00 2001 From: M0dEx Date: Fri, 11 Feb 2022 18:51:28 +0100 Subject: [PATCH 180/445] feat: add the actual server name to the welcome message --- src/database/admin.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index eae8aa56..20fb42af 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -815,8 +815,8 @@ pub(crate) async fn make_user_admin( PduBuilder { event_type: EventType::RoomMessage, content: to_raw_value(&RoomMessageEventContent::text_html( - "## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nFor a list of available commands, send the following message in this room: `@conduit:: --help`\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`".to_owned(), - "

<h2>Thank you for trying out Conduit!</h2>\n<p>Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Website: https://conduit.rs<br>Git and Documentation: https://gitlab.com/famedly/conduit<br>Report issues: https://gitlab.com/famedly/conduit/-/issues</p>\n</blockquote>\n<p>For a list of available commands, send the following message in this room: <code>@conduit:&lt;your_server&gt;: --help</code></p>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>Conduit room (Ask questions and get notified on updates):<br><code>/join #conduit:fachschaften.org</code></p>\n<p>Conduit lounge (Off-topic, only Conduit users are allowed to join)<br><code>/join #conduit-lounge:conduit.rs</code></p>
              \n".to_owned(), + format!("## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nFor a list of available commands, send the following message in this room: `@conduit:{}: --help`\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`", db.globals.server_name()).to_owned(), + format!("

<h2>Thank you for trying out Conduit!</h2>\n<p>Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Website: https://conduit.rs<br>Git and Documentation: https://gitlab.com/famedly/conduit<br>Report issues: https://gitlab.com/famedly/conduit/-/issues</p>\n</blockquote>\n<p>For a list of available commands, send the following message in this room: <code>@conduit:{}: --help</code></p>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>Conduit room (Ask questions and get notified on updates):<br><code>/join #conduit:fachschaften.org</code></p>\n<p>Conduit lounge (Off-topic, only Conduit users are allowed to join)<br><code>/join #conduit-lounge:conduit.rs</code></p>
              \n", db.globals.server_name()).to_owned(), )) .expect("event is valid, we just created it"), unsigned: None, From 583ec51f9fd5590d8a6982b91bd8c7b878903b3f Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 12 Feb 2022 01:55:53 +0100 Subject: [PATCH 181/445] Remove unnecessary use of event enum --- src/client_server/sync.rs | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 7cfea5af..2b814f54 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -7,7 +7,7 @@ use ruma::{ }, events::{ room::member::{MembershipState, RoomMemberEventContent}, - AnySyncEphemeralRoomEvent, EventType, + EventType, }, serde::Raw, DeviceId, RoomId, UserId, @@ -656,10 +656,8 @@ async fn sync_helper( if db.rooms.edus.last_typing_update(&room_id, &db.globals)? > since { edus.push( serde_json::from_str( - &serde_json::to_string(&AnySyncEphemeralRoomEvent::Typing( - db.rooms.edus.typings_all(&room_id)?, - )) - .expect("event is valid, we just created it"), + &serde_json::to_string(&db.rooms.edus.typings_all(&room_id)?) + .expect("event is valid, we just created it"), ) .expect("event is valid, we just created it"), ); From 5db4c001d1e385513bb27f1484e1eaf5b1497374 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 12 Feb 2022 01:58:36 +0100 Subject: [PATCH 182/445] Remove another unnecessary use of an event enum --- src/client_server/read_marker.rs | 10 +++++----- src/database/rooms/edus.rs | 5 +++-- src/server_server.rs | 6 +++--- 3 files changed, 11 insertions(+), 10 deletions(-) diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs index 60aa4cef..502a612e 100644 --- a/src/client_server/read_marker.rs +++ b/src/client_server/read_marker.rs @@ -4,7 +4,7 @@ use ruma::{ error::ErrorKind, r0::{read_marker::set_read_marker, receipt::create_receipt}, }, - events::{AnyEphemeralRoomEvent, EventType}, + events::EventType, receipt::ReceiptType, MilliSecondsSinceUnixEpoch, }; @@ -73,10 +73,10 @@ pub async fn set_read_marker_route( db.rooms.edus.readreceipt_update( sender_user, &body.room_id, - AnyEphemeralRoomEvent::Receipt(ruma::events::receipt::ReceiptEvent { + ruma::events::receipt::ReceiptEvent { content: ruma::events::receipt::ReceiptEventContent(receipt_content), room_id: body.room_id.clone(), - }), + }, &db.globals, )?; } @@ -130,10 +130,10 @@ pub async fn create_receipt_route( db.rooms.edus.readreceipt_update( sender_user, &body.room_id, - AnyEphemeralRoomEvent::Receipt(ruma::events::receipt::ReceiptEvent { + ruma::events::receipt::ReceiptEvent { content: ruma::events::receipt::ReceiptEventContent(receipt_content), room_id: body.room_id.clone(), - }), + }, &db.globals, )?; diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs index 289a00a1..118efd4c 100644 --- a/src/database/rooms/edus.rs +++ b/src/database/rooms/edus.rs @@ -2,7 +2,8 @@ use crate::{database::abstraction::Tree, utils, Error, Result}; use ruma::{ events::{ presence::{PresenceEvent, PresenceEventContent}, - AnyEphemeralRoomEvent, SyncEphemeralRoomEvent, + receipt::ReceiptEvent, + SyncEphemeralRoomEvent, }, presence::PresenceState, serde::Raw, @@ -31,7 +32,7 @@ impl RoomEdus { &self, user_id: &UserId, room_id: &RoomId, - event: AnyEphemeralRoomEvent, + event: ReceiptEvent, globals: &super::super::globals::Globals, ) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); diff --git a/src/server_server.rs b/src/server_server.rs index 2c682f6f..a39b3a53 100644 --- a/src/server_server.rs +++ 
b/src/server_server.rs @@ -46,7 +46,7 @@ use ruma::{ member::{MembershipState, RoomMemberEventContent}, server_acl::RoomServerAclEventContent, }, - AnyEphemeralRoomEvent, EventType, + EventType, }, int, receipt::ReceiptType, @@ -795,10 +795,10 @@ pub async fn send_transaction_message_route( let mut receipt_content = BTreeMap::new(); receipt_content.insert(event_id.to_owned(), receipts); - let event = AnyEphemeralRoomEvent::Receipt(ReceiptEvent { + let event = ReceiptEvent { content: ReceiptEventContent(receipt_content), room_id: room_id.clone(), - }); + }; db.rooms.edus.readreceipt_update( &user_id, &room_id, From 1f7b3fa4acd13ea4962ba93c5bc96bd8aa9f44b3 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 20 Jan 2022 11:51:31 +0100 Subject: [PATCH 183/445] Port from Rocket to axum --- .gitignore | 1 - Cargo.lock | 775 ++++++++-------------------- Cargo.toml | 20 +- README.md | 2 +- conduit-example.toml | 2 +- debian/postinst | 2 +- docker-compose.yml | 2 +- docker/docker-compose.traefik.yml | 2 +- src/client_server/account.rs | 26 - src/client_server/alias.rs | 15 - src/client_server/backup.rs | 59 --- src/client_server/capabilities.rs | 7 - src/client_server/config.rs | 25 - src/client_server/context.rs | 7 - src/client_server/device.rs | 22 - src/client_server/directory.rs | 19 - src/client_server/filter.rs | 11 - src/client_server/keys.rs | 29 +- src/client_server/media.rs | 23 +- src/client_server/membership.rs | 47 -- src/client_server/message.rs | 11 - src/client_server/mod.rs | 17 - src/client_server/presence.rs | 11 - src/client_server/profile.rs | 23 - src/client_server/push.rs | 43 -- src/client_server/read_marker.rs | 11 - src/client_server/redact.rs | 6 - src/client_server/report.rs | 11 +- src/client_server/room.rs | 19 - src/client_server/search.rs | 6 - src/client_server/session.rs | 22 +- src/client_server/state.rs | 23 - src/client_server/sync.rs | 7 - src/client_server/tag.rs | 15 - src/client_server/thirdparty.rs | 14 +- src/client_server/to_device.rs | 7 - src/client_server/typing.rs | 9 +- src/client_server/unversioned.rs | 12 +- src/client_server/user_directory.rs | 7 - src/client_server/voip.rs | 9 +- src/config.rs | 19 +- src/database.rs | 48 +- src/database/admin.rs | 51 +- src/database/sending.rs | 19 +- src/error.rs | 45 +- src/lib.rs | 14 - src/main.rs | 505 ++++++++++-------- src/ruma_wrapper.rs | 362 +------------ src/ruma_wrapper/axum.rs | 338 ++++++++++++ src/server_server.rs | 113 +--- src/utils.rs | 39 +- tests/Complement.Dockerfile | 7 +- 52 files changed, 1059 insertions(+), 1880 deletions(-) create mode 100644 src/ruma_wrapper/axum.rs diff --git a/.gitignore b/.gitignore index 1f5f395f..f5e9505b 100644 --- a/.gitignore +++ b/.gitignore @@ -57,7 +57,6 @@ $RECYCLE.BIN/ *.lnk # Conduit -Rocket.toml conduit.toml conduit.db diff --git a/Cargo.lock b/Cargo.lock index 632b4cea..f84c9829 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2,6 +2,12 @@ # It is not intended for manual editing. 
version = 3 +[[package]] +name = "adler" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" + [[package]] name = "adler32" version = "1.2.0" @@ -28,6 +34,21 @@ dependencies = [ "memchr", ] +[[package]] +name = "alloc-no-stdlib" +version = "2.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "35ef4730490ad1c4eae5c4325b2a95f521d023e5c885853ff7aca0a6a1631db3" + +[[package]] +name = "alloc-stdlib" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "697ed7edc0f1711de49ce108c541623a0af97c6c60b2f6e2b65229847ac843c2" +dependencies = [ + "alloc-no-stdlib", +] + [[package]] name = "ansi_term" version = "0.12.1" @@ -56,24 +77,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" [[package]] -name = "async-stream" -version = "0.3.2" +name = "async-compression" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "171374e7e3b2504e0e5236e3b59260560f9fe94bfe9ac39ba5e4e929c5590625" +checksum = "5443ccbb270374a2b1055fc72da40e1f237809cd6bb0e97e66d264cd138473a6" dependencies = [ - "async-stream-impl", + "brotli", + "flate2", "futures-core", -] - -[[package]] -name = "async-stream-impl" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "648ed8c8d2ce5409ccd57453d9d1b214b342a0d69376a6feda1fd6cae3299308" -dependencies = [ - "proc-macro2", - "quote", - "syn", + "memchr", + "pin-project-lite", + "tokio", ] [[package]] @@ -96,17 +110,6 @@ dependencies = [ "autocfg", ] -[[package]] -name = "atty" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi", - "libc", - "winapi", -] - [[package]] name = "autocfg" version = "1.0.1" @@ -114,10 +117,50 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] -name = "base-x" -version = "0.2.8" +name = "axum" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "310a147401c66e79fc78636e4db63ac68cd6acb9ece056de806ea173a15bce32" +dependencies = [ + "async-trait", + "axum-core", + "bitflags", + "bytes", + "futures-util", + "headers", + "http", + "http-body", + "hyper", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + "serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tokio-util", + "tower", + "tower-http", + "tower-layer", + "tower-service", +] + +[[package]] +name = "axum-core" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" +checksum = "1ca6c0b218388a7ed6a8d25e94f7dea5498daaa4fd8c711fb3ff166041b06fda" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http", + "http-body", + "mime", +] [[package]] name = "base64" @@ -131,12 +174,6 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" -[[package]] -name = "binascii" -version = "0.1.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" - [[package]] name = "bincode" version = "1.3.3" @@ -191,6 +228,27 @@ dependencies = [ "generic-array", ] +[[package]] +name = "brotli" +version = "3.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f838e47a451d5a8fa552371f80024dd6ace9b7acdf25c4c3d0f9bc6816fb1c39" +dependencies = [ + "alloc-no-stdlib", + "alloc-stdlib", + "brotli-decompressor", +] + +[[package]] +name = "brotli-decompressor" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ad2d4653bf5ca36ae797b1f4bb4dbddb60ce49ca4aed8a2ce4829f60425b80" +dependencies = [ + "alloc-no-stdlib", + "alloc-stdlib", +] + [[package]] name = "bumpalo" version = "3.9.1" @@ -254,7 +312,7 @@ dependencies = [ "libc", "num-integer", "num-traits", - "time 0.1.43", + "time", "winapi", ] @@ -306,18 +364,21 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" name = "conduit" version = "0.3.0" dependencies = [ + "axum", "base64 0.13.0", "bytes", "clap", "crossbeam", "directories", + "figment", + "futures-util", "heed", "hmac", "http", + "hyper", "image", "jsonwebtoken", "lru-cache", - "maplit", "num_cpus", "opentelemetry", "opentelemetry-jaeger", @@ -327,7 +388,6 @@ dependencies = [ "regex", "reqwest", "ring", - "rocket", "rocksdb", "ruma", "rusqlite", @@ -343,9 +403,11 @@ dependencies = [ "tikv-jemalloc-ctl", "tikv-jemallocator", "tokio", + "tower", + "tower-http", "tracing", "tracing-flame", - "tracing-subscriber 0.2.25", + "tracing-subscriber", "trust-dns-resolver", ] @@ -355,29 +417,12 @@ version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d6f2aa4d0537bcc1c74df8755072bd31c1ef1a3a1b85a68e8404a8c353b7b8b" -[[package]] -name = "const_fn" -version = "0.4.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbdcdcb6d86f71c5e97409ad45898af11cbc995b4ee8112d59095a28d376c935" - [[package]] name = "constant_time_eq" version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" -[[package]] -name = "cookie" -version = "0.15.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5f1c7727e460397e56abc4bddc1d49e07a1ad78fc98eb2e1c8f032a58a2f80d" -dependencies = [ - "percent-encoding", - "time 0.2.27", - "version_check", -] - [[package]] name = "cpufeatures" version = "0.2.1" @@ -546,39 +591,6 @@ dependencies = [ "const-oid", ] -[[package]] -name = "devise" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50c7580b072f1c8476148f16e0a0d5dedddab787da98d86c5082c5e9ed8ab595" -dependencies = [ - "devise_codegen", - "devise_core", -] - -[[package]] -name = "devise_codegen" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "123c73e7a6e51b05c75fe1a1b2f4e241399ea5740ed810b0e3e6cacd9db5e7b2" -dependencies = [ - "devise_core", - "quote", -] - -[[package]] -name = "devise_core" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "841ef46f4787d9097405cac4e70fb8644fc037b526e8c14054247c0263c400d0" -dependencies = [ - "bitflags", - "proc-macro2", - "proc-macro2-diagnostics", - "quote", - "syn", -] - [[package]] name = "digest" version = "0.9.0" @@ -608,12 +620,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "discard" -version = "1.0.4" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" - [[package]] name = "ed25519" version = "1.3.0" @@ -676,15 +682,6 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" -[[package]] -name = "fastrand" -version = "1.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "779d043b6a0b90cc4c0ed7ee380a6504394cee7efd7db050e3774eee387324b2" -dependencies = [ - "instant", -] - [[package]] name = "figment" version = "0.10.6" @@ -699,6 +696,18 @@ dependencies = [ "version_check", ] +[[package]] +name = "flate2" +version = "1.0.22" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e6988e897c1c9c485f43b47a529cef42fde0547f9d8d41a7062518f1d8fc53f" +dependencies = [ + "cfg-if 1.0.0", + "crc32fast", + "libc", + "miniz_oxide 0.4.4", +] + [[package]] name = "fnv" version = "1.0.7" @@ -829,19 +838,6 @@ dependencies = [ "byteorder", ] -[[package]] -name = "generator" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1d9279ca822891c1a4dae06d185612cf8fc6acfe5dff37781b41297811b12ee" -dependencies = [ - "cc", - "libc", - "log", - "rustversion", - "winapi", -] - [[package]] name = "generic-array" version = "0.14.5" @@ -927,6 +923,31 @@ dependencies = [ "hashbrown", ] +[[package]] +name = "headers" +version = "0.3.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a4c4eb0471fcb85846d8b0690695ef354f9afb11cb03cac2e1d7c9253351afb0" +dependencies = [ + "base64 0.13.0", + "bitflags", + "bytes", + "headers-core", + "http", + "httpdate", + "mime", + "sha-1", +] + +[[package]] +name = "headers-core" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" +dependencies = [ + "http", +] + [[package]] name = "heck" version = "0.3.3" @@ -1030,6 +1051,12 @@ dependencies = [ "pin-project-lite", ] +[[package]] +name = "http-range-header" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" + [[package]] name = "httparse" version = "1.5.1" @@ -1074,9 +1101,9 @@ checksum = "d87c48c02e0dc5e3b849a2041db3029fd066650f8f717c07bf8ed78ccb895cac" dependencies = [ "http", "hyper", - "rustls 0.20.2", + "rustls", "tokio", - "tokio-rustls 0.23.2", + "tokio-rustls", ] [[package]] @@ -1320,21 +1347,6 @@ dependencies = [ "cfg-if 1.0.0", ] -[[package]] -name = "loom" -version = "0.5.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edc5c7d328e32cc4954e8e01193d7f0ef5ab257b5090b70a964e099a36034309" -dependencies = [ - "cfg-if 1.0.0", - "generator", - "scoped-tls", - "serde", - "serde_json", - "tracing", - "tracing-subscriber 0.3.6", -] - [[package]] name = "lru-cache" version = "0.1.2" @@ -1365,21 +1377,18 @@ dependencies = [ "regex-automata", ] -[[package]] -name = "matchers" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" -dependencies = [ - "regex-automata", -] - [[package]] name = "matches" version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" 
+[[package]] +name = "matchit" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "58b6f41fdfbec185dd3dff58b51e323f5bc61692c0de38419a957b0dcfccca3c" + [[package]] name = "memchr" version = "2.4.1" @@ -1416,6 +1425,16 @@ dependencies = [ "adler32", ] +[[package]] +name = "miniz_oxide" +version = "0.4.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" +dependencies = [ + "adler", + "autocfg", +] + [[package]] name = "mio" version = "0.7.14" @@ -1438,26 +1457,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "multer" -version = "2.0.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f8f35e687561d5c1667590911e6698a8cb714a134a7505718a182e7bc9d3836" -dependencies = [ - "bytes", - "encoding_rs", - "futures-util", - "http", - "httparse", - "log", - "memchr", - "mime", - "spin 0.9.2", - "tokio", - "tokio-util", - "version_check", -] - [[package]] name = "nom" version = "7.1.0" @@ -1774,7 +1773,7 @@ dependencies = [ "bitflags", "crc32fast", "deflate", - "miniz_oxide", + "miniz_oxide 0.3.7", ] [[package]] @@ -1817,12 +1816,6 @@ dependencies = [ "version_check", ] -[[package]] -name = "proc-macro-hack" -version = "0.5.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" - [[package]] name = "proc-macro2" version = "1.0.36" @@ -1960,26 +1953,6 @@ dependencies = [ "redox_syscall", ] -[[package]] -name = "ref-cast" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "300f2a835d808734ee295d45007adacb9ebb29dd3ae2424acfa17930cae541da" -dependencies = [ - "ref-cast-impl", -] - -[[package]] -name = "ref-cast-impl" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c38e3aecd2b21cb3959637b883bb3714bc7e43f0268b9a29d3743ee3e55cdd2" -dependencies = [ - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "regex" version = "1.5.4" @@ -2006,15 +1979,6 @@ version = "0.6.25" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" -[[package]] -name = "remove_dir_all" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi", -] - [[package]] name = "reqwest" version = "0.11.9" @@ -2037,13 +2001,13 @@ dependencies = [ "mime", "percent-encoding", "pin-project-lite", - "rustls 0.20.2", + "rustls", "rustls-pemfile", "serde", "serde_json", "serde_urlencoded", "tokio", - "tokio-rustls 0.23.2", + "tokio-rustls", "tokio-socks", "url", "wasm-bindgen", @@ -2072,95 +2036,12 @@ dependencies = [ "cc", "libc", "once_cell", - "spin 0.5.2", + "spin", "untrusted", "web-sys", "winapi", ] -[[package]] -name = "rocket" -version = "0.5.0-rc.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a71c18c42a0eb15bf3816831caf0dad11e7966f2a41aaf486a701979c4dd1f2" -dependencies = [ - "async-stream", - "async-trait", - "atomic", - "atty", - "binascii", - "bytes", - "either", - "figment", - "futures", - "indexmap", - "log", - "memchr", - "multer", - "num_cpus", - "parking_lot", - "pin-project-lite", - "rand 0.8.4", - "ref-cast", - "rocket_codegen", - "rocket_http", - "serde", - "state", - "tempfile", - "time 0.2.27", - "tokio", - 
"tokio-stream", - "tokio-util", - "ubyte", - "version_check", - "yansi", -] - -[[package]] -name = "rocket_codegen" -version = "0.5.0-rc.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66f5fa462f7eb958bba8710c17c5d774bbbd59809fa76fb1957af7e545aea8bb" -dependencies = [ - "devise", - "glob", - "indexmap", - "proc-macro2", - "quote", - "rocket_http", - "syn", - "unicode-xid", -] - -[[package]] -name = "rocket_http" -version = "0.5.0-rc.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23c8b7d512d2fcac2316ebe590cde67573844b99e6cc9ee0f53375fa16e25ebd" -dependencies = [ - "cookie", - "either", - "http", - "hyper", - "indexmap", - "log", - "memchr", - "mime", - "parking_lot", - "pear", - "percent-encoding", - "pin-project-lite", - "ref-cast", - "serde", - "smallvec", - "stable-pattern", - "state", - "time 0.2.27", - "tokio", - "tokio-rustls 0.22.0", - "uncased", -] - [[package]] name = "rocksdb" version = "0.17.0" @@ -2465,28 +2346,6 @@ version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" -[[package]] -name = "rustc_version" -version = "0.2.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" -dependencies = [ - "semver", -] - -[[package]] -name = "rustls" -version = "0.19.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" -dependencies = [ - "base64 0.13.0", - "log", - "ring", - "sct 0.6.1", - "webpki 0.21.4", -] - [[package]] name = "rustls" version = "0.20.2" @@ -2495,8 +2354,8 @@ checksum = "d37e5e2290f3e040b594b1a9e04377c2c671f1a1cfd9bfdef82106ac1c113f84" dependencies = [ "log", "ring", - "sct 0.7.0", - "webpki 0.22.0", + "sct", + "webpki", ] [[package]] @@ -2508,40 +2367,18 @@ dependencies = [ "base64 0.13.0", ] -[[package]] -name = "rustversion" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2cc38e8fa666e2de3c4aba7edeb5ffc5246c1c2ed0e3d17e560aeeba736b23f" - [[package]] name = "ryu" version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" -[[package]] -name = "scoped-tls" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" - [[package]] name = "scopeguard" version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" -[[package]] -name = "sct" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "sct" version = "0.7.0" @@ -2552,21 +2389,6 @@ dependencies = [ "untrusted", ] -[[package]] -name = "semver" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -dependencies = [ - "semver-parser", -] - -[[package]] -name = "semver-parser" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" - [[package]] name = "serde" version = "1.0.134" @@ -2635,21 +2457,6 @@ dependencies = [ "opaque-debug", ] -[[package]] -name = "sha1" -version = "0.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1da05c97445caa12d05e848c4a4fcbbea29e748ac28f7e80e9b010392063770" -dependencies = [ - "sha1_smol", -] - -[[package]] -name = "sha1_smol" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae1a47186c03a32177042e55dbc5fd5aee900b8e0069a8d70fba96a9375cd012" - [[package]] name = "sha2" version = "0.9.9" @@ -2760,12 +2567,6 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" -[[package]] -name = "spin" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "511254be0c5bcf062b019a6c89c01a664aa359ded62f78aa72c6fc137c0590e5" - [[package]] name = "spki" version = "0.4.1" @@ -2775,82 +2576,6 @@ dependencies = [ "der", ] -[[package]] -name = "stable-pattern" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4564168c00635f88eaed410d5efa8131afa8d8699a612c80c455a0ba05c21045" -dependencies = [ - "memchr", -] - -[[package]] -name = "standback" -version = "0.2.17" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e113fb6f3de07a243d434a56ec6f186dfd51cb08448239fe7bcae73f87ff28ff" -dependencies = [ - "version_check", -] - -[[package]] -name = "state" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cf4f5369e6d3044b5e365c9690f451516ac8f0954084622b49ea3fde2f6de5" -dependencies = [ - "loom", -] - -[[package]] -name = "stdweb" -version = "0.4.20" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d022496b16281348b52d0e30ae99e01a73d737b2f45d38fed4edf79f9325a1d5" -dependencies = [ - "discard", - "rustc_version", - "stdweb-derive", - "stdweb-internal-macros", - "stdweb-internal-runtime", - "wasm-bindgen", -] - -[[package]] -name = "stdweb-derive" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" -dependencies = [ - "proc-macro2", - "quote", - "serde", - "serde_derive", - "syn", -] - -[[package]] -name = "stdweb-internal-macros" -version = "0.2.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58fa5ff6ad0d98d1ffa8cb115892b6e69d67799f6763e162a1c9db421dc22e11" -dependencies = [ - "base-x", - "proc-macro2", - "quote", - "serde", - "serde_derive", - "serde_json", - "sha1", - "syn", -] - -[[package]] -name = "stdweb-internal-runtime" -version = "0.1.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" - [[package]] name = "subtle" version = "2.4.1" @@ -2868,6 +2593,12 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "sync_wrapper" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20518fe4a4c9acf048008599e464deb21beeae3d3578418951a189c235a7a9a8" + [[package]] name = "synchronoise" version = "1.0.0" @@ -2889,20 +2620,6 @@ dependencies = [ "unicode-xid", ] -[[package]] -name = "tempfile" -version = "3.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5cdb1ef4eaeeaddc8fbd371e5017057064af0911902ef36b39801f67cc6d79e4" -dependencies = [ - "cfg-if 1.0.0", - "fastrand", - "libc", - "redox_syscall", - "remove_dir_all", - "winapi", -] - [[package]] name = "textwrap" version = "0.14.2" @@ -3002,44 +2719,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "time" -version = "0.2.27" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4752a97f8eebd6854ff91f1c1824cd6160626ac4bd44287f7f4ea2035a02a242" -dependencies = [ - "const_fn", - "libc", - "standback", - "stdweb", - "time-macros", - "version_check", - "winapi", -] - -[[package]] -name = "time-macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957e9c6e26f12cb6d0dd7fc776bb67a706312e7299aed74c8dd5b17ebb27e2f1" -dependencies = [ - "proc-macro-hack", - "time-macros-impl", -] - -[[package]] -name = "time-macros-impl" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd3c141a1b43194f3f56a1411225df8646c55781d5f26db825b3d98507eb482f" -dependencies = [ - "proc-macro-hack", - "proc-macro2", - "quote", - "standback", - "syn", -] - [[package]] name = "tinyvec" version = "1.5.1" @@ -3084,26 +2763,15 @@ dependencies = [ "syn", ] -[[package]] -name = "tokio-rustls" -version = "0.22.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" -dependencies = [ - "rustls 0.19.1", - "tokio", - "webpki 0.21.4", -] - [[package]] name = "tokio-rustls" version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a27d5f2b839802bd8267fa19b0530f5a08b9c08cd417976be2a65d130fe1c11b" dependencies = [ - "rustls 0.20.2", + "rustls", "tokio", - "webpki 0.22.0", + "webpki", ] [[package]] @@ -3152,6 +2820,52 @@ dependencies = [ "serde", ] +[[package]] +name = "tower" +version = "0.4.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5651b5f6860a99bd1adb59dbfe1db8beb433e73709d9032b413a77e2fb7c066a" +dependencies = [ + "futures-core", + "futures-util", + "pin-project", + "pin-project-lite", + "tokio", + "tokio-util", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-http" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "03650267ad175b51c47d02ed9547fc7d4ba2c7e5cb76df0bed67edd1825ae297" +dependencies = [ + "async-compression", + "bitflags", + "bytes", + "futures-core", + "futures-util", + "http", + "http-body", + "http-range-header", + "pin-project-lite", + "tokio", + "tokio-util", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-layer" +version = "0.3.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62" + [[package]] name = "tower-service" version = "0.3.1" @@ -3165,6 +2879,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "375a639232caf30edfc78e8d89b2d4c375515393e7af7e16f01cd96917fb2105" dependencies = [ "cfg-if 1.0.0", + "log", "pin-project-lite", "tracing-attributes", "tracing-core", @@ -3198,7 +2913,7 @@ checksum = "bd520fe41c667b437952383f3a1ec14f1fa45d653f719a77eedd6e6a02d8fa54" dependencies = [ "lazy_static", "tracing", - "tracing-subscriber 0.2.25", + "tracing-subscriber", ] [[package]] @@ -3231,7 +2946,7 @@ dependencies = [ "ansi_term", "chrono", "lazy_static", - "matchers 0.0.1", + "matchers", 
"regex", "serde", "serde_json", @@ -3244,24 +2959,6 @@ dependencies = [ "tracing-serde", ] -[[package]] -name = "tracing-subscriber" -version = "0.3.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77be66445c4eeebb934a7340f227bfe7b338173d3f8c00a60a5a58005c9faecf" -dependencies = [ - "ansi_term", - "lazy_static", - "matchers 0.1.0", - "regex", - "sharded-slab", - "smallvec", - "thread_local", - "tracing", - "tracing-core", - "tracing-log", -] - [[package]] name = "trust-dns-proto" version = "0.20.3" @@ -3319,22 +3016,12 @@ version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" -[[package]] -name = "ubyte" -version = "0.10.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42756bb9e708855de2f8a98195643dff31a97f0485d90d8467b39dc24be9e8fe" -dependencies = [ - "serde", -] - [[package]] name = "uncased" version = "0.9.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5baeed7327e25054889b9bd4f975f32e5f4c5d434042d59ab6cd4142c0a76ed0" dependencies = [ - "serde", "version_check", ] @@ -3514,16 +3201,6 @@ dependencies = [ "wasm-bindgen", ] -[[package]] -name = "webpki" -version = "0.21.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "webpki" version = "0.22.0" @@ -3540,7 +3217,7 @@ version = "0.22.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "552ceb903e957524388c4d3475725ff2c8b7960922063af6ce53c9a43da07449" dependencies = [ - "webpki 0.22.0", + "webpki", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 7f843435..5fb75dcb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -13,10 +13,11 @@ edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -# Used to handle requests -# TODO: This can become optional as soon as proper configs are supported -# rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "801e04bd5369eb39e126c75f6d11e1e9597304d8", features = ["tls"] } # Used to handle requests -rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle requests +# Web framework +axum = { version = "0.4.4", features = ["headers"], optional = true } +hyper = "0.14.16" +tower = { version = "0.4.11", features = ["util"] } +tower-http = { version = "0.2.1", features = ["add-extension", "cors", "compression-full", "sensitive-headers", "trace", "util"] } # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } @@ -24,8 +25,8 @@ ruma = { git = "https://github.com/ruma/ruma", rev = "f7a10a7e471b59d3096be2695c #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -# Used for long polling and federation sender, 
should be the same as rocket::tokio -tokio = "1.11.0" +# Async runtime and utilities +tokio = { version = "1.11.0", features = ["fs", "macros", "signal", "sync"] } # Used for storing data permanently sled = { version = "0.34.6", features = ["compression", "no_metrics"], optional = true } #sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] } @@ -33,7 +34,6 @@ persy = { version = "1.2" , optional = true, features=["background_ops"] } # Used for the http request / response body type for Ruma endpoints used with reqwest bytes = "1.1.0" -# Used for rocket<->ruma conversions http = "0.2.4" # Used to find data directory for default db path directories = "3.0.2" @@ -84,7 +84,9 @@ hmac = "0.11.0" sha-1 = "0.9.8" # used for conduit's CLI and admin room command parsing clap = { version = "3.0.10", default-features = false, features = ["std", "derive"] } -maplit = "1.0.2" +futures-util = { version = "0.3.19", default-features = false } +# Used for reading the configuration from conduit.toml & environment variables +figment = { version = "0.10.6", features = ["env", "toml"] } tikv-jemalloc-ctl = { version = "0.4.2", features = ["use_std"], optional = true } tikv-jemallocator = { version = "0.4.1", features = ["unprefixed_malloc_on_supported_platforms"], optional = true } @@ -98,7 +100,7 @@ backend_heed = ["heed", "crossbeam"] backend_rocksdb = ["rocksdb"] jemalloc = ["tikv-jemalloc-ctl", "tikv-jemallocator"] sqlite = ["rusqlite", "parking_lot", "tokio/signal"] -conduit_bin = [] # TODO: add rocket to this when it is optional +conduit_bin = ["axum"] [[bin]] name = "conduit" diff --git a/README.md b/README.md index a4f09298..45b16fd5 100644 --- a/README.md +++ b/README.md @@ -54,7 +54,7 @@ Thanks to Famedly, Prototype Fund (DLR and German BMBF) and all other individual Thanks to the contributors to Conduit and all libraries we use, for example: - Ruma: A clean library for the Matrix Spec in Rust -- Rocket: A flexible web framework +- axum: A modular web framework #### Donate diff --git a/conduit-example.toml b/conduit-example.toml index f1578078..c22c8622 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -42,7 +42,7 @@ allow_registration = true trusted_servers = ["matrix.org"] #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time -#log = "info,state_res=warn,rocket=off,_=off,sled=off" +#log = "info,state_res=warn,_=off,sled=off" #workers = 4 # default: cpu core count * 2 address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy diff --git a/debian/postinst b/debian/postinst index 6bd1a3a7..29a93676 100644 --- a/debian/postinst +++ b/debian/postinst @@ -74,7 +74,7 @@ allow_registration = true #allow_jaeger = false #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time -#log = "info,state_res=warn,rocket=off,_=off,sled=off" +#log = "info,state_res=warn,_=off,sled=off" #workers = 4 # default: cpu core count * 2 # The total amount of memory that the database will use. 
diff --git a/docker-compose.yml b/docker-compose.yml index 530fc198..88d5c3f6 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -33,7 +33,7 @@ services: # CONDUIT_PORT: 6167 # CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string '' # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging - # CONDUIT_LOG: info # default is: "info,rocket=off,_=off,sled=off" + # CONDUIT_LOG: info # default is: "info,_=off,sled=off" # CONDUIT_ALLOW_JAEGER: 'false' # CONDUIT_ALLOW_ENCRYPTION: 'false' # CONDUIT_ALLOW_FEDERATION: 'false' diff --git a/docker/docker-compose.traefik.yml b/docker/docker-compose.traefik.yml index 392b3828..f625080a 100644 --- a/docker/docker-compose.traefik.yml +++ b/docker/docker-compose.traefik.yml @@ -33,7 +33,7 @@ services: # CONDUIT_PORT: 6167 # CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string '' # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging - # CONDUIT_LOG: info # default is: "info,rocket=off,_=off,sled=off" + # CONDUIT_LOG: info # default is: "info,_=off,sled=off" # CONDUIT_ALLOW_JAEGER: 'false' # CONDUIT_ALLOW_ENCRYPTION: 'false' # CONDUIT_ALLOW_FEDERATION: 'false' diff --git a/src/client_server/account.rs b/src/client_server/account.rs index a210e8ae..bf1a74dd 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -27,8 +27,6 @@ use serde_json::value::to_raw_value; use tracing::{info, warn}; use register::RegistrationKind; -#[cfg(feature = "conduit_bin")] -use rocket::{get, post}; const GUEST_NAME_LENGTH: usize = 10; @@ -42,10 +40,6 @@ const GUEST_NAME_LENGTH: usize = 10; /// - No user or appservice on this server already claimed this username /// /// Note: This will not reserve the username, so the username might become invalid when trying to register -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/register/available", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_register_available_route( db: DatabaseGuard, @@ -90,10 +84,6 @@ pub async fn get_register_available_route( /// - If type is not guest and no username is given: Always fails after UIAA check /// - Creates a new account and populates it with default account data /// - If `inhibit_login` is false: Creates a device and returns device id and access_token -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/register", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn register_route( db: DatabaseGuard, @@ -279,10 +269,6 @@ pub async fn register_route( /// - Deletes device metadata (device id, device display name, last seen ip, last seen ts) /// - Forgets to-device events /// - Triggers device list updates -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/account/password", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn change_password_route( db: DatabaseGuard, @@ -348,10 +334,6 @@ pub async fn change_password_route( /// Get user_id of the sender user. 
/// /// Note: Also works for Application Services -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/account/whoami", data = "") -)] #[tracing::instrument(skip(body))] pub async fn whoami_route(body: Ruma) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -371,10 +353,6 @@ pub async fn whoami_route(body: Ruma) -> ConduitResult, ) -> ConduitResult { diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index 129ac166..6e1b43e8 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -12,16 +12,9 @@ use ruma::{ RoomAliasId, }; -#[cfg(feature = "conduit_bin")] -use rocket::{delete, get, put}; - /// # `PUT /_matrix/client/r0/directory/room/{roomAlias}` /// /// Creates a new room alias on this server. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/directory/room/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn create_alias_route( db: DatabaseGuard, @@ -52,10 +45,6 @@ pub async fn create_alias_route( /// /// - TODO: additional access control checks /// - TODO: Update canonical alias event -#[cfg_attr( - feature = "conduit_bin", - delete("/_matrix/client/r0/directory/room/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn delete_alias_route( db: DatabaseGuard, @@ -82,10 +71,6 @@ pub async fn delete_alias_route( /// Resolve an alias locally or over federation. /// /// - TODO: Suggest more servers to join via -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/directory/room/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_alias_route( db: DatabaseGuard, diff --git a/src/client_server/backup.rs b/src/client_server/backup.rs index bbb86726..cc2d7c46 100644 --- a/src/client_server/backup.rs +++ b/src/client_server/backup.rs @@ -9,16 +9,9 @@ use ruma::api::client::{ }, }; -#[cfg(feature = "conduit_bin")] -use rocket::{delete, get, post, put}; - /// # `POST /_matrix/client/r0/room_keys/version` /// /// Creates a new backup. -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/unstable/room_keys/version", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn create_backup_route( db: DatabaseGuard, @@ -37,10 +30,6 @@ pub async fn create_backup_route( /// # `PUT /_matrix/client/r0/room_keys/version/{version}` /// /// Update information about an existing backup. Only `auth_data` can be modified. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/unstable/room_keys/version/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn update_backup_route( db: DatabaseGuard, @@ -58,10 +47,6 @@ pub async fn update_backup_route( /// # `GET /_matrix/client/r0/room_keys/version` /// /// Get information about the latest backup version. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/unstable/room_keys/version", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_latest_backup_route( db: DatabaseGuard, @@ -89,10 +74,6 @@ pub async fn get_latest_backup_route( /// # `GET /_matrix/client/r0/room_keys/version` /// /// Get information about an existing backup. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/unstable/room_keys/version/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_backup_route( db: DatabaseGuard, @@ -121,10 +102,6 @@ pub async fn get_backup_route( /// Delete an existing key backup. 
/// /// - Deletes both information about the backup, as well as all key data related to the backup -#[cfg_attr( - feature = "conduit_bin", - delete("/_matrix/client/unstable/room_keys/version/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn delete_backup_route( db: DatabaseGuard, @@ -146,10 +123,6 @@ pub async fn delete_backup_route( /// - Only manipulating the most recently created version of the backup is allowed /// - Adds the keys to the backup /// - Returns the new number of keys in this backup and the etag -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/unstable/room_keys/keys", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn add_backup_keys_route( db: DatabaseGuard, @@ -198,10 +171,6 @@ pub async fn add_backup_keys_route( /// - Only manipulating the most recently created version of the backup is allowed /// - Adds the keys to the backup /// - Returns the new number of keys in this backup and the etag -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/unstable/room_keys/keys/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn add_backup_key_sessions_route( db: DatabaseGuard, @@ -248,10 +217,6 @@ pub async fn add_backup_key_sessions_route( /// - Only manipulating the most recently created version of the backup is allowed /// - Adds the keys to the backup /// - Returns the new number of keys in this backup and the etag -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn add_backup_key_session_route( db: DatabaseGuard, @@ -292,10 +257,6 @@ pub async fn add_backup_key_session_route( /// # `GET /_matrix/client/r0/room_keys/keys` /// /// Retrieves all keys from the backup. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/unstable/room_keys/keys", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_backup_keys_route( db: DatabaseGuard, @@ -311,10 +272,6 @@ pub async fn get_backup_keys_route( /// # `GET /_matrix/client/r0/room_keys/keys/{roomId}` /// /// Retrieves all keys from the backup for a given room. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/unstable/room_keys/keys/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_backup_key_sessions_route( db: DatabaseGuard, @@ -332,10 +289,6 @@ pub async fn get_backup_key_sessions_route( /// # `GET /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` /// /// Retrieves a key from the backup. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_backup_key_session_route( db: DatabaseGuard, @@ -357,10 +310,6 @@ pub async fn get_backup_key_session_route( /// # `DELETE /_matrix/client/r0/room_keys/keys` /// /// Delete the keys from the backup. -#[cfg_attr( - feature = "conduit_bin", - delete("/_matrix/client/unstable/room_keys/keys", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn delete_backup_keys_route( db: DatabaseGuard, @@ -382,10 +331,6 @@ pub async fn delete_backup_keys_route( /// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}` /// /// Delete the keys from the backup for a given room. 
-#[cfg_attr( - feature = "conduit_bin", - delete("/_matrix/client/unstable/room_keys/keys/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn delete_backup_key_sessions_route( db: DatabaseGuard, @@ -408,10 +353,6 @@ pub async fn delete_backup_key_sessions_route( /// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` /// /// Delete a key from the backup. -#[cfg_attr( - feature = "conduit_bin", - delete("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn delete_backup_key_session_route( db: DatabaseGuard, diff --git a/src/client_server/capabilities.rs b/src/client_server/capabilities.rs index c69b7cb2..8da6855b 100644 --- a/src/client_server/capabilities.rs +++ b/src/client_server/capabilities.rs @@ -7,16 +7,9 @@ use ruma::{ }; use std::collections::BTreeMap; -#[cfg(feature = "conduit_bin")] -use rocket::get; - /// # `GET /_matrix/client/r0/capabilities` /// /// Get information on the supported feature set and other relevent capabilities of this server. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/capabilities", data = "<_body>") -)] #[tracing::instrument(skip(_body))] pub async fn get_capabilities_route( _body: Ruma, diff --git a/src/client_server/config.rs b/src/client_server/config.rs index 0c668ff1..0df0decf 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -13,16 +13,9 @@ use ruma::{ use serde::Deserialize; use serde_json::{json, value::RawValue as RawJsonValue}; -#[cfg(feature = "conduit_bin")] -use rocket::{get, put}; - /// # `PUT /_matrix/client/r0/user/{userId}/account_data/{type}` /// /// Sets some account data for the sender user. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/user/<_>/account_data/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn set_global_account_data_route( db: DatabaseGuard, @@ -54,13 +47,6 @@ pub async fn set_global_account_data_route( /// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}` /// /// Sets some room account data for the sender user. -#[cfg_attr( - feature = "conduit_bin", - put( - "/_matrix/client/r0/user/<_>/rooms/<_>/account_data/<_>", - data = "" - ) -)] #[tracing::instrument(skip(db, body))] pub async fn set_room_account_data_route( db: DatabaseGuard, @@ -92,10 +78,6 @@ pub async fn set_room_account_data_route( /// # `GET /_matrix/client/r0/user/{userId}/account_data/{type}` /// /// Gets some account data for the sender user. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/user/<_>/account_data/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_global_account_data_route( db: DatabaseGuard, @@ -118,13 +100,6 @@ pub async fn get_global_account_data_route( /// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}` /// /// Gets some room account data for the sender user. 
-#[cfg_attr( - feature = "conduit_bin", - get( - "/_matrix/client/r0/user/<_>/rooms/<_>/account_data/<_>", - data = "" - ) -)] #[tracing::instrument(skip(db, body))] pub async fn get_room_account_data_route( db: DatabaseGuard, diff --git a/src/client_server/context.rs b/src/client_server/context.rs index 02148f41..1fbfee99 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -9,19 +9,12 @@ use ruma::{ use std::{collections::HashSet, convert::TryFrom}; use tracing::error; -#[cfg(feature = "conduit_bin")] -use rocket::get; - /// # `GET /_matrix/client/r0/rooms/{roomId}/context` /// /// Allows loading room history around an event. /// /// - Only works if the user is joined (TODO: always allow, but only show events if the user was /// joined, depending on history_visibility) -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_>/context/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_context_route( db: DatabaseGuard, diff --git a/src/client_server/device.rs b/src/client_server/device.rs index f240f2e7..82d11682 100644 --- a/src/client_server/device.rs +++ b/src/client_server/device.rs @@ -8,16 +8,10 @@ use ruma::api::client::{ }; use super::SESSION_ID_LENGTH; -#[cfg(feature = "conduit_bin")] -use rocket::{delete, get, post, put}; /// # `GET /_matrix/client/r0/devices` /// /// Get metadata on all devices of the sender user. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/devices", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_devices_route( db: DatabaseGuard, @@ -37,10 +31,6 @@ pub async fn get_devices_route( /// # `GET /_matrix/client/r0/devices/{deviceId}` /// /// Get metadata on a single device of the sender user. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/devices/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_device_route( db: DatabaseGuard, @@ -59,10 +49,6 @@ pub async fn get_device_route( /// # `PUT /_matrix/client/r0/devices/{deviceId}` /// /// Updates the metadata on a given device of the sender user. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/devices/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn update_device_route( db: DatabaseGuard, @@ -94,10 +80,6 @@ pub async fn update_device_route( /// - Deletes device metadata (device id, device display name, last seen ip, last seen ts) /// - Forgets to-device events /// - Triggers device list updates -#[cfg_attr( - feature = "conduit_bin", - delete("/_matrix/client/r0/devices/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn delete_device_route( db: DatabaseGuard, @@ -157,10 +139,6 @@ pub async fn delete_device_route( /// - Deletes device metadata (device id, device display name, last seen ip, last seen ts) /// - Forgets to-device events /// - Triggers device list updates -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/delete_devices", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn delete_devices_route( db: DatabaseGuard, diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 719d9af4..06d7a270 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -29,18 +29,11 @@ use ruma::{ }; use tracing::{info, warn}; -#[cfg(feature = "conduit_bin")] -use rocket::{get, post, put}; - /// # `POST /_matrix/client/r0/publicRooms` /// /// Lists the public rooms on this server. 
/// /// - Rooms are ordered by the number of joined members -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/publicRooms", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_public_rooms_filtered_route( db: DatabaseGuard, @@ -62,10 +55,6 @@ pub async fn get_public_rooms_filtered_route( /// Lists the public rooms on this server. /// /// - Rooms are ordered by the number of joined members -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/publicRooms", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_public_rooms_route( db: DatabaseGuard, @@ -96,10 +85,6 @@ pub async fn get_public_rooms_route( /// Sets the visibility of a given room in the room directory. /// /// - TODO: Access control checks -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/directory/list/room/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn set_room_visibility_route( db: DatabaseGuard, @@ -129,10 +114,6 @@ pub async fn set_room_visibility_route( /// # `GET /_matrix/client/r0/directory/list/room/{roomId}` /// /// Gets the visibility of a given room in the room directory. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/directory/list/room/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_room_visibility_route( db: DatabaseGuard, diff --git a/src/client_server/filter.rs b/src/client_server/filter.rs index f8845f1e..6c42edd3 100644 --- a/src/client_server/filter.rs +++ b/src/client_server/filter.rs @@ -4,18 +4,11 @@ use ruma::api::client::{ r0::filter::{create_filter, get_filter}, }; -#[cfg(feature = "conduit_bin")] -use rocket::{get, post}; - /// # `GET /_matrix/client/r0/user/{userId}/filter/{filterId}` /// /// Loads a filter that was previously created. /// /// - A user can only access their own filters -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/user/<_>/filter/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_filter_route( db: DatabaseGuard, @@ -33,10 +26,6 @@ pub async fn get_filter_route( /// # `PUT /_matrix/client/r0/user/{userId}/filter` /// /// Creates a new filter to be used by other endpoints. -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/user/<_>/filter", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn create_filter_route( db: DatabaseGuard, diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index e7aec26b..9a7a4e7f 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -1,6 +1,6 @@ use super::SESSION_ID_LENGTH; use crate::{database::DatabaseGuard, utils, ConduitResult, Database, Error, Result, Ruma}; -use rocket::futures::{prelude::*, stream::FuturesUnordered}; +use futures_util::{stream::FuturesUnordered, StreamExt}; use ruma::{ api::{ client::{ @@ -21,19 +21,12 @@ use ruma::{ use serde_json::json; use std::collections::{BTreeMap, HashMap, HashSet}; -#[cfg(feature = "conduit_bin")] -use rocket::{get, post}; - /// # `POST /_matrix/client/r0/keys/upload` /// /// Publish end-to-end encryption keys for the sender device. /// /// - Adds one time keys /// - If there are no device keys yet: Adds device keys (TODO: merge with existing keys?) 
-#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/keys/upload", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn upload_keys_route( db: DatabaseGuard, @@ -80,10 +73,6 @@ pub async fn upload_keys_route( /// - Always fetches users from other servers over federation /// - Gets master keys, self-signing keys, user signing keys and device keys. /// - The master and self-signing keys contain signatures that the user is allowed to see -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/keys/query", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_keys_route( db: DatabaseGuard, @@ -105,10 +94,6 @@ pub async fn get_keys_route( /// # `POST /_matrix/client/r0/keys/claim` /// /// Claims one-time keys -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/keys/claim", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn claim_keys_route( db: DatabaseGuard, @@ -126,10 +111,6 @@ pub async fn claim_keys_route( /// Uploads end-to-end key information for the sender user. /// /// - Requires UIAA to verify password -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/unstable/keys/device_signing/upload", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn upload_signing_keys_route( db: DatabaseGuard, @@ -190,10 +171,6 @@ pub async fn upload_signing_keys_route( /// # `POST /_matrix/client/r0/keys/signatures/upload` /// /// Uploads end-to-end key signatures from the sender user. -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/unstable/keys/signatures/upload", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn upload_signatures_route( db: DatabaseGuard, @@ -256,10 +233,6 @@ pub async fn upload_signatures_route( /// Gets a list of users who have updated their device identity keys since the previous sync token. /// /// - TODO: left users -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/keys/changes", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_key_changes_route( db: DatabaseGuard, diff --git a/src/client_server/media.rs b/src/client_server/media.rs index deea319e..5eba17bc 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -10,18 +10,15 @@ use ruma::api::client::{ }, }; -#[cfg(feature = "conduit_bin")] -use rocket::{get, post}; - const MXC_LENGTH: usize = 32; /// # `GET /_matrix/media/r0/config` /// /// Returns max upload size. -#[cfg_attr(feature = "conduit_bin", get("/_matrix/media/r0/config"))] -#[tracing::instrument(skip(db))] +#[tracing::instrument(skip(db, _body))] pub async fn get_media_config_route( db: DatabaseGuard, + _body: Ruma, ) -> ConduitResult { Ok(get_media_config::Response { upload_size: db.globals.max_request_size().into(), @@ -35,10 +32,6 @@ pub async fn get_media_config_route( /// /// - Some metadata will be saved in the database /// - Media will be saved in the media/ directory -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/media/r0/upload", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn create_content_route( db: DatabaseGuard, @@ -110,10 +103,6 @@ pub async fn get_remote_content( /// Load media from our server or over federation. 
/// /// - Only allows federation if `allow_remote` is true -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/media/r0/download/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_content_route( db: DatabaseGuard, @@ -147,10 +136,6 @@ pub async fn get_content_route( /// Load media from our server or over federation, permitting desired filename. /// /// - Only allows federation if `allow_remote` is true -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/media/r0/download/<_>/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_content_as_filename_route( db: DatabaseGuard, @@ -190,10 +175,6 @@ pub async fn get_content_as_filename_route( /// Load media thumbnail from our server or over federation. /// /// - Only allows federation if `allow_remote` is true -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/media/r0/thumbnail/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_content_thumbnail_route( db: DatabaseGuard, diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index e855dba2..c16065ef 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -36,19 +36,12 @@ use std::{ }; use tracing::{debug, error, warn}; -#[cfg(feature = "conduit_bin")] -use rocket::{get, post}; - /// # `POST /_matrix/client/r0/rooms/{roomId}/join` /// /// Tries to join the sender user into a room. /// /// - If the server knowns about this room: creates the join event and does auth rules locally /// - If the server does not know about the room: asks other servers over federation -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/join", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn join_room_by_id_route( db: DatabaseGuard, @@ -90,10 +83,6 @@ pub async fn join_room_by_id_route( /// /// - If the server knowns about this room: creates the join event and does auth rules locally /// - If the server does not know about the room: asks other servers over federation -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/join/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn join_room_by_id_or_alias_route( db: DatabaseGuard, @@ -148,10 +137,6 @@ pub async fn join_room_by_id_or_alias_route( /// Tries to leave the sender user from a room. /// /// - This should always work if the user is currently joined. -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/leave", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn leave_room_route( db: DatabaseGuard, @@ -169,10 +154,6 @@ pub async fn leave_room_route( /// # `POST /_matrix/client/r0/rooms/{roomId}/invite` /// /// Tries to send an invite event into the room. -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/invite", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn invite_user_route( db: DatabaseGuard, @@ -192,10 +173,6 @@ pub async fn invite_user_route( /// # `POST /_matrix/client/r0/rooms/{roomId}/kick` /// /// Tries to send a kick event into the room. -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/kick", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn kick_user_route( db: DatabaseGuard, @@ -256,10 +233,6 @@ pub async fn kick_user_route( /// # `POST /_matrix/client/r0/rooms/{roomId}/ban` /// /// Tries to send a ban event into the room. 
-#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/ban", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn ban_user_route( db: DatabaseGuard, @@ -331,10 +304,6 @@ pub async fn ban_user_route( /// # `POST /_matrix/client/r0/rooms/{roomId}/unban` /// /// Tries to send an unban event into the room. -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/unban", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn unban_user_route( db: DatabaseGuard, @@ -399,10 +368,6 @@ pub async fn unban_user_route( /// /// Note: Other devices of the user have no way of knowing the room was forgotten, so this has to /// be called from every device -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/forget", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn forget_room_route( db: DatabaseGuard, @@ -420,10 +385,6 @@ pub async fn forget_room_route( /// # `POST /_matrix/client/r0/joined_rooms` /// /// Lists all rooms the user has joined. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/joined_rooms", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn joined_rooms_route( db: DatabaseGuard, @@ -446,10 +407,6 @@ pub async fn joined_rooms_route( /// Lists all joined users in a room (TODO: at a specific point in time, with a specific membership). /// /// - Only works if the user is currently joined -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_>/members", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_member_events_route( db: DatabaseGuard, @@ -483,10 +440,6 @@ pub async fn get_member_events_route( /// /// - The sender user must be in the room /// - TODO: An appservice just needs a puppet joined -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_>/joined_members", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn joined_members_route( db: DatabaseGuard, diff --git a/src/client_server/message.rs b/src/client_server/message.rs index cf4f0cb6..4fb87715 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -11,9 +11,6 @@ use std::{ sync::Arc, }; -#[cfg(feature = "conduit_bin")] -use rocket::{get, put}; - /// # `PUT /_matrix/client/r0/rooms/{roomId}/send/{eventType}/{txnId}` /// /// Send a message event into the room. 
@@ -21,10 +18,6 @@ use rocket::{get, put}; /// - Is a NOOP if the txn id was already used before and returns the same event id again /// - The only requirement for the content is that it has to be valid json /// - Tries to send the event into the room, auth rules will determine if it is allowed -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/rooms/<_>/send/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn send_message_event_route( db: DatabaseGuard, @@ -110,10 +103,6 @@ pub async fn send_message_event_route( /// /// - Only works if the user is joined (TODO: always allow, but only show events where the user was /// joined, depending on history_visibility) -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_>/messages", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_message_events_route( db: DatabaseGuard, diff --git a/src/client_server/mod.rs b/src/client_server/mod.rs index 115ddaf6..a7241b0d 100644 --- a/src/client_server/mod.rs +++ b/src/client_server/mod.rs @@ -62,23 +62,6 @@ pub use unversioned::*; pub use user_directory::*; pub use voip::*; -#[cfg(not(feature = "conduit_bin"))] -use super::State; -#[cfg(feature = "conduit_bin")] -use { - crate::ConduitResult, rocket::options, ruma::api::client::r0::to_device::send_event_to_device, -}; - pub const DEVICE_ID_LENGTH: usize = 10; pub const TOKEN_LENGTH: usize = 256; pub const SESSION_ID_LENGTH: usize = 256; - -/// # `OPTIONS` -/// -/// Web clients use this to get CORS headers. -#[cfg(feature = "conduit_bin")] -#[options("/<_..>")] -#[tracing::instrument] -pub async fn options_route() -> ConduitResult { - Ok(send_event_to_device::Response {}.into()) -} diff --git a/src/client_server/presence.rs b/src/client_server/presence.rs index cdc1e1f5..0d58ebff 100644 --- a/src/client_server/presence.rs +++ b/src/client_server/presence.rs @@ -2,16 +2,9 @@ use crate::{database::DatabaseGuard, utils, ConduitResult, Ruma}; use ruma::api::client::r0::presence::{get_presence, set_presence}; use std::time::Duration; -#[cfg(feature = "conduit_bin")] -use rocket::{get, put}; - /// # `PUT /_matrix/client/r0/presence/{userId}/status` /// /// Sets the presence state of the sender user. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/presence/<_>/status", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn set_presence_route( db: DatabaseGuard, @@ -54,10 +47,6 @@ pub async fn set_presence_route( /// Gets the presence state of the given user. /// /// - Only works if you share a room with the user -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/presence/<_>/status", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_presence_route( db: DatabaseGuard, diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index ef58a980..bb13b448 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -14,18 +14,11 @@ use ruma::{ use serde_json::value::to_raw_value; use std::sync::Arc; -#[cfg(feature = "conduit_bin")] -use rocket::{get, put}; - /// # `PUT /_matrix/client/r0/profile/{userId}/displayname` /// /// Updates the displayname. 
/// /// - Also makes sure other users receive the update using presence EDUs -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/profile/<_>/displayname", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn set_displayname_route( db: DatabaseGuard, @@ -124,10 +117,6 @@ pub async fn set_displayname_route( /// Returns the displayname of the user. /// /// - If user is on another server: Fetches displayname over federation -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/profile/<_>/displayname", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_displayname_route( db: DatabaseGuard, @@ -163,10 +152,6 @@ pub async fn get_displayname_route( /// Updates the avatar_url and blurhash. /// /// - Also makes sure other users receive the update using presence EDUs -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/profile/<_>/avatar_url", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn set_avatar_url_route( db: DatabaseGuard, @@ -267,10 +252,6 @@ pub async fn set_avatar_url_route( /// Returns the avatar_url and blurhash of the user. /// /// - If user is on another server: Fetches avatar_url and blurhash over federation -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/profile/<_>/avatar_url", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_avatar_url_route( db: DatabaseGuard, @@ -308,10 +289,6 @@ pub async fn get_avatar_url_route( /// Returns the displayname, avatar_url and blurhash of the user. /// /// - If user is on another server: Fetches profile over federation -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/profile/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_profile_route( db: DatabaseGuard, diff --git a/src/client_server/push.rs b/src/client_server/push.rs index a8ba1a2a..322cf89a 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -12,16 +12,9 @@ use ruma::{ push::{ConditionalPushRuleInit, PatternedPushRuleInit, SimplePushRuleInit}, }; -#[cfg(feature = "conduit_bin")] -use rocket::{delete, get, post, put}; - /// # `GET /_matrix/client/r0/pushrules` /// /// Retrieves the push rules event for this user. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/pushrules", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_pushrules_all_route( db: DatabaseGuard, @@ -46,10 +39,6 @@ pub async fn get_pushrules_all_route( /// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` /// /// Retrieves a single specified push rule for this user. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_pushrule_route( db: DatabaseGuard, @@ -103,10 +92,6 @@ pub async fn get_pushrule_route( /// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` /// /// Creates a single specified push rule for this user. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn set_pushrule_route( db: DatabaseGuard, @@ -204,10 +189,6 @@ pub async fn set_pushrule_route( /// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions` /// /// Gets the actions of a single specified push rule for this user. 
-#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/pushrules/<_>/<_>/<_>/actions", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_pushrule_actions_route( db: DatabaseGuard, @@ -266,10 +247,6 @@ pub async fn get_pushrule_actions_route( /// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions` /// /// Sets the actions of a single specified push rule for this user. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/pushrules/<_>/<_>/<_>/actions", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn set_pushrule_actions_route( db: DatabaseGuard, @@ -338,10 +315,6 @@ pub async fn set_pushrule_actions_route( /// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled` /// /// Gets the enabled status of a single specified push rule for this user. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/pushrules/<_>/<_>/<_>/enabled", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_pushrule_enabled_route( db: DatabaseGuard, @@ -402,10 +375,6 @@ pub async fn get_pushrule_enabled_route( /// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled` /// /// Sets the enabled status of a single specified push rule for this user. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/pushrules/<_>/<_>/<_>/enabled", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn set_pushrule_enabled_route( db: DatabaseGuard, @@ -479,10 +448,6 @@ pub async fn set_pushrule_enabled_route( /// # `DELETE /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` /// /// Deletes a single specified push rule for this user. -#[cfg_attr( - feature = "conduit_bin", - delete("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn delete_pushrule_route( db: DatabaseGuard, @@ -546,10 +511,6 @@ pub async fn delete_pushrule_route( /// # `GET /_matrix/client/r0/pushers` /// /// Gets all currently active pushers for the sender user. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/pushers", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_pushers_route( db: DatabaseGuard, @@ -568,10 +529,6 @@ pub async fn get_pushers_route( /// Adds a pusher for the sender user. /// /// - TODO: Handle `append` -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/pushers/set", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn set_pushers_route( db: DatabaseGuard, diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs index 502a612e..c9480f00 100644 --- a/src/client_server/read_marker.rs +++ b/src/client_server/read_marker.rs @@ -10,19 +10,12 @@ use ruma::{ }; use std::collections::BTreeMap; -#[cfg(feature = "conduit_bin")] -use rocket::post; - /// # `POST /_matrix/client/r0/rooms/{roomId}/read_markers` /// /// Sets different types of read markers. /// /// - Updates fully-read account data event to `fully_read` /// - If `read_receipt` is set: Update private marker and public read receipt EDU -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/read_markers", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn set_read_marker_route( db: DatabaseGuard, @@ -89,10 +82,6 @@ pub async fn set_read_marker_route( /// # `POST /_matrix/client/r0/rooms/{roomId}/receipt/{receiptType}/{eventId}` /// /// Sets private read marker and public read receipt EDU. 
-#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/receipt/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn create_receipt_route( db: DatabaseGuard, diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index 85de2330..2b442fc4 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -6,8 +6,6 @@ use ruma::{ events::{room::redaction::RoomRedactionEventContent, EventType}, }; -#[cfg(feature = "conduit_bin")] -use rocket::put; use serde_json::value::to_raw_value; /// # `PUT /_matrix/client/r0/rooms/{roomId}/redact/{eventId}/{txnId}` @@ -15,10 +13,6 @@ use serde_json::value::to_raw_value; /// Tries to send a redaction event into the room. /// /// - TODO: Handle txn id -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/rooms/<_>/redact/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn redact_event_route( db: DatabaseGuard, diff --git a/src/client_server/report.rs b/src/client_server/report.rs index 032e446c..441e33d7 100644 --- a/src/client_server/report.rs +++ b/src/client_server/report.rs @@ -1,21 +1,14 @@ -use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; +use crate::{database::DatabaseGuard, utils::HtmlEscape, ConduitResult, Error, Ruma}; use ruma::{ api::client::{error::ErrorKind, r0::room::report_content}, events::room::message, int, }; -#[cfg(feature = "conduit_bin")] -use rocket::{http::RawStr, post}; - /// # `POST /_matrix/client/r0/rooms/{roomId}/report/{eventId}` /// /// Reports an inappropriate event to homeserver admins /// -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/report/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn report_event_route( db: DatabaseGuard, @@ -70,7 +63,7 @@ pub async fn report_event_route( pdu.room_id, pdu.sender, body.score, - RawStr::new(&body.reason).html_escape() + HtmlEscape(&body.reason) ), )); diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 7ea31d8a..475c5b45 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -30,9 +30,6 @@ use serde_json::{json, value::to_raw_value}; use std::{cmp::max, collections::BTreeMap, sync::Arc}; use tracing::{info, warn}; -#[cfg(feature = "conduit_bin")] -use rocket::{get, post}; - /// # `POST /_matrix/client/r0/createRoom` /// /// Creates a new room. @@ -49,10 +46,6 @@ use rocket::{get, post}; /// - Send events listed in initial state /// - Send events implied by `name` and `topic` /// - Send invite events -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/createRoom", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn create_room_route( db: DatabaseGuard, @@ -425,10 +418,6 @@ pub async fn create_room_route( /// Gets a single event. /// /// - You have to currently be joined to the room (TODO: Respect history visibility) -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_>/event/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_room_event_route( db: DatabaseGuard, @@ -458,10 +447,6 @@ pub async fn get_room_event_route( /// Lists all aliases of the room. 
/// /// - Only users joined to the room are allowed to call this TODO: Allow any user to call it if history_visibility is world readable -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_>/aliases", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_room_aliases_route( db: DatabaseGuard, @@ -496,10 +481,6 @@ pub async fn get_room_aliases_route( /// - Transfers some state events /// - Moves local aliases /// - Modifies old room power levels to prevent users from speaking -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/upgrade", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn upgrade_room_route( db: DatabaseGuard, diff --git a/src/client_server/search.rs b/src/client_server/search.rs index f492292c..3f8a7010 100644 --- a/src/client_server/search.rs +++ b/src/client_server/search.rs @@ -1,8 +1,6 @@ use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; use ruma::api::client::{error::ErrorKind, r0::search::search_events}; -#[cfg(feature = "conduit_bin")] -use rocket::post; use search_events::{EventContextResult, ResultCategories, ResultRoomEvents, SearchResult}; use std::collections::BTreeMap; @@ -11,10 +9,6 @@ use std::collections::BTreeMap; /// Searches rooms for messages. /// /// - Only works if the user is currently joined to the room (TODO: Respect history visibility) -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/search", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn search_events_route( db: DatabaseGuard, diff --git a/src/client_server/session.rs b/src/client_server/session.rs index 61e5519a..264eac03 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -19,16 +19,14 @@ struct Claims { exp: usize, } -#[cfg(feature = "conduit_bin")] -use rocket::{get, post}; - /// # `GET /_matrix/client/r0/login` /// /// Get the supported login types of this server. One of these should be used as the `type` field /// when logging in. -#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/login"))] -#[tracing::instrument] -pub async fn get_login_types_route() -> ConduitResult { +#[tracing::instrument(skip(_body))] +pub async fn get_login_types_route( + _body: Ruma, +) -> ConduitResult { Ok( get_login_types::Response::new(vec![get_login_types::LoginType::Password( Default::default(), @@ -48,10 +46,6 @@ pub async fn get_login_types_route() -> ConduitResult /// /// Note: You can use [`GET /_matrix/client/r0/login`](fn.get_supported_versions_route.html) to see /// supported login types. -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/login", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn login_route( db: DatabaseGuard, @@ -173,10 +167,6 @@ pub async fn login_route( /// - Deletes device metadata (device id, device display name, last seen ip, last seen ts) /// - Forgets to-device events /// - Triggers device list updates -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/logout", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn logout_route( db: DatabaseGuard, @@ -203,10 +193,6 @@ pub async fn logout_route( /// /// Note: This is equivalent to calling [`GET /_matrix/client/r0/logout`](fn.logout_route.html) /// from each device of this user. 
-#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/logout/all", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn logout_all_route( db: DatabaseGuard, diff --git a/src/client_server/state.rs b/src/client_server/state.rs index c07d4825..96b2184c 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -19,9 +19,6 @@ use ruma::{ EventId, RoomId, UserId, }; -#[cfg(feature = "conduit_bin")] -use rocket::{get, put}; - /// # `PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}/{stateKey}` /// /// Sends a state event into the room. @@ -29,10 +26,6 @@ use rocket::{get, put}; /// - The only requirement for the content is that it has to be valid json /// - Tries to send the event into the room, auth rules will determine if it is allowed /// - If event is new canonical_alias: Rejects if alias is incorrect -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/rooms/<_>/state/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn send_state_event_for_key_route( db: DatabaseGuard, @@ -63,10 +56,6 @@ pub async fn send_state_event_for_key_route( /// - The only requirement for the content is that it has to be valid json /// - Tries to send the event into the room, auth rules will determine if it is allowed /// - If event is new canonical_alias: Rejects if alias is incorrect -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/rooms/<_>/state/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn send_state_event_for_empty_key_route( db: DatabaseGuard, @@ -103,10 +92,6 @@ pub async fn send_state_event_for_empty_key_route( /// Get all state events for a room. /// /// - If not joined: Only works if current room history visibility is world readable -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_>/state", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_state_events_route( db: DatabaseGuard, @@ -155,10 +140,6 @@ pub async fn get_state_events_route( /// Get single state event of a room. /// /// - If not joined: Only works if current room history visibility is world readable -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_>/state/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_state_events_for_key_route( db: DatabaseGuard, @@ -211,10 +192,6 @@ pub async fn get_state_events_for_key_route( /// Get single state event of a room. /// /// - If not joined: Only works if current room history visibility is world readable -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_>/state/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_state_events_for_empty_key_route( db: DatabaseGuard, diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 2b814f54..6ba68b0d 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -20,9 +20,6 @@ use std::{ use tokio::sync::watch::Sender; use tracing::error; -#[cfg(feature = "conduit_bin")] -use rocket::{get, tokio}; - /// # `GET /_matrix/client/r0/sync` /// /// Synchronize the client's state with the latest state on the server. 
@@ -57,10 +54,6 @@ use rocket::{get, tokio}; /// /// - Sync is handled in an async task, multiple requests from the same device with the same /// `since` will be cached -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/sync", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn sync_events_route( db: DatabaseGuard, diff --git a/src/client_server/tag.rs b/src/client_server/tag.rs index 42bad4cf..cad3421a 100644 --- a/src/client_server/tag.rs +++ b/src/client_server/tag.rs @@ -8,18 +8,11 @@ use ruma::{ }; use std::collections::BTreeMap; -#[cfg(feature = "conduit_bin")] -use rocket::{delete, get, put}; - /// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}` /// /// Adds a tag to the room. /// /// - Inserts the tag into the tag event of the room account data. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/user/<_>/rooms/<_>/tags/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn update_tag_route( db: DatabaseGuard, @@ -58,10 +51,6 @@ pub async fn update_tag_route( /// Deletes a tag from the room. /// /// - Removes the tag from the tag event of the room account data. -#[cfg_attr( - feature = "conduit_bin", - delete("/_matrix/client/r0/user/<_>/rooms/<_>/tags/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn delete_tag_route( db: DatabaseGuard, @@ -97,10 +86,6 @@ pub async fn delete_tag_route( /// Returns tags on the room. /// /// - Gets the tag event of the room account data. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/user/<_>/rooms/<_>/tags", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_tags_route( db: DatabaseGuard, diff --git a/src/client_server/thirdparty.rs b/src/client_server/thirdparty.rs index 4305902f..d8b7972e 100644 --- a/src/client_server/thirdparty.rs +++ b/src/client_server/thirdparty.rs @@ -1,19 +1,15 @@ -use crate::ConduitResult; +use crate::{ConduitResult, Ruma}; use ruma::api::client::r0::thirdparty::get_protocols; -#[cfg(feature = "conduit_bin")] -use rocket::get; use std::collections::BTreeMap; /// # `GET /_matrix/client/r0/thirdparty/protocols` /// /// TODO: Fetches all metadata about protocols supported by the homeserver. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/thirdparty/protocols") -)] -#[tracing::instrument] -pub async fn get_protocols_route() -> ConduitResult { +#[tracing::instrument(skip(_body))] +pub async fn get_protocols_route( + _body: Ruma, +) -> ConduitResult { // TODO Ok(get_protocols::Response { protocols: BTreeMap::new(), diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index e0aa9e91..12691185 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -10,16 +10,9 @@ use ruma::{ to_device::DeviceIdOrAllDevices, }; -#[cfg(feature = "conduit_bin")] -use rocket::put; - /// # `PUT /_matrix/client/r0/sendToDevice/{eventType}/{txnId}` /// /// Send a to-device event to a set of client devices. 
-#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/sendToDevice/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn send_event_to_device_route( db: DatabaseGuard, diff --git a/src/client_server/typing.rs b/src/client_server/typing.rs index 15e74b35..3a61c584 100644 --- a/src/client_server/typing.rs +++ b/src/client_server/typing.rs @@ -2,18 +2,11 @@ use crate::{database::DatabaseGuard, utils, ConduitResult, Ruma}; use create_typing_event::Typing; use ruma::api::client::r0::typing::create_typing_event; -#[cfg(feature = "conduit_bin")] -use rocket::put; - /// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}` /// /// Sets the typing state of the sender user. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/rooms/<_>/typing/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] -pub fn create_typing_event_route( +pub async fn create_typing_event_route( db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { diff --git a/src/client_server/unversioned.rs b/src/client_server/unversioned.rs index ea685b4b..8b1b66f2 100644 --- a/src/client_server/unversioned.rs +++ b/src/client_server/unversioned.rs @@ -1,11 +1,8 @@ use std::{collections::BTreeMap, iter::FromIterator}; -use crate::ConduitResult; +use crate::{ConduitResult, Ruma}; use ruma::api::client::unversioned::get_supported_versions; -#[cfg(feature = "conduit_bin")] -use rocket::get; - /// # `GET /_matrix/client/versions` /// /// Get the versions of the specification and unstable features supported by this server. @@ -16,9 +13,10 @@ use rocket::get; /// /// Note: Unstable features are used while developing new features. Clients should avoid using /// unstable features in their stable releases -#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/versions"))] -#[tracing::instrument] -pub async fn get_supported_versions_route() -> ConduitResult { +#[tracing::instrument(skip(_body))] +pub async fn get_supported_versions_route( + _body: Ruma, +) -> ConduitResult { let resp = get_supported_versions::Response { versions: vec!["r0.5.0".to_owned(), "r0.6.0".to_owned()], unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]), diff --git a/src/client_server/user_directory.rs b/src/client_server/user_directory.rs index cfcb9bb9..c923ceed 100644 --- a/src/client_server/user_directory.rs +++ b/src/client_server/user_directory.rs @@ -1,18 +1,11 @@ use crate::{database::DatabaseGuard, ConduitResult, Ruma}; use ruma::api::client::r0::user_directory::search_users; -#[cfg(feature = "conduit_bin")] -use rocket::post; - /// # `POST /_matrix/client/r0/user_directory/search` /// /// Searches all known users for a match. /// /// - TODO: Hide users that are not in any public rooms? -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/user_directory/search", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn search_users_route( db: DatabaseGuard, diff --git a/src/client_server/voip.rs b/src/client_server/voip.rs index 66a85f0f..6abebdcf 100644 --- a/src/client_server/voip.rs +++ b/src/client_server/voip.rs @@ -7,20 +7,13 @@ use std::time::{Duration, SystemTime}; type HmacSha1 = Hmac; -#[cfg(feature = "conduit_bin")] -use rocket::get; - /// # `GET /_matrix/client/r0/voip/turnServer` /// /// TODO: Returns information about the recommended turn server. 
-#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/voip/turnServer", data = "") -)] #[tracing::instrument(skip(body, db))] pub async fn turn_server_route( - body: Ruma, db: DatabaseGuard, + body: Ruma, ) -> ConduitResult { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/config.rs b/src/config.rs index 4c0fcc21..48ac9816 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1,4 +1,7 @@ -use std::collections::BTreeMap; +use std::{ + collections::BTreeMap, + net::{IpAddr, Ipv4Addr}, +}; use ruma::ServerName; use serde::{de::IgnoredAny, Deserialize}; @@ -10,6 +13,10 @@ use self::proxy::ProxyConfig; #[derive(Clone, Debug, Deserialize)] pub struct Config { + #[serde(default = "default_address")] + pub address: IpAddr, + #[serde(default = "default_port")] + pub port: u16, pub server_name: Box, #[serde(default = "default_database_backend")] pub database_backend: String, @@ -90,6 +97,14 @@ fn true_fn() -> bool { true } +fn default_address() -> IpAddr { + Ipv4Addr::LOCALHOST.into() +} + +fn default_port() -> u16 { + 8000 +} + fn default_database_backend() -> String { "sqlite".to_owned() } @@ -123,7 +138,7 @@ fn default_max_concurrent_requests() -> u16 { } fn default_log() -> String { - "info,state_res=warn,rocket=off,_=off,sled=off".to_owned() + "info,state_res=warn,_=off,sled=off".to_owned() } fn default_turn_ttl() -> u64 { diff --git a/src/database.rs b/src/database.rs index 2b1671cd..9eb8bd56 100644 --- a/src/database.rs +++ b/src/database.rs @@ -13,16 +13,12 @@ pub mod transaction_ids; pub mod uiaa; pub mod users; +use self::admin::create_admin_room; use crate::{utils, Config, Error, Result}; use abstraction::DatabaseEngine; use directories::ProjectDirs; +use futures_util::{stream::FuturesUnordered, StreamExt}; use lru_cache::LruCache; -use rocket::{ - futures::{channel::mpsc, stream::FuturesUnordered, StreamExt}, - outcome::{try_outcome, IntoOutcome}, - request::{FromRequest, Request}, - Shutdown, State, -}; use ruma::{DeviceId, EventId, RoomId, UserId}; use std::{ collections::{BTreeMap, HashMap, HashSet}, @@ -33,11 +29,9 @@ use std::{ path::Path, sync::{Arc, Mutex, RwLock}, }; -use tokio::sync::{OwnedRwLockReadGuard, RwLock as TokioRwLock, Semaphore}; +use tokio::sync::{mpsc, OwnedRwLockReadGuard, RwLock as TokioRwLock, Semaphore}; use tracing::{debug, error, info, warn}; -use self::admin::create_admin_room; - pub struct Database { _db: Arc, pub globals: globals::Globals, @@ -151,8 +145,8 @@ impl Database { eprintln!("ERROR: Max request size is less than 1KB. 
Please increase it."); } - let (admin_sender, admin_receiver) = mpsc::unbounded(); - let (sending_sender, sending_receiver) = mpsc::unbounded(); + let (admin_sender, admin_receiver) = mpsc::unbounded_channel(); + let (sending_sender, sending_receiver) = mpsc::unbounded_channel(); let db = Arc::new(TokioRwLock::from(Self { _db: builder.clone(), @@ -764,14 +758,9 @@ impl Database { } #[cfg(feature = "conduit_bin")] - pub async fn start_on_shutdown_tasks(db: Arc>, shutdown: Shutdown) { - tokio::spawn(async move { - shutdown.await; - - info!(target: "shutdown-sync", "Received shutdown notification, notifying sync helpers..."); - - db.read().await.globals.rotate.fire(); - }); + pub async fn on_shutdown(db: Arc>) { + info!(target: "shutdown-sync", "Received shutdown notification, notifying sync helpers..."); + db.read().await.globals.rotate.fire(); } pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) { @@ -948,14 +937,23 @@ impl Deref for DatabaseGuard { } } -#[rocket::async_trait] -impl<'r> FromRequest<'r> for DatabaseGuard { - type Error = (); +#[cfg(feature = "conduit_bin")] +#[axum::async_trait] +impl axum::extract::FromRequest for DatabaseGuard +where + B: Send, +{ + type Rejection = axum::extract::rejection::ExtensionRejection; + + async fn from_request( + req: &mut axum::extract::RequestParts, + ) -> Result { + use axum::extract::Extension; - async fn from_request(req: &'r Request<'_>) -> rocket::request::Outcome { - let db = try_outcome!(req.guard::<&State>>>().await); + let Extension(db): Extension>> = + Extension::from_request(req).await?; - Ok(DatabaseGuard(Arc::clone(db).read_owned().await)).or_forward(()) + Ok(DatabaseGuard(db.read_owned().await)) } } diff --git a/src/database/admin.rs b/src/database/admin.rs index 9bbfd4ea..e4b7e0f9 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -1,34 +1,41 @@ -use std::{collections::BTreeMap, convert::TryFrom, convert::TryInto, sync::Arc, time::Instant}; +use std::{ + collections::BTreeMap, + convert::{TryFrom, TryInto}, + sync::Arc, + time::Instant, +}; use crate::{ error::{Error, Result}, pdu::PduBuilder, - server_server, Database, PduEvent, + server_server, + utils::HtmlEscape, + Database, PduEvent, }; use clap::Parser; use regex::Regex; -use rocket::{ - futures::{channel::mpsc, stream::StreamExt}, - http::RawStr, -}; use ruma::{ - events::room::{ - canonical_alias::RoomCanonicalAliasEventContent, - create::RoomCreateEventContent, - guest_access::{GuestAccess, RoomGuestAccessEventContent}, - history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, - join_rules::{JoinRule, RoomJoinRulesEventContent}, - member::{MembershipState, RoomMemberEventContent}, - name::RoomNameEventContent, - power_levels::RoomPowerLevelsEventContent, - topic::RoomTopicEventContent, + events::{ + room::{ + canonical_alias::RoomCanonicalAliasEventContent, + create::RoomCreateEventContent, + guest_access::{GuestAccess, RoomGuestAccessEventContent}, + history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, + join_rules::{JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, RoomMemberEventContent}, + message::RoomMessageEventContent, + name::RoomNameEventContent, + power_levels::RoomPowerLevelsEventContent, + topic::RoomTopicEventContent, + }, + EventType, }, - events::{room::message::RoomMessageEventContent, EventType}, identifiers::{EventId, RoomAliasId, RoomId, RoomName, RoomVersionId, ServerName, UserId}, }; use serde_json::value::to_raw_value; -use tokio::sync::{MutexGuard, RwLock, 
RwLockReadGuard}; +use tokio::sync::{mpsc, MutexGuard, RwLock, RwLockReadGuard}; +#[derive(Debug)] pub enum AdminRoomEvent { ProcessMessage(String), SendMessage(RoomMessageEventContent), @@ -91,7 +98,7 @@ impl Admin { loop { tokio::select! { - Some(event) = receiver.next() => { + Some(event) = receiver.recv() => { let guard = db.read().await; let mutex_state = Arc::clone( guard.globals @@ -123,13 +130,13 @@ impl Admin { pub fn process_message(&self, room_message: String) { self.sender - .unbounded_send(AdminRoomEvent::ProcessMessage(room_message)) + .send(AdminRoomEvent::ProcessMessage(room_message)) .unwrap(); } pub fn send_message(&self, message_content: RoomMessageEventContent) { self.sender - .unbounded_send(AdminRoomEvent::SendMessage(message_content)) + .send(AdminRoomEvent::SendMessage(message_content)) .unwrap(); } } @@ -405,7 +412,7 @@ fn process_admin_command( } else { "PDU was accepted" }, - RawStr::new(&json_text).html_escape() + HtmlEscape(&json_text) ), ) } diff --git a/src/database/sending.rs b/src/database/sending.rs index 4a032855..2d64be15 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -9,11 +9,8 @@ use crate::{ appservice_server, database::pusher, server_server, utils, Database, Error, PduEvent, Result, }; use federation::transactions::send_transaction_message; +use futures_util::{stream::FuturesUnordered, StreamExt}; use ring::digest; -use rocket::futures::{ - channel::mpsc, - stream::{FuturesUnordered, StreamExt}, -}; use ruma::{ api::{ appservice, @@ -33,7 +30,7 @@ use ruma::{ }; use tokio::{ select, - sync::{RwLock, Semaphore}, + sync::{mpsc, RwLock, Semaphore}, }; use tracing::{error, warn}; @@ -170,7 +167,7 @@ impl Sending { Self::parse_servercurrentevent(&k, v).ok().map(|ev| (ev, k)) }) .take(30) - .collect::<>(); + .collect(); // TODO: find edus @@ -207,7 +204,7 @@ impl Sending { } }; }, - Some((key, value)) = receiver.next() => { + Some((key, value)) = receiver.recv() => { if let Ok((outgoing_kind, event)) = Self::parse_servercurrentevent(&key, value) { let guard = db.read().await; @@ -417,7 +414,7 @@ impl Sending { key.push(0xff); key.extend_from_slice(pdu_id); self.servernameevent_data.insert(&key, &[])?; - self.sender.unbounded_send((key, vec![])).unwrap(); + self.sender.send((key, vec![])).unwrap(); Ok(()) } @@ -433,7 +430,7 @@ impl Sending { key.push(0xff); key.extend_from_slice(pdu_id); - self.sender.unbounded_send((key.clone(), vec![])).unwrap(); + self.sender.send((key.clone(), vec![])).unwrap(); (key, Vec::new()) }); @@ -454,7 +451,7 @@ impl Sending { key.push(0xff); key.extend_from_slice(&id.to_be_bytes()); self.servernameevent_data.insert(&key, &serialized)?; - self.sender.unbounded_send((key, serialized)).unwrap(); + self.sender.send((key, serialized)).unwrap(); Ok(()) } @@ -466,7 +463,7 @@ impl Sending { key.push(0xff); key.extend_from_slice(pdu_id); self.servernameevent_data.insert(&key, &[])?; - self.sender.unbounded_send((key, vec![])).unwrap(); + self.sender.send((key, vec![])).unwrap(); Ok(()) } diff --git a/src/error.rs b/src/error.rs index 5ffe48c9..817ef50f 100644 --- a/src/error.rs +++ b/src/error.rs @@ -1,27 +1,20 @@ +use std::convert::Infallible; + +use http::StatusCode; use ruma::{ api::client::{ error::{Error as RumaError, ErrorKind}, - r0::uiaa::UiaaInfo, + r0::uiaa::{UiaaInfo, UiaaResponse}, }, ServerName, }; use thiserror::Error; -use tracing::warn; +use tracing::{error, warn}; #[cfg(feature = "persy")] use persy::PersyError; -#[cfg(feature = "conduit_bin")] -use { - crate::RumaResponse, - http::StatusCode, - 
rocket::{ - response::{self, Responder}, - Request, - }, - ruma::api::client::r0::uiaa::UiaaResponse, - tracing::error, -}; +use crate::RumaResponse; pub type Result = std::result::Result; @@ -81,6 +74,9 @@ pub enum Error { BadRequest(ErrorKind, &'static str), #[error("{0}")] Conflict(&'static str), // This is only needed for when a room alias already exists + #[cfg(feature = "conduit_bin")] + #[error("{0}")] + ExtensionError(#[from] axum::extract::rejection::ExtensionRejection), } impl Error { @@ -139,16 +135,6 @@ impl Error { } } -#[cfg(feature = "conduit_bin")] -impl<'r, 'o> Responder<'r, 'o> for Error -where - 'o: 'r, -{ - fn respond_to(self, r: &'r Request<'_>) -> response::Result<'o> { - self.to_response().respond_to(r) - } -} - #[cfg(feature = "persy")] impl> From> for Error { fn from(err: persy::PE) -> Self { @@ -157,3 +143,16 @@ impl> From> for Error { } } } + +impl From for Error { + fn from(i: Infallible) -> Self { + match i {} + } +} + +#[cfg(feature = "conduit_bin")] +impl axum::response::IntoResponse for Error { + fn into_response(self) -> axum::response::Response { + self.to_response().into_response() + } +} diff --git a/src/lib.rs b/src/lib.rs index 030dfc3a..135ab854 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -7,8 +7,6 @@ #![allow(clippy::suspicious_else_formatting)] #![deny(clippy::dbg_macro)] -use std::ops::Deref; - mod config; mod database; mod error; @@ -24,16 +22,4 @@ pub use config::Config; pub use database::Database; pub use error::{Error, Result}; pub use pdu::PduEvent; -pub use rocket::Config as RocketConfig; pub use ruma_wrapper::{ConduitResult, Ruma, RumaResponse}; - -pub struct State<'r, T: Send + Sync + 'static>(pub &'r T); - -impl<'r, T: Send + Sync + 'static> Deref for State<'r, T> { - type Target = T; - - #[inline(always)] - fn deref(&self) -> &T { - self.0 - } -} diff --git a/src/main.rs b/src/main.rs index ea09dd5b..3ab12941 100644 --- a/src/main.rs +++ b/src/main.rs @@ -7,24 +7,37 @@ #![allow(clippy::suspicious_else_formatting)] #![deny(clippy::dbg_macro)] -use std::sync::Arc; +use std::{future::Future, net::SocketAddr, sync::Arc, time::Duration}; -use maplit::hashset; +use axum::{ + extract::{FromRequest, MatchedPath}, + handler::Handler, + routing::{get, on, MethodFilter}, + Router, +}; +use figment::{ + providers::{Env, Format, Toml}, + Figment, +}; +use http::{ + header::{self, HeaderName}, + Method, +}; use opentelemetry::trace::{FutureExt, Tracer}; -use rocket::{ - catch, catchers, - figment::{ - providers::{Env, Format, Toml}, - Figment, - }, - routes, Request, +use ruma::{ + api::{IncomingRequest, Metadata}, + Outgoing, +}; +use tokio::{signal, sync::RwLock}; +use tower::ServiceBuilder; +use tower_http::{ + cors::{self, CorsLayer}, + trace::TraceLayer, + ServiceBuilderExt as _, }; -use ruma::api::client::error::ErrorKind; -use tokio::sync::RwLock; use tracing_subscriber::{prelude::*, EnvFilter}; pub use conduit::*; // Re-export everything from the library crate -pub use rocket::State; #[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] use tikv_jemallocator::Jemalloc; @@ -33,160 +46,10 @@ use tikv_jemallocator::Jemalloc; #[global_allocator] static GLOBAL: Jemalloc = Jemalloc; -fn setup_rocket(config: Figment, data: Arc>) -> rocket::Rocket { - rocket::custom(config) - .manage(data) - .mount( - "/", - routes![ - client_server::get_supported_versions_route, - client_server::get_register_available_route, - client_server::register_route, - client_server::get_login_types_route, - client_server::login_route, - client_server::whoami_route, - 
client_server::logout_route, - client_server::logout_all_route, - client_server::change_password_route, - client_server::deactivate_route, - client_server::third_party_route, - client_server::get_capabilities_route, - client_server::get_pushrules_all_route, - client_server::set_pushrule_route, - client_server::get_pushrule_route, - client_server::set_pushrule_enabled_route, - client_server::get_pushrule_enabled_route, - client_server::get_pushrule_actions_route, - client_server::set_pushrule_actions_route, - client_server::delete_pushrule_route, - client_server::get_room_event_route, - client_server::get_room_aliases_route, - client_server::get_filter_route, - client_server::create_filter_route, - client_server::set_global_account_data_route, - client_server::set_room_account_data_route, - client_server::get_global_account_data_route, - client_server::get_room_account_data_route, - client_server::set_displayname_route, - client_server::get_displayname_route, - client_server::set_avatar_url_route, - client_server::get_avatar_url_route, - client_server::get_profile_route, - client_server::set_presence_route, - client_server::get_presence_route, - client_server::upload_keys_route, - client_server::get_keys_route, - client_server::claim_keys_route, - client_server::create_backup_route, - client_server::update_backup_route, - client_server::delete_backup_route, - client_server::get_latest_backup_route, - client_server::get_backup_route, - client_server::add_backup_key_sessions_route, - client_server::add_backup_keys_route, - client_server::delete_backup_key_session_route, - client_server::delete_backup_key_sessions_route, - client_server::delete_backup_keys_route, - client_server::get_backup_key_session_route, - client_server::get_backup_key_sessions_route, - client_server::get_backup_keys_route, - client_server::set_read_marker_route, - client_server::create_receipt_route, - client_server::create_typing_event_route, - client_server::create_room_route, - client_server::redact_event_route, - client_server::report_event_route, - client_server::create_alias_route, - client_server::delete_alias_route, - client_server::get_alias_route, - client_server::join_room_by_id_route, - client_server::join_room_by_id_or_alias_route, - client_server::joined_members_route, - client_server::leave_room_route, - client_server::forget_room_route, - client_server::joined_rooms_route, - client_server::kick_user_route, - client_server::ban_user_route, - client_server::unban_user_route, - client_server::invite_user_route, - client_server::set_room_visibility_route, - client_server::get_room_visibility_route, - client_server::get_public_rooms_route, - client_server::get_public_rooms_filtered_route, - client_server::search_users_route, - client_server::get_member_events_route, - client_server::get_protocols_route, - client_server::send_message_event_route, - client_server::send_state_event_for_key_route, - client_server::send_state_event_for_empty_key_route, - client_server::get_state_events_route, - client_server::get_state_events_for_key_route, - client_server::get_state_events_for_empty_key_route, - client_server::sync_events_route, - client_server::get_context_route, - client_server::get_message_events_route, - client_server::search_events_route, - client_server::turn_server_route, - client_server::send_event_to_device_route, - client_server::get_media_config_route, - client_server::create_content_route, - client_server::get_content_as_filename_route, - client_server::get_content_route, - 
client_server::get_content_thumbnail_route, - client_server::get_devices_route, - client_server::get_device_route, - client_server::update_device_route, - client_server::delete_device_route, - client_server::delete_devices_route, - client_server::get_tags_route, - client_server::update_tag_route, - client_server::delete_tag_route, - client_server::options_route, - client_server::upload_signing_keys_route, - client_server::upload_signatures_route, - client_server::get_key_changes_route, - client_server::get_pushers_route, - client_server::set_pushers_route, - // client_server::third_party_route, - client_server::upgrade_room_route, - server_server::get_server_version_route, - server_server::get_server_keys_route, - server_server::get_server_keys_deprecated_route, - server_server::get_public_rooms_route, - server_server::get_public_rooms_filtered_route, - server_server::send_transaction_message_route, - server_server::get_event_route, - server_server::get_missing_events_route, - server_server::get_event_authorization_route, - server_server::get_room_state_route, - server_server::get_room_state_ids_route, - server_server::create_join_event_template_route, - server_server::create_join_event_v1_route, - server_server::create_join_event_v2_route, - server_server::create_invite_route, - server_server::get_devices_route, - server_server::get_room_information_route, - server_server::get_profile_information_route, - server_server::get_keys_route, - server_server::claim_keys_route, - ], - ) - .register( - "/", - catchers![ - not_found_catcher, - forbidden_catcher, - unknown_token_catcher, - missing_token_catcher, - bad_json_catcher - ], - ) -} - -#[rocket::main] +#[tokio::main] async fn main() { let raw_config = - Figment::from(default_config()) + Figment::new() .merge( Toml::file(Env::var("CONDUIT_CONFIG").expect( "The CONDUIT_CONFIG env var needs to be set. 
Example: /etc/conduit.toml", @@ -217,14 +80,7 @@ async fn main() { } }; - let rocket = setup_rocket(raw_config, Arc::clone(&db)) - .ignite() - .await - .unwrap(); - - Database::start_on_shutdown_tasks(db, rocket.shutdown()).await; - - rocket.launch().await.unwrap(); + run_server(&config, db).await.unwrap(); }; if config.allow_jaeger { @@ -264,55 +120,282 @@ async fn main() { } } -#[catch(404)] -fn not_found_catcher(_: &Request<'_>) -> String { - "404 Not Found".to_owned() -} +async fn run_server(config: &Config, db: Arc>) -> hyper::Result<()> { + let listen_addr = SocketAddr::from((config.address, config.port)); -#[catch(580)] -fn forbidden_catcher() -> Result<()> { - Err(Error::BadRequest(ErrorKind::Forbidden, "Forbidden.")) -} + let x_requested_with = HeaderName::from_static("x-requested-with"); -#[catch(581)] -fn unknown_token_catcher() -> Result<()> { - Err(Error::BadRequest( - ErrorKind::UnknownToken { soft_logout: false }, - "Unknown token.", - )) -} + let middlewares = ServiceBuilder::new() + .sensitive_headers([header::AUTHORIZATION]) + .layer( + TraceLayer::new_for_http().make_span_with(|request: &http::Request<_>| { + let path = if let Some(path) = request.extensions().get::() { + path.as_str() + } else { + request.uri().path() + }; + + tracing::info_span!("http_request", %path) + }), + ) + .compression() + .layer( + CorsLayer::new() + .allow_origin(cors::any()) + .allow_methods([ + Method::GET, + Method::POST, + Method::PUT, + Method::DELETE, + Method::OPTIONS, + ]) + .allow_headers([ + header::ORIGIN, + x_requested_with, + header::CONTENT_TYPE, + header::ACCEPT, + header::AUTHORIZATION, + ]) + .max_age(Duration::from_secs(86400)), + ) + .add_extension(db.clone()); + + axum::Server::bind(&listen_addr) + .serve(routes().layer(middlewares).into_make_service()) + .with_graceful_shutdown(shutdown_signal()) + .await?; + + // After serve exits and before exiting, shutdown the DB + Database::on_shutdown(db).await; -#[catch(582)] -fn missing_token_catcher() -> Result<()> { - Err(Error::BadRequest(ErrorKind::MissingToken, "Missing token.")) + Ok(()) } -#[catch(583)] -fn bad_json_catcher() -> Result<()> { - Err(Error::BadRequest(ErrorKind::BadJson, "Bad json.")) +fn routes() -> Router { + Router::new() + .ruma_route(client_server::get_supported_versions_route) + .ruma_route(client_server::get_register_available_route) + .ruma_route(client_server::register_route) + .ruma_route(client_server::get_login_types_route) + .ruma_route(client_server::login_route) + .ruma_route(client_server::whoami_route) + .ruma_route(client_server::logout_route) + .ruma_route(client_server::logout_all_route) + .ruma_route(client_server::change_password_route) + .ruma_route(client_server::deactivate_route) + .ruma_route(client_server::third_party_route) + .ruma_route(client_server::get_capabilities_route) + .ruma_route(client_server::get_pushrules_all_route) + .ruma_route(client_server::set_pushrule_route) + .ruma_route(client_server::get_pushrule_route) + .ruma_route(client_server::set_pushrule_enabled_route) + .ruma_route(client_server::get_pushrule_enabled_route) + .ruma_route(client_server::get_pushrule_actions_route) + .ruma_route(client_server::set_pushrule_actions_route) + .ruma_route(client_server::delete_pushrule_route) + .ruma_route(client_server::get_room_event_route) + .ruma_route(client_server::get_room_aliases_route) + .ruma_route(client_server::get_filter_route) + .ruma_route(client_server::create_filter_route) + .ruma_route(client_server::set_global_account_data_route) + 
.ruma_route(client_server::set_room_account_data_route) + .ruma_route(client_server::get_global_account_data_route) + .ruma_route(client_server::get_room_account_data_route) + .ruma_route(client_server::set_displayname_route) + .ruma_route(client_server::get_displayname_route) + .ruma_route(client_server::set_avatar_url_route) + .ruma_route(client_server::get_avatar_url_route) + .ruma_route(client_server::get_profile_route) + .ruma_route(client_server::set_presence_route) + .ruma_route(client_server::get_presence_route) + .ruma_route(client_server::upload_keys_route) + .ruma_route(client_server::get_keys_route) + .ruma_route(client_server::claim_keys_route) + .ruma_route(client_server::create_backup_route) + .ruma_route(client_server::update_backup_route) + .ruma_route(client_server::delete_backup_route) + .ruma_route(client_server::get_latest_backup_route) + .ruma_route(client_server::get_backup_route) + .ruma_route(client_server::add_backup_key_sessions_route) + .ruma_route(client_server::add_backup_keys_route) + .ruma_route(client_server::delete_backup_key_session_route) + .ruma_route(client_server::delete_backup_key_sessions_route) + .ruma_route(client_server::delete_backup_keys_route) + .ruma_route(client_server::get_backup_key_session_route) + .ruma_route(client_server::get_backup_key_sessions_route) + .ruma_route(client_server::get_backup_keys_route) + .ruma_route(client_server::set_read_marker_route) + .ruma_route(client_server::create_receipt_route) + .ruma_route(client_server::create_typing_event_route) + .ruma_route(client_server::create_room_route) + .ruma_route(client_server::redact_event_route) + .ruma_route(client_server::report_event_route) + .ruma_route(client_server::create_alias_route) + .ruma_route(client_server::delete_alias_route) + .ruma_route(client_server::get_alias_route) + .ruma_route(client_server::join_room_by_id_route) + .ruma_route(client_server::join_room_by_id_or_alias_route) + .ruma_route(client_server::joined_members_route) + .ruma_route(client_server::leave_room_route) + .ruma_route(client_server::forget_room_route) + .ruma_route(client_server::joined_rooms_route) + .ruma_route(client_server::kick_user_route) + .ruma_route(client_server::ban_user_route) + .ruma_route(client_server::unban_user_route) + .ruma_route(client_server::invite_user_route) + .ruma_route(client_server::set_room_visibility_route) + .ruma_route(client_server::get_room_visibility_route) + .ruma_route(client_server::get_public_rooms_route) + .ruma_route(client_server::get_public_rooms_filtered_route) + .ruma_route(client_server::search_users_route) + .ruma_route(client_server::get_member_events_route) + .ruma_route(client_server::get_protocols_route) + .ruma_route(client_server::send_message_event_route) + .ruma_route(client_server::send_state_event_for_key_route) + .ruma_route(client_server::send_state_event_for_empty_key_route) + .ruma_route(client_server::get_state_events_route) + .ruma_route(client_server::get_state_events_for_key_route) + .ruma_route(client_server::get_state_events_for_empty_key_route) + .route( + "/_matrix/client/r0/sync", + get(client_server::sync_events_route), + ) + .ruma_route(client_server::get_context_route) + .ruma_route(client_server::get_message_events_route) + .ruma_route(client_server::search_events_route) + .ruma_route(client_server::turn_server_route) + .ruma_route(client_server::send_event_to_device_route) + .ruma_route(client_server::get_media_config_route) + .ruma_route(client_server::create_content_route) + 
.ruma_route(client_server::get_content_route) + .ruma_route(client_server::get_content_as_filename_route) + .ruma_route(client_server::get_content_thumbnail_route) + .ruma_route(client_server::get_devices_route) + .ruma_route(client_server::get_device_route) + .ruma_route(client_server::update_device_route) + .ruma_route(client_server::delete_device_route) + .ruma_route(client_server::delete_devices_route) + .ruma_route(client_server::get_tags_route) + .ruma_route(client_server::update_tag_route) + .ruma_route(client_server::delete_tag_route) + .ruma_route(client_server::upload_signing_keys_route) + .ruma_route(client_server::upload_signatures_route) + .ruma_route(client_server::get_key_changes_route) + .ruma_route(client_server::get_pushers_route) + .ruma_route(client_server::set_pushers_route) + // .ruma_route(client_server::third_party_route) + .ruma_route(client_server::upgrade_room_route) + .ruma_route(server_server::get_server_version_route) + .route( + "/_matrix/key/v2/server", + get(server_server::get_server_keys_route), + ) + .route( + "/_matrix/key/v2/server/:key_id", + get(server_server::get_server_keys_deprecated_route), + ) + .ruma_route(server_server::get_public_rooms_route) + .ruma_route(server_server::get_public_rooms_filtered_route) + .ruma_route(server_server::send_transaction_message_route) + .ruma_route(server_server::get_event_route) + .ruma_route(server_server::get_missing_events_route) + .ruma_route(server_server::get_event_authorization_route) + .ruma_route(server_server::get_room_state_route) + .ruma_route(server_server::get_room_state_ids_route) + .ruma_route(server_server::create_join_event_template_route) + .ruma_route(server_server::create_join_event_v1_route) + .ruma_route(server_server::create_join_event_v2_route) + .ruma_route(server_server::create_invite_route) + .ruma_route(server_server::get_devices_route) + .ruma_route(server_server::get_room_information_route) + .ruma_route(server_server::get_profile_information_route) + .ruma_route(server_server::get_keys_route) + .ruma_route(server_server::claim_keys_route) } -fn default_config() -> rocket::Config { - use rocket::config::{LogLevel, Shutdown, Sig}; +async fn shutdown_signal() { + let ctrl_c = async { + signal::ctrl_c() + .await + .expect("failed to install Ctrl+C handler"); + }; + + #[cfg(unix)] + let terminate = async { + signal::unix::signal(signal::unix::SignalKind::terminate()) + .expect("failed to install signal handler") + .recv() + .await; + }; - rocket::Config { - // Disable rocket's logging to get only tracing-subscriber's log output - log_level: LogLevel::Off, - shutdown: Shutdown { - // Once shutdown is triggered, this is the amount of seconds before rocket - // will forcefully start shutting down connections, this gives enough time to /sync - // requests and the like (which havent gotten the memo, somehow) to still complete gracefully. - grace: 35, + #[cfg(not(unix))] + let terminate = std::future::pending::<()>(); - // After the grace period, rocket starts shutting down connections, and waits at least this - // many seconds before forcefully shutting all of them down. - mercy: 10, + tokio::select! 
{ + _ = ctrl_c => {}, + _ = terminate => {}, + } +} - #[cfg(unix)] - signals: hashset![Sig::Term, Sig::Int], +trait RouterExt { + fn ruma_route(self, handler: H) -> Self + where + H: RumaHandler, + T: 'static; +} - ..Shutdown::default() - }, - ..rocket::Config::release_default() +impl RouterExt for Router { + fn ruma_route(self, handler: H) -> Self + where + H: RumaHandler, + T: 'static, + { + let meta = H::METADATA; + let method_filter = match meta.method { + Method::DELETE => MethodFilter::DELETE, + Method::GET => MethodFilter::GET, + Method::HEAD => MethodFilter::HEAD, + Method::OPTIONS => MethodFilter::OPTIONS, + Method::PATCH => MethodFilter::PATCH, + Method::POST => MethodFilter::POST, + Method::PUT => MethodFilter::PUT, + Method::TRACE => MethodFilter::TRACE, + _ => panic!(""), + }; + + self.route(meta.path, on(method_filter, handler)) } } + +pub trait RumaHandler: Handler { + const METADATA: Metadata; +} + +macro_rules! impl_ruma_handler { + ( $($ty:ident),* $(,)? ) => { + #[axum::async_trait] + #[allow(non_snake_case)] + impl RumaHandler<($($ty,)* Ruma,)> for F + where + Req: Outgoing, + Req::Incoming: IncomingRequest + Send, + F: FnOnce($($ty,)* Ruma) -> Fut + Clone + Send + 'static, + Fut: Future::OutgoingResponse + >> + Send, + $( $ty: FromRequest + Send, )* + { + const METADATA: Metadata = Req::Incoming::METADATA; + } + }; +} + +impl_ruma_handler!(); +impl_ruma_handler!(T1); +impl_ruma_handler!(T1, T2); +impl_ruma_handler!(T1, T2, T3); +impl_ruma_handler!(T1, T2, T3, T4); +impl_ruma_handler!(T1, T2, T3, T4, T5); +impl_ruma_handler!(T1, T2, T3, T4, T5, T6); +impl_ruma_handler!(T1, T2, T3, T4, T5, T6, T7); +impl_ruma_handler!(T1, T2, T3, T4, T5, T6, T7, T8); diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 1bd921d9..12be79a9 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -1,6 +1,6 @@ -use crate::{database::DatabaseGuard, Error}; +use crate::Error; use ruma::{ - api::{client::r0::uiaa::UiaaResponse, OutgoingResponse}, + api::client::r0::uiaa::UiaaResponse, identifiers::{DeviceId, UserId}, signatures::CanonicalJsonValue, Outgoing, ServerName, @@ -8,24 +8,9 @@ use ruma::{ use std::ops::Deref; #[cfg(feature = "conduit_bin")] -use { - crate::server_server, - rocket::{ - data::{self, ByteUnit, Data, FromData}, - http::Status, - outcome::Outcome::*, - response::{self, Responder}, - tokio::io::AsyncReadExt, - Request, - }, - ruma::api::{AuthScheme, IncomingRequest}, - std::collections::BTreeMap, - std::io::Cursor, - tracing::{debug, warn}, -}; +mod axum; -/// This struct converts rocket requests into ruma structs by converting them into http requests -/// first. 
+/// Extractor for Ruma request structs pub struct Ruma { pub body: T::Incoming, pub sender_user: Option>, @@ -36,300 +21,6 @@ pub struct Ruma { pub from_appservice: bool, } -#[cfg(feature = "conduit_bin")] -#[rocket::async_trait] -impl<'a, T: Outgoing> FromData<'a> for Ruma -where - T::Incoming: IncomingRequest, -{ - type Error = (); - - #[tracing::instrument(skip(request, data))] - async fn from_data( - request: &'a Request<'_>, - data: Data<'a>, - ) -> data::Outcome<'a, Self, Self::Error> { - let metadata = T::Incoming::METADATA; - let db = request - .guard::() - .await - .expect("database was loaded"); - - // Get token from header or query value - let token = request - .headers() - .get_one("Authorization") - .and_then(|s| s.get(7..)) // Split off "Bearer " - .or_else(|| request.query_value("access_token").and_then(|r| r.ok())); - - let limit = db.globals.max_request_size(); - let mut handle = data.open(ByteUnit::Byte(limit.into())); - let mut body = Vec::new(); - if handle.read_to_end(&mut body).await.is_err() { - // Client disconnected - // Missing Token - return Failure((Status::new(582), ())); - } - - let mut json_body = serde_json::from_slice::(&body).ok(); - - let (sender_user, sender_device, sender_servername, from_appservice) = if let Some(( - _id, - registration, - )) = db - .appservice - .all() - .unwrap() - .iter() - .find(|(_id, registration)| { - registration - .get("as_token") - .and_then(|as_token| as_token.as_str()) - .map_or(false, |as_token| token == Some(as_token)) - }) { - match metadata.authentication { - AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { - let user_id = request.query_value::("user_id").map_or_else( - || { - UserId::parse_with_server_name( - registration - .get("sender_localpart") - .unwrap() - .as_str() - .unwrap(), - db.globals.server_name(), - ) - .unwrap() - }, - |string| { - UserId::parse(string.expect("parsing to string always works")).unwrap() - }, - ); - - if !db.users.exists(&user_id).unwrap() { - // Forbidden - return Failure((Status::new(580), ())); - } - - // TODO: Check if appservice is allowed to be that user - (Some(user_id), None, None, true) - } - AuthScheme::ServerSignatures => (None, None, None, true), - AuthScheme::None => (None, None, None, true), - } - } else { - match metadata.authentication { - AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { - if let Some(token) = token { - match db.users.find_from_token(token).unwrap() { - // Unknown Token - None => return Failure((Status::new(581), ())), - Some((user_id, device_id)) => ( - Some(user_id), - Some(Box::::from(device_id)), - None, - false, - ), - } - } else { - // Missing Token - return Failure((Status::new(582), ())); - } - } - AuthScheme::ServerSignatures => { - // Get origin from header - let x_matrix = match request - .headers() - .get_one("Authorization") - .and_then(|s| s.get(9..)) // Split off "X-Matrix " and parse the rest - .map(|s| { - s.split_terminator(',') - .map(|field| { - let mut splits = field.splitn(2, '='); - (splits.next(), splits.next().map(|s| s.trim_matches('"'))) - }) - .collect::>() - }) { - Some(t) => t, - None => { - warn!("No Authorization header"); - - // Forbidden - return Failure((Status::new(580), ())); - } - }; - - let origin_str = match x_matrix.get(&Some("origin")) { - Some(Some(o)) => *o, - _ => { - warn!("Invalid X-Matrix header origin field: {:?}", x_matrix); - - // Forbidden - return Failure((Status::new(580), ())); - } - }; - - let origin = match ServerName::parse(origin_str) { - Ok(s) => s, - _ => { - warn!( 
- "Invalid server name in X-Matrix header origin field: {:?}", - x_matrix - ); - - // Forbidden - return Failure((Status::new(580), ())); - } - }; - - let key = match x_matrix.get(&Some("key")) { - Some(Some(k)) => *k, - _ => { - warn!("Invalid X-Matrix header key field: {:?}", x_matrix); - - // Forbidden - return Failure((Status::new(580), ())); - } - }; - - let sig = match x_matrix.get(&Some("sig")) { - Some(Some(s)) => *s, - _ => { - warn!("Invalid X-Matrix header sig field: {:?}", x_matrix); - - // Forbidden - return Failure((Status::new(580), ())); - } - }; - - let mut request_map = BTreeMap::::new(); - - if let Some(json_body) = &json_body { - request_map.insert("content".to_owned(), json_body.clone()); - }; - - request_map.insert( - "method".to_owned(), - CanonicalJsonValue::String(request.method().to_string()), - ); - request_map.insert( - "uri".to_owned(), - CanonicalJsonValue::String(request.uri().to_string()), - ); - request_map.insert( - "origin".to_owned(), - CanonicalJsonValue::String(origin.as_str().to_owned()), - ); - request_map.insert( - "destination".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - - let mut origin_signatures = BTreeMap::new(); - origin_signatures - .insert(key.to_owned(), CanonicalJsonValue::String(sig.to_owned())); - - let mut signatures = BTreeMap::new(); - signatures.insert( - origin.as_str().to_owned(), - CanonicalJsonValue::Object(origin_signatures), - ); - - request_map.insert( - "signatures".to_owned(), - CanonicalJsonValue::Object(signatures), - ); - - let keys = - match server_server::fetch_signing_keys(&db, &origin, vec![key.to_owned()]) - .await - { - Ok(b) => b, - Err(e) => { - warn!("Failed to fetch signing keys: {}", e); - - // Forbidden - return Failure((Status::new(580), ())); - } - }; - - let mut pub_key_map = BTreeMap::new(); - pub_key_map.insert(origin.as_str().to_owned(), keys); - - match ruma::signatures::verify_json(&pub_key_map, &request_map) { - Ok(()) => (None, None, Some(origin), false), - Err(e) => { - warn!( - "Failed to verify json request from {}: {}\n{:?}", - origin, e, request_map - ); - - if request.uri().to_string().contains('@') { - warn!("Request uri contained '@' character. 
Make sure your reverse proxy gives Conduit the raw uri (apache: use nocanon)"); - } - - // Forbidden - return Failure((Status::new(580), ())); - } - } - } - AuthScheme::None => (None, None, None, false), - } - }; - - let mut http_request = http::Request::builder() - .uri(request.uri().to_string()) - .method(&*request.method().to_string()); - for header in request.headers().iter() { - http_request = http_request.header(header.name.as_str(), &*header.value); - } - - if let Some(json_body) = json_body.as_mut().and_then(|val| val.as_object_mut()) { - let user_id = sender_user.clone().unwrap_or_else(|| { - UserId::parse_with_server_name("", db.globals.server_name()) - .expect("we know this is valid") - }); - - if let Some(CanonicalJsonValue::Object(initial_request)) = json_body - .get("auth") - .and_then(|auth| auth.as_object()) - .and_then(|auth| auth.get("session")) - .and_then(|session| session.as_str()) - .and_then(|session| { - db.uiaa.get_uiaa_request( - &user_id, - &sender_device.clone().unwrap_or_else(|| "".into()), - session, - ) - }) - { - for (key, value) in initial_request { - json_body.entry(key).or_insert(value); - } - } - body = serde_json::to_vec(json_body).expect("value to bytes can't fail"); - } - - let http_request = http_request.body(&*body).unwrap(); - debug!("{:?}", http_request); - match ::try_from_http_request(http_request) { - Ok(t) => Success(Ruma { - body: t, - sender_user, - sender_device, - sender_servername, - from_appservice, - json_body, - }), - Err(e) => { - warn!("{:?}", e); - // Bad Json - Failure((Status::new(583), ())) - } - } - } -} - impl Deref for Ruma { type Target = T::Incoming; @@ -338,41 +29,9 @@ impl Deref for Ruma { } } -/// This struct converts ruma responses into rocket http responses. +/// This struct converts ruma structs to http responses. 
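/// (Handlers keep returning `ConduitResult<T>`, i.e. `Result<RumaResponse<T>, Error>`;
/// the `RumaResponse<T>` inside is what the axum `IntoResponse` impl added in the new
/// `ruma_wrapper/axum.rs` below turns into an actual HTTP response.)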
pub type ConduitResult = Result, Error>; -pub fn response(response: RumaResponse) -> response::Result<'static> { - let http_response = response - .0 - .try_into_http_response::>() - .map_err(|_| Status::InternalServerError)?; - - let mut response = rocket::response::Response::build(); - - let status = http_response.status(); - response.status(Status::new(status.as_u16())); - - for header in http_response.headers() { - response.raw_header(header.0.to_string(), header.1.to_str().unwrap().to_owned()); - } - - let http_body = http_response.into_body(); - - response.sized_body(http_body.len(), Cursor::new(http_body)); - - response.raw_header("Access-Control-Allow-Origin", "*"); - response.raw_header( - "Access-Control-Allow-Methods", - "GET, POST, PUT, DELETE, OPTIONS", - ); - response.raw_header( - "Access-Control-Allow-Headers", - "Origin, X-Requested-With, Content-Type, Accept, Authorization", - ); - response.raw_header("Access-Control-Max-Age", "86400"); - response.ok() -} - #[derive(Clone)] pub struct RumaResponse(pub T); @@ -387,14 +46,3 @@ impl From for RumaResponse { t.to_response() } } - -#[cfg(feature = "conduit_bin")] -impl<'r, 'o, T> Responder<'r, 'o> for RumaResponse -where - 'o: 'r, - T: OutgoingResponse, -{ - fn respond_to(self, _: &'r Request<'_>) -> response::Result<'o> { - response(self) - } -} diff --git a/src/ruma_wrapper/axum.rs b/src/ruma_wrapper/axum.rs new file mode 100644 index 00000000..d2cf3f15 --- /dev/null +++ b/src/ruma_wrapper/axum.rs @@ -0,0 +1,338 @@ +use std::{collections::BTreeMap, iter::FromIterator, str}; + +use axum::{ + async_trait, + body::{Full, HttpBody}, + extract::{FromRequest, RequestParts, TypedHeader}, + headers::{ + authorization::{Bearer, Credentials}, + Authorization, + }, + response::{IntoResponse, Response}, + BoxError, +}; +use bytes::{BufMut, Bytes, BytesMut}; +use http::StatusCode; +use ruma::{ + api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse}, + signatures::CanonicalJsonValue, + DeviceId, Outgoing, ServerName, UserId, +}; +use tracing::{debug, warn}; + +use super::{Ruma, RumaResponse}; +use crate::{database::DatabaseGuard, server_server, Error, Result}; + +#[async_trait] +impl FromRequest for Ruma +where + T: Outgoing, + T::Incoming: IncomingRequest, + B: HttpBody + Send, + B::Data: Send, + B::Error: Into, +{ + type Rejection = Error; + + async fn from_request(req: &mut RequestParts) -> Result { + let metadata = T::Incoming::METADATA; + let db = DatabaseGuard::from_request(req).await?; + let auth_header = Option::>>::from_request(req).await?; + + // FIXME: Do this more efficiently + let query: BTreeMap = + ruma::serde::urlencoded::from_str(req.uri().query().unwrap_or_default()) + .expect("Query to string map deserialization should be fine"); + + let token = match &auth_header { + Some(TypedHeader(Authorization(bearer))) => Some(bearer.token()), + None => query.get("access_token").map(|tok| tok.as_str()), + }; + + let mut body = Bytes::from_request(req) + .await + .map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?; + + let mut json_body = serde_json::from_slice::(&body).ok(); + + let appservices = db.appservice.all().unwrap(); + let appservice_registration = appservices.iter().find(|(_id, registration)| { + registration + .get("as_token") + .and_then(|as_token| as_token.as_str()) + .map_or(false, |as_token| token == Some(as_token)) + }); + + let (sender_user, sender_device, sender_servername, from_appservice) = + if let Some((_id, registration)) = appservice_registration { + match 
metadata.authentication { + AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { + let user_id = query.get("user_id").map_or_else( + || { + UserId::parse_with_server_name( + registration + .get("sender_localpart") + .unwrap() + .as_str() + .unwrap(), + db.globals.server_name(), + ) + .unwrap() + }, + |s| UserId::parse(s.as_str()).unwrap(), + ); + + if !db.users.exists(&user_id).unwrap() { + return Err(forbidden()); + } + + // TODO: Check if appservice is allowed to be that user + (Some(user_id), None, None, true) + } + AuthScheme::ServerSignatures => (None, None, None, true), + AuthScheme::None => (None, None, None, true), + } + } else { + match metadata.authentication { + AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { + let token = match token { + Some(token) => token, + _ => return Err(missing_token()), + }; + + match db.users.find_from_token(token).unwrap() { + None => return Err(unknown_token()), + Some((user_id, device_id)) => ( + Some(user_id), + Some(Box::::from(device_id)), + None, + false, + ), + } + } + AuthScheme::ServerSignatures => { + let TypedHeader(Authorization(x_matrix)) = + TypedHeader::>::from_request(req) + .await + .map_err(|e| { + warn!("Missing or invalid Authorization header: {}", e); + forbidden() + })?; + + let origin_signatures = BTreeMap::from_iter([( + x_matrix.key.clone(), + CanonicalJsonValue::String(x_matrix.sig), + )]); + + let signatures = BTreeMap::from_iter([( + x_matrix.origin.as_str().to_owned(), + CanonicalJsonValue::Object(origin_signatures), + )]); + + let mut request_map = BTreeMap::from_iter([ + ( + "method".to_owned(), + CanonicalJsonValue::String(req.method().to_string()), + ), + ( + "uri".to_owned(), + CanonicalJsonValue::String(req.uri().to_string()), + ), + ( + "origin".to_owned(), + CanonicalJsonValue::String(x_matrix.origin.as_str().to_owned()), + ), + ( + "destination".to_owned(), + CanonicalJsonValue::String( + db.globals.server_name().as_str().to_owned(), + ), + ), + ( + "signatures".to_owned(), + CanonicalJsonValue::Object(signatures), + ), + ]); + + if let Some(json_body) = &json_body { + request_map.insert("content".to_owned(), json_body.clone()); + }; + + let keys_result = server_server::fetch_signing_keys( + &db, + &x_matrix.origin, + vec![x_matrix.key.to_owned()], + ) + .await; + + let keys = match keys_result { + Ok(b) => b, + Err(e) => { + warn!("Failed to fetch signing keys: {}", e); + return Err(forbidden()); + } + }; + + let pub_key_map = + BTreeMap::from_iter([(x_matrix.origin.as_str().to_owned(), keys)]); + + match ruma::signatures::verify_json(&pub_key_map, &request_map) { + Ok(()) => (None, None, Some(x_matrix.origin), false), + Err(e) => { + warn!( + "Failed to verify json request from {}: {}\n{:?}", + x_matrix.origin, e, request_map + ); + + if req.uri().to_string().contains('@') { + warn!( + "Request uri contained '@' character. 
Make sure your \ + reverse proxy gives Conduit the raw uri (apache: use \ + nocanon)" + ); + } + + return Err(forbidden()); + } + } + } + AuthScheme::None => (None, None, None, false), + } + }; + + let mut http_request = http::Request::builder().uri(req.uri()).method(req.method()); + *http_request.headers_mut().unwrap() = + req.headers().expect("Headers already extracted").clone(); + + if let Some(CanonicalJsonValue::Object(json_body)) = &mut json_body { + let user_id = sender_user.clone().unwrap_or_else(|| { + UserId::parse_with_server_name("", db.globals.server_name()) + .expect("we know this is valid") + }); + + let uiaa_request = json_body + .get("auth") + .and_then(|auth| auth.as_object()) + .and_then(|auth| auth.get("session")) + .and_then(|session| session.as_str()) + .and_then(|session| { + db.uiaa.get_uiaa_request( + &user_id, + &sender_device.clone().unwrap_or_else(|| "".into()), + session, + ) + }); + + if let Some(CanonicalJsonValue::Object(initial_request)) = uiaa_request { + for (key, value) in initial_request { + json_body.entry(key).or_insert(value); + } + } + + let mut buf = BytesMut::new().writer(); + serde_json::to_writer(&mut buf, json_body).expect("value serialization can't fail"); + body = buf.into_inner().freeze(); + } + + let http_request = http_request.body(&*body).unwrap(); + + debug!("{:?}", http_request); + + let body = + ::try_from_http_request(http_request).map_err(|e| { + warn!("{:?}", e); + bad_json() + })?; + + Ok(Ruma { + body, + sender_user, + sender_device, + sender_servername, + from_appservice, + json_body, + }) + } +} + +fn forbidden() -> Error { + Error::BadRequest(ErrorKind::Forbidden, "Forbidden.") +} + +fn unknown_token() -> Error { + Error::BadRequest( + ErrorKind::UnknownToken { soft_logout: false }, + "Unknown token.", + ) +} + +fn missing_token() -> Error { + Error::BadRequest(ErrorKind::MissingToken, "Missing token.") +} + +fn bad_json() -> Error { + Error::BadRequest(ErrorKind::BadJson, "Bad json.") +} + +struct XMatrix { + origin: Box, + key: String, // KeyName? + sig: String, +} + +impl Credentials for XMatrix { + const SCHEME: &'static str = "X-Matrix"; + + fn decode(value: &http::HeaderValue) -> Option { + debug_assert!( + value.as_bytes().starts_with(b"X-Matrix "), + "HeaderValue to decode should start with \"X-Matrix ..\", received = {:?}", + value, + ); + + let parameters = str::from_utf8(&value.as_bytes()["X-Matrix ".len()..]) + .ok()? 
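            // The header value being parsed here looks roughly like
            //     X-Matrix origin=other.example.org,key=ed25519:abc123,sig=<base64 signature>
            // (hypothetical origin, key and signature); everything after the scheme is a
            // comma-separated list of `name=value` fields that the loop below picks apart.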
+ .trim_start(); + + let mut origin = None; + let mut key = None; + let mut sig = None; + + for entry in parameters.split_terminator(',') { + let (name, value) = entry.split_once('=')?; + + // FIXME: Catch multiple fields of the same name + match name { + "origin" => origin = Some(value.try_into().ok()?), + "key" => key = Some(value.to_owned()), + "sig" => sig = Some(value.to_owned()), + _ => warn!( + "Unexpected field `{}` in X-Matrix Authorization header", + name + ), + } + } + + Some(Self { + origin: origin?, + key: key?, + sig: sig?, + }) + } + + fn encode(&self) -> http::HeaderValue { + todo!() + } +} + +impl IntoResponse for RumaResponse +where + T: OutgoingResponse, +{ + fn into_response(self) -> Response { + match self.0.try_into_http_response::() { + Ok(res) => res.map(BytesMut::freeze).map(Full::new).into_response(), + Err(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(), + } + } +} diff --git a/src/server_server.rs b/src/server_server.rs index a39b3a53..5e6fab01 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -4,13 +4,11 @@ use crate::{ pdu::EventHash, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma, }; +use axum::{response::IntoResponse, Json}; +use futures_util::{stream::FuturesUnordered, StreamExt}; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION}; use regex::Regex; -use rocket::{ - futures::{prelude::*, stream::FuturesUnordered}, - response::content::Json, -}; use ruma::{ api::{ client::error::{Error as RumaError, ErrorKind}, @@ -72,9 +70,6 @@ use std::{ use tokio::sync::{MutexGuard, Semaphore}; use tracing::{debug, error, info, trace, warn}; -#[cfg(feature = "conduit_bin")] -use rocket::{get, post, put}; - /// Wraps either an literal IP address plus port, or a hostname plus complement /// (colon-plus-port if it was specified). /// @@ -495,10 +490,10 @@ async fn request_well_known( /// # `GET /_matrix/federation/v1/version` /// /// Get version information on this server. -#[cfg_attr(feature = "conduit_bin", get("/_matrix/federation/v1/version"))] -#[tracing::instrument(skip(db))] -pub fn get_server_version_route( +#[tracing::instrument(skip(db, _body))] +pub async fn get_server_version_route( db: DatabaseGuard, + _body: Ruma, ) -> ConduitResult { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -520,12 +515,11 @@ pub fn get_server_version_route( /// - Matrix does not support invalidating public keys, so the key returned by this will be valid /// forever. // Response type for this endpoint is Json because we need to calculate a signature for the response -#[cfg_attr(feature = "conduit_bin", get("/_matrix/key/v2/server"))] #[tracing::instrument(skip(db))] -pub fn get_server_keys_route(db: DatabaseGuard) -> Json { +pub async fn get_server_keys_route(db: DatabaseGuard) -> impl IntoResponse { if !db.globals.allow_federation() { // TODO: Use proper types - return Json("Federation is disabled.".to_owned()); + return Json("Federation is disabled.").into_response(); } let mut verify_keys: BTreeMap, VerifyKey> = BTreeMap::new(); @@ -563,7 +557,7 @@ pub fn get_server_keys_route(db: DatabaseGuard) -> Json { ) .unwrap(); - Json(serde_json::to_string(&response).expect("JSON is canonical")) + Json(response).into_response() } /// # `GET /_matrix/key/v2/server/{keyId}` @@ -572,19 +566,14 @@ pub fn get_server_keys_route(db: DatabaseGuard) -> Json { /// /// - Matrix does not support invalidating public keys, so the key returned by this will be valid /// forever. 
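/// - The `{keyId}` path segment is ignored; this deprecated variant simply
///   delegates to `get_server_keys_route`.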
-#[cfg_attr(feature = "conduit_bin", get("/_matrix/key/v2/server/<_>"))] #[tracing::instrument(skip(db))] -pub fn get_server_keys_deprecated_route(db: DatabaseGuard) -> Json { - get_server_keys_route(db) +pub async fn get_server_keys_deprecated_route(db: DatabaseGuard) -> impl IntoResponse { + get_server_keys_route(db).await } /// # `POST /_matrix/federation/v1/publicRooms` /// /// Lists the public rooms on this server. -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/federation/v1/publicRooms", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_public_rooms_filtered_route( db: DatabaseGuard, @@ -628,10 +617,6 @@ pub async fn get_public_rooms_filtered_route( /// # `GET /_matrix/federation/v1/publicRooms` /// /// Lists the public rooms on this server. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/federation/v1/publicRooms", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_public_rooms_route( db: DatabaseGuard, @@ -675,10 +660,6 @@ pub async fn get_public_rooms_route( /// # `PUT /_matrix/federation/v1/send/{txnId}` /// /// Push EDUs and PDUs to this server. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/federation/v1/send/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn send_transaction_message_route( db: DatabaseGuard, @@ -2309,12 +2290,8 @@ fn get_auth_chain_inner( /// Retrieves a single event from the server. /// /// - Only works if a user of this server is currently invited or joined the room -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/federation/v1/event/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] -pub fn get_event_route( +pub async fn get_event_route( db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { @@ -2358,12 +2335,8 @@ pub fn get_event_route( /// # `POST /_matrix/federation/v1/get_missing_events/{roomId}` /// /// Retrieves events that the sender is missing. -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/federation/v1/get_missing_events/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] -pub fn get_missing_events_route( +pub async fn get_missing_events_route( db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { @@ -2436,12 +2409,8 @@ pub fn get_missing_events_route( /// Retrieves the auth chain for a given event. /// /// - This does not include the event itself -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/federation/v1/event_auth/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] -pub fn get_event_authorization_route( +pub async fn get_event_authorization_route( db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { @@ -2490,12 +2459,8 @@ pub fn get_event_authorization_route( /// # `GET /_matrix/federation/v1/state/{roomId}` /// /// Retrieves the current state of the room. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/federation/v1/state/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] -pub fn get_room_state_route( +pub async fn get_room_state_route( db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { @@ -2555,12 +2520,8 @@ pub fn get_room_state_route( /// # `GET /_matrix/federation/v1/state_ids/{roomId}` /// /// Retrieves the current state of the room. 
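/// - Unlike `/state`, the response only carries the event IDs of the current
///   state and its auth chain (`pdu_ids` and `auth_chain_ids`), not the full events.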
-#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/federation/v1/state_ids/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] -pub fn get_room_state_ids_route( +pub async fn get_room_state_ids_route( db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { @@ -2609,12 +2570,8 @@ pub fn get_room_state_ids_route( /// # `GET /_matrix/federation/v1/make_join/{roomId}/{userId}` /// /// Creates a join template. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/federation/v1/make_join/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] -pub fn create_join_event_template_route( +pub async fn create_join_event_template_route( db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { @@ -2895,10 +2852,6 @@ async fn create_join_event( /// # `PUT /_matrix/federation/v1/send_join/{roomId}/{eventId}` /// /// Submits a signed join event. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/federation/v1/send_join/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn create_join_event_v1_route( db: DatabaseGuard, @@ -2917,10 +2870,6 @@ pub async fn create_join_event_v1_route( /// # `PUT /_matrix/federation/v2/send_join/{roomId}/{eventId}` /// /// Submits a signed join event. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/federation/v2/send_join/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn create_join_event_v2_route( db: DatabaseGuard, @@ -2939,10 +2888,6 @@ pub async fn create_join_event_v2_route( /// # `PUT /_matrix/federation/v2/invite/{roomId}/{eventId}` /// /// Invites a remote user to a room. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/federation/v2/invite/<_>/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn create_invite_route( db: DatabaseGuard, @@ -3055,12 +3000,8 @@ pub async fn create_invite_route( /// # `GET /_matrix/federation/v1/user/devices/{userId}` /// /// Gets information on all devices of the user. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/federation/v1/user/devices/<_>", data = "") -)] #[tracing::instrument(skip(db, body))] -pub fn get_devices_route( +pub async fn get_devices_route( db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { @@ -3098,12 +3039,8 @@ pub fn get_devices_route( /// # `GET /_matrix/federation/v1/query/directory` /// /// Resolve a room alias to a room id. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/federation/v1/query/directory", data = "") -)] #[tracing::instrument(skip(db, body))] -pub fn get_room_information_route( +pub async fn get_room_information_route( db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { @@ -3129,12 +3066,8 @@ pub fn get_room_information_route( /// # `GET /_matrix/federation/v1/query/profile` /// /// Gets information on a profile. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/federation/v1/query/profile", data = "") -)] #[tracing::instrument(skip(db, body))] -pub fn get_profile_information_route( +pub async fn get_profile_information_route( db: DatabaseGuard, body: Ruma>, ) -> ConduitResult { @@ -3172,10 +3105,6 @@ pub fn get_profile_information_route( /// # `POST /_matrix/federation/v1/user/keys/query` /// /// Gets devices and identity keys for the given users. -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/federation/v1/user/keys/query", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn get_keys_route( db: DatabaseGuard, @@ -3206,10 +3135,6 @@ pub async fn get_keys_route( /// # `POST /_matrix/federation/v1/user/keys/claim` /// /// Claims one-time keys. 
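/// - The request maps user IDs to device IDs to the desired key algorithm
///   (e.g. `signed_curve25519`); one-time keys are single-use, so a claimed key
///   is removed from the uploading device's pool.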
-#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/federation/v1/user/keys/claim", data = "") -)] #[tracing::instrument(skip(db, body))] pub async fn claim_keys_route( db: DatabaseGuard, diff --git a/src/utils.rs b/src/utils.rs index e2d71f4c..7142b3f0 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -3,7 +3,7 @@ use cmp::Ordering; use rand::prelude::*; use ruma::serde::{try_from_json_map, CanonicalJsonError, CanonicalJsonObject}; use std::{ - cmp, + cmp, fmt, str::FromStr, time::{SystemTime, UNIX_EPOCH}, }; @@ -140,3 +140,40 @@ pub fn deserialize_from_str< } deserializer.deserialize_str(Visitor(std::marker::PhantomData)) } + +// Copied from librustdoc: +// https://github.com/rust-lang/rust/blob/cbaeec14f90b59a91a6b0f17fc046c66fa811892/src/librustdoc/html/escape.rs + +/// Wrapper struct which will emit the HTML-escaped version of the contained +/// string when passed to a format string. +pub struct HtmlEscape<'a>(pub &'a str); + +impl<'a> fmt::Display for HtmlEscape<'a> { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + // Because the internet is always right, turns out there's not that many + // characters to escape: http://stackoverflow.com/questions/7381974 + let HtmlEscape(s) = *self; + let pile_o_bits = s; + let mut last = 0; + for (i, ch) in s.char_indices() { + let s = match ch { + '>' => ">", + '<' => "<", + '&' => "&", + '\'' => "'", + '"' => """, + _ => continue, + }; + fmt.write_str(&pile_o_bits[last..i])?; + fmt.write_str(s)?; + // NOTE: we only expect single byte characters here - which is fine as long as we + // only match single byte characters + last = i + 1; + } + + if last < s.len() { + fmt.write_str(&pile_o_bits[last..])?; + } + Ok(()) + } +} diff --git a/tests/Complement.Dockerfile b/tests/Complement.Dockerfile index f6c62fe8..22016e91 100644 --- a/tests/Complement.Dockerfile +++ b/tests/Complement.Dockerfile @@ -27,19 +27,18 @@ RUN chmod +x /workdir/caddy COPY conduit-example.toml conduit.toml ENV SERVER_NAME=localhost -ENV ROCKET_LOG=normal ENV CONDUIT_CONFIG=/workdir/conduit.toml RUN sed -i "s/port = 6167/port = 8008/g" conduit.toml RUN echo "allow_federation = true" >> conduit.toml RUN echo "allow_encryption = true" >> conduit.toml RUN echo "allow_registration = true" >> conduit.toml -RUN echo "log = \"info,rocket=info,_=off,sled=off\"" >> conduit.toml +RUN echo "log = \"info,_=off,sled=off\"" >> conduit.toml RUN sed -i "s/address = \"127.0.0.1\"/address = \"0.0.0.0\"/g" conduit.toml # Enabled Caddy auto cert generation for complement provided CA. 
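# The generated JSON config below makes Caddy listen on 8448, terminate TLS with
# certificates issued by the Complement CA mounted at /ca, and reverse-proxy all
# requests to Conduit on 127.0.0.1:8008.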
-RUN echo '{"logging":{"logs":{"default":{"level":"WARN"}}}, "apps":{"http":{"https_port":8448,"servers":{"srv0":{"listen":[":8448"],"routes":[{"match":[{"host":["your.server.name"]}],"handle":[{"handler":"subroute","routes":[{"handle":[{"handler":"reverse_proxy","upstreams":[{"dial":"127.0.0.1:8008"}]}]}]}],"terminal":true}],"tls_connection_policies": [{"match": {"sni": ["your.server.name"]}}]}}},"pki": {"certificate_authorities": {"local": {"name": "Complement CA","root": {"certificate": "/ca/ca.crt","private_key": "/ca/ca.key"},"intermediate": {"certificate": "/ca/ca.crt","private_key": "/ca/ca.key"}}}},"tls":{"automation":{"policies":[{"subjects":["your.server.name"],"issuer":{"module":"internal"},"on_demand":true},{"issuer":{"module":"internal", "ca": "local"}}]}}}}' > caddy.json - +RUN echo '{"logging":{"logs":{"default":{"level":"WARN"}}}, "apps":{"http":{"https_port":8448,"servers":{"srv0":{"listen":[":8448"],"routes":[{"match":[{"host":["your.server.name"]}],"handle":[{"handler":"subroute","routes":[{"handle":[{"handler":"reverse_proxy","upstreams":[{"dial":"127.0.0.1:8008"}]}]}]}],"terminal":true}],"tls_connection_policies": [{"match": {"sni": ["your.server.name"]}}]}}},"pki": {"certificate_authorities": {"local": {"name": "Complement CA","root": {"certificate": "/ca/ca.crt","private_key": "/ca/ca.key"},"intermediate": {"certificate": "/ca/ca.crt","private_key": "/ca/ca.key"}}}},"tls":{"automation":{"policies":[{"subjects":["your.server.name"],"issuer":{"module":"internal"},"on_demand":true},{"issuer":{"module":"internal", "ca": "local"}}]}}}}' > caddy.json + EXPOSE 8008 8448 CMD ([ -z "${COMPLEMENT_CA}" ] && echo "Error: Need Complement PKI support" && true) || \ From d1d22170199fd5e8827ad71b9cad621e129ba519 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 22 Jan 2022 13:32:21 +0100 Subject: [PATCH 184/445] Clean up error handling for server_server::get_server_keys_route --- src/server_server.rs | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index 5e6fab01..fc3681bd 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -516,10 +516,9 @@ pub async fn get_server_version_route( /// forever. 
// Response type for this endpoint is Json because we need to calculate a signature for the response #[tracing::instrument(skip(db))] -pub async fn get_server_keys_route(db: DatabaseGuard) -> impl IntoResponse { +pub async fn get_server_keys_route(db: DatabaseGuard) -> Result { if !db.globals.allow_federation() { - // TODO: Use proper types - return Json("Federation is disabled.").into_response(); + return Err(Error::bad_config("Federation is disabled.")); } let mut verify_keys: BTreeMap, VerifyKey> = BTreeMap::new(); @@ -557,7 +556,7 @@ pub async fn get_server_keys_route(db: DatabaseGuard) -> impl IntoResponse { ) .unwrap(); - Json(response).into_response() + Ok(Json(response)) } /// # `GET /_matrix/key/v2/server/{keyId}` From a5757ab1950b3e498793a14e359412851542d1ac Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 22 Jan 2022 13:51:55 +0100 Subject: [PATCH 185/445] Generalize RumaHandler --- src/main.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/main.rs b/src/main.rs index 3ab12941..8a9d2c57 100644 --- a/src/main.rs +++ b/src/main.rs @@ -12,6 +12,7 @@ use std::{future::Future, net::SocketAddr, sync::Arc, time::Duration}; use axum::{ extract::{FromRequest, MatchedPath}, handler::Handler, + response::IntoResponse, routing::{get, on, MethodFilter}, Router, }; @@ -256,10 +257,7 @@ fn routes() -> Router { .ruma_route(client_server::get_state_events_route) .ruma_route(client_server::get_state_events_for_key_route) .ruma_route(client_server::get_state_events_for_empty_key_route) - .route( - "/_matrix/client/r0/sync", - get(client_server::sync_events_route), - ) + .ruma_route(client_server::sync_events_route) .ruma_route(client_server::get_context_route) .ruma_route(client_server::get_message_events_route) .ruma_route(client_server::search_events_route) @@ -375,14 +373,16 @@ macro_rules! impl_ruma_handler { ( $($ty:ident),* $(,)? 
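        // The error type of the handler future is now any `E: IntoResponse` rather
        // than the concrete `Error`, presumably so handlers with a different failure
        // type (such as `sync_events_route`, switched over to `ruma_route` in this
        // same patch) fit the same trait.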
) => { #[axum::async_trait] #[allow(non_snake_case)] - impl RumaHandler<($($ty,)* Ruma,)> for F + impl RumaHandler<($($ty,)* Ruma,)> for F where Req: Outgoing, Req::Incoming: IncomingRequest + Send, F: FnOnce($($ty,)* Ruma) -> Fut + Clone + Send + 'static, - Fut: Future::OutgoingResponse + Fut: Future::OutgoingResponse>, + E, >> + Send, + E: IntoResponse, $( $ty: FromRequest + Send, )* { const METADATA: Metadata = Req::Incoming::METADATA; From 7bf538f5498fa8affa80700ec7e91c5266e9961b Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 22 Jan 2022 14:45:12 +0100 Subject: [PATCH 186/445] Fix axum route conflicts --- src/main.rs | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/src/main.rs b/src/main.rs index 8a9d2c57..46df5d6e 100644 --- a/src/main.rs +++ b/src/main.rs @@ -253,10 +253,15 @@ fn routes() -> Router { .ruma_route(client_server::get_protocols_route) .ruma_route(client_server::send_message_event_route) .ruma_route(client_server::send_state_event_for_key_route) - .ruma_route(client_server::send_state_event_for_empty_key_route) .ruma_route(client_server::get_state_events_route) .ruma_route(client_server::get_state_events_for_key_route) - .ruma_route(client_server::get_state_events_for_empty_key_route) + // Ruma doesn't have support for multiple paths for a single endpoint yet, and these routes + // share one Ruma request / response type pair with {get,send}_state_event_for_key_route + .route( + "/_matrix/client/r0/rooms/:room_id/state/:event_type", + get(client_server::get_state_events_for_empty_key_route) + .put(client_server::send_state_event_for_empty_key_route), + ) .ruma_route(client_server::sync_events_route) .ruma_route(client_server::get_context_route) .ruma_route(client_server::get_message_events_route) From 77a87881c913bca384403e377239c95b2dfa19f5 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 22 Jan 2022 15:03:58 +0100 Subject: [PATCH 187/445] Add message to unsupported HTTP method panic --- src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main.rs b/src/main.rs index 46df5d6e..53b18255 100644 --- a/src/main.rs +++ b/src/main.rs @@ -363,7 +363,7 @@ impl RouterExt for Router { Method::POST => MethodFilter::POST, Method::PUT => MethodFilter::PUT, Method::TRACE => MethodFilter::TRACE, - _ => panic!(""), + m => panic!("Unsupported HTTP method: {:?}", m), }; self.route(meta.path, on(method_filter, handler)) From 5fa9190117805ff1040c69b65a3b9caacb6c965b Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 22 Jan 2022 16:58:32 +0100 Subject: [PATCH 188/445] Simplify return type of most route handlers --- src/client_server/account.rs | 34 +++++------ src/client_server/alias.rs | 21 ++++--- src/client_server/backup.rs | 66 +++++++++----------- src/client_server/capabilities.rs | 6 +- src/client_server/config.rs | 18 +++--- src/client_server/context.rs | 6 +- src/client_server/device.rs | 22 +++---- src/client_server/directory.rs | 29 ++++----- src/client_server/filter.rs | 12 ++-- src/client_server/keys.rs | 28 ++++----- src/client_server/media.rs | 33 +++++----- src/client_server/membership.rs | 55 ++++++++--------- src/client_server/message.rs | 12 ++-- src/client_server/presence.rs | 11 ++-- src/client_server/profile.rs | 34 +++++------ src/client_server/push.rs | 45 +++++++------- src/client_server/read_marker.rs | 10 +-- src/client_server/redact.rs | 6 +- src/client_server/report.rs | 6 +- src/client_server/room.rs | 21 +++---- src/client_server/search.rs | 7 +-- src/client_server/session.rs | 26 ++++---- 
src/client_server/state.rs | 20 +++--- src/client_server/sync.rs | 8 +-- src/client_server/tag.rs | 15 +++-- src/client_server/thirdparty.rs | 7 +-- src/client_server/to_device.rs | 6 +- src/client_server/typing.rs | 6 +- src/client_server/unversioned.rs | 6 +- src/client_server/user_directory.rs | 6 +- src/client_server/voip.rs | 10 ++- src/database/abstraction/rocksdb.rs | 6 +- src/database/globals.rs | 6 +- src/database/uiaa.rs | 7 ++- src/lib.rs | 2 +- src/main.rs | 62 ++++++++++--------- src/ruma_wrapper.rs | 3 - src/server_server.rs | 94 ++++++++++++----------------- 38 files changed, 358 insertions(+), 414 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index bf1a74dd..2b2e6e65 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -4,7 +4,7 @@ use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; use crate::{ database::{admin::make_user_admin, DatabaseGuard}, pdu::PduBuilder, - utils, ConduitResult, Error, Ruma, + utils, Error, Result, Ruma, }; use ruma::{ api::client::{ @@ -44,7 +44,7 @@ const GUEST_NAME_LENGTH: usize = 10; pub async fn get_register_available_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { // Validate user id let user_id = UserId::parse_with_server_name(body.username.to_lowercase(), db.globals.server_name()) @@ -68,7 +68,7 @@ pub async fn get_register_available_route( // TODO add check for appservice namespaces // If no if check is true we have an username that's available to be used. - Ok(get_username_availability::Response { available: true }.into()) + Ok(get_username_availability::Response { available: true }) } /// # `POST /_matrix/client/r0/register` @@ -88,7 +88,7 @@ pub async fn get_register_available_route( pub async fn register_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if !db.globals.allow_registration() && !body.from_appservice { return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -212,8 +212,7 @@ pub async fn register_route( access_token: None, user_id, device_id: None, - } - .into()); + }); } // Generate new device id if the user didn't specify one @@ -251,8 +250,7 @@ pub async fn register_route( access_token: Some(token), user_id, device_id: Some(device_id), - } - .into()) + }) } /// # `POST /_matrix/client/r0/account/password` @@ -273,7 +271,7 @@ pub async fn register_route( pub async fn change_password_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -326,7 +324,7 @@ pub async fn change_password_route( db.flush()?; - Ok(change_password::Response {}.into()) + Ok(change_password::Response {}) } /// # `GET _matrix/client/r0/account/whoami` @@ -335,12 +333,11 @@ pub async fn change_password_route( /// /// Note: Also works for Application Services #[tracing::instrument(skip(body))] -pub async fn whoami_route(body: Ruma) -> ConduitResult { +pub async fn whoami_route(body: Ruma) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(whoami::Response { user_id: sender_user.clone(), - } - .into()) + }) } /// # `POST /_matrix/client/r0/account/deactivate` @@ -357,7 +354,7 @@ pub async fn whoami_route(body: Ruma) -> ConduitResult>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user 
is authenticated"); @@ -452,8 +449,7 @@ pub async fn deactivate_route( Ok(deactivate::Response { id_server_unbind_result: ThirdPartyIdRemovalStatus::NoSupport, - } - .into()) + }) } /// # `GET _matrix/client/r0/account/3pid` @@ -461,10 +457,8 @@ pub async fn deactivate_route( /// Get a list of third party identifiers associated with this account. /// /// - Currently always returns empty list -pub async fn third_party_route( - body: Ruma, -) -> ConduitResult { +pub async fn third_party_route(body: Ruma) -> Result { let _sender_user = body.sender_user.as_ref().expect("user is authenticated"); - Ok(get_3pids::Response::new(Vec::new()).into()) + Ok(get_3pids::Response::new(Vec::new())) } diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index 6e1b43e8..eecd72a4 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Ruma}; +use crate::{database::DatabaseGuard, Database, Error, Result, Ruma}; use regex::Regex; use ruma::{ api::{ @@ -19,7 +19,7 @@ use ruma::{ pub async fn create_alias_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if body.room_alias.server_name() != db.globals.server_name() { return Err(Error::BadRequest( ErrorKind::InvalidParam, @@ -36,7 +36,7 @@ pub async fn create_alias_route( db.flush()?; - Ok(create_alias::Response::new().into()) + Ok(create_alias::Response::new()) } /// # `DELETE /_matrix/client/r0/directory/room/{roomAlias}` @@ -49,7 +49,7 @@ pub async fn create_alias_route( pub async fn delete_alias_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if body.room_alias.server_name() != db.globals.server_name() { return Err(Error::BadRequest( ErrorKind::InvalidParam, @@ -63,7 +63,7 @@ pub async fn delete_alias_route( db.flush()?; - Ok(delete_alias::Response::new().into()) + Ok(delete_alias::Response::new()) } /// # `GET /_matrix/client/r0/directory/room/{roomAlias}` @@ -75,14 +75,14 @@ pub async fn delete_alias_route( pub async fn get_alias_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { get_alias_helper(&db, &body.room_alias).await } pub(crate) async fn get_alias_helper( db: &Database, room_alias: &RoomAliasId, -) -> ConduitResult { +) -> Result { if room_alias.server_name() != db.globals.server_name() { let response = db .sending @@ -93,7 +93,7 @@ pub(crate) async fn get_alias_helper( ) .await?; - return Ok(get_alias::Response::new(response.room_id, response.servers).into()); + return Ok(get_alias::Response::new(response.room_id, response.servers)); } let mut room_id = None; @@ -144,5 +144,8 @@ pub(crate) async fn get_alias_helper( } }; - Ok(get_alias::Response::new(room_id, vec![db.globals.server_name().to_owned()]).into()) + Ok(get_alias::Response::new( + room_id, + vec![db.globals.server_name().to_owned()], + )) } diff --git a/src/client_server/backup.rs b/src/client_server/backup.rs index cc2d7c46..acff437e 100644 --- a/src/client_server/backup.rs +++ b/src/client_server/backup.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; +use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::api::client::{ error::ErrorKind, r0::backup::{ @@ -16,7 +16,7 @@ use ruma::api::client::{ pub async fn create_backup_route( db: DatabaseGuard, body: Ruma, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let version = db .key_backups @@ -24,7 +24,7 @@ pub async fn 
create_backup_route( db.flush()?; - Ok(create_backup::Response { version }.into()) + Ok(create_backup::Response { version }) } /// # `PUT /_matrix/client/r0/room_keys/version/{version}` @@ -34,14 +34,14 @@ pub async fn create_backup_route( pub async fn update_backup_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups .update_backup(sender_user, &body.version, &body.algorithm, &db.globals)?; db.flush()?; - Ok(update_backup::Response {}.into()) + Ok(update_backup::Response {}) } /// # `GET /_matrix/client/r0/room_keys/version` @@ -51,7 +51,7 @@ pub async fn update_backup_route( pub async fn get_latest_backup_route( db: DatabaseGuard, body: Ruma, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let (version, algorithm) = @@ -67,8 +67,7 @@ pub async fn get_latest_backup_route( count: (db.key_backups.count_keys(sender_user, &version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &version)?, version, - } - .into()) + }) } /// # `GET /_matrix/client/r0/room_keys/version` @@ -78,7 +77,7 @@ pub async fn get_latest_backup_route( pub async fn get_backup_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let algorithm = db .key_backups @@ -93,8 +92,7 @@ pub async fn get_backup_route( count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, version: body.version.to_owned(), - } - .into()) + }) } /// # `DELETE /_matrix/client/r0/room_keys/version/{version}` @@ -106,14 +104,14 @@ pub async fn get_backup_route( pub async fn delete_backup_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups.delete_backup(sender_user, &body.version)?; db.flush()?; - Ok(delete_backup::Response {}.into()) + Ok(delete_backup::Response {}) } /// # `PUT /_matrix/client/r0/room_keys/keys` @@ -127,7 +125,7 @@ pub async fn delete_backup_route( pub async fn add_backup_keys_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if Some(&body.version) @@ -160,8 +158,7 @@ pub async fn add_backup_keys_route( Ok(add_backup_keys::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, - } - .into()) + }) } /// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}` @@ -175,7 +172,7 @@ pub async fn add_backup_keys_route( pub async fn add_backup_key_sessions_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if Some(&body.version) @@ -206,8 +203,7 @@ pub async fn add_backup_key_sessions_route( Ok(add_backup_key_sessions::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? 
as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, - } - .into()) + }) } /// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` @@ -221,7 +217,7 @@ pub async fn add_backup_key_sessions_route( pub async fn add_backup_key_session_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if Some(&body.version) @@ -250,8 +246,7 @@ pub async fn add_backup_key_session_route( Ok(add_backup_key_session::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, - } - .into()) + }) } /// # `GET /_matrix/client/r0/room_keys/keys` @@ -261,12 +256,12 @@ pub async fn add_backup_key_session_route( pub async fn get_backup_keys_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let rooms = db.key_backups.get_all(sender_user, &body.version)?; - Ok(get_backup_keys::Response { rooms }.into()) + Ok(get_backup_keys::Response { rooms }) } /// # `GET /_matrix/client/r0/room_keys/keys/{roomId}` @@ -276,14 +271,14 @@ pub async fn get_backup_keys_route( pub async fn get_backup_key_sessions_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sessions = db .key_backups .get_room(sender_user, &body.version, &body.room_id)?; - Ok(get_backup_key_sessions::Response { sessions }.into()) + Ok(get_backup_key_sessions::Response { sessions }) } /// # `GET /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` @@ -293,7 +288,7 @@ pub async fn get_backup_key_sessions_route( pub async fn get_backup_key_session_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let key_data = db @@ -304,7 +299,7 @@ pub async fn get_backup_key_session_route( "Backup key not found for this user's session.", ))?; - Ok(get_backup_key_session::Response { key_data }.into()) + Ok(get_backup_key_session::Response { key_data }) } /// # `DELETE /_matrix/client/r0/room_keys/keys` @@ -314,7 +309,7 @@ pub async fn get_backup_key_session_route( pub async fn delete_backup_keys_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups.delete_all_keys(sender_user, &body.version)?; @@ -324,8 +319,7 @@ pub async fn delete_backup_keys_route( Ok(delete_backup_keys::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, - } - .into()) + }) } /// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}` @@ -335,7 +329,7 @@ pub async fn delete_backup_keys_route( pub async fn delete_backup_key_sessions_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups @@ -346,8 +340,7 @@ pub async fn delete_backup_key_sessions_route( Ok(delete_backup_key_sessions::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? 
as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, - } - .into()) + }) } /// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` @@ -357,7 +350,7 @@ pub async fn delete_backup_key_sessions_route( pub async fn delete_backup_key_session_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups @@ -368,6 +361,5 @@ pub async fn delete_backup_key_session_route( Ok(delete_backup_key_session::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, - } - .into()) + }) } diff --git a/src/client_server/capabilities.rs b/src/client_server/capabilities.rs index 8da6855b..3f779dc3 100644 --- a/src/client_server/capabilities.rs +++ b/src/client_server/capabilities.rs @@ -1,4 +1,4 @@ -use crate::{ConduitResult, Ruma}; +use crate::{Result, Ruma}; use ruma::{ api::client::r0::capabilities::{ get_capabilities, Capabilities, RoomVersionStability, RoomVersionsCapability, @@ -13,7 +13,7 @@ use std::collections::BTreeMap; #[tracing::instrument(skip(_body))] pub async fn get_capabilities_route( _body: Ruma, -) -> ConduitResult { +) -> Result { let mut available = BTreeMap::new(); available.insert(RoomVersionId::V5, RoomVersionStability::Stable); available.insert(RoomVersionId::V6, RoomVersionStability::Stable); @@ -24,5 +24,5 @@ pub async fn get_capabilities_route( available, }; - Ok(get_capabilities::Response { capabilities }.into()) + Ok(get_capabilities::Response { capabilities }) } diff --git a/src/client_server/config.rs b/src/client_server/config.rs index 0df0decf..14a665eb 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; +use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::{ api::client::{ error::ErrorKind, @@ -20,7 +20,7 @@ use serde_json::{json, value::RawValue as RawJsonValue}; pub async fn set_global_account_data_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let data: serde_json::Value = serde_json::from_str(body.data.get()) @@ -41,7 +41,7 @@ pub async fn set_global_account_data_route( db.flush()?; - Ok(set_global_account_data::Response {}.into()) + Ok(set_global_account_data::Response {}) } /// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}` @@ -51,7 +51,7 @@ pub async fn set_global_account_data_route( pub async fn set_room_account_data_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let data: serde_json::Value = serde_json::from_str(body.data.get()) @@ -72,7 +72,7 @@ pub async fn set_room_account_data_route( db.flush()?; - Ok(set_room_account_data::Response {}.into()) + Ok(set_room_account_data::Response {}) } /// # `GET /_matrix/client/r0/user/{userId}/account_data/{type}` @@ -82,7 +82,7 @@ pub async fn set_room_account_data_route( pub async fn get_global_account_data_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let event: Box = db @@ -94,7 +94,7 @@ pub async fn get_global_account_data_route( .map_err(|_| Error::bad_database("Invalid account data event in db."))? 
.content; - Ok(get_global_account_data::Response { account_data }.into()) + Ok(get_global_account_data::Response { account_data }) } /// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}` @@ -104,7 +104,7 @@ pub async fn get_global_account_data_route( pub async fn get_room_account_data_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let event: Box = db @@ -120,7 +120,7 @@ pub async fn get_room_account_data_route( .map_err(|_| Error::bad_database("Invalid account data event in db."))? .content; - Ok(get_room_account_data::Response { account_data }.into()) + Ok(get_room_account_data::Response { account_data }) } #[derive(Deserialize)] diff --git a/src/client_server/context.rs b/src/client_server/context.rs index 1fbfee99..3d884e07 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; +use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::{ api::client::{ error::ErrorKind, @@ -19,7 +19,7 @@ use tracing::error; pub async fn get_context_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -191,5 +191,5 @@ pub async fn get_context_route( state, }; - Ok(resp.into()) + Ok(resp) } diff --git a/src/client_server/device.rs b/src/client_server/device.rs index 82d11682..e35da978 100644 --- a/src/client_server/device.rs +++ b/src/client_server/device.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, utils, ConduitResult, Error, Ruma}; +use crate::{database::DatabaseGuard, utils, Error, Result, Ruma}; use ruma::api::client::{ error::ErrorKind, r0::{ @@ -16,7 +16,7 @@ use super::SESSION_ID_LENGTH; pub async fn get_devices_route( db: DatabaseGuard, body: Ruma, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let devices: Vec = db @@ -25,7 +25,7 @@ pub async fn get_devices_route( .filter_map(|r| r.ok()) // Filter out buggy devices .collect(); - Ok(get_devices::Response { devices }.into()) + Ok(get_devices::Response { devices }) } /// # `GET /_matrix/client/r0/devices/{deviceId}` @@ -35,7 +35,7 @@ pub async fn get_devices_route( pub async fn get_device_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let device = db @@ -43,7 +43,7 @@ pub async fn get_device_route( .get_device_metadata(sender_user, &body.body.device_id)? 
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?; - Ok(get_device::Response { device }.into()) + Ok(get_device::Response { device }) } /// # `PUT /_matrix/client/r0/devices/{deviceId}` @@ -53,7 +53,7 @@ pub async fn get_device_route( pub async fn update_device_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut device = db @@ -68,7 +68,7 @@ pub async fn update_device_route( db.flush()?; - Ok(update_device::Response {}.into()) + Ok(update_device::Response {}) } /// # `DELETE /_matrix/client/r0/devices/{deviceId}` @@ -84,7 +84,7 @@ pub async fn update_device_route( pub async fn delete_device_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -125,7 +125,7 @@ pub async fn delete_device_route( db.flush()?; - Ok(delete_device::Response {}.into()) + Ok(delete_device::Response {}) } /// # `PUT /_matrix/client/r0/devices/{deviceId}` @@ -143,7 +143,7 @@ pub async fn delete_device_route( pub async fn delete_devices_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -186,5 +186,5 @@ pub async fn delete_devices_route( db.flush()?; - Ok(delete_devices::Response {}.into()) + Ok(delete_devices::Response {}) } diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 06d7a270..0f3ae306 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Result, Ruma}; +use crate::{database::DatabaseGuard, Database, Error, Result, Ruma}; use ruma::{ api::{ client::{ @@ -38,7 +38,7 @@ use tracing::{info, warn}; pub async fn get_public_rooms_filtered_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { get_public_rooms_filtered_helper( &db, body.server.as_deref(), @@ -59,7 +59,7 @@ pub async fn get_public_rooms_filtered_route( pub async fn get_public_rooms_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let response = get_public_rooms_filtered_helper( &db, body.server.as_deref(), @@ -68,16 +68,14 @@ pub async fn get_public_rooms_route( &IncomingFilter::default(), &IncomingRoomNetwork::Matrix, ) - .await? 
- .0; + .await?; Ok(get_public_rooms::Response { chunk: response.chunk, prev_batch: response.prev_batch, next_batch: response.next_batch, total_room_count_estimate: response.total_room_count_estimate, - } - .into()) + }) } /// # `PUT /_matrix/client/r0/directory/list/room/{roomId}` @@ -89,7 +87,7 @@ pub async fn get_public_rooms_route( pub async fn set_room_visibility_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); match &body.visibility { @@ -108,7 +106,7 @@ pub async fn set_room_visibility_route( db.flush()?; - Ok(set_room_visibility::Response {}.into()) + Ok(set_room_visibility::Response {}) } /// # `GET /_matrix/client/r0/directory/list/room/{roomId}` @@ -118,15 +116,14 @@ pub async fn set_room_visibility_route( pub async fn get_room_visibility_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { Ok(get_room_visibility::Response { visibility: if db.rooms.is_public_room(&body.room_id)? { room::Visibility::Public } else { room::Visibility::Private }, - } - .into()) + }) } pub(crate) async fn get_public_rooms_filtered_helper( @@ -136,7 +133,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( since: Option<&str>, filter: &IncomingFilter, _network: &IncomingRoomNetwork, -) -> ConduitResult { +) -> Result { if let Some(other_server) = server.filter(|server| *server != db.globals.server_name().as_str()) { let response = db @@ -172,8 +169,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( prev_batch: response.prev_batch, next_batch: response.next_batch, total_room_count_estimate: response.total_room_count_estimate, - } - .into()); + }); } let limit = limit.map_or(10, u64::from); @@ -353,6 +349,5 @@ pub(crate) async fn get_public_rooms_filtered_helper( prev_batch, next_batch, total_room_count_estimate: Some(total_room_count_estimate), - } - .into()) + }) } diff --git a/src/client_server/filter.rs b/src/client_server/filter.rs index 6c42edd3..28610ec0 100644 --- a/src/client_server/filter.rs +++ b/src/client_server/filter.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; +use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::api::client::{ error::ErrorKind, r0::filter::{create_filter, get_filter}, @@ -13,14 +13,14 @@ use ruma::api::client::{ pub async fn get_filter_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let filter = match db.users.get_filter(sender_user, &body.filter_id)? 
{ Some(filter) => filter, None => return Err(Error::BadRequest(ErrorKind::NotFound, "Filter not found.")), }; - Ok(get_filter::Response::new(filter).into()) + Ok(get_filter::Response::new(filter)) } /// # `PUT /_matrix/client/r0/user/{userId}/filter` @@ -30,7 +30,9 @@ pub async fn get_filter_route( pub async fn create_filter_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - Ok(create_filter::Response::new(db.users.create_filter(sender_user, &body.filter)?).into()) + Ok(create_filter::Response::new( + db.users.create_filter(sender_user, &body.filter)?, + )) } diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index 9a7a4e7f..d272ff41 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -1,5 +1,5 @@ use super::SESSION_ID_LENGTH; -use crate::{database::DatabaseGuard, utils, ConduitResult, Database, Error, Result, Ruma}; +use crate::{database::DatabaseGuard, utils, Database, Error, Result, Ruma}; use futures_util::{stream::FuturesUnordered, StreamExt}; use ruma::{ api::{ @@ -31,7 +31,7 @@ use std::collections::{BTreeMap, HashMap, HashSet}; pub async fn upload_keys_route( db: DatabaseGuard, body: Ruma, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -62,8 +62,7 @@ pub async fn upload_keys_route( Ok(upload_keys::Response { one_time_key_counts: db.users.count_one_time_keys(sender_user, sender_device)?, - } - .into()) + }) } /// # `POST /_matrix/client/r0/keys/query` @@ -77,7 +76,7 @@ pub async fn upload_keys_route( pub async fn get_keys_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let response = get_keys_helper( @@ -88,7 +87,7 @@ pub async fn get_keys_route( ) .await?; - Ok(response.into()) + Ok(response) } /// # `POST /_matrix/client/r0/keys/claim` @@ -98,12 +97,12 @@ pub async fn get_keys_route( pub async fn claim_keys_route( db: DatabaseGuard, body: Ruma, -) -> ConduitResult { +) -> Result { let response = claim_keys_helper(&body.one_time_keys, &db).await?; db.flush()?; - Ok(response.into()) + Ok(response) } /// # `POST /_matrix/client/r0/keys/device_signing/upload` @@ -115,7 +114,7 @@ pub async fn claim_keys_route( pub async fn upload_signing_keys_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -165,7 +164,7 @@ pub async fn upload_signing_keys_route( db.flush()?; - Ok(upload_signing_keys::Response {}.into()) + Ok(upload_signing_keys::Response {}) } /// # `POST /_matrix/client/r0/keys/signatures/upload` @@ -175,7 +174,7 @@ pub async fn upload_signing_keys_route( pub async fn upload_signatures_route( db: DatabaseGuard, body: Ruma, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); for (user_id, signed_keys) in &body.signed_keys { @@ -225,7 +224,7 @@ pub async fn upload_signatures_route( db.flush()?; - Ok(upload_signatures::Response {}.into()) + Ok(upload_signatures::Response {}) } /// # `POST /_matrix/client/r0/keys/changes` @@ -237,7 +236,7 @@ pub async fn upload_signatures_route( pub async fn get_key_changes_route( db: DatabaseGuard, body: Ruma>, -) -> 
ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut device_list_updates = HashSet::new(); @@ -276,8 +275,7 @@ pub async fn get_key_changes_route( Ok(get_key_changes::Response { changed: device_list_updates.into_iter().collect(), left: Vec::new(), // TODO - } - .into()) + }) } pub(crate) async fn get_keys_helper bool>( diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 5eba17bc..615f7602 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -1,6 +1,6 @@ use crate::{ database::{media::FileMeta, DatabaseGuard}, - utils, ConduitResult, Error, Ruma, + utils, Error, Result, Ruma, }; use ruma::api::client::{ error::ErrorKind, @@ -19,11 +19,10 @@ const MXC_LENGTH: usize = 32; pub async fn get_media_config_route( db: DatabaseGuard, _body: Ruma, -) -> ConduitResult { +) -> Result { Ok(get_media_config::Response { upload_size: db.globals.max_request_size().into(), - } - .into()) + }) } /// # `POST /_matrix/media/r0/upload` @@ -36,7 +35,7 @@ pub async fn get_media_config_route( pub async fn create_content_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let mxc = format!( "mxc://{}/{}", db.globals.server_name(), @@ -62,8 +61,7 @@ pub async fn create_content_route( Ok(create_content::Response { content_uri: mxc.try_into().expect("Invalid mxc:// URI"), blurhash: None, - } - .into()) + }) } pub async fn get_remote_content( @@ -107,7 +105,7 @@ pub async fn get_remote_content( pub async fn get_content_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); if let Some(FileMeta { @@ -120,12 +118,11 @@ pub async fn get_content_route( file, content_type, content_disposition, - } - .into()) + }) } else if &*body.server_name != db.globals.server_name() && body.allow_remote { let remote_content_response = get_remote_content(&db, &mxc, &body.server_name, &body.media_id).await?; - Ok(remote_content_response.into()) + Ok(remote_content_response) } else { Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) } @@ -140,7 +137,7 @@ pub async fn get_content_route( pub async fn get_content_as_filename_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); if let Some(FileMeta { @@ -153,8 +150,7 @@ pub async fn get_content_as_filename_route( file, content_type, content_disposition: Some(format!("inline; filename={}", body.filename)), - } - .into()) + }) } else if &*body.server_name != db.globals.server_name() && body.allow_remote { let remote_content_response = get_remote_content(&db, &mxc, &body.server_name, &body.media_id).await?; @@ -163,8 +159,7 @@ pub async fn get_content_as_filename_route( content_disposition: Some(format!("inline: filename={}", body.filename)), content_type: remote_content_response.content_type, file: remote_content_response.file, - } - .into()) + }) } else { Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) } @@ -179,7 +174,7 @@ pub async fn get_content_as_filename_route( pub async fn get_content_thumbnail_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); if let Some(FileMeta { @@ -198,7 +193,7 @@ pub async fn get_content_thumbnail_route( ) .await? 
{ - Ok(get_content_thumbnail::Response { file, content_type }.into()) + Ok(get_content_thumbnail::Response { file, content_type }) } else if &*body.server_name != db.globals.server_name() && body.allow_remote { let get_thumbnail_response = db .sending @@ -228,7 +223,7 @@ pub async fn get_content_thumbnail_route( ) .await?; - Ok(get_thumbnail_response.into()) + Ok(get_thumbnail_response) } else { Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) } diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index c16065ef..efdf7746 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -2,7 +2,7 @@ use crate::{ client_server, database::DatabaseGuard, pdu::{EventHash, PduBuilder, PduEvent}, - server_server, utils, ConduitResult, Database, Error, Result, Ruma, + server_server, utils, Database, Error, Result, Ruma, }; use ruma::{ api::{ @@ -46,7 +46,7 @@ use tracing::{debug, error, warn}; pub async fn join_room_by_id_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut servers: HashSet<_> = db @@ -87,7 +87,7 @@ pub async fn join_room_by_id_route( pub async fn join_room_by_id_or_alias_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_deref().expect("user is authenticated"); let body = body.body; @@ -111,7 +111,7 @@ pub async fn join_room_by_id_or_alias_route( Err(room_alias) => { let response = client_server::get_alias_helper(&db, &room_alias).await?; - (response.0.servers.into_iter().collect(), response.0.room_id) + (response.servers.into_iter().collect(), response.room_id) } }; @@ -127,9 +127,8 @@ pub async fn join_room_by_id_or_alias_route( db.flush()?; Ok(join_room_by_id_or_alias::Response { - room_id: join_room_response.0.room_id, - } - .into()) + room_id: join_room_response.room_id, + }) } /// # `POST /_matrix/client/r0/rooms/{roomId}/leave` @@ -141,14 +140,14 @@ pub async fn join_room_by_id_or_alias_route( pub async fn leave_room_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.rooms.leave_room(sender_user, &body.room_id, &db).await?; db.flush()?; - Ok(leave_room::Response::new().into()) + Ok(leave_room::Response::new()) } /// # `POST /_matrix/client/r0/rooms/{roomId}/invite` @@ -158,13 +157,13 @@ pub async fn leave_room_route( pub async fn invite_user_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if let invite_user::IncomingInvitationRecipient::UserId { user_id } = &body.recipient { invite_helper(sender_user, user_id, &body.room_id, &db, false).await?; db.flush()?; - Ok(invite_user::Response {}.into()) + Ok(invite_user::Response {}) } else { Err(Error::BadRequest(ErrorKind::NotFound, "User not found.")) } @@ -177,7 +176,7 @@ pub async fn invite_user_route( pub async fn kick_user_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut event: RoomMemberEventContent = serde_json::from_str( @@ -227,7 +226,7 @@ pub async fn kick_user_route( db.flush()?; - Ok(kick_user::Response::new().into()) + Ok(kick_user::Response::new()) } /// # `POST /_matrix/client/r0/rooms/{roomId}/ban` @@ -237,7 +236,7 @@ pub async fn kick_user_route( pub async fn 
ban_user_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // TODO: reason @@ -298,7 +297,7 @@ pub async fn ban_user_route( db.flush()?; - Ok(ban_user::Response::new().into()) + Ok(ban_user::Response::new()) } /// # `POST /_matrix/client/r0/rooms/{roomId}/unban` @@ -308,7 +307,7 @@ pub async fn ban_user_route( pub async fn unban_user_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut event: RoomMemberEventContent = serde_json::from_str( @@ -357,7 +356,7 @@ pub async fn unban_user_route( db.flush()?; - Ok(unban_user::Response::new().into()) + Ok(unban_user::Response::new()) } /// # `POST /_matrix/client/r0/rooms/{roomId}/forget` @@ -372,14 +371,14 @@ pub async fn unban_user_route( pub async fn forget_room_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.rooms.forget(&body.room_id, sender_user)?; db.flush()?; - Ok(forget_room::Response::new().into()) + Ok(forget_room::Response::new()) } /// # `POST /_matrix/client/r0/joined_rooms` @@ -389,7 +388,7 @@ pub async fn forget_room_route( pub async fn joined_rooms_route( db: DatabaseGuard, body: Ruma, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(joined_rooms::Response { @@ -398,8 +397,7 @@ pub async fn joined_rooms_route( .rooms_joined(sender_user) .filter_map(|r| r.ok()) .collect(), - } - .into()) + }) } /// # `POST /_matrix/client/r0/rooms/{roomId}/members` @@ -411,7 +409,7 @@ pub async fn joined_rooms_route( pub async fn get_member_events_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // TODO: check history visibility? @@ -430,8 +428,7 @@ pub async fn get_member_events_route( .filter(|(key, _)| key.0 == EventType::RoomMember) .map(|(_, pdu)| pdu.to_member_event()) .collect(), - } - .into()) + }) } /// # `POST /_matrix/client/r0/rooms/{roomId}/joined_members` @@ -444,7 +441,7 @@ pub async fn get_member_events_route( pub async fn joined_members_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if !db.rooms.is_joined(sender_user, &body.room_id)? 
{ @@ -468,7 +465,7 @@ pub async fn joined_members_route( ); } - Ok(joined_members::Response { joined }.into()) + Ok(joined_members::Response { joined }) } #[tracing::instrument(skip(db))] @@ -478,7 +475,7 @@ async fn join_room_by_id_helper( room_id: &RoomId, servers: &HashSet>, _third_party_signed: Option<&IncomingThirdPartySigned>, -) -> ConduitResult { +) -> Result { let sender_user = sender_user.expect("user is authenticated"); let mutex_state = Arc::clone( @@ -734,7 +731,7 @@ async fn join_room_by_id_helper( db.flush()?; - Ok(join_room_by_id::Response::new(room_id.to_owned()).into()) + Ok(join_room_by_id::Response::new(room_id.to_owned())) } fn validate_and_add_event_id( diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 4fb87715..c5982de1 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma}; +use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, Error, Result, Ruma}; use ruma::{ api::client::{ error::ErrorKind, @@ -22,7 +22,7 @@ use std::{ pub async fn send_message_event_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_deref(); @@ -62,7 +62,7 @@ pub async fn send_message_event_route( .map_err(|_| Error::bad_database("Invalid txnid bytes in database."))? .try_into() .map_err(|_| Error::bad_database("Invalid event id in txnid data."))?; - return Ok(send_message_event::Response { event_id }.into()); + return Ok(send_message_event::Response { event_id }); } let mut unsigned = BTreeMap::new(); @@ -94,7 +94,7 @@ pub async fn send_message_event_route( db.flush()?; - Ok(send_message_event::Response::new((*event_id).to_owned()).into()) + Ok(send_message_event::Response::new((*event_id).to_owned())) } /// # `GET /_matrix/client/r0/rooms/{roomId}/messages` @@ -107,7 +107,7 @@ pub async fn send_message_event_route( pub async fn get_message_events_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -235,5 +235,5 @@ pub async fn get_message_events_route( ); } - Ok(resp.into()) + Ok(resp) } diff --git a/src/client_server/presence.rs b/src/client_server/presence.rs index 0d58ebff..aedff555 100644 --- a/src/client_server/presence.rs +++ b/src/client_server/presence.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, utils, ConduitResult, Ruma}; +use crate::{database::DatabaseGuard, utils, Result, Ruma}; use ruma::api::client::r0::presence::{get_presence, set_presence}; use std::time::Duration; @@ -9,7 +9,7 @@ use std::time::Duration; pub async fn set_presence_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); for room_id in db.rooms.rooms_joined(sender_user) { @@ -39,7 +39,7 @@ pub async fn set_presence_route( db.flush()?; - Ok(set_presence::Response {}.into()) + Ok(set_presence::Response {}) } /// # `GET /_matrix/client/r0/presence/{userId}/status` @@ -51,7 +51,7 @@ pub async fn set_presence_route( pub async fn get_presence_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut presence_event = None; @@ 
-82,8 +82,7 @@ pub async fn get_presence_route( .last_active_ago .map(|millis| Duration::from_millis(millis.into())), presence: presence.content.presence, - } - .into()) + }) } else { todo!(); } diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index bb13b448..f520d2cb 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma}; +use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, Error, Result, Ruma}; use ruma::{ api::{ client::{ @@ -23,7 +23,7 @@ use std::sync::Arc; pub async fn set_displayname_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.users @@ -109,7 +109,7 @@ pub async fn set_displayname_route( db.flush()?; - Ok(set_display_name::Response {}.into()) + Ok(set_display_name::Response {}) } /// # `GET /_matrix/client/r0/profile/{userId}/displayname` @@ -121,7 +121,7 @@ pub async fn set_displayname_route( pub async fn get_displayname_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if body.user_id.server_name() != db.globals.server_name() { let response = db .sending @@ -137,14 +137,12 @@ pub async fn get_displayname_route( return Ok(get_display_name::Response { displayname: response.displayname, - } - .into()); + }); } Ok(get_display_name::Response { displayname: db.users.displayname(&body.user_id)?, - } - .into()) + }) } /// # `PUT /_matrix/client/r0/profile/{userId}/avatar_url` @@ -156,7 +154,7 @@ pub async fn get_displayname_route( pub async fn set_avatar_url_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.users @@ -244,7 +242,7 @@ pub async fn set_avatar_url_route( db.flush()?; - Ok(set_avatar_url::Response {}.into()) + Ok(set_avatar_url::Response {}) } /// # `GET /_matrix/client/r0/profile/{userId}/avatar_url` @@ -256,7 +254,7 @@ pub async fn set_avatar_url_route( pub async fn get_avatar_url_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if body.user_id.server_name() != db.globals.server_name() { let response = db .sending @@ -273,15 +271,13 @@ pub async fn get_avatar_url_route( return Ok(get_avatar_url::Response { avatar_url: response.avatar_url, blurhash: response.blurhash, - } - .into()); + }); } Ok(get_avatar_url::Response { avatar_url: db.users.avatar_url(&body.user_id)?, blurhash: db.users.blurhash(&body.user_id)?, - } - .into()) + }) } /// # `GET /_matrix/client/r0/profile/{userId}` @@ -293,7 +289,7 @@ pub async fn get_avatar_url_route( pub async fn get_profile_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if body.user_id.server_name() != db.globals.server_name() { let response = db .sending @@ -311,8 +307,7 @@ pub async fn get_profile_route( displayname: response.displayname, avatar_url: response.avatar_url, blurhash: response.blurhash, - } - .into()); + }); } if !db.users.exists(&body.user_id)? 
{ @@ -327,6 +322,5 @@ pub async fn get_profile_route( avatar_url: db.users.avatar_url(&body.user_id)?, blurhash: db.users.blurhash(&body.user_id)?, displayname: db.users.displayname(&body.user_id)?, - } - .into()) + }) } diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 322cf89a..3bc46b85 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; +use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::{ api::client::{ error::ErrorKind, @@ -19,7 +19,7 @@ use ruma::{ pub async fn get_pushrules_all_route( db: DatabaseGuard, body: Ruma, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let event: PushRulesEvent = db @@ -32,8 +32,7 @@ pub async fn get_pushrules_all_route( Ok(get_pushrules_all::Response { global: event.content.global, - } - .into()) + }) } /// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` @@ -43,7 +42,7 @@ pub async fn get_pushrules_all_route( pub async fn get_pushrule_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let event: PushRulesEvent = db @@ -80,7 +79,7 @@ pub async fn get_pushrule_route( }; if let Some(rule) = rule { - Ok(get_pushrule::Response { rule }.into()) + Ok(get_pushrule::Response { rule }) } else { Err(Error::BadRequest( ErrorKind::NotFound, @@ -96,7 +95,7 @@ pub async fn get_pushrule_route( pub async fn set_pushrule_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let body = body.body; @@ -183,7 +182,7 @@ pub async fn set_pushrule_route( db.flush()?; - Ok(set_pushrule::Response {}.into()) + Ok(set_pushrule::Response {}) } /// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions` @@ -193,7 +192,7 @@ pub async fn set_pushrule_route( pub async fn get_pushrule_actions_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if body.scope != "global" { @@ -240,8 +239,7 @@ pub async fn get_pushrule_actions_route( Ok(get_pushrule_actions::Response { actions: actions.unwrap_or_default(), - } - .into()) + }) } /// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions` @@ -251,7 +249,7 @@ pub async fn get_pushrule_actions_route( pub async fn set_pushrule_actions_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if body.scope != "global" { @@ -309,7 +307,7 @@ pub async fn set_pushrule_actions_route( db.flush()?; - Ok(set_pushrule_actions::Response {}.into()) + Ok(set_pushrule_actions::Response {}) } /// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled` @@ -319,7 +317,7 @@ pub async fn set_pushrule_actions_route( pub async fn get_pushrule_enabled_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if body.scope != "global" { @@ -369,7 +367,7 @@ pub async fn get_pushrule_enabled_route( db.flush()?; - Ok(get_pushrule_enabled::Response { enabled }.into()) + Ok(get_pushrule_enabled::Response { enabled }) } /// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled` @@ -379,7 +377,7 @@ pub async fn 
get_pushrule_enabled_route( pub async fn set_pushrule_enabled_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if body.scope != "global" { @@ -442,7 +440,7 @@ pub async fn set_pushrule_enabled_route( db.flush()?; - Ok(set_pushrule_enabled::Response {}.into()) + Ok(set_pushrule_enabled::Response {}) } /// # `DELETE /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` @@ -452,7 +450,7 @@ pub async fn set_pushrule_enabled_route( pub async fn delete_pushrule_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if body.scope != "global" { @@ -505,7 +503,7 @@ pub async fn delete_pushrule_route( db.flush()?; - Ok(delete_pushrule::Response {}.into()) + Ok(delete_pushrule::Response {}) } /// # `GET /_matrix/client/r0/pushers` @@ -515,13 +513,12 @@ pub async fn delete_pushrule_route( pub async fn get_pushers_route( db: DatabaseGuard, body: Ruma, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(get_pushers::Response { pushers: db.pusher.get_pushers(sender_user)?, - } - .into()) + }) } /// # `POST /_matrix/client/r0/pushers/set` @@ -533,7 +530,7 @@ pub async fn get_pushers_route( pub async fn set_pushers_route( db: DatabaseGuard, body: Ruma, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let pusher = body.pusher.clone(); @@ -541,5 +538,5 @@ pub async fn set_pushers_route( db.flush()?; - Ok(set_pusher::Response::default().into()) + Ok(set_pusher::Response::default()) } diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs index c9480f00..fa2627b1 100644 --- a/src/client_server/read_marker.rs +++ b/src/client_server/read_marker.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; +use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::{ api::client::{ error::ErrorKind, @@ -20,7 +20,7 @@ use std::collections::BTreeMap; pub async fn set_read_marker_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let fully_read_event = ruma::events::fully_read::FullyReadEvent { @@ -76,7 +76,7 @@ pub async fn set_read_marker_route( db.flush()?; - Ok(set_read_marker::Response {}.into()) + Ok(set_read_marker::Response {}) } /// # `POST /_matrix/client/r0/rooms/{roomId}/receipt/{receiptType}/{eventId}` @@ -86,7 +86,7 @@ pub async fn set_read_marker_route( pub async fn create_receipt_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.rooms.edus.private_read_set( @@ -128,5 +128,5 @@ pub async fn create_receipt_route( db.flush()?; - Ok(create_receipt::Response {}.into()) + Ok(create_receipt::Response {}) } diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index 2b442fc4..0a343e57 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use crate::{database::DatabaseGuard, pdu::PduBuilder, ConduitResult, Ruma}; +use crate::{database::DatabaseGuard, pdu::PduBuilder, Result, Ruma}; use ruma::{ api::client::r0::redact::redact_event, events::{room::redaction::RoomRedactionEventContent, EventType}, @@ -17,7 +17,7 @@ use 
serde_json::value::to_raw_value; pub async fn redact_event_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let body = body.body; @@ -53,5 +53,5 @@ pub async fn redact_event_route( db.flush()?; let event_id = (*event_id).to_owned(); - Ok(redact_event::Response { event_id }.into()) + Ok(redact_event::Response { event_id }) } diff --git a/src/client_server/report.rs b/src/client_server/report.rs index 441e33d7..680ad5a5 100644 --- a/src/client_server/report.rs +++ b/src/client_server/report.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, utils::HtmlEscape, ConduitResult, Error, Ruma}; +use crate::{database::DatabaseGuard, utils::HtmlEscape, Error, Result, Ruma}; use ruma::{ api::client::{error::ErrorKind, r0::room::report_content}, events::room::message, @@ -13,7 +13,7 @@ use ruma::{ pub async fn report_event_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let pdu = match db.rooms.get_pdu(&body.event_id)? { @@ -69,5 +69,5 @@ pub async fn report_event_route( db.flush()?; - Ok(report_content::Response {}.into()) + Ok(report_content::Response {}) } diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 475c5b45..4640cdab 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -1,6 +1,5 @@ use crate::{ - client_server::invite_helper, database::DatabaseGuard, pdu::PduBuilder, ConduitResult, Error, - Ruma, + client_server::invite_helper, database::DatabaseGuard, pdu::PduBuilder, Error, Result, Ruma, }; use ruma::{ api::client::{ @@ -50,7 +49,7 @@ use tracing::{info, warn}; pub async fn create_room_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let room_id = RoomId::new(db.globals.server_name()); @@ -410,7 +409,7 @@ pub async fn create_room_route( db.flush()?; - Ok(create_room::Response::new(room_id).into()) + Ok(create_room::Response::new(room_id)) } /// # `GET /_matrix/client/r0/rooms/{roomId}/event/{eventId}` @@ -422,7 +421,7 @@ pub async fn create_room_route( pub async fn get_room_event_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if !db.rooms.is_joined(sender_user, &body.room_id)? { @@ -438,8 +437,7 @@ pub async fn get_room_event_route( .get_pdu(&body.event_id)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))? .to_room_event(), - } - .into()) + }) } /// # `GET /_matrix/client/r0/rooms/{roomId}/aliases` @@ -451,7 +449,7 @@ pub async fn get_room_event_route( pub async fn get_room_aliases_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if !db.rooms.is_joined(sender_user, &body.room_id)? 
{ @@ -467,8 +465,7 @@ pub async fn get_room_aliases_route( .room_aliases(&body.room_id) .filter_map(|a| a.ok()) .collect(), - } - .into()) + }) } /// # `POST /_matrix/client/r0/rooms/{roomId}/upgrade` @@ -485,7 +482,7 @@ pub async fn get_room_aliases_route( pub async fn upgrade_room_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if !matches!(body.new_version, RoomVersionId::V5 | RoomVersionId::V6) { @@ -709,5 +706,5 @@ pub async fn upgrade_room_route( db.flush()?; // Return the replacement room id - Ok(upgrade_room::Response { replacement_room }.into()) + Ok(upgrade_room::Response { replacement_room }) } diff --git a/src/client_server/search.rs b/src/client_server/search.rs index 3f8a7010..78ac51ad 100644 --- a/src/client_server/search.rs +++ b/src/client_server/search.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; +use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::api::client::{error::ErrorKind, r0::search::search_events}; use search_events::{EventContextResult, ResultCategories, ResultRoomEvents, SearchResult}; @@ -13,7 +13,7 @@ use std::collections::BTreeMap; pub async fn search_events_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let search_criteria = body.search_categories.room_events.as_ref().unwrap(); @@ -111,6 +111,5 @@ pub async fn search_events_route( .map(str::to_lowercase) .collect(), }, - }) - .into()) + })) } diff --git a/src/client_server/session.rs b/src/client_server/session.rs index 264eac03..dbcd28cb 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -1,5 +1,5 @@ use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH}; -use crate::{database::DatabaseGuard, utils, ConduitResult, Error, Ruma}; +use crate::{database::DatabaseGuard, utils, Error, Result, Ruma}; use ruma::{ api::client::{ error::ErrorKind, @@ -26,13 +26,10 @@ struct Claims { #[tracing::instrument(skip(_body))] pub async fn get_login_types_route( _body: Ruma, -) -> ConduitResult { - Ok( - get_login_types::Response::new(vec![get_login_types::LoginType::Password( - Default::default(), - )]) - .into(), - ) +) -> Result { + Ok(get_login_types::Response::new(vec![ + get_login_types::LoginType::Password(Default::default()), + ])) } /// # `POST /_matrix/client/r0/login` @@ -50,7 +47,7 @@ pub async fn get_login_types_route( pub async fn login_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { // Validate login method // TODO: Other login methods let user_id = match &body.login_info { @@ -155,8 +152,7 @@ pub async fn login_route( home_server: Some(db.globals.server_name().to_owned()), device_id, well_known: None, - } - .into()) + }) } /// # `POST /_matrix/client/r0/logout` @@ -171,7 +167,7 @@ pub async fn login_route( pub async fn logout_route( db: DatabaseGuard, body: Ruma, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -179,7 +175,7 @@ pub async fn logout_route( db.flush()?; - Ok(logout::Response::new().into()) + Ok(logout::Response::new()) } /// # `POST /_matrix/client/r0/logout/all` @@ -197,7 +193,7 @@ pub async fn logout_route( pub async fn logout_all_route( db: DatabaseGuard, body: Ruma, -) -> ConduitResult { +) -> Result { let sender_user = 
body.sender_user.as_ref().expect("user is authenticated"); for device_id in db.users.all_device_ids(sender_user).flatten() { @@ -206,5 +202,5 @@ pub async fn logout_all_route( db.flush()?; - Ok(logout_all::Response::new().into()) + Ok(logout_all::Response::new()) } diff --git a/src/client_server/state.rs b/src/client_server/state.rs index 96b2184c..acc362fa 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use crate::{ - database::DatabaseGuard, pdu::PduBuilder, ConduitResult, Database, Error, Result, Ruma, + database::DatabaseGuard, pdu::PduBuilder, Database, Error, Result, Ruma, RumaResponse, }; use ruma::{ api::client::{ @@ -30,7 +30,7 @@ use ruma::{ pub async fn send_state_event_for_key_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let event_id = send_state_event_for_key_helper( @@ -46,7 +46,7 @@ pub async fn send_state_event_for_key_route( db.flush()?; let event_id = (*event_id).to_owned(); - Ok(send_state_event::Response { event_id }.into()) + Ok(send_state_event::Response { event_id }) } /// # `PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}` @@ -60,7 +60,7 @@ pub async fn send_state_event_for_key_route( pub async fn send_state_event_for_empty_key_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // Forbid m.room.encryption if encryption is disabled @@ -96,7 +96,7 @@ pub async fn send_state_event_for_empty_key_route( pub async fn get_state_events_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); #[allow(clippy::blocks_in_if_conditions)] @@ -131,8 +131,7 @@ pub async fn get_state_events_route( .values() .map(|pdu| pdu.to_state_event()) .collect(), - } - .into()) + }) } /// # `GET /_matrix/client/r0/rooms/{roomid}/state/{eventType}/{stateKey}` @@ -144,7 +143,7 @@ pub async fn get_state_events_route( pub async fn get_state_events_for_key_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); #[allow(clippy::blocks_in_if_conditions)] @@ -183,8 +182,7 @@ pub async fn get_state_events_for_key_route( Ok(get_state_events_for_key::Response { content: serde_json::from_str(event.content.get()) .map_err(|_| Error::bad_database("Invalid event content in database"))?, - } - .into()) + }) } /// # `GET /_matrix/client/r0/rooms/{roomid}/state/{eventType}` @@ -196,7 +194,7 @@ pub async fn get_state_events_for_key_route( pub async fn get_state_events_for_empty_key_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); #[allow(clippy::blocks_in_if_conditions)] diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 6ba68b0d..6410ce5d 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Result, Ruma, RumaResponse}; +use crate::{database::DatabaseGuard, Database, Error, Result, Ruma, RumaResponse}; use ruma::{ api::client::r0::{ filter::{IncomingFilterDefinition, LazyLoadOptions}, @@ -58,7 +58,7 @@ use tracing::error; pub async fn sync_events_route( db: DatabaseGuard, body: Ruma>, -) -> Result, RumaResponse> { +) -> 
Result> { let sender_user = body.sender_user.expect("user is authenticated"); let sender_device = body.sender_device.expect("user is authenticated"); let body = body.body; @@ -132,7 +132,7 @@ async fn sync_helper_wrapper( sender_user: Box, sender_device: Box, body: sync_events::IncomingRequest, - tx: Sender>>, + tx: Sender>>, ) { let since = body.since.clone(); @@ -166,7 +166,7 @@ async fn sync_helper_wrapper( drop(db); - let _ = tx.send(Some(r.map(|(r, _)| r.into()))); + let _ = tx.send(Some(r.map(|(r, _)| r))); } async fn sync_helper( diff --git a/src/client_server/tag.rs b/src/client_server/tag.rs index cad3421a..edf86903 100644 --- a/src/client_server/tag.rs +++ b/src/client_server/tag.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, ConduitResult, Ruma}; +use crate::{database::DatabaseGuard, Result, Ruma}; use ruma::{ api::client::r0::tag::{create_tag, delete_tag, get_tags}, events::{ @@ -17,7 +17,7 @@ use std::collections::BTreeMap; pub async fn update_tag_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut tags_event = db @@ -43,7 +43,7 @@ pub async fn update_tag_route( db.flush()?; - Ok(create_tag::Response {}.into()) + Ok(create_tag::Response {}) } /// # `DELETE /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}` @@ -55,7 +55,7 @@ pub async fn update_tag_route( pub async fn delete_tag_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut tags_event = db @@ -78,7 +78,7 @@ pub async fn delete_tag_route( db.flush()?; - Ok(delete_tag::Response {}.into()) + Ok(delete_tag::Response {}) } /// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags` @@ -90,7 +90,7 @@ pub async fn delete_tag_route( pub async fn get_tags_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(get_tags::Response { @@ -104,6 +104,5 @@ pub async fn get_tags_route( }) .content .tags, - } - .into()) + }) } diff --git a/src/client_server/thirdparty.rs b/src/client_server/thirdparty.rs index d8b7972e..929503ed 100644 --- a/src/client_server/thirdparty.rs +++ b/src/client_server/thirdparty.rs @@ -1,4 +1,4 @@ -use crate::{ConduitResult, Ruma}; +use crate::{Result, Ruma}; use ruma::api::client::r0::thirdparty::get_protocols; use std::collections::BTreeMap; @@ -9,10 +9,9 @@ use std::collections::BTreeMap; #[tracing::instrument(skip(_body))] pub async fn get_protocols_route( _body: Ruma, -) -> ConduitResult { +) -> Result { // TODO Ok(get_protocols::Response { protocols: BTreeMap::new(), - } - .into()) + }) } diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index 12691185..9f67bf00 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -1,6 +1,6 @@ use std::collections::BTreeMap; -use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; +use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::{ api::{ client::{error::ErrorKind, r0::to_device::send_event_to_device}, @@ -17,7 +17,7 @@ use ruma::{ pub async fn send_event_to_device_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_deref(); @@ -94,5 +94,5 @@ pub async fn send_event_to_device_route( db.flush()?; - 
Ok(send_event_to_device::Response {}.into()) + Ok(send_event_to_device::Response {}) } diff --git a/src/client_server/typing.rs b/src/client_server/typing.rs index 3a61c584..6c1939a7 100644 --- a/src/client_server/typing.rs +++ b/src/client_server/typing.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, utils, ConduitResult, Ruma}; +use crate::{database::DatabaseGuard, utils, Result, Ruma}; use create_typing_event::Typing; use ruma::api::client::r0::typing::create_typing_event; @@ -9,7 +9,7 @@ use ruma::api::client::r0::typing::create_typing_event; pub async fn create_typing_event_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if let Typing::Yes(duration) = body.state { @@ -25,5 +25,5 @@ pub async fn create_typing_event_route( .typing_remove(sender_user, &body.room_id, &db.globals)?; } - Ok(create_typing_event::Response {}.into()) + Ok(create_typing_event::Response {}) } diff --git a/src/client_server/unversioned.rs b/src/client_server/unversioned.rs index 8b1b66f2..65becda6 100644 --- a/src/client_server/unversioned.rs +++ b/src/client_server/unversioned.rs @@ -1,6 +1,6 @@ use std::{collections::BTreeMap, iter::FromIterator}; -use crate::{ConduitResult, Ruma}; +use crate::{Result, Ruma}; use ruma::api::client::unversioned::get_supported_versions; /// # `GET /_matrix/client/versions` @@ -16,11 +16,11 @@ use ruma::api::client::unversioned::get_supported_versions; #[tracing::instrument(skip(_body))] pub async fn get_supported_versions_route( _body: Ruma, -) -> ConduitResult { +) -> Result { let resp = get_supported_versions::Response { versions: vec!["r0.5.0".to_owned(), "r0.6.0".to_owned()], unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]), }; - Ok(resp.into()) + Ok(resp) } diff --git a/src/client_server/user_directory.rs b/src/client_server/user_directory.rs index c923ceed..a3df5839 100644 --- a/src/client_server/user_directory.rs +++ b/src/client_server/user_directory.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, ConduitResult, Ruma}; +use crate::{database::DatabaseGuard, Result, Ruma}; use ruma::api::client::r0::user_directory::search_users; /// # `POST /_matrix/client/r0/user_directory/search` @@ -10,7 +10,7 @@ use ruma::api::client::r0::user_directory::search_users; pub async fn search_users_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let limit = u64::from(body.limit) as usize; let mut users = db.users.iter().filter_map(|user_id| { @@ -48,5 +48,5 @@ pub async fn search_users_route( let results = users.by_ref().take(limit).collect(); let limited = users.next().is_some(); - Ok(search_users::Response { results, limited }.into()) + Ok(search_users::Response { results, limited }) } diff --git a/src/client_server/voip.rs b/src/client_server/voip.rs index 6abebdcf..f3262abf 100644 --- a/src/client_server/voip.rs +++ b/src/client_server/voip.rs @@ -1,7 +1,6 @@ -use crate::{database::DatabaseGuard, ConduitResult, Ruma}; +use crate::{database::DatabaseGuard, Result, Ruma}; use hmac::{Hmac, Mac, NewMac}; -use ruma::api::client::r0::voip::get_turn_server_info; -use ruma::SecondsSinceUnixEpoch; +use ruma::{api::client::r0::voip::get_turn_server_info, SecondsSinceUnixEpoch}; use sha1::Sha1; use std::time::{Duration, SystemTime}; @@ -14,7 +13,7 @@ type HmacSha1 = Hmac; pub async fn turn_server_route( db: DatabaseGuard, body: Ruma, -) -> ConduitResult { +) -> Result { let sender_user = 
body.sender_user.as_ref().expect("user is authenticated"); let turn_secret = db.globals.turn_secret(); @@ -46,6 +45,5 @@ pub async fn turn_server_route( password, uris: db.globals.turn_uris().to_vec(), ttl: Duration::from_secs(db.globals.turn_ttl()), - } - .into()) + }) } diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index d6157135..2cf9d5ee 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -1,6 +1,10 @@ use super::{super::Config, watchers::Watchers, DatabaseEngine, Tree}; use crate::{utils, Result}; -use std::{future::Future, pin::Pin, sync::Arc, sync::RwLock}; +use std::{ + future::Future, + pin::Pin, + sync::{Arc, RwLock}, +}; pub struct Engine { rocks: rocksdb::DBWithThreadMode, diff --git a/src/database/globals.rs b/src/database/globals.rs index decd84c3..10145111 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -1,4 +1,4 @@ -use crate::{database::Config, server_server::FedDest, utils, ConduitResult, Error, Result}; +use crate::{database::Config, server_server::FedDest, utils, Error, Result}; use ruma::{ api::{ client::r0::sync::sync_events, @@ -27,8 +27,8 @@ type WellKnownMap = HashMap, (FedDest, String)>; type TlsNameMap = HashMap, u16)>; type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries type SyncHandle = ( - Option, // since - Receiver>>, // rx + Option, // since + Receiver>>, // rx ); pub struct Globals { diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index b0c8d6dd..b2244b5d 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -1,6 +1,7 @@ -use std::collections::BTreeMap; -use std::sync::Arc; -use std::sync::RwLock; +use std::{ + collections::BTreeMap, + sync::{Arc, RwLock}, +}; use crate::{client_server::SESSION_ID_LENGTH, utils, Error, Result}; use ruma::{ diff --git a/src/lib.rs b/src/lib.rs index 135ab854..c35a1293 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -22,4 +22,4 @@ pub use config::Config; pub use database::Database; pub use error::{Error, Result}; pub use pdu::PduEvent; -pub use ruma_wrapper::{ConduitResult, Ruma, RumaResponse}; +pub use ruma_wrapper::{Ruma, RumaResponse}; diff --git a/src/main.rs b/src/main.rs index 53b18255..40122cf8 100644 --- a/src/main.rs +++ b/src/main.rs @@ -11,7 +11,6 @@ use std::{future::Future, net::SocketAddr, sync::Arc, time::Duration}; use axum::{ extract::{FromRequest, MatchedPath}, - handler::Handler, response::IntoResponse, routing::{get, on, MethodFilter}, Router, @@ -25,10 +24,7 @@ use http::{ Method, }; use opentelemetry::trace::{FutureExt, Tracer}; -use ruma::{ - api::{IncomingRequest, Metadata}, - Outgoing, -}; +use ruma::{api::IncomingRequest, Outgoing}; use tokio::{signal, sync::RwLock}; use tower::ServiceBuilder; use tower_http::{ @@ -353,25 +349,15 @@ impl RouterExt for Router { H: RumaHandler, T: 'static, { - let meta = H::METADATA; - let method_filter = match meta.method { - Method::DELETE => MethodFilter::DELETE, - Method::GET => MethodFilter::GET, - Method::HEAD => MethodFilter::HEAD, - Method::OPTIONS => MethodFilter::OPTIONS, - Method::PATCH => MethodFilter::PATCH, - Method::POST => MethodFilter::POST, - Method::PUT => MethodFilter::PUT, - Method::TRACE => MethodFilter::TRACE, - m => panic!("Unsupported HTTP method: {:?}", m), - }; - - self.route(meta.path, on(method_filter, handler)) + handler.add_to_router(self) } } -pub trait RumaHandler: Handler { - const METADATA: Metadata; +pub trait RumaHandler { + // Can't transform to a handler without boxing or 
relying on the nightly-only + // impl-trait-in-traits feature. Moving a small amount of extra logic into the trait + // allows bypassing both. + fn add_to_router(self, router: Router) -> Router; } macro_rules! impl_ruma_handler { @@ -380,17 +366,22 @@ macro_rules! impl_ruma_handler { #[allow(non_snake_case)] impl RumaHandler<($($ty,)* Ruma,)> for F where - Req: Outgoing, + Req: Outgoing + 'static, Req::Incoming: IncomingRequest + Send, F: FnOnce($($ty,)* Ruma) -> Fut + Clone + Send + 'static, - Fut: Future::OutgoingResponse>, - E, - >> + Send, + Fut: Future::OutgoingResponse, E>> + + Send, E: IntoResponse, - $( $ty: FromRequest + Send, )* + $( $ty: FromRequest + Send + 'static, )* { - const METADATA: Metadata = Req::Incoming::METADATA; + fn add_to_router(self, router: Router) -> Router { + let meta = Req::Incoming::METADATA; + let method_filter = method_to_filter(meta.method); + + router.route(meta.path, on(method_filter, |$( $ty: $ty, )* req| async move { + self($($ty,)* req).await.map(RumaResponse) + })) + } } }; } @@ -404,3 +395,18 @@ impl_ruma_handler!(T1, T2, T3, T4, T5); impl_ruma_handler!(T1, T2, T3, T4, T5, T6); impl_ruma_handler!(T1, T2, T3, T4, T5, T6, T7); impl_ruma_handler!(T1, T2, T3, T4, T5, T6, T7, T8); + +fn method_to_filter(method: Method) -> MethodFilter { + let method_filter = match method { + Method::DELETE => MethodFilter::DELETE, + Method::GET => MethodFilter::GET, + Method::HEAD => MethodFilter::HEAD, + Method::OPTIONS => MethodFilter::OPTIONS, + Method::PATCH => MethodFilter::PATCH, + Method::POST => MethodFilter::POST, + Method::PUT => MethodFilter::PUT, + Method::TRACE => MethodFilter::TRACE, + m => panic!("Unsupported HTTP method: {:?}", m), + }; + method_filter +} diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 12be79a9..ee89cc28 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -29,9 +29,6 @@ impl Deref for Ruma { } } -/// This struct converts ruma structs to http responses. -pub type ConduitResult = Result, Error>; - #[derive(Clone)] pub struct RumaResponse(pub T); diff --git a/src/server_server.rs b/src/server_server.rs index fc3681bd..e17449e0 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -2,7 +2,7 @@ use crate::{ client_server::{self, claim_keys_helper, get_keys_helper}, database::{rooms::CompressedStateEvent, DatabaseGuard}, pdu::EventHash, - utils, ConduitResult, Database, Error, PduEvent, Result, Ruma, + utils, Database, Error, PduEvent, Result, Ruma, }; use axum::{response::IntoResponse, Json}; use futures_util::{stream::FuturesUnordered, StreamExt}; @@ -494,7 +494,7 @@ async fn request_well_known( pub async fn get_server_version_route( db: DatabaseGuard, _body: Ruma, -) -> ConduitResult { +) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -504,8 +504,7 @@ pub async fn get_server_version_route( name: Some("Conduit".to_owned()), version: Some(env!("CARGO_PKG_VERSION").to_owned()), }), - } - .into()) + }) } /// # `GET /_matrix/key/v2/server` @@ -577,7 +576,7 @@ pub async fn get_server_keys_deprecated_route(db: DatabaseGuard) -> impl IntoRes pub async fn get_public_rooms_filtered_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -590,8 +589,7 @@ pub async fn get_public_rooms_filtered_route( &body.filter, &body.room_network, ) - .await? 
- .0; + .await?; Ok(get_public_rooms_filtered::v1::Response { chunk: response @@ -609,8 +607,7 @@ pub async fn get_public_rooms_filtered_route( prev_batch: response.prev_batch, next_batch: response.next_batch, total_room_count_estimate: response.total_room_count_estimate, - } - .into()) + }) } /// # `GET /_matrix/federation/v1/publicRooms` @@ -620,7 +617,7 @@ pub async fn get_public_rooms_filtered_route( pub async fn get_public_rooms_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -633,8 +630,7 @@ pub async fn get_public_rooms_route( &IncomingFilter::default(), &IncomingRoomNetwork::Matrix, ) - .await? - .0; + .await?; Ok(get_public_rooms::v1::Response { chunk: response @@ -652,8 +648,7 @@ pub async fn get_public_rooms_route( prev_batch: response.prev_batch, next_batch: response.next_batch, total_room_count_estimate: response.total_room_count_estimate, - } - .into()) + }) } /// # `PUT /_matrix/federation/v1/send/{txnId}` @@ -663,7 +658,7 @@ pub async fn get_public_rooms_route( pub async fn send_transaction_message_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -875,7 +870,7 @@ pub async fn send_transaction_message_route( db.flush()?; - Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) + Ok(send_transaction_message::v1::Response { pdus: resolved_map }) } /// An async function that can recursively call itself. @@ -2293,7 +2288,7 @@ fn get_auth_chain_inner( pub async fn get_event_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -2327,8 +2322,7 @@ pub async fn get_event_route( origin: db.globals.server_name().to_owned(), origin_server_ts: MilliSecondsSinceUnixEpoch::now(), pdu: PduEvent::convert_to_outgoing_federation_event(event), - } - .into()) + }) } /// # `POST /_matrix/federation/v1/get_missing_events/{roomId}` @@ -2338,7 +2332,7 @@ pub async fn get_event_route( pub async fn get_missing_events_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -2400,7 +2394,7 @@ pub async fn get_missing_events_route( i += 1; } - Ok(get_missing_events::v1::Response { events }.into()) + Ok(get_missing_events::v1::Response { events }) } /// # `GET /_matrix/federation/v1/event_auth/{roomId}/{eventId}` @@ -2412,7 +2406,7 @@ pub async fn get_missing_events_route( pub async fn get_event_authorization_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -2451,8 +2445,7 @@ pub async fn get_event_authorization_route( .filter_map(|id| db.rooms.get_pdu_json(&id).ok()?) 
.map(PduEvent::convert_to_outgoing_federation_event) .collect(), - } - .into()) + }) } /// # `GET /_matrix/federation/v1/state/{roomId}` @@ -2462,7 +2455,7 @@ pub async fn get_event_authorization_route( pub async fn get_room_state_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -2512,8 +2505,7 @@ pub async fn get_room_state_route( .filter_map(|r| r.ok()) .collect(), pdus, - } - .into()) + }) } /// # `GET /_matrix/federation/v1/state_ids/{roomId}` @@ -2523,7 +2515,7 @@ pub async fn get_room_state_route( pub async fn get_room_state_ids_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -2562,8 +2554,7 @@ pub async fn get_room_state_ids_route( Ok(get_room_state_ids::v1::Response { auth_chain_ids: auth_chain_ids.map(|id| (*id).to_owned()).collect(), pdu_ids, - } - .into()) + }) } /// # `GET /_matrix/federation/v1/make_join/{roomId}/{userId}` @@ -2573,7 +2564,7 @@ pub async fn get_room_state_ids_route( pub async fn create_join_event_template_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -2738,8 +2729,7 @@ pub async fn create_join_event_template_route( Ok(create_join_event_template::v1::Response { room_version: Some(room_version_id), event: to_raw_value(&pdu_json).expect("CanonicalJson can be serialized to JSON"), - } - .into()) + }) } async fn create_join_event( @@ -2855,7 +2845,7 @@ async fn create_join_event( pub async fn create_join_event_v1_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_servername = body .sender_servername .as_ref() @@ -2863,7 +2853,7 @@ pub async fn create_join_event_v1_route( let room_state = create_join_event(&db, sender_servername, &body.room_id, &body.pdu).await?; - Ok(create_join_event::v1::Response { room_state }.into()) + Ok(create_join_event::v1::Response { room_state }) } /// # `PUT /_matrix/federation/v2/send_join/{roomId}/{eventId}` @@ -2873,7 +2863,7 @@ pub async fn create_join_event_v1_route( pub async fn create_join_event_v2_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { let sender_servername = body .sender_servername .as_ref() @@ -2881,7 +2871,7 @@ pub async fn create_join_event_v2_route( let room_state = create_join_event(&db, sender_servername, &body.room_id, &body.pdu).await?; - Ok(create_join_event::v2::Response { room_state }.into()) + Ok(create_join_event::v2::Response { room_state }) } /// # `PUT /_matrix/federation/v2/invite/{roomId}/{eventId}` @@ -2891,7 +2881,7 @@ pub async fn create_join_event_v2_route( pub async fn create_invite_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -2992,8 +2982,7 @@ pub async fn create_invite_route( Ok(create_invite::v2::Response { event: PduEvent::convert_to_outgoing_federation_event(signed_event), - } - .into()) + }) } /// # `GET /_matrix/federation/v1/user/devices/{userId}` @@ -3003,7 +2992,7 @@ pub async fn create_invite_route( pub async fn get_devices_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -3031,8 +3020,7 @@ pub 
async fn get_devices_route( }) }) .collect(), - } - .into()) + }) } /// # `GET /_matrix/federation/v1/query/directory` @@ -3042,7 +3030,7 @@ pub async fn get_devices_route( pub async fn get_room_information_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -3058,8 +3046,7 @@ pub async fn get_room_information_route( Ok(get_room_information::v1::Response { room_id, servers: vec![db.globals.server_name().to_owned()], - } - .into()) + }) } /// # `GET /_matrix/federation/v1/query/profile` @@ -3069,7 +3056,7 @@ pub async fn get_room_information_route( pub async fn get_profile_information_route( db: DatabaseGuard, body: Ruma>, -) -> ConduitResult { +) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -3097,8 +3084,7 @@ pub async fn get_profile_information_route( blurhash, displayname, avatar_url, - } - .into()) + }) } /// # `POST /_matrix/federation/v1/user/keys/query` @@ -3108,7 +3094,7 @@ pub async fn get_profile_information_route( pub async fn get_keys_route( db: DatabaseGuard, body: Ruma, -) -> ConduitResult { +) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -3127,8 +3113,7 @@ pub async fn get_keys_route( device_keys: result.device_keys, master_keys: result.master_keys, self_signing_keys: result.self_signing_keys, - } - .into()) + }) } /// # `POST /_matrix/federation/v1/user/keys/claim` @@ -3138,7 +3123,7 @@ pub async fn get_keys_route( pub async fn claim_keys_route( db: DatabaseGuard, body: Ruma, -) -> ConduitResult { +) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -3149,8 +3134,7 @@ pub async fn claim_keys_route( Ok(claim_keys::v1::Response { one_time_keys: result.one_time_keys, - } - .into()) + }) } #[tracing::instrument(skip(event, pub_key_map, db))] From c8951a1d9cc05a8c138be06f520a78b4cbb053c7 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 22 Jan 2022 18:38:39 +0100 Subject: [PATCH 189/445] Use axum-server for direct TLS support --- Cargo.lock | 28 +++++++++++++++++++++++++++- Cargo.toml | 2 +- src/config.rs | 8 ++++++++ src/main.rs | 29 +++++++++++++++++++++-------- 4 files changed, 57 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index f84c9829..41105b37 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -58,6 +58,12 @@ dependencies = [ "winapi", ] +[[package]] +name = "arc-swap" +version = "1.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c5d78ce20460b82d3fa150275ed9d55e21064fc7951177baacf86a145c4a4b1f" + [[package]] name = "arrayref" version = "0.3.6" @@ -162,6 +168,26 @@ dependencies = [ "mime", ] +[[package]] +name = "axum-server" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f9cfd9dbe28ebde5c0460067ea27c6f3b1d514b699c4e0a5aab0fb63e452a8a8" +dependencies = [ + "arc-swap", + "bytes", + "futures-util", + "http", + "http-body", + "hyper", + "pin-project-lite", + "rustls", + "rustls-pemfile", + "tokio", + "tokio-rustls", + "tower-service", +] + [[package]] name = "base64" version = "0.12.3" @@ -365,6 +391,7 @@ name = "conduit" version = "0.3.0" dependencies = [ "axum", + "axum-server", "base64 0.13.0", "bytes", "clap", @@ -375,7 +402,6 @@ dependencies = [ "heed", "hmac", "http", - "hyper", "image", "jsonwebtoken", "lru-cache", diff --git a/Cargo.toml b/Cargo.toml 
index 5fb75dcb..6dedfa8d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,7 +15,7 @@ edition = "2021" [dependencies] # Web framework axum = { version = "0.4.4", features = ["headers"], optional = true } -hyper = "0.14.16" +axum-server = { version = "0.3.3", features = ["tls-rustls"] } tower = { version = "0.4.11", features = ["util"] } tower-http = { version = "0.2.1", features = ["add-extension", "cors", "compression-full", "sensitive-headers", "trace", "util"] } diff --git a/src/config.rs b/src/config.rs index 48ac9816..155704b7 100644 --- a/src/config.rs +++ b/src/config.rs @@ -17,6 +17,8 @@ pub struct Config { pub address: IpAddr, #[serde(default = "default_port")] pub port: u16, + pub tls: Option, + pub server_name: Box, #[serde(default = "default_database_backend")] pub database_backend: String, @@ -69,6 +71,12 @@ pub struct Config { pub catchall: BTreeMap, } +#[derive(Clone, Debug, Deserialize)] +pub struct TlsConfig { + pub certs: String, + pub key: String, +} + const DEPRECATED_KEYS: &[&str] = &["cache_capacity"]; impl Config { diff --git a/src/main.rs b/src/main.rs index 40122cf8..22ddf3e0 100644 --- a/src/main.rs +++ b/src/main.rs @@ -7,7 +7,7 @@ #![allow(clippy::suspicious_else_formatting)] #![deny(clippy::dbg_macro)] -use std::{future::Future, net::SocketAddr, sync::Arc, time::Duration}; +use std::{future::Future, io, net::SocketAddr, sync::Arc, time::Duration}; use axum::{ extract::{FromRequest, MatchedPath}, @@ -15,6 +15,7 @@ use axum::{ routing::{get, on, MethodFilter}, Router, }; +use axum_server::{bind, bind_rustls, tls_rustls::RustlsConfig, Handle as ServerHandle}; use figment::{ providers::{Env, Format, Toml}, Figment, @@ -117,8 +118,8 @@ async fn main() { } } -async fn run_server(config: &Config, db: Arc>) -> hyper::Result<()> { - let listen_addr = SocketAddr::from((config.address, config.port)); +async fn run_server(config: &Config, db: Arc>) -> io::Result<()> { + let addr = SocketAddr::from((config.address, config.port)); let x_requested_with = HeaderName::from_static("x-requested-with"); @@ -157,10 +158,20 @@ async fn run_server(config: &Config, db: Arc>) -> hyper::Result ) .add_extension(db.clone()); - axum::Server::bind(&listen_addr) - .serve(routes().layer(middlewares).into_make_service()) - .with_graceful_shutdown(shutdown_signal()) - .await?; + let app = routes().layer(middlewares).into_make_service(); + let handle = ServerHandle::new(); + + tokio::spawn(shutdown_signal(handle.clone())); + + match &config.tls { + Some(tls) => { + let conf = RustlsConfig::from_pem_file(&tls.certs, &tls.key).await?; + bind_rustls(addr, conf).handle(handle).serve(app).await?; + } + None => { + bind(addr).handle(handle).serve(app).await?; + } + } // After serve exits and before exiting, shutdown the DB Database::on_shutdown(db).await; @@ -312,7 +323,7 @@ fn routes() -> Router { .ruma_route(server_server::claim_keys_route) } -async fn shutdown_signal() { +async fn shutdown_signal(handle: ServerHandle) { let ctrl_c = async { signal::ctrl_c() .await @@ -334,6 +345,8 @@ async fn shutdown_signal() { _ = ctrl_c => {}, _ = terminate => {}, } + + handle.graceful_shutdown(Some(Duration::from_secs(30))); } trait RouterExt { From 21ae63d46b744a73a3497dddde2e336993981b38 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Wed, 9 Feb 2022 12:32:18 +0100 Subject: [PATCH 190/445] Rewrite query parameter parsing --- src/ruma_wrapper/axum.rs | 30 ++++++++++++++++++++++-------- 1 file changed, 22 insertions(+), 8 deletions(-) diff --git a/src/ruma_wrapper/axum.rs b/src/ruma_wrapper/axum.rs index 
d2cf3f15..cec82126 100644 --- a/src/ruma_wrapper/axum.rs +++ b/src/ruma_wrapper/axum.rs @@ -18,7 +18,8 @@ use ruma::{ signatures::CanonicalJsonValue, DeviceId, Outgoing, ServerName, UserId, }; -use tracing::{debug, warn}; +use serde::Deserialize; +use tracing::{debug, error, warn}; use super::{Ruma, RumaResponse}; use crate::{database::DatabaseGuard, server_server, Error, Result}; @@ -35,18 +36,31 @@ where type Rejection = Error; async fn from_request(req: &mut RequestParts) -> Result { + #[derive(Deserialize)] + struct QueryParams { + access_token: Option, + user_id: Option, + } + let metadata = T::Incoming::METADATA; let db = DatabaseGuard::from_request(req).await?; let auth_header = Option::>>::from_request(req).await?; - // FIXME: Do this more efficiently - let query: BTreeMap = - ruma::serde::urlencoded::from_str(req.uri().query().unwrap_or_default()) - .expect("Query to string map deserialization should be fine"); + let query = req.uri().query().unwrap_or_default(); + let query_params: QueryParams = match ruma::serde::urlencoded::from_str(query) { + Ok(params) => params, + Err(e) => { + error!(%query, "Failed to deserialize query parameters: {}", e); + return Err(Error::BadRequest( + ErrorKind::Unknown, + "Failed to read query parameters", + )); + } + }; let token = match &auth_header { Some(TypedHeader(Authorization(bearer))) => Some(bearer.token()), - None => query.get("access_token").map(|tok| tok.as_str()), + None => query_params.access_token.as_deref(), }; let mut body = Bytes::from_request(req) @@ -67,7 +81,7 @@ where if let Some((_id, registration)) = appservice_registration { match metadata.authentication { AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { - let user_id = query.get("user_id").map_or_else( + let user_id = query_params.user_id.map_or_else( || { UserId::parse_with_server_name( registration @@ -79,7 +93,7 @@ where ) .unwrap() }, - |s| UserId::parse(s.as_str()).unwrap(), + |s| UserId::parse(s).unwrap(), ); if !db.users.exists(&user_id).unwrap() { From 5d8c80b170292bf444c018fd5a73eaade87b171d Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Wed, 9 Feb 2022 14:01:44 +0100 Subject: [PATCH 191/445] Strip quotes from X-Matrix fields --- src/ruma_wrapper/axum.rs | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/ruma_wrapper/axum.rs b/src/ruma_wrapper/axum.rs index cec82126..c4e1d292 100644 --- a/src/ruma_wrapper/axum.rs +++ b/src/ruma_wrapper/axum.rs @@ -315,6 +315,13 @@ impl Credentials for XMatrix { for entry in parameters.split_terminator(',') { let (name, value) = entry.split_once('=')?; + // It's not at all clear why some fields are quoted and others not in the spec, + // let's simply accept either form for every field. 
+ let value = value + .strip_prefix('"') + .and_then(|rest| rest.strip_suffix('"')) + .unwrap_or(value); + // FIXME: Catch multiple fields of the same name match name { "origin" => origin = Some(value.try_into().ok()?), From 9db0473ed5926ee962652be0643794241773df8e Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Wed, 9 Feb 2022 14:03:38 +0100 Subject: [PATCH 192/445] Improve error messages in Ruma wrapper FromRequest impl --- src/ruma_wrapper/axum.rs | 64 +++++++++++++++++++++++----------------- 1 file changed, 37 insertions(+), 27 deletions(-) diff --git a/src/ruma_wrapper/axum.rs b/src/ruma_wrapper/axum.rs index c4e1d292..71786191 100644 --- a/src/ruma_wrapper/axum.rs +++ b/src/ruma_wrapper/axum.rs @@ -3,7 +3,7 @@ use std::{collections::BTreeMap, iter::FromIterator, str}; use axum::{ async_trait, body::{Full, HttpBody}, - extract::{FromRequest, RequestParts, TypedHeader}, + extract::{rejection::TypedHeaderRejectionReason, FromRequest, RequestParts, TypedHeader}, headers::{ authorization::{Bearer, Credentials}, Authorization, @@ -97,7 +97,10 @@ where ); if !db.users.exists(&user_id).unwrap() { - return Err(forbidden()); + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "User does not exist.", + )); } // TODO: Check if appservice is allowed to be that user @@ -111,11 +114,21 @@ where AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { let token = match token { Some(token) => token, - _ => return Err(missing_token()), + _ => { + return Err(Error::BadRequest( + ErrorKind::MissingToken, + "Missing access token.", + )) + } }; match db.users.find_from_token(token).unwrap() { - None => return Err(unknown_token()), + None => { + return Err(Error::BadRequest( + ErrorKind::UnknownToken { soft_logout: false }, + "Unknown access token.", + )) + } Some((user_id, device_id)) => ( Some(user_id), Some(Box::::from(device_id)), @@ -130,7 +143,17 @@ where .await .map_err(|e| { warn!("Missing or invalid Authorization header: {}", e); - forbidden() + + let msg = match e.reason() { + TypedHeaderRejectionReason::Missing => { + "Missing Authorization header." + } + TypedHeaderRejectionReason::Error(_) => { + "Invalid X-Matrix signatures." + } + }; + + Error::BadRequest(ErrorKind::Forbidden, msg) })?; let origin_signatures = BTreeMap::from_iter([( @@ -183,7 +206,10 @@ where Ok(b) => b, Err(e) => { warn!("Failed to fetch signing keys: {}", e); - return Err(forbidden()); + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Failed to fetch signing keys.", + )); } }; @@ -206,7 +232,10 @@ where ); } - return Err(forbidden()); + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Failed to verify X-Matrix signatures.", + )); } } } @@ -255,7 +284,7 @@ where let body = ::try_from_http_request(http_request).map_err(|e| { warn!("{:?}", e); - bad_json() + Error::BadRequest(ErrorKind::BadJson, "Failed to deserialize request.") })?; Ok(Ruma { @@ -269,25 +298,6 @@ where } } -fn forbidden() -> Error { - Error::BadRequest(ErrorKind::Forbidden, "Forbidden.") -} - -fn unknown_token() -> Error { - Error::BadRequest( - ErrorKind::UnknownToken { soft_logout: false }, - "Unknown token.", - ) -} - -fn missing_token() -> Error { - Error::BadRequest(ErrorKind::MissingToken, "Missing token.") -} - -fn bad_json() -> Error { - Error::BadRequest(ErrorKind::BadJson, "Bad json.") -} - struct XMatrix { origin: Box, key: String, // KeyName? 
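The "Strip quotes from X-Matrix fields" patch above accepts both quoted and bare parameter values when parsing the X-Matrix Authorization header. As a minimal standalone sketch (not taken from the patches themselves; `parse_x_matrix_param` is a name invented here purely for illustration), the same value handling looks like this, assuming entries of the form `name=value` or `name="value"`:

    // Hypothetical helper mirroring the quote handling from the patch above:
    // accept either `name="value"` or `name=value` and return the bare value.
    fn parse_x_matrix_param(entry: &str) -> Option<(&str, &str)> {
        let (name, value) = entry.split_once('=')?;
        let value = value
            .strip_prefix('"')
            .and_then(|rest| rest.strip_suffix('"'))
            .unwrap_or(value);
        Some((name, value))
    }

    fn main() {
        // Both forms yield the same (name, value) pair.
        assert_eq!(
            parse_x_matrix_param(r#"key="ed25519:abc""#),
            Some(("key", "ed25519:abc"))
        );
        assert_eq!(
            parse_x_matrix_param("key=ed25519:abc"),
            Some(("key", "ed25519:abc"))
        );
    }

Under this reading, `origin=example.org` and `origin="example.org"` resolve to the same server name, which is the lenient behaviour the in-diff comment argues for given the ambiguity in the spec.
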
From 50b24b37c264cad5c0281ebb05eeddf6095cbdf8 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 12 Feb 2022 02:06:30 +0100 Subject: [PATCH 193/445] Upgrade Ruma --- Cargo.lock | 36 ++++++++++++++++++------------------ Cargo.toml | 2 +- src/client_server/context.rs | 9 +++------ src/client_server/search.rs | 2 +- src/error.rs | 3 +++ src/ruma_wrapper/axum.rs | 14 ++++++++------ 6 files changed, 34 insertions(+), 32 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 41105b37..042f6f3c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2081,7 +2081,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "assign", "js_int", @@ -2102,7 +2102,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "bytes", "http", @@ -2118,7 +2118,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2129,7 +2129,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "ruma-api", "ruma-common", @@ -2143,7 +2143,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.12.3" -source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "assign", "bytes", @@ -2163,7 +2163,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "indexmap", "js_int", @@ -2178,7 +2178,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "indoc", "js_int", @@ -2195,7 +2195,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = 
"git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2206,7 +2206,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "js_int", "ruma-api", @@ -2221,7 +2221,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "percent-encoding", "rand 0.8.4", @@ -2236,7 +2236,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2246,7 +2246,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "thiserror", ] @@ -2254,7 +2254,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "js_int", "ruma-api", @@ -2267,7 +2267,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "js_int", "ruma-api", @@ -2282,7 +2282,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "base64 0.13.0", "bytes", @@ -2297,7 +2297,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2308,7 +2308,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.9.0" -source = 
"git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2325,7 +2325,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.4.1" -source = "git+https://github.com/ruma/ruma?rev=f7a10a7e471b59d3096be2695c2a05d407d80df1#f7a10a7e471b59d3096be2695c2a05d407d80df1" +source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" dependencies = [ "itertools", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 6dedfa8d..8ce097d8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ tower-http = { version = "0.2.1", features = ["add-extension", "cors", "compress # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "f7a10a7e471b59d3096be2695c2a05d407d80df1", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "f130d09daabf021ad30750eed89483a0f45f820a", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/client_server/context.rs b/src/client_server/context.rs index 3d884e07..6f3e7778 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -23,13 +23,10 @@ pub async fn get_context_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); - // Load filter - let filter = body.filter.clone().unwrap_or_default(); - - let (lazy_load_enabled, lazy_load_send_redundant) = match filter.lazy_load_options { + let (lazy_load_enabled, lazy_load_send_redundant) = match &body.filter.lazy_load_options { LazyLoadOptions::Enabled { - include_redundant_members: redundant, - } => (true, redundant), + include_redundant_members, + } => (true, *include_redundant_members), _ => (false, false), }; diff --git a/src/client_server/search.rs b/src/client_server/search.rs index 78ac51ad..067eddce 100644 --- a/src/client_server/search.rs +++ b/src/client_server/search.rs @@ -17,7 +17,7 @@ pub async fn search_events_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let search_criteria = body.search_categories.room_events.as_ref().unwrap(); - let filter = search_criteria.filter.clone().unwrap_or_default(); + let filter = &search_criteria.filter; let room_ids = filter.rooms.clone().unwrap_or_else(|| 
{ db.rooms diff --git a/src/error.rs b/src/error.rs index 817ef50f..a16a3abd 100644 --- a/src/error.rs +++ b/src/error.rs @@ -77,6 +77,9 @@ pub enum Error { #[cfg(feature = "conduit_bin")] #[error("{0}")] ExtensionError(#[from] axum::extract::rejection::ExtensionRejection), + #[cfg(feature = "conduit_bin")] + #[error("{0}")] + PathError(#[from] axum::extract::rejection::PathRejection), } impl Error { diff --git a/src/ruma_wrapper/axum.rs b/src/ruma_wrapper/axum.rs index 71786191..d8e7f51a 100644 --- a/src/ruma_wrapper/axum.rs +++ b/src/ruma_wrapper/axum.rs @@ -3,7 +3,9 @@ use std::{collections::BTreeMap, iter::FromIterator, str}; use axum::{ async_trait, body::{Full, HttpBody}, - extract::{rejection::TypedHeaderRejectionReason, FromRequest, RequestParts, TypedHeader}, + extract::{ + rejection::TypedHeaderRejectionReason, FromRequest, Path, RequestParts, TypedHeader, + }, headers::{ authorization::{Bearer, Credentials}, Authorization, @@ -45,6 +47,7 @@ where let metadata = T::Incoming::METADATA; let db = DatabaseGuard::from_request(req).await?; let auth_header = Option::>>::from_request(req).await?; + let path_params = Path::>::from_request(req).await?; let query = req.uri().query().unwrap_or_default(); let query_params: QueryParams = match ruma::serde::urlencoded::from_str(query) { @@ -281,11 +284,10 @@ where debug!("{:?}", http_request); - let body = - ::try_from_http_request(http_request).map_err(|e| { - warn!("{:?}", e); - Error::BadRequest(ErrorKind::BadJson, "Failed to deserialize request.") - })?; + let body = T::Incoming::try_from_http_request(http_request, &path_params).map_err(|e| { + warn!("{:?}", e); + Error::BadRequest(ErrorKind::BadJson, "Failed to deserialize request.") + })?; Ok(Ruma { body, From ce714cfd07c95843d18a90f5596bc4597d4b5577 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 12 Feb 2022 13:20:55 +0100 Subject: [PATCH 194/445] Bump version --- Cargo.lock | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 042f6f3c..a56103e6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -388,7 +388,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" [[package]] name = "conduit" -version = "0.3.0" +version = "0.3.0-next" dependencies = [ "axum", "axum-server", diff --git a/Cargo.toml b/Cargo.toml index 8ce097d8..aa6bdbb9 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,7 +6,7 @@ authors = ["timokoesters "] homepage = "https://conduit.rs" repository = "https://gitlab.com/famedly/conduit" readme = "README.md" -version = "0.3.0" +version = "0.3.0-next" rust-version = "1.56" edition = "2021" From d74074ad53e7a02fde5c78600ea47e96bf061826 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 12 Feb 2022 15:01:28 +0100 Subject: [PATCH 195/445] Remove tracing::instrument attribute from util functions They don't ever log anything, so the extra context is never used. --- src/utils.rs | 8 -------- 1 file changed, 8 deletions(-) diff --git a/src/utils.rs b/src/utils.rs index 7142b3f0..1ad0aa3f 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -8,7 +8,6 @@ use std::{ time::{SystemTime, UNIX_EPOCH}, }; -#[tracing::instrument] pub fn millis_since_unix_epoch() -> u64 { SystemTime::now() .duration_since(UNIX_EPOCH) @@ -39,19 +38,16 @@ pub fn generate_keypair() -> Vec { } /// Parses the bytes into an u64. -#[tracing::instrument(skip(bytes))] pub fn u64_from_bytes(bytes: &[u8]) -> Result { let array: [u8; 8] = bytes.try_into()?; Ok(u64::from_be_bytes(array)) } /// Parses the bytes into a string. 
-#[tracing::instrument(skip(bytes))] pub fn string_from_bytes(bytes: &[u8]) -> Result { String::from_utf8(bytes.to_vec()) } -#[tracing::instrument(skip(length))] pub fn random_string(length: usize) -> String { thread_rng() .sample_iter(&rand::distributions::Alphanumeric) @@ -61,7 +57,6 @@ pub fn random_string(length: usize) -> String { } /// Calculate a new hash for the given password -#[tracing::instrument(skip(password))] pub fn calculate_hash(password: &str) -> Result { let hashing_config = Config { variant: Variant::Argon2id, @@ -72,7 +67,6 @@ pub fn calculate_hash(password: &str) -> Result { argon2::hash_encoded(password.as_bytes(), salt.as_bytes(), &hashing_config) } -#[tracing::instrument(skip(iterators, check_order))] pub fn common_elements( mut iterators: impl Iterator>>, check_order: impl Fn(&[u8], &[u8]) -> Ordering, @@ -100,7 +94,6 @@ pub fn common_elements( /// Fallible conversion from any value that implements `Serialize` to a `CanonicalJsonObject`. /// /// `value` must serialize to an `serde_json::Value::Object`. -#[tracing::instrument(skip(value))] pub fn to_canonical_object( value: T, ) -> Result { @@ -114,7 +107,6 @@ pub fn to_canonical_object( } } -#[tracing::instrument(skip(deserializer))] pub fn deserialize_from_str< 'de, D: serde::de::Deserializer<'de>, From adeb8ee425176643e49d1817b95d9d8cdee325e8 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 12 Feb 2022 15:03:07 +0100 Subject: [PATCH 196/445] Remove no-op conversions --- src/server_server.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index e17449e0..42e44c6c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -814,7 +814,7 @@ pub async fn send_transaction_message_route( // Check if this is a new transaction id if db .transaction_ids - .existing_txnid(&sender, None, (&*message_id).into())? + .existing_txnid(&sender, None, &message_id)? .is_some() { continue; @@ -862,7 +862,7 @@ pub async fn send_transaction_message_route( // Save transaction id with empty data db.transaction_ids - .add_txnid(&sender, None, (&*message_id).into(), &[])?; + .add_txnid(&sender, None, &message_id, &[])?; } Edu::_Custom(_) => {} } From 2a00c547a1baca5e2ca57966ef5ce5c7f063f367 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 8 Feb 2022 09:25:44 +0100 Subject: [PATCH 197/445] improvement: faster /syncs --- src/client_server/sync.rs | 63 ++++++++++++++++++++++----------------- src/database.rs | 1 + src/database/rooms.rs | 37 ++++++++++++++++++++++- 3 files changed, 72 insertions(+), 29 deletions(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 7cfea5af..1ccf7982 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -245,30 +245,41 @@ async fn sync_helper( let insert_lock = mutex_insert.lock().unwrap(); drop(insert_lock); - let mut non_timeline_pdus = db - .rooms - .pdus_until(&sender_user, &room_id, u64::MAX)? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .take_while(|(pduid, _)| { - db.rooms - .pdu_count(pduid) - .map_or(false, |count| count > since) - }); - - // Take the last 10 events for the timeline - let timeline_pdus: Vec<_> = non_timeline_pdus - .by_ref() - .take(10) - .collect::>() - .into_iter() - .rev() - .collect(); + let timeline_pdus; + let limited; + if db.rooms.last_timeline_count(&sender_user, &room_id)? 
> since { + let mut non_timeline_pdus = db + .rooms + .pdus_until(&sender_user, &room_id, u64::MAX)? + .filter_map(|r| { + // Filter out buggy events + if r.is_err() { + error!("Bad pdu in pdus_since: {:?}", r); + } + r.ok() + }) + .take_while(|(pduid, _)| { + db.rooms + .pdu_count(pduid) + .map_or(false, |count| count > since) + }); + + // Take the last 10 events for the timeline + timeline_pdus = non_timeline_pdus + .by_ref() + .take(10) + .collect::>() + .into_iter() + .rev() + .collect::>(); + + // They /sync response doesn't always return all messages, so we say the output is + // limited unless there are events in non_timeline_pdus + limited = non_timeline_pdus.next().is_some(); + } else { + timeline_pdus = Vec::new(); + limited = false; + } let send_notification_counts = !timeline_pdus.is_empty() || db @@ -277,10 +288,6 @@ async fn sync_helper( .last_privateread_update(&sender_user, &room_id)? > since; - // They /sync response doesn't always return all messages, so we say the output is - // limited unless there are events in non_timeline_pdus - let limited = non_timeline_pdus.next().is_some(); - let mut timeline_users = HashSet::new(); for (_, event) in &timeline_pdus { timeline_users.insert(event.sender.as_str().to_owned()); diff --git a/src/database.rs b/src/database.rs index 2b1671cd..8e95b1ef 100644 --- a/src/database.rs +++ b/src/database.rs @@ -263,6 +263,7 @@ impl Database { stateinfo_cache: Mutex::new(LruCache::new( (100.0 * config.conduit_cache_capacity_modifier) as usize, )), + lasttimelinecount_cache: Mutex::new(HashMap::new()), }, account_data: account_data::AccountData { roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 0abd2e79..17c9b743 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -32,7 +32,7 @@ use serde::Deserialize; use serde_json::value::to_raw_value; use std::{ borrow::Cow, - collections::{BTreeMap, HashMap, HashSet}, + collections::{hash_map, BTreeMap, HashMap, HashSet}, fmt::Debug, iter, mem::size_of, @@ -128,6 +128,7 @@ pub struct Rooms { )>, >, >, + pub(super) lasttimelinecount_cache: Mutex, u64>>, } impl Rooms { @@ -1331,6 +1332,10 @@ impl Rooms { &pdu_id, &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), )?; + self.lasttimelinecount_cache + .lock() + .unwrap() + .insert(pdu.room_id.clone(), count2); self.eventid_pduid .insert(pdu.event_id.as_bytes(), &pdu_id)?; @@ -1498,6 +1503,36 @@ impl Rooms { Ok(pdu_id) } + #[tracing::instrument(skip(self))] + pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { + match self + .lasttimelinecount_cache + .lock() + .unwrap() + .entry(room_id.to_owned()) + { + hash_map::Entry::Vacant(v) => { + if let Some(last_count) = self + .pdus_until(&sender_user, &room_id, u64::MAX)? 
+ .filter_map(|r| { + // Filter out buggy events + if r.is_err() { + error!("Bad pdu in pdus_since: {:?}", r); + } + r.ok() + }) + .map(|(pduid, _)| self.pdu_count(&pduid)) + .next() + { + Ok(*v.insert(last_count?)) + } else { + Ok(0) + } + } + hash_map::Entry::Occupied(o) => Ok(*o.get()), + } + } + #[tracing::instrument(skip(self))] pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { let mut userroom_id = user_id.as_bytes().to_vec(); From accdb773158e78801548b696121b1f60e5bf264f Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 12 Feb 2022 15:03:30 +0100 Subject: [PATCH 198/445] Clean up tracing::instrument attributes Remove it from request handler since there's already the context of the request path, added through TraceLayer. --- src/appservice_server.rs | 1 + src/client_server/account.rs | 5 ----- src/client_server/alias.rs | 3 --- src/client_server/backup.rs | 14 ------------ src/client_server/capabilities.rs | 1 - src/client_server/config.rs | 4 ---- src/client_server/context.rs | 1 - src/client_server/device.rs | 5 ----- src/client_server/directory.rs | 4 ---- src/client_server/filter.rs | 2 -- src/client_server/keys.rs | 6 ----- src/client_server/media.rs | 5 ----- src/client_server/membership.rs | 11 ---------- src/client_server/message.rs | 2 -- src/client_server/presence.rs | 2 -- src/client_server/profile.rs | 5 ----- src/client_server/push.rs | 10 --------- src/client_server/read_marker.rs | 2 -- src/client_server/redact.rs | 1 - src/client_server/report.rs | 1 - src/client_server/room.rs | 4 ---- src/client_server/search.rs | 1 - src/client_server/session.rs | 4 ---- src/client_server/state.rs | 5 ----- src/client_server/sync.rs | 1 - src/client_server/tag.rs | 3 --- src/client_server/thirdparty.rs | 1 - src/client_server/to_device.rs | 1 - src/client_server/typing.rs | 1 - src/client_server/unversioned.rs | 1 - src/client_server/user_directory.rs | 1 - src/client_server/voip.rs | 1 - src/server_server.rs | 34 +++++------------------------ 33 files changed, 7 insertions(+), 136 deletions(-) diff --git a/src/appservice_server.rs b/src/appservice_server.rs index b2154b8d..8d6d0527 100644 --- a/src/appservice_server.rs +++ b/src/appservice_server.rs @@ -4,6 +4,7 @@ use ruma::api::{IncomingResponse, OutgoingRequest, SendAccessToken}; use std::{fmt::Debug, mem, time::Duration}; use tracing::warn; +#[tracing::instrument(skip(globals, request))] pub(crate) async fn send_request( globals: &crate::database::globals::Globals, registration: serde_yaml::Value, diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 2b2e6e65..c15d820e 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -40,7 +40,6 @@ const GUEST_NAME_LENGTH: usize = 10; /// - No user or appservice on this server already claimed this username /// /// Note: This will not reserve the username, so the username might become invalid when trying to register -#[tracing::instrument(skip(db, body))] pub async fn get_register_available_route( db: DatabaseGuard, body: Ruma>, @@ -84,7 +83,6 @@ pub async fn get_register_available_route( /// - If type is not guest and no username is given: Always fails after UIAA check /// - Creates a new account and populates it with default account data /// - If `inhibit_login` is false: Creates a device and returns device id and access_token -#[tracing::instrument(skip(db, body))] pub async fn register_route( db: DatabaseGuard, body: Ruma>, @@ -267,7 +265,6 @@ pub async fn register_route( /// - Deletes 
device metadata (device id, device display name, last seen ip, last seen ts) /// - Forgets to-device events /// - Triggers device list updates -#[tracing::instrument(skip(db, body))] pub async fn change_password_route( db: DatabaseGuard, body: Ruma>, @@ -332,7 +329,6 @@ pub async fn change_password_route( /// Get user_id of the sender user. /// /// Note: Also works for Application Services -#[tracing::instrument(skip(body))] pub async fn whoami_route(body: Ruma) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(whoami::Response { @@ -350,7 +346,6 @@ pub async fn whoami_route(body: Ruma) -> Result>, diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index eecd72a4..509372c4 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -15,7 +15,6 @@ use ruma::{ /// # `PUT /_matrix/client/r0/directory/room/{roomAlias}` /// /// Creates a new room alias on this server. -#[tracing::instrument(skip(db, body))] pub async fn create_alias_route( db: DatabaseGuard, body: Ruma>, @@ -45,7 +44,6 @@ pub async fn create_alias_route( /// /// - TODO: additional access control checks /// - TODO: Update canonical alias event -#[tracing::instrument(skip(db, body))] pub async fn delete_alias_route( db: DatabaseGuard, body: Ruma>, @@ -71,7 +69,6 @@ pub async fn delete_alias_route( /// Resolve an alias locally or over federation. /// /// - TODO: Suggest more servers to join via -#[tracing::instrument(skip(db, body))] pub async fn get_alias_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/backup.rs b/src/client_server/backup.rs index acff437e..14c239b1 100644 --- a/src/client_server/backup.rs +++ b/src/client_server/backup.rs @@ -12,7 +12,6 @@ use ruma::api::client::{ /// # `POST /_matrix/client/r0/room_keys/version` /// /// Creates a new backup. -#[tracing::instrument(skip(db, body))] pub async fn create_backup_route( db: DatabaseGuard, body: Ruma, @@ -30,7 +29,6 @@ pub async fn create_backup_route( /// # `PUT /_matrix/client/r0/room_keys/version/{version}` /// /// Update information about an existing backup. Only `auth_data` can be modified. -#[tracing::instrument(skip(db, body))] pub async fn update_backup_route( db: DatabaseGuard, body: Ruma>, @@ -47,7 +45,6 @@ pub async fn update_backup_route( /// # `GET /_matrix/client/r0/room_keys/version` /// /// Get information about the latest backup version. -#[tracing::instrument(skip(db, body))] pub async fn get_latest_backup_route( db: DatabaseGuard, body: Ruma, @@ -73,7 +70,6 @@ pub async fn get_latest_backup_route( /// # `GET /_matrix/client/r0/room_keys/version` /// /// Get information about an existing backup. -#[tracing::instrument(skip(db, body))] pub async fn get_backup_route( db: DatabaseGuard, body: Ruma>, @@ -100,7 +96,6 @@ pub async fn get_backup_route( /// Delete an existing key backup. 
/// /// - Deletes both information about the backup, as well as all key data related to the backup -#[tracing::instrument(skip(db, body))] pub async fn delete_backup_route( db: DatabaseGuard, body: Ruma>, @@ -121,7 +116,6 @@ pub async fn delete_backup_route( /// - Only manipulating the most recently created version of the backup is allowed /// - Adds the keys to the backup /// - Returns the new number of keys in this backup and the etag -#[tracing::instrument(skip(db, body))] pub async fn add_backup_keys_route( db: DatabaseGuard, body: Ruma>, @@ -168,7 +162,6 @@ pub async fn add_backup_keys_route( /// - Only manipulating the most recently created version of the backup is allowed /// - Adds the keys to the backup /// - Returns the new number of keys in this backup and the etag -#[tracing::instrument(skip(db, body))] pub async fn add_backup_key_sessions_route( db: DatabaseGuard, body: Ruma>, @@ -213,7 +206,6 @@ pub async fn add_backup_key_sessions_route( /// - Only manipulating the most recently created version of the backup is allowed /// - Adds the keys to the backup /// - Returns the new number of keys in this backup and the etag -#[tracing::instrument(skip(db, body))] pub async fn add_backup_key_session_route( db: DatabaseGuard, body: Ruma>, @@ -252,7 +244,6 @@ pub async fn add_backup_key_session_route( /// # `GET /_matrix/client/r0/room_keys/keys` /// /// Retrieves all keys from the backup. -#[tracing::instrument(skip(db, body))] pub async fn get_backup_keys_route( db: DatabaseGuard, body: Ruma>, @@ -267,7 +258,6 @@ pub async fn get_backup_keys_route( /// # `GET /_matrix/client/r0/room_keys/keys/{roomId}` /// /// Retrieves all keys from the backup for a given room. -#[tracing::instrument(skip(db, body))] pub async fn get_backup_key_sessions_route( db: DatabaseGuard, body: Ruma>, @@ -284,7 +274,6 @@ pub async fn get_backup_key_sessions_route( /// # `GET /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` /// /// Retrieves a key from the backup. -#[tracing::instrument(skip(db, body))] pub async fn get_backup_key_session_route( db: DatabaseGuard, body: Ruma>, @@ -305,7 +294,6 @@ pub async fn get_backup_key_session_route( /// # `DELETE /_matrix/client/r0/room_keys/keys` /// /// Delete the keys from the backup. -#[tracing::instrument(skip(db, body))] pub async fn delete_backup_keys_route( db: DatabaseGuard, body: Ruma>, @@ -325,7 +313,6 @@ pub async fn delete_backup_keys_route( /// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}` /// /// Delete the keys from the backup for a given room. -#[tracing::instrument(skip(db, body))] pub async fn delete_backup_key_sessions_route( db: DatabaseGuard, body: Ruma>, @@ -346,7 +333,6 @@ pub async fn delete_backup_key_sessions_route( /// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` /// /// Delete a key from the backup. -#[tracing::instrument(skip(db, body))] pub async fn delete_backup_key_session_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/capabilities.rs b/src/client_server/capabilities.rs index 3f779dc3..b1e072e7 100644 --- a/src/client_server/capabilities.rs +++ b/src/client_server/capabilities.rs @@ -10,7 +10,6 @@ use std::collections::BTreeMap; /// # `GET /_matrix/client/r0/capabilities` /// /// Get information on the supported feature set and other relevent capabilities of this server. 
-#[tracing::instrument(skip(_body))] pub async fn get_capabilities_route( _body: Ruma, ) -> Result { diff --git a/src/client_server/config.rs b/src/client_server/config.rs index 14a665eb..83bb7a59 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -16,7 +16,6 @@ use serde_json::{json, value::RawValue as RawJsonValue}; /// # `PUT /_matrix/client/r0/user/{userId}/account_data/{type}` /// /// Sets some account data for the sender user. -#[tracing::instrument(skip(db, body))] pub async fn set_global_account_data_route( db: DatabaseGuard, body: Ruma>, @@ -47,7 +46,6 @@ pub async fn set_global_account_data_route( /// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}` /// /// Sets some room account data for the sender user. -#[tracing::instrument(skip(db, body))] pub async fn set_room_account_data_route( db: DatabaseGuard, body: Ruma>, @@ -78,7 +76,6 @@ pub async fn set_room_account_data_route( /// # `GET /_matrix/client/r0/user/{userId}/account_data/{type}` /// /// Gets some account data for the sender user. -#[tracing::instrument(skip(db, body))] pub async fn get_global_account_data_route( db: DatabaseGuard, body: Ruma>, @@ -100,7 +97,6 @@ pub async fn get_global_account_data_route( /// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}` /// /// Gets some room account data for the sender user. -#[tracing::instrument(skip(db, body))] pub async fn get_room_account_data_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/context.rs b/src/client_server/context.rs index 6f3e7778..167d0cc5 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -15,7 +15,6 @@ use tracing::error; /// /// - Only works if the user is joined (TODO: always allow, but only show events if the user was /// joined, depending on history_visibility) -#[tracing::instrument(skip(db, body))] pub async fn get_context_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/device.rs b/src/client_server/device.rs index e35da978..76172d21 100644 --- a/src/client_server/device.rs +++ b/src/client_server/device.rs @@ -12,7 +12,6 @@ use super::SESSION_ID_LENGTH; /// # `GET /_matrix/client/r0/devices` /// /// Get metadata on all devices of the sender user. -#[tracing::instrument(skip(db, body))] pub async fn get_devices_route( db: DatabaseGuard, body: Ruma, @@ -31,7 +30,6 @@ pub async fn get_devices_route( /// # `GET /_matrix/client/r0/devices/{deviceId}` /// /// Get metadata on a single device of the sender user. -#[tracing::instrument(skip(db, body))] pub async fn get_device_route( db: DatabaseGuard, body: Ruma>, @@ -49,7 +47,6 @@ pub async fn get_device_route( /// # `PUT /_matrix/client/r0/devices/{deviceId}` /// /// Updates the metadata on a given device of the sender user. 
-#[tracing::instrument(skip(db, body))] pub async fn update_device_route( db: DatabaseGuard, body: Ruma>, @@ -80,7 +77,6 @@ pub async fn update_device_route( /// - Deletes device metadata (device id, device display name, last seen ip, last seen ts) /// - Forgets to-device events /// - Triggers device list updates -#[tracing::instrument(skip(db, body))] pub async fn delete_device_route( db: DatabaseGuard, body: Ruma>, @@ -139,7 +135,6 @@ pub async fn delete_device_route( /// - Deletes device metadata (device id, device display name, last seen ip, last seen ts) /// - Forgets to-device events /// - Triggers device list updates -#[tracing::instrument(skip(db, body))] pub async fn delete_devices_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 0f3ae306..75601fe7 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -34,7 +34,6 @@ use tracing::{info, warn}; /// Lists the public rooms on this server. /// /// - Rooms are ordered by the number of joined members -#[tracing::instrument(skip(db, body))] pub async fn get_public_rooms_filtered_route( db: DatabaseGuard, body: Ruma>, @@ -55,7 +54,6 @@ pub async fn get_public_rooms_filtered_route( /// Lists the public rooms on this server. /// /// - Rooms are ordered by the number of joined members -#[tracing::instrument(skip(db, body))] pub async fn get_public_rooms_route( db: DatabaseGuard, body: Ruma>, @@ -83,7 +81,6 @@ pub async fn get_public_rooms_route( /// Sets the visibility of a given room in the room directory. /// /// - TODO: Access control checks -#[tracing::instrument(skip(db, body))] pub async fn set_room_visibility_route( db: DatabaseGuard, body: Ruma>, @@ -112,7 +109,6 @@ pub async fn set_room_visibility_route( /// # `GET /_matrix/client/r0/directory/list/room/{roomId}` /// /// Gets the visibility of a given room in the room directory. -#[tracing::instrument(skip(db, body))] pub async fn get_room_visibility_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/filter.rs b/src/client_server/filter.rs index 28610ec0..a606aeb4 100644 --- a/src/client_server/filter.rs +++ b/src/client_server/filter.rs @@ -9,7 +9,6 @@ use ruma::api::client::{ /// Loads a filter that was previously created. /// /// - A user can only access their own filters -#[tracing::instrument(skip(db, body))] pub async fn get_filter_route( db: DatabaseGuard, body: Ruma>, @@ -26,7 +25,6 @@ pub async fn get_filter_route( /// # `PUT /_matrix/client/r0/user/{userId}/filter` /// /// Creates a new filter to be used by other endpoints. -#[tracing::instrument(skip(db, body))] pub async fn create_filter_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index d272ff41..2ea62a87 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -27,7 +27,6 @@ use std::collections::{BTreeMap, HashMap, HashSet}; /// /// - Adds one time keys /// - If there are no device keys yet: Adds device keys (TODO: merge with existing keys?) -#[tracing::instrument(skip(db, body))] pub async fn upload_keys_route( db: DatabaseGuard, body: Ruma, @@ -72,7 +71,6 @@ pub async fn upload_keys_route( /// - Always fetches users from other servers over federation /// - Gets master keys, self-signing keys, user signing keys and device keys. 
/// - The master and self-signing keys contain signatures that the user is allowed to see -#[tracing::instrument(skip(db, body))] pub async fn get_keys_route( db: DatabaseGuard, body: Ruma>, @@ -93,7 +91,6 @@ pub async fn get_keys_route( /// # `POST /_matrix/client/r0/keys/claim` /// /// Claims one-time keys -#[tracing::instrument(skip(db, body))] pub async fn claim_keys_route( db: DatabaseGuard, body: Ruma, @@ -110,7 +107,6 @@ pub async fn claim_keys_route( /// Uploads end-to-end key information for the sender user. /// /// - Requires UIAA to verify password -#[tracing::instrument(skip(db, body))] pub async fn upload_signing_keys_route( db: DatabaseGuard, body: Ruma>, @@ -170,7 +166,6 @@ pub async fn upload_signing_keys_route( /// # `POST /_matrix/client/r0/keys/signatures/upload` /// /// Uploads end-to-end key signatures from the sender user. -#[tracing::instrument(skip(db, body))] pub async fn upload_signatures_route( db: DatabaseGuard, body: Ruma, @@ -232,7 +227,6 @@ pub async fn upload_signatures_route( /// Gets a list of users who have updated their device identity keys since the previous sync token. /// /// - TODO: left users -#[tracing::instrument(skip(db, body))] pub async fn get_key_changes_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 615f7602..dcdea05a 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -15,7 +15,6 @@ const MXC_LENGTH: usize = 32; /// # `GET /_matrix/media/r0/config` /// /// Returns max upload size. -#[tracing::instrument(skip(db, _body))] pub async fn get_media_config_route( db: DatabaseGuard, _body: Ruma, @@ -31,7 +30,6 @@ pub async fn get_media_config_route( /// /// - Some metadata will be saved in the database /// - Media will be saved in the media/ directory -#[tracing::instrument(skip(db, body))] pub async fn create_content_route( db: DatabaseGuard, body: Ruma>, @@ -101,7 +99,6 @@ pub async fn get_remote_content( /// Load media from our server or over federation. /// /// - Only allows federation if `allow_remote` is true -#[tracing::instrument(skip(db, body))] pub async fn get_content_route( db: DatabaseGuard, body: Ruma>, @@ -133,7 +130,6 @@ pub async fn get_content_route( /// Load media from our server or over federation, permitting desired filename. /// /// - Only allows federation if `allow_remote` is true -#[tracing::instrument(skip(db, body))] pub async fn get_content_as_filename_route( db: DatabaseGuard, body: Ruma>, @@ -170,7 +166,6 @@ pub async fn get_content_as_filename_route( /// Load media thumbnail from our server or over federation. 
/// /// - Only allows federation if `allow_remote` is true -#[tracing::instrument(skip(db, body))] pub async fn get_content_thumbnail_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index efdf7746..447f829e 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -42,7 +42,6 @@ use tracing::{debug, error, warn}; /// /// - If the server knowns about this room: creates the join event and does auth rules locally /// - If the server does not know about the room: asks other servers over federation -#[tracing::instrument(skip(db, body))] pub async fn join_room_by_id_route( db: DatabaseGuard, body: Ruma>, @@ -83,7 +82,6 @@ pub async fn join_room_by_id_route( /// /// - If the server knowns about this room: creates the join event and does auth rules locally /// - If the server does not know about the room: asks other servers over federation -#[tracing::instrument(skip(db, body))] pub async fn join_room_by_id_or_alias_route( db: DatabaseGuard, body: Ruma>, @@ -136,7 +134,6 @@ pub async fn join_room_by_id_or_alias_route( /// Tries to leave the sender user from a room. /// /// - This should always work if the user is currently joined. -#[tracing::instrument(skip(db, body))] pub async fn leave_room_route( db: DatabaseGuard, body: Ruma>, @@ -153,7 +150,6 @@ pub async fn leave_room_route( /// # `POST /_matrix/client/r0/rooms/{roomId}/invite` /// /// Tries to send an invite event into the room. -#[tracing::instrument(skip(db, body))] pub async fn invite_user_route( db: DatabaseGuard, body: Ruma>, @@ -172,7 +168,6 @@ pub async fn invite_user_route( /// # `POST /_matrix/client/r0/rooms/{roomId}/kick` /// /// Tries to send a kick event into the room. -#[tracing::instrument(skip(db, body))] pub async fn kick_user_route( db: DatabaseGuard, body: Ruma>, @@ -232,7 +227,6 @@ pub async fn kick_user_route( /// # `POST /_matrix/client/r0/rooms/{roomId}/ban` /// /// Tries to send a ban event into the room. -#[tracing::instrument(skip(db, body))] pub async fn ban_user_route( db: DatabaseGuard, body: Ruma>, @@ -303,7 +297,6 @@ pub async fn ban_user_route( /// # `POST /_matrix/client/r0/rooms/{roomId}/unban` /// /// Tries to send an unban event into the room. -#[tracing::instrument(skip(db, body))] pub async fn unban_user_route( db: DatabaseGuard, body: Ruma>, @@ -367,7 +360,6 @@ pub async fn unban_user_route( /// /// Note: Other devices of the user have no way of knowing the room was forgotten, so this has to /// be called from every device -#[tracing::instrument(skip(db, body))] pub async fn forget_room_route( db: DatabaseGuard, body: Ruma>, @@ -384,7 +376,6 @@ pub async fn forget_room_route( /// # `POST /_matrix/client/r0/joined_rooms` /// /// Lists all rooms the user has joined. -#[tracing::instrument(skip(db, body))] pub async fn joined_rooms_route( db: DatabaseGuard, body: Ruma, @@ -405,7 +396,6 @@ pub async fn joined_rooms_route( /// Lists all joined users in a room (TODO: at a specific point in time, with a specific membership). 
/// /// - Only works if the user is currently joined -#[tracing::instrument(skip(db, body))] pub async fn get_member_events_route( db: DatabaseGuard, body: Ruma>, @@ -437,7 +427,6 @@ pub async fn get_member_events_route( /// /// - The sender user must be in the room /// - TODO: An appservice just needs a puppet joined -#[tracing::instrument(skip(db, body))] pub async fn joined_members_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/message.rs b/src/client_server/message.rs index c5982de1..93d5b3bb 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -18,7 +18,6 @@ use std::{ /// - Is a NOOP if the txn id was already used before and returns the same event id again /// - The only requirement for the content is that it has to be valid json /// - Tries to send the event into the room, auth rules will determine if it is allowed -#[tracing::instrument(skip(db, body))] pub async fn send_message_event_route( db: DatabaseGuard, body: Ruma>, @@ -103,7 +102,6 @@ pub async fn send_message_event_route( /// /// - Only works if the user is joined (TODO: always allow, but only show events where the user was /// joined, depending on history_visibility) -#[tracing::instrument(skip(db, body))] pub async fn get_message_events_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/presence.rs b/src/client_server/presence.rs index aedff555..7549b1a7 100644 --- a/src/client_server/presence.rs +++ b/src/client_server/presence.rs @@ -5,7 +5,6 @@ use std::time::Duration; /// # `PUT /_matrix/client/r0/presence/{userId}/status` /// /// Sets the presence state of the sender user. -#[tracing::instrument(skip(db, body))] pub async fn set_presence_route( db: DatabaseGuard, body: Ruma>, @@ -47,7 +46,6 @@ pub async fn set_presence_route( /// Gets the presence state of the given user. /// /// - Only works if you share a room with the user -#[tracing::instrument(skip(db, body))] pub async fn get_presence_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index f520d2cb..33bfbb5c 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -19,7 +19,6 @@ use std::sync::Arc; /// Updates the displayname. /// /// - Also makes sure other users receive the update using presence EDUs -#[tracing::instrument(skip(db, body))] pub async fn set_displayname_route( db: DatabaseGuard, body: Ruma>, @@ -117,7 +116,6 @@ pub async fn set_displayname_route( /// Returns the displayname of the user. /// /// - If user is on another server: Fetches displayname over federation -#[tracing::instrument(skip(db, body))] pub async fn get_displayname_route( db: DatabaseGuard, body: Ruma>, @@ -150,7 +148,6 @@ pub async fn get_displayname_route( /// Updates the avatar_url and blurhash. /// /// - Also makes sure other users receive the update using presence EDUs -#[tracing::instrument(skip(db, body))] pub async fn set_avatar_url_route( db: DatabaseGuard, body: Ruma>, @@ -250,7 +247,6 @@ pub async fn set_avatar_url_route( /// Returns the avatar_url and blurhash of the user. /// /// - If user is on another server: Fetches avatar_url and blurhash over federation -#[tracing::instrument(skip(db, body))] pub async fn get_avatar_url_route( db: DatabaseGuard, body: Ruma>, @@ -285,7 +281,6 @@ pub async fn get_avatar_url_route( /// Returns the displayname, avatar_url and blurhash of the user. 
/// /// - If user is on another server: Fetches profile over federation -#[tracing::instrument(skip(db, body))] pub async fn get_profile_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 3bc46b85..67b70d28 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -15,7 +15,6 @@ use ruma::{ /// # `GET /_matrix/client/r0/pushrules` /// /// Retrieves the push rules event for this user. -#[tracing::instrument(skip(db, body))] pub async fn get_pushrules_all_route( db: DatabaseGuard, body: Ruma, @@ -38,7 +37,6 @@ pub async fn get_pushrules_all_route( /// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` /// /// Retrieves a single specified push rule for this user. -#[tracing::instrument(skip(db, body))] pub async fn get_pushrule_route( db: DatabaseGuard, body: Ruma>, @@ -91,7 +89,6 @@ pub async fn get_pushrule_route( /// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` /// /// Creates a single specified push rule for this user. -#[tracing::instrument(skip(db, body))] pub async fn set_pushrule_route( db: DatabaseGuard, body: Ruma>, @@ -188,7 +185,6 @@ pub async fn set_pushrule_route( /// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions` /// /// Gets the actions of a single specified push rule for this user. -#[tracing::instrument(skip(db, body))] pub async fn get_pushrule_actions_route( db: DatabaseGuard, body: Ruma>, @@ -245,7 +241,6 @@ pub async fn get_pushrule_actions_route( /// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions` /// /// Sets the actions of a single specified push rule for this user. -#[tracing::instrument(skip(db, body))] pub async fn set_pushrule_actions_route( db: DatabaseGuard, body: Ruma>, @@ -313,7 +308,6 @@ pub async fn set_pushrule_actions_route( /// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled` /// /// Gets the enabled status of a single specified push rule for this user. -#[tracing::instrument(skip(db, body))] pub async fn get_pushrule_enabled_route( db: DatabaseGuard, body: Ruma>, @@ -373,7 +367,6 @@ pub async fn get_pushrule_enabled_route( /// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled` /// /// Sets the enabled status of a single specified push rule for this user. -#[tracing::instrument(skip(db, body))] pub async fn set_pushrule_enabled_route( db: DatabaseGuard, body: Ruma>, @@ -446,7 +439,6 @@ pub async fn set_pushrule_enabled_route( /// # `DELETE /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` /// /// Deletes a single specified push rule for this user. -#[tracing::instrument(skip(db, body))] pub async fn delete_pushrule_route( db: DatabaseGuard, body: Ruma>, @@ -509,7 +501,6 @@ pub async fn delete_pushrule_route( /// # `GET /_matrix/client/r0/pushers` /// /// Gets all currently active pushers for the sender user. -#[tracing::instrument(skip(db, body))] pub async fn get_pushers_route( db: DatabaseGuard, body: Ruma, @@ -526,7 +517,6 @@ pub async fn get_pushers_route( /// Adds a pusher for the sender user. 
/// /// - TODO: Handle `append` -#[tracing::instrument(skip(db, body))] pub async fn set_pushers_route( db: DatabaseGuard, body: Ruma, diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs index fa2627b1..cc6928d1 100644 --- a/src/client_server/read_marker.rs +++ b/src/client_server/read_marker.rs @@ -16,7 +16,6 @@ use std::collections::BTreeMap; /// /// - Updates fully-read account data event to `fully_read` /// - If `read_receipt` is set: Update private marker and public read receipt EDU -#[tracing::instrument(skip(db, body))] pub async fn set_read_marker_route( db: DatabaseGuard, body: Ruma>, @@ -82,7 +81,6 @@ pub async fn set_read_marker_route( /// # `POST /_matrix/client/r0/rooms/{roomId}/receipt/{receiptType}/{eventId}` /// /// Sets private read marker and public read receipt EDU. -#[tracing::instrument(skip(db, body))] pub async fn create_receipt_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index 0a343e57..1e05bfe2 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -13,7 +13,6 @@ use serde_json::value::to_raw_value; /// Tries to send a redaction event into the room. /// /// - TODO: Handle txn id -#[tracing::instrument(skip(db, body))] pub async fn redact_event_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/report.rs b/src/client_server/report.rs index 680ad5a5..6274172c 100644 --- a/src/client_server/report.rs +++ b/src/client_server/report.rs @@ -9,7 +9,6 @@ use ruma::{ /// /// Reports an inappropriate event to homeserver admins /// -#[tracing::instrument(skip(db, body))] pub async fn report_event_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 4640cdab..54559e26 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -45,7 +45,6 @@ use tracing::{info, warn}; /// - Send events listed in initial state /// - Send events implied by `name` and `topic` /// - Send invite events -#[tracing::instrument(skip(db, body))] pub async fn create_room_route( db: DatabaseGuard, body: Ruma>, @@ -417,7 +416,6 @@ pub async fn create_room_route( /// Gets a single event. /// /// - You have to currently be joined to the room (TODO: Respect history visibility) -#[tracing::instrument(skip(db, body))] pub async fn get_room_event_route( db: DatabaseGuard, body: Ruma>, @@ -445,7 +443,6 @@ pub async fn get_room_event_route( /// Lists all aliases of the room. /// /// - Only users joined to the room are allowed to call this TODO: Allow any user to call it if history_visibility is world readable -#[tracing::instrument(skip(db, body))] pub async fn get_room_aliases_route( db: DatabaseGuard, body: Ruma>, @@ -478,7 +475,6 @@ pub async fn get_room_aliases_route( /// - Transfers some state events /// - Moves local aliases /// - Modifies old room power levels to prevent users from speaking -#[tracing::instrument(skip(db, body))] pub async fn upgrade_room_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/search.rs b/src/client_server/search.rs index 067eddce..5860484e 100644 --- a/src/client_server/search.rs +++ b/src/client_server/search.rs @@ -9,7 +9,6 @@ use std::collections::BTreeMap; /// Searches rooms for messages. 
/// /// - Only works if the user is currently joined to the room (TODO: Respect history visibility) -#[tracing::instrument(skip(db, body))] pub async fn search_events_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/session.rs b/src/client_server/session.rs index dbcd28cb..c2259c26 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -23,7 +23,6 @@ struct Claims { /// /// Get the supported login types of this server. One of these should be used as the `type` field /// when logging in. -#[tracing::instrument(skip(_body))] pub async fn get_login_types_route( _body: Ruma, ) -> Result { @@ -43,7 +42,6 @@ pub async fn get_login_types_route( /// /// Note: You can use [`GET /_matrix/client/r0/login`](fn.get_supported_versions_route.html) to see /// supported login types. -#[tracing::instrument(skip(db, body))] pub async fn login_route( db: DatabaseGuard, body: Ruma>, @@ -163,7 +161,6 @@ pub async fn login_route( /// - Deletes device metadata (device id, device display name, last seen ip, last seen ts) /// - Forgets to-device events /// - Triggers device list updates -#[tracing::instrument(skip(db, body))] pub async fn logout_route( db: DatabaseGuard, body: Ruma, @@ -189,7 +186,6 @@ pub async fn logout_route( /// /// Note: This is equivalent to calling [`GET /_matrix/client/r0/logout`](fn.logout_route.html) /// from each device of this user. -#[tracing::instrument(skip(db, body))] pub async fn logout_all_route( db: DatabaseGuard, body: Ruma, diff --git a/src/client_server/state.rs b/src/client_server/state.rs index acc362fa..e334e7de 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -26,7 +26,6 @@ use ruma::{ /// - The only requirement for the content is that it has to be valid json /// - Tries to send the event into the room, auth rules will determine if it is allowed /// - If event is new canonical_alias: Rejects if alias is incorrect -#[tracing::instrument(skip(db, body))] pub async fn send_state_event_for_key_route( db: DatabaseGuard, body: Ruma>, @@ -56,7 +55,6 @@ pub async fn send_state_event_for_key_route( /// - The only requirement for the content is that it has to be valid json /// - Tries to send the event into the room, auth rules will determine if it is allowed /// - If event is new canonical_alias: Rejects if alias is incorrect -#[tracing::instrument(skip(db, body))] pub async fn send_state_event_for_empty_key_route( db: DatabaseGuard, body: Ruma>, @@ -92,7 +90,6 @@ pub async fn send_state_event_for_empty_key_route( /// Get all state events for a room. /// /// - If not joined: Only works if current room history visibility is world readable -#[tracing::instrument(skip(db, body))] pub async fn get_state_events_route( db: DatabaseGuard, body: Ruma>, @@ -139,7 +136,6 @@ pub async fn get_state_events_route( /// Get single state event of a room. /// /// - If not joined: Only works if current room history visibility is world readable -#[tracing::instrument(skip(db, body))] pub async fn get_state_events_for_key_route( db: DatabaseGuard, body: Ruma>, @@ -190,7 +186,6 @@ pub async fn get_state_events_for_key_route( /// Get single state event of a room. 
/// /// - If not joined: Only works if current room history visibility is world readable -#[tracing::instrument(skip(db, body))] pub async fn get_state_events_for_empty_key_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 6410ce5d..360f015a 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -54,7 +54,6 @@ use tracing::error; /// /// - Sync is handled in an async task, multiple requests from the same device with the same /// `since` will be cached -#[tracing::instrument(skip(db, body))] pub async fn sync_events_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/tag.rs b/src/client_server/tag.rs index edf86903..29bd9a0b 100644 --- a/src/client_server/tag.rs +++ b/src/client_server/tag.rs @@ -13,7 +13,6 @@ use std::collections::BTreeMap; /// Adds a tag to the room. /// /// - Inserts the tag into the tag event of the room account data. -#[tracing::instrument(skip(db, body))] pub async fn update_tag_route( db: DatabaseGuard, body: Ruma>, @@ -51,7 +50,6 @@ pub async fn update_tag_route( /// Deletes a tag from the room. /// /// - Removes the tag from the tag event of the room account data. -#[tracing::instrument(skip(db, body))] pub async fn delete_tag_route( db: DatabaseGuard, body: Ruma>, @@ -86,7 +84,6 @@ pub async fn delete_tag_route( /// Returns tags on the room. /// /// - Gets the tag event of the room account data. -#[tracing::instrument(skip(db, body))] pub async fn get_tags_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/thirdparty.rs b/src/client_server/thirdparty.rs index 929503ed..524f3bad 100644 --- a/src/client_server/thirdparty.rs +++ b/src/client_server/thirdparty.rs @@ -6,7 +6,6 @@ use std::collections::BTreeMap; /// # `GET /_matrix/client/r0/thirdparty/protocols` /// /// TODO: Fetches all metadata about protocols supported by the homeserver. -#[tracing::instrument(skip(_body))] pub async fn get_protocols_route( _body: Ruma, ) -> Result { diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index 9f67bf00..e57998f6 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -13,7 +13,6 @@ use ruma::{ /// # `PUT /_matrix/client/r0/sendToDevice/{eventType}/{txnId}` /// /// Send a to-device event to a set of client devices. -#[tracing::instrument(skip(db, body))] pub async fn send_event_to_device_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/typing.rs b/src/client_server/typing.rs index 6c1939a7..bbc852d2 100644 --- a/src/client_server/typing.rs +++ b/src/client_server/typing.rs @@ -5,7 +5,6 @@ use ruma::api::client::r0::typing::create_typing_event; /// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}` /// /// Sets the typing state of the sender user. -#[tracing::instrument(skip(db, body))] pub async fn create_typing_event_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/unversioned.rs b/src/client_server/unversioned.rs index 65becda6..168f172a 100644 --- a/src/client_server/unversioned.rs +++ b/src/client_server/unversioned.rs @@ -13,7 +13,6 @@ use ruma::api::client::unversioned::get_supported_versions; /// /// Note: Unstable features are used while developing new features. 
Clients should avoid using /// unstable features in their stable releases -#[tracing::instrument(skip(_body))] pub async fn get_supported_versions_route( _body: Ruma, ) -> Result { diff --git a/src/client_server/user_directory.rs b/src/client_server/user_directory.rs index a3df5839..cecba7f2 100644 --- a/src/client_server/user_directory.rs +++ b/src/client_server/user_directory.rs @@ -6,7 +6,6 @@ use ruma::api::client::r0::user_directory::search_users; /// Searches all known users for a match. /// /// - TODO: Hide users that are not in any public rooms? -#[tracing::instrument(skip(db, body))] pub async fn search_users_route( db: DatabaseGuard, body: Ruma>, diff --git a/src/client_server/voip.rs b/src/client_server/voip.rs index f3262abf..e9a553a9 100644 --- a/src/client_server/voip.rs +++ b/src/client_server/voip.rs @@ -9,7 +9,6 @@ type HmacSha1 = Hmac; /// # `GET /_matrix/client/r0/voip/turnServer` /// /// TODO: Returns information about the recommended turn server. -#[tracing::instrument(skip(body, db))] pub async fn turn_server_route( db: DatabaseGuard, body: Ruma, diff --git a/src/server_server.rs b/src/server_server.rs index 42e44c6c..5c00aab2 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -301,7 +301,6 @@ where } } -#[tracing::instrument] fn get_ip_with_port(destination_str: &str) -> Option { if let Ok(destination) = destination_str.parse::() { Some(FedDest::Literal(destination)) @@ -312,7 +311,6 @@ fn get_ip_with_port(destination_str: &str) -> Option { } } -#[tracing::instrument] fn add_port_to_hostname(destination_str: &str) -> FedDest { let (host, port) = match destination_str.find(':') { None => (destination_str, ":8448"), @@ -490,7 +488,6 @@ async fn request_well_known( /// # `GET /_matrix/federation/v1/version` /// /// Get version information on this server. -#[tracing::instrument(skip(db, _body))] pub async fn get_server_version_route( db: DatabaseGuard, _body: Ruma, @@ -514,7 +511,6 @@ pub async fn get_server_version_route( /// - Matrix does not support invalidating public keys, so the key returned by this will be valid /// forever. // Response type for this endpoint is Json because we need to calculate a signature for the response -#[tracing::instrument(skip(db))] pub async fn get_server_keys_route(db: DatabaseGuard) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -564,7 +560,6 @@ pub async fn get_server_keys_route(db: DatabaseGuard) -> Result impl IntoResponse { get_server_keys_route(db).await } @@ -572,7 +567,6 @@ pub async fn get_server_keys_deprecated_route(db: DatabaseGuard) -> impl IntoRes /// # `POST /_matrix/federation/v1/publicRooms` /// /// Lists the public rooms on this server. -#[tracing::instrument(skip(db, body))] pub async fn get_public_rooms_filtered_route( db: DatabaseGuard, body: Ruma>, @@ -613,7 +607,6 @@ pub async fn get_public_rooms_filtered_route( /// # `GET /_matrix/federation/v1/publicRooms` /// /// Lists the public rooms on this server. -#[tracing::instrument(skip(db, body))] pub async fn get_public_rooms_route( db: DatabaseGuard, body: Ruma>, @@ -654,7 +647,6 @@ pub async fn get_public_rooms_route( /// # `PUT /_matrix/federation/v1/send/{txnId}` /// /// Push EDUs and PDUs to this server. 
-#[tracing::instrument(skip(db, body))] pub async fn send_transaction_message_route( db: DatabaseGuard, body: Ruma>, @@ -1075,7 +1067,7 @@ pub(crate) async fn handle_incoming_pdu<'a>( .await } -#[tracing::instrument(skip(origin, create_event, event_id, room_id, value, db, pub_key_map))] +#[tracing::instrument(skip_all)] fn handle_outlier_pdu<'a>( origin: &'a ServerName, create_event: &'a PduEvent, @@ -1237,7 +1229,7 @@ fn handle_outlier_pdu<'a>( }) } -#[tracing::instrument(skip(incoming_pdu, val, create_event, origin, db, room_id, pub_key_map))] +#[tracing::instrument(skip_all)] async fn upgrade_outlier_to_timeline_pdu( incoming_pdu: Arc, val: BTreeMap, @@ -1780,7 +1772,7 @@ async fn upgrade_outlier_to_timeline_pdu( /// b. Look at outlier pdu tree /// c. Ask origin server over federation /// d. TODO: Ask other servers over federation? -#[tracing::instrument(skip(db, origin, events, create_event, room_id, pub_key_map))] +#[tracing::instrument(skip_all)] pub(crate) fn fetch_and_handle_outliers<'a>( db: &'a Database, origin: &'a ServerName, @@ -1921,7 +1913,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( /// Search the DB for the signing keys of the given server, if we don't have them /// fetch them from the server and save to our DB. -#[tracing::instrument(skip(db, origin, signature_ids))] +#[tracing::instrument(skip_all)] pub(crate) async fn fetch_signing_keys( db: &Database, origin: &ServerName, @@ -2080,7 +2072,7 @@ pub(crate) async fn fetch_signing_keys( /// Append the incoming event setting the state snapshot to the state from the /// server that sent the event. -#[tracing::instrument(skip(db, pdu, pdu_json, new_room_leaves, state_ids_compressed, _mutex_lock))] +#[tracing::instrument(skip_all)] fn append_incoming_pdu<'a>( db: &Database, pdu: &PduEvent, @@ -2284,7 +2276,6 @@ fn get_auth_chain_inner( /// Retrieves a single event from the server. /// /// - Only works if a user of this server is currently invited or joined the room -#[tracing::instrument(skip(db, body))] pub async fn get_event_route( db: DatabaseGuard, body: Ruma>, @@ -2328,7 +2319,6 @@ pub async fn get_event_route( /// # `POST /_matrix/federation/v1/get_missing_events/{roomId}` /// /// Retrieves events that the sender is missing. -#[tracing::instrument(skip(db, body))] pub async fn get_missing_events_route( db: DatabaseGuard, body: Ruma>, @@ -2402,7 +2392,6 @@ pub async fn get_missing_events_route( /// Retrieves the auth chain for a given event. /// /// - This does not include the event itself -#[tracing::instrument(skip(db, body))] pub async fn get_event_authorization_route( db: DatabaseGuard, body: Ruma>, @@ -2451,7 +2440,6 @@ pub async fn get_event_authorization_route( /// # `GET /_matrix/federation/v1/state/{roomId}` /// /// Retrieves the current state of the room. -#[tracing::instrument(skip(db, body))] pub async fn get_room_state_route( db: DatabaseGuard, body: Ruma>, @@ -2511,7 +2499,6 @@ pub async fn get_room_state_route( /// # `GET /_matrix/federation/v1/state_ids/{roomId}` /// /// Retrieves the current state of the room. -#[tracing::instrument(skip(db, body))] pub async fn get_room_state_ids_route( db: DatabaseGuard, body: Ruma>, @@ -2560,7 +2547,6 @@ pub async fn get_room_state_ids_route( /// # `GET /_matrix/federation/v1/make_join/{roomId}/{userId}` /// /// Creates a join template. 
-#[tracing::instrument(skip(db, body))] pub async fn create_join_event_template_route( db: DatabaseGuard, body: Ruma>, @@ -2841,7 +2827,6 @@ async fn create_join_event( /// # `PUT /_matrix/federation/v1/send_join/{roomId}/{eventId}` /// /// Submits a signed join event. -#[tracing::instrument(skip(db, body))] pub async fn create_join_event_v1_route( db: DatabaseGuard, body: Ruma>, @@ -2859,7 +2844,6 @@ pub async fn create_join_event_v1_route( /// # `PUT /_matrix/federation/v2/send_join/{roomId}/{eventId}` /// /// Submits a signed join event. -#[tracing::instrument(skip(db, body))] pub async fn create_join_event_v2_route( db: DatabaseGuard, body: Ruma>, @@ -2877,7 +2861,6 @@ pub async fn create_join_event_v2_route( /// # `PUT /_matrix/federation/v2/invite/{roomId}/{eventId}` /// /// Invites a remote user to a room. -#[tracing::instrument(skip(db, body))] pub async fn create_invite_route( db: DatabaseGuard, body: Ruma>, @@ -2988,7 +2971,6 @@ pub async fn create_invite_route( /// # `GET /_matrix/federation/v1/user/devices/{userId}` /// /// Gets information on all devices of the user. -#[tracing::instrument(skip(db, body))] pub async fn get_devices_route( db: DatabaseGuard, body: Ruma>, @@ -3026,7 +3008,6 @@ pub async fn get_devices_route( /// # `GET /_matrix/federation/v1/query/directory` /// /// Resolve a room alias to a room id. -#[tracing::instrument(skip(db, body))] pub async fn get_room_information_route( db: DatabaseGuard, body: Ruma>, @@ -3052,7 +3033,6 @@ pub async fn get_room_information_route( /// # `GET /_matrix/federation/v1/query/profile` /// /// Gets information on a profile. -#[tracing::instrument(skip(db, body))] pub async fn get_profile_information_route( db: DatabaseGuard, body: Ruma>, @@ -3090,7 +3070,6 @@ pub async fn get_profile_information_route( /// # `POST /_matrix/federation/v1/user/keys/query` /// /// Gets devices and identity keys for the given users. -#[tracing::instrument(skip(db, body))] pub async fn get_keys_route( db: DatabaseGuard, body: Ruma, @@ -3119,7 +3098,6 @@ pub async fn get_keys_route( /// # `POST /_matrix/federation/v1/user/keys/claim` /// /// Claims one-time keys. 
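A brief aside on the two attribute forms involved here: most hunks drop the per-argument `skip(db, body)` lists entirely, while the few spans that are kept switch to `skip_all`. The sketch below contrasts the two forms on stand-in functions; `Db`, `Body`, and the function names are hypothetical placeholders rather than Conduit's real handler signatures, and it assumes only the `tracing` crate with its default `attributes` feature.

use tracing::instrument;

struct Db;   // hypothetical stand-in for the database guard
struct Body; // hypothetical stand-in for the request body

// skip(db, body) names each argument to leave out of the recorded span,
// so the list has to be kept in sync with the parameter list by hand.
#[instrument(skip(db, body))]
async fn with_named_skip(db: Db, body: Body) {
    let _ = (db, body); // handler body elided
}

// skip_all records no arguments at all, which is why the functions that
// stay instrumented no longer need to enumerate their parameters.
#[instrument(skip_all)]
async fn with_skip_all(db: Db, body: Body) {
    let _ = (db, body); // handler body elided
}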
-#[tracing::instrument(skip(db, body))] pub async fn claim_keys_route( db: DatabaseGuard, body: Ruma, @@ -3137,7 +3115,7 @@ pub async fn claim_keys_route( }) } -#[tracing::instrument(skip(event, pub_key_map, db))] +#[tracing::instrument(skip_all)] pub(crate) async fn fetch_required_signing_keys( event: &BTreeMap, pub_key_map: &RwLock>>, From 0ad6eac4f820c8a69dabff984f66b95abbcfb597 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sat, 12 Feb 2022 16:28:43 +0100 Subject: [PATCH 199/445] Remove all tracing::instrument attributes from database::abstraction::* --- src/database/abstraction/heed.rs | 10 ---------- src/database/abstraction/persy.rs | 10 ---------- src/database/abstraction/sled.rs | 1 - src/database/abstraction/sqlite.rs | 13 ------------- 4 files changed, 34 deletions(-) diff --git a/src/database/abstraction/heed.rs b/src/database/abstraction/heed.rs index 83dafc57..9cca0975 100644 --- a/src/database/abstraction/heed.rs +++ b/src/database/abstraction/heed.rs @@ -69,7 +69,6 @@ impl DatabaseEngine for Engine { } impl EngineTree { - #[tracing::instrument(skip(self, tree, from, backwards))] fn iter_from_thread( &self, tree: Arc, @@ -94,7 +93,6 @@ impl EngineTree { } } -#[tracing::instrument(skip(tree, txn, from, backwards))] fn iter_from_thread_work( tree: Arc, txn: &heed::RoTxn<'_>, @@ -126,7 +124,6 @@ fn iter_from_thread_work( } impl Tree for EngineTree { - #[tracing::instrument(skip(self, key))] fn get(&self, key: &[u8]) -> Result>> { let txn = self.engine.env.read_txn().map_err(convert_error)?; Ok(self @@ -136,7 +133,6 @@ impl Tree for EngineTree { .map(|s| s.to_vec())) } - #[tracing::instrument(skip(self, key, value))] fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { let mut txn = self.engine.env.write_txn().map_err(convert_error)?; self.tree @@ -147,7 +143,6 @@ impl Tree for EngineTree { Ok(()) } - #[tracing::instrument(skip(self, key))] fn remove(&self, key: &[u8]) -> Result<()> { let mut txn = self.engine.env.write_txn().map_err(convert_error)?; self.tree.delete(&mut txn, &key).map_err(convert_error)?; @@ -155,12 +150,10 @@ impl Tree for EngineTree { Ok(()) } - #[tracing::instrument(skip(self))] fn iter<'a>(&'a self) -> Box, Vec)> + Send + 'a> { self.iter_from(&[], false) } - #[tracing::instrument(skip(self, from, backwards))] fn iter_from( &self, from: &[u8], @@ -169,7 +162,6 @@ impl Tree for EngineTree { self.iter_from_thread(Arc::clone(&self.tree), from.to_vec(), backwards) } - #[tracing::instrument(skip(self, key))] fn increment(&self, key: &[u8]) -> Result> { let mut txn = self.engine.env.write_txn().map_err(convert_error)?; @@ -186,7 +178,6 @@ impl Tree for EngineTree { Ok(new) } - #[tracing::instrument(skip(self, prefix))] fn scan_prefix<'a>( &'a self, prefix: Vec, @@ -197,7 +188,6 @@ impl Tree for EngineTree { ) } - #[tracing::instrument(skip(self, prefix))] fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { self.watchers.watch(prefix) } diff --git a/src/database/abstraction/persy.rs b/src/database/abstraction/persy.rs index 628cf32b..e78e731d 100644 --- a/src/database/abstraction/persy.rs +++ b/src/database/abstraction/persy.rs @@ -62,7 +62,6 @@ impl PersyTree { } impl Tree for PersyTree { - #[tracing::instrument(skip(self, key))] fn get(&self, key: &[u8]) -> Result>> { let result = self .persy @@ -72,14 +71,12 @@ impl Tree for PersyTree { Ok(result) } - #[tracing::instrument(skip(self, key, value))] fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { self.insert_batch(&mut Some((key.to_owned(), value.to_owned())).into_iter())?; 
self.watchers.wake(key); Ok(()) } - #[tracing::instrument(skip(self, iter))] fn insert_batch<'a>(&self, iter: &mut dyn Iterator, Vec)>) -> Result<()> { let mut tx = self.begin()?; for (key, value) in iter { @@ -93,7 +90,6 @@ impl Tree for PersyTree { Ok(()) } - #[tracing::instrument(skip(self, iter))] fn increment_batch<'a>(&self, iter: &mut dyn Iterator>) -> Result<()> { let mut tx = self.begin()?; for key in iter { @@ -108,7 +104,6 @@ impl Tree for PersyTree { Ok(()) } - #[tracing::instrument(skip(self, key))] fn remove(&self, key: &[u8]) -> Result<()> { let mut tx = self.begin()?; tx.remove::(&self.name, ByteVec::from(key), None)?; @@ -116,7 +111,6 @@ impl Tree for PersyTree { Ok(()) } - #[tracing::instrument(skip(self))] fn iter<'a>(&'a self) -> Box, Vec)> + 'a> { let iter = self.persy.range::(&self.name, ..); match iter { @@ -132,7 +126,6 @@ impl Tree for PersyTree { } } - #[tracing::instrument(skip(self, from, backwards))] fn iter_from<'a>( &'a self, from: &[u8], @@ -165,13 +158,11 @@ impl Tree for PersyTree { } } - #[tracing::instrument(skip(self, key))] fn increment(&self, key: &[u8]) -> Result> { self.increment_batch(&mut Some(key.to_owned()).into_iter())?; Ok(self.get(key)?.unwrap()) } - #[tracing::instrument(skip(self, prefix))] fn scan_prefix<'a>( &'a self, prefix: Vec, @@ -200,7 +191,6 @@ impl Tree for PersyTree { } } - #[tracing::instrument(skip(self, prefix))] fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { self.watchers.watch(prefix) } diff --git a/src/database/abstraction/sled.rs b/src/database/abstraction/sled.rs index 35ba1b29..87defc57 100644 --- a/src/database/abstraction/sled.rs +++ b/src/database/abstraction/sled.rs @@ -39,7 +39,6 @@ impl Tree for SledEngineTree { Ok(()) } - #[tracing::instrument(skip(self, iter))] fn insert_batch<'a>(&self, iter: &mut dyn Iterator, Vec)>) -> Result<()> { for (key, value) in iter { self.0.insert(key, value)?; diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index d4aab7dd..730c1bca 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -134,7 +134,6 @@ pub struct SqliteTable { type TupleOfBytes = (Vec, Vec); impl SqliteTable { - #[tracing::instrument(skip(self, guard, key))] fn get_with_guard(&self, guard: &Connection, key: &[u8]) -> Result>> { //dbg!(&self.name); Ok(guard @@ -143,7 +142,6 @@ impl SqliteTable { .optional()?) 
} - #[tracing::instrument(skip(self, guard, key, value))] fn insert_with_guard(&self, guard: &Connection, key: &[u8], value: &[u8]) -> Result<()> { //dbg!(&self.name); guard.execute( @@ -192,12 +190,10 @@ impl SqliteTable { } impl Tree for SqliteTable { - #[tracing::instrument(skip(self, key))] fn get(&self, key: &[u8]) -> Result>> { self.get_with_guard(self.engine.read_lock(), key) } - #[tracing::instrument(skip(self, key, value))] fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { let guard = self.engine.write_lock(); self.insert_with_guard(&guard, key, value)?; @@ -206,7 +202,6 @@ impl Tree for SqliteTable { Ok(()) } - #[tracing::instrument(skip(self, iter))] fn insert_batch<'a>(&self, iter: &mut dyn Iterator, Vec)>) -> Result<()> { let guard = self.engine.write_lock(); @@ -221,7 +216,6 @@ impl Tree for SqliteTable { Ok(()) } - #[tracing::instrument(skip(self, iter))] fn increment_batch<'a>(&self, iter: &mut dyn Iterator>) -> Result<()> { let guard = self.engine.write_lock(); @@ -239,7 +233,6 @@ impl Tree for SqliteTable { Ok(()) } - #[tracing::instrument(skip(self, key))] fn remove(&self, key: &[u8]) -> Result<()> { let guard = self.engine.write_lock(); @@ -251,14 +244,12 @@ impl Tree for SqliteTable { Ok(()) } - #[tracing::instrument(skip(self))] fn iter<'a>(&'a self) -> Box + 'a> { let guard = self.engine.read_lock_iterator(); self.iter_with_guard(guard) } - #[tracing::instrument(skip(self, from, backwards))] fn iter_from<'a>( &'a self, from: &[u8], @@ -323,7 +314,6 @@ impl Tree for SqliteTable { } } - #[tracing::instrument(skip(self, key))] fn increment(&self, key: &[u8]) -> Result> { let guard = self.engine.write_lock(); @@ -337,7 +327,6 @@ impl Tree for SqliteTable { Ok(new) } - #[tracing::instrument(skip(self, prefix))] fn scan_prefix<'a>(&'a self, prefix: Vec) -> Box + 'a> { Box::new( self.iter_from(&prefix, false) @@ -345,12 +334,10 @@ impl Tree for SqliteTable { ) } - #[tracing::instrument(skip(self, prefix))] fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { self.watchers.watch(prefix) } - #[tracing::instrument(skip(self))] fn clear(&self) -> Result<()> { debug!("clear: running"); self.engine From 0ed1e42aed9c88d467f05252177df18a69a0fae1 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Sat, 12 Feb 2022 21:01:53 +0100 Subject: [PATCH 200/445] update ruma --- Cargo.lock | 37 +++++++++++++++++++------------------ Cargo.toml | 2 +- 2 files changed, 20 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index a56103e6..dbc29ad9 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2081,7 +2081,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "assign", "js_int", @@ -2102,7 +2102,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "bytes", "http", @@ -2113,12 +2113,13 @@ dependencies = [ "serde", "serde_json", "thiserror", + "tracing", ] [[package]] name = "ruma-api-macros" version = "0.18.5" -source = 
"git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2129,7 +2130,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "ruma-api", "ruma-common", @@ -2143,7 +2144,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.12.3" -source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "assign", "bytes", @@ -2163,7 +2164,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "indexmap", "js_int", @@ -2178,7 +2179,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "indoc", "js_int", @@ -2195,7 +2196,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2206,7 +2207,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "js_int", "ruma-api", @@ -2221,7 +2222,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "percent-encoding", "rand 0.8.4", @@ -2236,7 +2237,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2246,7 +2247,7 @@ 
dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "thiserror", ] @@ -2254,7 +2255,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "js_int", "ruma-api", @@ -2267,7 +2268,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "js_int", "ruma-api", @@ -2282,7 +2283,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "base64 0.13.0", "bytes", @@ -2297,7 +2298,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2308,7 +2309,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2325,7 +2326,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.4.1" -source = "git+https://github.com/ruma/ruma?rev=f130d09daabf021ad30750eed89483a0f45f820a#f130d09daabf021ad30750eed89483a0f45f820a" +source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" dependencies = [ "itertools", "js_int", diff --git a/Cargo.toml b/Cargo.toml index aa6bdbb9..bcdf01ae 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ tower-http = { version = "0.2.1", features = ["add-extension", "cors", "compress # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "f130d09daabf021ad30750eed89483a0f45f820a", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = 
"https://github.com/ruma/ruma", rev = "f72d6601fcf2ce4382a7c02b740d60a6e803f4d9", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } From 35b82d51cf3a76ab2ab2c240c061a9b421f046d7 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Sat, 12 Feb 2022 21:04:38 +0100 Subject: [PATCH 201/445] fix compilations --- src/appservice_server.rs | 8 ++++++-- src/database/pusher.rs | 8 ++++++-- src/server_server.rs | 9 +++++++-- 3 files changed, 19 insertions(+), 6 deletions(-) diff --git a/src/appservice_server.rs b/src/appservice_server.rs index 8d6d0527..ce122dad 100644 --- a/src/appservice_server.rs +++ b/src/appservice_server.rs @@ -1,6 +1,6 @@ use crate::{utils, Error, Result}; use bytes::BytesMut; -use ruma::api::{IncomingResponse, OutgoingRequest, SendAccessToken}; +use ruma::api::{IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken}; use std::{fmt::Debug, mem, time::Duration}; use tracing::warn; @@ -17,7 +17,11 @@ where let hs_token = registration.get("hs_token").unwrap().as_str().unwrap(); let mut http_request = request - .try_into_http_request::(destination, SendAccessToken::IfRequired("")) + .try_into_http_request::( + destination, + SendAccessToken::IfRequired(""), + &[MatrixVersion::V1_0], + ) .unwrap() .map(|body| body.freeze()); diff --git a/src/database/pusher.rs b/src/database/pusher.rs index e73ab061..bc7017b0 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -7,7 +7,7 @@ use ruma::{ self, v1::{Device, Notification, NotificationCounts, NotificationPriority}, }, - IncomingResponse, OutgoingRequest, SendAccessToken, + IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, }, events::{ room::{name::RoomNameEventContent, power_levels::RoomPowerLevelsEventContent}, @@ -101,7 +101,11 @@ where let destination = destination.replace("/_matrix/push/v1/notify", ""); let http_request = request - .try_into_http_request::(&destination, SendAccessToken::IfRequired("")) + .try_into_http_request::( + &destination, + SendAccessToken::IfRequired(""), + &[MatrixVersion::V1_0], + ) .map_err(|e| { warn!("Failed to find destination {}: {}", destination, e); Error::BadServerResponse("Invalid destination") diff --git a/src/server_server.rs b/src/server_server.rs index 5c00aab2..39210555 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -34,7 +34,8 @@ use ruma::{ send_transaction_message, }, }, - EndpointError, IncomingResponse, OutgoingRequest, OutgoingResponse, SendAccessToken, + EndpointError, IncomingResponse, MatrixVersion, OutgoingRequest, OutgoingResponse, + SendAccessToken, }, directory::{IncomingFilter, IncomingRoomNetwork}, events::{ @@ -155,7 +156,11 @@ where let actual_destination_str = actual_destination.clone().into_https_string(); let mut http_request = request - .try_into_http_request::>(&actual_destination_str, SendAccessToken::IfRequired("")) + .try_into_http_request::>( + &actual_destination_str, + 
SendAccessToken::IfRequired(""), + &[MatrixVersion::V1_0], + ) .map_err(|e| { warn!( "Failed to find destination {}: {}", From d4217007fe311896e8e685e0c95c50a50336d486 Mon Sep 17 00:00:00 2001 From: M0dEx Date: Sat, 12 Feb 2022 21:40:07 +0100 Subject: [PATCH 202/445] fix: do not panic on a JSON not containing the PDU Do not panic on a JSON not containing the PDU when executing the parse-pdu admin command. --- src/database/admin.rs | 36 +++++++++++++++++++----------------- 1 file changed, 19 insertions(+), 17 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index 50fac3e0..f9d4f425 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -354,24 +354,26 @@ fn process_admin_command( let string = body[1..body.len() - 1].join("\n"); match serde_json::from_str(&string) { Ok(value) => { - let event_id = EventId::parse(format!( - "${}", - // Anything higher than version3 behaves the same - ruma::signatures::reference_hash(&value, &RoomVersionId::V6) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - match serde_json::from_value::( - serde_json::to_value(value).expect("value is json"), - ) { - Ok(pdu) => RoomMessageEventContent::text_plain(format!( - "EventId: {:?}\n{:#?}", - event_id, pdu - )), + match ruma::signatures::reference_hash(&value, &RoomVersionId::V6) { + Ok(hash) => { + let event_id = EventId::parse(format!("${}", hash)); + + match serde_json::from_value::( + serde_json::to_value(value).expect("value is json"), + ) { + Ok(pdu) => RoomMessageEventContent::text_plain(format!( + "EventId: {:?}\n{:#?}", + event_id, pdu + )), + Err(e) => RoomMessageEventContent::text_plain(format!( + "EventId: {:?}\nCould not parse event: {}", + event_id, e + )), + } + } Err(e) => RoomMessageEventContent::text_plain(format!( - "EventId: {:?}\nCould not parse event: {}", - event_id, e + "Could not parse PDU JSON: {:?}", + e )), } } From b8d92d3cec4905265c1ef6aa9b03f1433e7d5637 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Sun, 13 Feb 2022 12:07:00 +0100 Subject: [PATCH 203/445] take advantage of multiple paths --- Cargo.lock | 36 ++++++++++++++++++------------------ Cargo.toml | 2 +- src/main.rs | 14 ++++++++++---- 3 files changed, 29 insertions(+), 23 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index dbc29ad9..e7ffe5bd 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2081,7 +2081,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "assign", "js_int", @@ -2102,7 +2102,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "bytes", "http", @@ -2119,7 +2119,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "proc-macro-crate", 
"proc-macro2", @@ -2130,7 +2130,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "ruma-api", "ruma-common", @@ -2144,7 +2144,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.12.3" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "assign", "bytes", @@ -2164,7 +2164,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "indexmap", "js_int", @@ -2179,7 +2179,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "indoc", "js_int", @@ -2196,7 +2196,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2207,7 +2207,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "js_int", "ruma-api", @@ -2222,7 +2222,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "percent-encoding", "rand 0.8.4", @@ -2237,7 +2237,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2247,7 +2247,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = 
"git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "thiserror", ] @@ -2255,7 +2255,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "js_int", "ruma-api", @@ -2268,7 +2268,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "js_int", "ruma-api", @@ -2283,7 +2283,7 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "base64 0.13.0", "bytes", @@ -2298,7 +2298,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2309,7 +2309,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2326,7 +2326,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.4.1" -source = "git+https://github.com/ruma/ruma?rev=f72d6601fcf2ce4382a7c02b740d60a6e803f4d9#f72d6601fcf2ce4382a7c02b740d60a6e803f4d9" +source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" dependencies = [ "itertools", "js_int", diff --git a/Cargo.toml b/Cargo.toml index bcdf01ae..ab7b47d0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ tower-http = { version = "0.2.1", features = ["add-extension", "cors", "compress # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "f72d6601fcf2ce4382a7c02b740d60a6e803f4d9", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "aed09886946f8817a478981cae1b6b8b5d4e7b7d", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", 
"unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/main.rs b/src/main.rs index 22ddf3e0..828d7dc6 100644 --- a/src/main.rs +++ b/src/main.rs @@ -387,13 +387,19 @@ macro_rules! impl_ruma_handler { E: IntoResponse, $( $ty: FromRequest + Send + 'static, )* { - fn add_to_router(self, router: Router) -> Router { + fn add_to_router(self, mut router: Router) -> Router { let meta = Req::Incoming::METADATA; let method_filter = method_to_filter(meta.method); - router.route(meta.path, on(method_filter, |$( $ty: $ty, )* req| async move { - self($($ty,)* req).await.map(RumaResponse) - })) + for path in IntoIterator::into_iter([meta.unstable_path, meta.r0_path, meta.stable_path]).flatten() { + let this = self.clone(); + + router = router.route(path, on(method_filter, |$( $ty: $ty, )* req| async move { + this($($ty,)* req).await.map(RumaResponse) + })) + } + + router } } }; From aee6bf7e7aedb250911f79f43d56bac934c64381 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sun, 13 Feb 2022 11:30:04 +0000 Subject: [PATCH 204/445] Change this to handler --- src/main.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/main.rs b/src/main.rs index 828d7dc6..a96bef50 100644 --- a/src/main.rs +++ b/src/main.rs @@ -392,10 +392,10 @@ macro_rules! impl_ruma_handler { let method_filter = method_to_filter(meta.method); for path in IntoIterator::into_iter([meta.unstable_path, meta.r0_path, meta.stable_path]).flatten() { - let this = self.clone(); + let handler = self.clone(); router = router.route(path, on(method_filter, |$( $ty: $ty, )* req| async move { - this($($ty,)* req).await.map(RumaResponse) + handler($($ty,)* req).await.map(RumaResponse) })) } From 3aece38e9dfab762efc52afe066ce418c36e673a Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Sun, 13 Feb 2022 13:59:27 +0100 Subject: [PATCH 205/445] Add a not-found route --- src/main.rs | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/src/main.rs b/src/main.rs index a96bef50..6aa08704 100644 --- a/src/main.rs +++ b/src/main.rs @@ -11,6 +11,7 @@ use std::{future::Future, io, net::SocketAddr, sync::Arc, time::Duration}; use axum::{ extract::{FromRequest, MatchedPath}, + handler::Handler, response::IntoResponse, routing::{get, on, MethodFilter}, Router, @@ -22,10 +23,13 @@ use figment::{ }; use http::{ header::{self, HeaderName}, - Method, + Method, Uri, }; use opentelemetry::trace::{FutureExt, Tracer}; -use ruma::{api::IncomingRequest, Outgoing}; +use ruma::{ + api::{client::error::ErrorKind, IncomingRequest}, + Outgoing, +}; use tokio::{signal, sync::RwLock}; use tower::ServiceBuilder; use tower_http::{ @@ -321,6 +325,7 @@ fn routes() -> Router { .ruma_route(server_server::get_profile_information_route) .ruma_route(server_server::get_keys_route) .ruma_route(server_server::claim_keys_route) + .fallback(not_found.into_service()) } async fn shutdown_signal(handle: ServerHandle) { @@ -349,6 +354,10 @@ async fn shutdown_signal(handle: ServerHandle) { handle.graceful_shutdown(Some(Duration::from_secs(30))); } +async fn 
not_found(_uri: Uri) -> impl IntoResponse { + Error::BadRequest(ErrorKind::NotFound, "Unknown or unimplemented route") +} + trait RouterExt { fn ruma_route(self, handler: H) -> Self where From 6602f6114c59e47d0c2ff605493a0f7e4ffeba3b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 10 Feb 2022 20:59:11 +0100 Subject: [PATCH 206/445] fix: redacts can't error anymore --- src/database/rooms.rs | 8 ++------ src/server_server.rs | 10 ++++++++-- 2 files changed, 10 insertions(+), 8 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 1f6b4316..c751167a 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -2147,13 +2147,9 @@ impl Rooms { .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; pdu.redact(reason)?; self.replace_pdu(&pdu_id, &pdu)?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "Event ID does not exist.", - )) } + // If event does not exist, just noop + Ok(()) } /// Update current membership data. diff --git a/src/server_server.rs b/src/server_server.rs index 39210555..9f0e922b 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1593,7 +1593,10 @@ async fn upgrade_outlier_to_timeline_pdu( soft_fail, &state_lock, ) - .map_err(|_| "Failed to add pdu to db.".to_owned())?; + .map_err(|e| { + warn!("Failed to add pdu to db: {}", e); + "Failed to add pdu to db.".to_owned() + })?; // Soft fail, we keep the event as an outlier but don't add it to the timeline warn!("Event was soft failed: {:?}", incoming_pdu); @@ -1759,7 +1762,10 @@ async fn upgrade_outlier_to_timeline_pdu( soft_fail, &state_lock, ) - .map_err(|_| "Failed to add pdu to db.".to_owned())?; + .map_err(|e| { + warn!("Failed to add pdu to db: {}", e); + "Failed to add pdu to db.".to_owned() + })?; debug!("Appended incoming pdu."); From 77f4b68c8e9f610a6960daaaf6502a7da9936130 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Tue, 15 Feb 2022 11:17:32 +0100 Subject: [PATCH 207/445] fix(ci): Also create versioned docker image --- .gitlab-ci.yml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 8d701c2a..40716fa3 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -220,6 +220,20 @@ docker:master:dockerhub: variables: TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:latest" +docker:tags:gitlab: + extends: .docker-shared-settings + rules: + - if: "$CI_COMMIT_TAG" + variables: + TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:$CI_COMMIT_TAG" + +docker:tags:dockerhub: + extends: .docker-shared-settings + rules: + - if: "$CI_COMMIT_TAG && $DOCKER_HUB" + variables: + TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:$CI_COMMIT_TAG" + # --------------------------------------------------------------------- # # Run tests # # --------------------------------------------------------------------- # From 2645494582f75a8b51391f0b270d5131ba59df34 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Tue, 15 Feb 2022 11:17:46 +0100 Subject: [PATCH 208/445] fix(ci): Also run CI for git tags --- .gitlab-ci.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 40716fa3..71511ef5 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -327,3 +327,4 @@ workflow: - if: "$CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS" when: never - if: "$CI_COMMIT_BRANCH" + - if: "$CI_COMMIT_TAG" From b21a44ca4cd5c5064f2991f62bd3c48074c4148b Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Tue, 15 Feb 2022 18:33:20 +0100 Subject: [PATCH 209/445] feat(ci): Lint dockerfiles with 
hadolint --- .gitlab-ci.yml | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 71511ef5..bd4ce791 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -272,6 +272,7 @@ test:sytest: tags: ["docker"] variables: PLUGINS: "https://github.com/valkum/sytest_conduit/archive/master.tar.gz" + interruptible: true before_script: - "mkdir -p /app" - "cp ./conduit-debug-x86_64-unknown-linux-musl /app/conduit" @@ -292,6 +293,40 @@ test:sytest: reports: junit: "$CI_PROJECT_DIR/sytest.xml" +test:dockerlint: + stage: "test" + needs: [] + image: "ghcr.io/hadolint/hadolint:latest-alpine" + interruptible: true + script: + # First pass: Print for CI log: + - > + hadolint + --no-fail --verbose + ./Dockerfile + ./docker/ci-binaries-packaging.Dockerfile + # Then output the results into a json for GitLab to pretty-print this in the MR: + - > + hadolint + --format gitlab_codeclimate + --failure-threshold error + ./Dockerfile + ./docker/ci-binaries-packaging.Dockerfile > dockerlint.json + artifacts: + when: always + reports: + codequality: dockerlint.json + paths: + - dockerlint.json + rules: + - if: '$CI_COMMIT_REF_NAME != "master"' + changes: + - docker/*Dockerfile + - Dockerfile + - .gitlab-ci.yml + - if: '$CI_COMMIT_REF_NAME == "master"' + - if: '$CI_COMMIT_REF_NAME == "next"' + # --------------------------------------------------------------------- # # Store binaries as package so they have download urls # # --------------------------------------------------------------------- # From de6c3312ceca9d0f9c0d2041c16a46d6b538b2a6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 12 Feb 2022 10:29:04 +0100 Subject: [PATCH 210/445] docs: make all configs match --- DEPLOY.md | 27 +++++++++++----------- Dockerfile | 15 +++++++++--- conduit-example.toml | 18 +++------------ debian/postinst | 38 ++++++++++++++++--------------- docker-compose.yml | 31 ++++++++++++------------- docker/README.md | 2 +- docker/docker-compose.traefik.yml | 31 ++++++++++--------------- 7 files changed, 76 insertions(+), 86 deletions(-) diff --git a/DEPLOY.md b/DEPLOY.md index eecf5136..0657c0c0 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -93,24 +93,30 @@ to read it. You need to change at least the server name.** ```toml [global] -# The server_name is the name of this server. It is used as a suffix for user +# The server_name is the pretty name of this server. It is used as a suffix for user # and room ids. Examples: matrix.org, conduit.rs -# The Conduit server needs to be reachable at https://your.server.name/ on port -# 443 (client-server) and 8448 (federation) OR you can create /.well-known -# files to redirect requests. See + +# The Conduit server needs all /_matrix/ requests to be reachable at +# https://your.server.name/ on port 443 (client-server) and 8448 (federation). + +# If that's not possible for you, you can create /.well-known files to redirect +# requests. See # https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client -# and https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server +# and +# https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server # for more information # YOU NEED TO EDIT THIS #server_name = "your.server.name" # This is the only directory where Conduit will save its data -database_path = "/var/lib/matrix-conduit/conduit_db" +database_path = "/var/lib/matrix-conduit/" +database_backend = "rocksdb" # The port Conduit will be running on. 
You need to set up a reverse proxy in # your web server (e.g. apache or nginx), so all requests to /_matrix on port # 443 and 8448 will be forwarded to the Conduit instance running on this port +# Docker users: Don't change this, you'll need to map an external port to this. port = 6167 # Max size for uploads @@ -119,20 +125,15 @@ max_request_size = 20_000_000 # in bytes # Enables registration. If set to false, no users can register on this server. allow_registration = true -# Disable encryption, so no new encrypted rooms can be created -# Note: existing rooms will continue to work -allow_encryption = true allow_federation = true trusted_servers = ["matrix.org"] #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time -#workers = 4 # default: cpu core count * 2 +#log = "info,state_res=warn,rocket=off,_=off,sled=off" address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy - -# The total amount of memory that the database will use. -#db_cache_capacity_mb = 200 +#address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it. ``` ## Setting the correct file permissions diff --git a/Dockerfile b/Dockerfile index b631f297..34a07665 100644 --- a/Dockerfile +++ b/Dockerfile @@ -35,9 +35,18 @@ FROM docker.io/debian:bullseye-slim AS runner # You still need to map the port when using the docker command or docker-compose. EXPOSE 6167 -# Note from @jfowl: I would like to remove the config file in the future and just have the Docker version be configured with envs. -ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" \ - CONDUIT_PORT=6167 +ENV CONDUIT_SERVER_NAME=your.server.name # EDIT THIS +ENV CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit +ENV CONDUIT_DATABASE_BACKEND=rocksdb +ENV CONDUIT_PORT=6167 +ENV CONDUIT_MAX_REQUEST_SIZE=20_000_000 # in bytes, ~20 MB +ENV CONDUIT_ALLOW_REGISTRATION=true +ENV CONDUIT_ALLOW_FEDERATION=true +ENV CONDUIT_TRUSTED_SERVERS=["matrix.org"] +#ENV CONDUIT_MAX_CONCURRENT_REQUESTS=100 +#ENV CONDUIT_LOG=info,rocket=off,_=off,sled=off +ENV CONDUIT_ADDRESS=0.0.0.0 +ENV CONDUIT_CONFIG='' # Ignore this # Conduit needs: # ca-certificates: for https diff --git a/conduit-example.toml b/conduit-example.toml index c22c8622..23c18446 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -16,7 +16,7 @@ #server_name = "your.server.name" # This is the only directory where Conduit will save its data -database_path = "/var/lib/conduit/" +database_path = "/var/lib/matrix-conduit/" database_backend = "rocksdb" # The port Conduit will be running on. You need to set up a reverse proxy in @@ -31,24 +31,12 @@ max_request_size = 20_000_000 # in bytes # Enables registration. If set to false, no users can register on this server. 
allow_registration = true -# Disable encryption, so no new encrypted rooms can be created -# Note: existing rooms will continue to work -#allow_encryption = false -#allow_federation = false - -# Enable jaeger to support monitoring and troubleshooting through jaeger -#allow_jaeger = false +allow_federation = true trusted_servers = ["matrix.org"] #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time -#log = "info,state_res=warn,_=off,sled=off" -#workers = 4 # default: cpu core count * 2 +#log = "info,state_res=warn,rocket=off,_=off,sled=off" address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy #address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it. - -proxy = "none" # more examples can be found at src/database/proxy.rs:6 - -# The total amount of memory that the database will use. -#db_cache_capacity_mb = 200 diff --git a/debian/postinst b/debian/postinst index 29a93676..10d5561c 100644 --- a/debian/postinst +++ b/debian/postinst @@ -36,18 +36,24 @@ case "$1" in mkdir -p "$CONDUIT_CONFIG_PATH" cat > "$CONDUIT_CONFIG_FILE" << EOF [global] -# The server_name is the name of this server. It is used as a suffix for user -# and room ids. Examples: matrix.org, conduit.rs -# The Conduit server needs to be reachable at https://your.server.name/ on port -# 443 (client-server) and 8448 (federation) OR you can create /.well-known -# files to redirect requests. See +# The server_name is the pretty name of this server. It is used as a suffix for +# user and room ids. Examples: matrix.org, conduit.rs + +# The Conduit server needs all /_matrix/ requests to be reachable at +# https://your.server.name/ on port 443 (client-server) and 8448 (federation). + +# If that's not possible for you, you can create /.well-known files to redirect +# requests. See # https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client -# and https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server -# for more information. +# and +# https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server +# for more information + server_name = "${CONDUIT_SERVER_NAME}" # This is the only directory where Conduit will save its data. database_path = "${CONDUIT_DATABASE_PATH}" +database_backend = "rocksdb" # The address Conduit will be listening on. # By default the server listens on address 0.0.0.0. Change this to 127.0.0.1 to @@ -56,7 +62,8 @@ address = "${CONDUIT_ADDRESS}" # The port Conduit will be running on. You need to set up a reverse proxy in # your web server (e.g. apache or nginx), so all requests to /_matrix on port -# 443 and 8448 will be forwarded to the Conduit instance running on this port. +# 443 and 8448 will be forwarded to the Conduit instance running on this port +# Docker users: Don't change this, you'll need to map an external port to this. port = ${CONDUIT_PORT} # Max size for uploads @@ -65,20 +72,15 @@ max_request_size = 20_000_000 # in bytes # Enables registration. If set to false, no users can register on this server. allow_registration = true -# Disable encryption, so no new encrypted rooms can be created. -# Note: Existing rooms will continue to work. -#allow_encryption = false -#allow_federation = false +allow_federation = true -# Enable jaeger to support monitoring and troubleshooting through jaeger. 
-#allow_jaeger = false +trusted_servers = ["matrix.org"] #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time -#log = "info,state_res=warn,_=off,sled=off" -#workers = 4 # default: cpu core count * 2 +#log = "info,state_res=warn,rocket=off,_=off,sled=off" -# The total amount of memory that the database will use. -#db_cache_capacity_mb = 200 +address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy +#address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it. EOF fi ;; diff --git a/docker-compose.yml b/docker-compose.yml index 88d5c3f6..5a17a8d2 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -20,27 +20,24 @@ services: ports: - 8448:6167 volumes: - - db:/srv/conduit/.local/share/conduit + - db:/var/lib/matrix-conduit/ ### Uncomment if you want to use conduit.toml to configure Conduit ### Note: Set env vars will override conduit.toml values # - ./conduit.toml:/srv/conduit/conduit.toml environment: - CONDUIT_SERVER_NAME: localhost:6167 # replace with your own name - CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' + CONDUIT_SERVER_NAME: your.server.name # EDIT THIS + CONDUIT_DATABASE_PATH: /var/lib/matrix-conduit/ + CONDUIT_DATABASE_BACKEND: rocksdb + CONDUIT_PORT: 6167 + CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB CONDUIT_ALLOW_REGISTRATION: 'true' - ### Uncomment and change values as desired - # CONDUIT_ADDRESS: 0.0.0.0 - # CONDUIT_PORT: 6167 - # CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string '' - # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging - # CONDUIT_LOG: info # default is: "info,_=off,sled=off" - # CONDUIT_ALLOW_JAEGER: 'false' - # CONDUIT_ALLOW_ENCRYPTION: 'false' - # CONDUIT_ALLOW_FEDERATION: 'false' - # CONDUIT_DATABASE_PATH: /srv/conduit/.local/share/conduit - # CONDUIT_WORKERS: 10 - # CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB - + CONDUIT_ALLOW_FEDERATION: 'true' + CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' + #CONDUIT_MAX_CONCURRENT_REQUESTS: 100 + #CONDUIT_LOG: info,rocket=off,_=off,sled=off + CONDUIT_ADDRESS: 0.0.0.0 + CONDUIT_CONFIG: '' # Ignore this + # ### Uncomment if you want to use your own Element-Web App. ### Note: You need to provide a config.json for Element and you also need a second ### Domain or Subdomain for the communication between Element and Conduit @@ -56,4 +53,4 @@ services: # - homeserver volumes: - db: + db: diff --git a/docker/README.md b/docker/README.md index d8867385..14758fd7 100644 --- a/docker/README.md +++ b/docker/README.md @@ -112,4 +112,4 @@ So...step by step: ``` 6. Run `docker-compose up -d` -7. Connect to your homeserver with your preferred client and create a user. You should do this immediatly after starting Conduit, because the first created user is the admin. +7. Connect to your homeserver with your preferred client and create a user. You should do this immediately after starting Conduit, because the first created user is the admin. 
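The files above now all converge on the same deployment defaults. For reference, a minimal `conduit.toml` assembled from these snippets might look as follows (a sketch only — `your.server.name` is a placeholder, and the `address` value depends on whether Conduit sits behind a reverse proxy or runs inside a container):

```toml
[global]
# The server_name is the pretty name of this server. It is used as a suffix for
# user and room ids. Examples: matrix.org, conduit.rs
server_name = "your.server.name"   # YOU NEED TO EDIT THIS

# This is the only directory where Conduit will save its data
database_path = "/var/lib/matrix-conduit/"
database_backend = "rocksdb"

# All /_matrix requests on ports 443 and 8448 must be forwarded to this port
port = 6167

# Max size for uploads
max_request_size = 20_000_000 # in bytes, ~20 MB

# Enables registration. If set to false, no users can register on this server.
allow_registration = true

allow_federation = true

trusted_servers = ["matrix.org"]

#max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time
#log = "info,state_res=warn,rocket=off,_=off,sled=off"

address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy
#address = "0.0.0.0"  # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it.
```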
diff --git a/docker/docker-compose.traefik.yml b/docker/docker-compose.traefik.yml index f625080a..ca560b89 100644 --- a/docker/docker-compose.traefik.yml +++ b/docker/docker-compose.traefik.yml @@ -18,28 +18,22 @@ services: # GIT_REF: origin/master restart: unless-stopped volumes: - - db:/srv/conduit/.local/share/conduit - ### Uncomment if you want to use conduit.toml to configure Conduit - ### Note: Set env vars will override conduit.toml values - # - ./conduit.toml:/srv/conduit/conduit.toml + - db:/var/lib/matrix-conduit/ networks: - proxy environment: - CONDUIT_SERVER_NAME: localhost:6167 # replace with your own name + CONDUIT_SERVER_NAME: your.server.name # EDIT THIS + CONDUIT_DATABASE_PATH: /var/lib/matrix-conduit/ + CONDUIT_DATABASE_BACKEND: rocksdb + CONDUIT_PORT: 6167 + CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB + CONDUIT_ALLOW_REGISTRATION: 'true' + CONDUIT_ALLOW_FEDERATION: 'true' CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' - CONDUIT_ALLOW_REGISTRATION : 'true' - ### Uncomment and change values as desired - # CONDUIT_ADDRESS: 0.0.0.0 - # CONDUIT_PORT: 6167 - # CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string '' - # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging - # CONDUIT_LOG: info # default is: "info,_=off,sled=off" - # CONDUIT_ALLOW_JAEGER: 'false' - # CONDUIT_ALLOW_ENCRYPTION: 'false' - # CONDUIT_ALLOW_FEDERATION: 'false' - # CONDUIT_DATABASE_PATH: /srv/conduit/.local/share/conduit - # CONDUIT_WORKERS: 10 - # CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB + #CONDUIT_MAX_CONCURRENT_REQUESTS: 100 + #CONDUIT_LOG: info,rocket=off,_=off,sled=off + CONDUIT_ADDRESS: 0.0.0.0 + CONDUIT_CONFIG: '' # Ignore this # We need some way to server the client and server .well-known json. The simplest way is to use a nginx container # to serve those two as static files. If you want to use a different way, delete or comment the below service, here @@ -50,7 +44,6 @@ services: volumes: - ./nginx/matrix.conf:/etc/nginx/conf.d/matrix.conf # the config to serve the .well-known/matrix files - ./nginx/www:/var/www/ # location of the client and server .well-known-files - ### Uncomment if you want to use your own Element-Web App. 
### Note: You need to provide a config.json for Element and you also need a second ### Domain or Subdomain for the communication between Element and Conduit From c4353405a5c457b8301de123c646e748a07f8a22 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Sun, 13 Feb 2022 12:15:40 +0000 Subject: [PATCH 211/445] Suggestions from Jonas Zohren --- DEPLOY.md | 4 ++-- debian/postinst | 2 +- docker-compose.yml | 3 --- docker/README.md | 2 +- 4 files changed, 4 insertions(+), 7 deletions(-) diff --git a/DEPLOY.md b/DEPLOY.md index 0657c0c0..a28218d7 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -148,8 +148,8 @@ sudo chown -R conduit:nogroup /etc/matrix-conduit If you use the default database path you also need to run this: ```bash -sudo mkdir -p /var/lib/matrix-conduit/conduit_db -sudo chown -R conduit:nogroup /var/lib/matrix-conduit/conduit_db +sudo mkdir -p /var/lib/matrix-conduit/ +sudo chown -R conduit:nogroup /var/lib/matrix-conduit/ ``` ## Setting up the Reverse Proxy diff --git a/debian/postinst b/debian/postinst index 10d5561c..378f99ed 100644 --- a/debian/postinst +++ b/debian/postinst @@ -5,7 +5,7 @@ set -e CONDUIT_CONFIG_PATH=/etc/matrix-conduit CONDUIT_CONFIG_FILE="${CONDUIT_CONFIG_PATH}/conduit.toml" -CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit/conduit_db +CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit/ case "$1" in configure) diff --git a/docker-compose.yml b/docker-compose.yml index 5a17a8d2..0a9d8f4d 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -21,9 +21,6 @@ services: - 8448:6167 volumes: - db:/var/lib/matrix-conduit/ - ### Uncomment if you want to use conduit.toml to configure Conduit - ### Note: Set env vars will override conduit.toml values - # - ./conduit.toml:/srv/conduit/conduit.toml environment: CONDUIT_SERVER_NAME: your.server.name # EDIT THIS CONDUIT_DATABASE_PATH: /var/lib/matrix-conduit/ diff --git a/docker/README.md b/docker/README.md index 14758fd7..28ad06f3 100644 --- a/docker/README.md +++ b/docker/README.md @@ -24,7 +24,7 @@ which also will tag the resulting image as `matrixconduit/matrix-conduit:latest` After building the image you can simply run it with ```bash -docker run -d -p 8448:6167 -v ~/conduit.toml:/srv/conduit/conduit.toml -v db:/srv/conduit/.local/share/conduit matrixconduit/matrix-conduit:latest +docker run -d -p 8448:6167 -v ~/conduit.toml:/srv/conduit/conduit.toml -v db:/var/lib/matrix-conduit/ matrixconduit/matrix-conduit:latest ``` or you can skip the build step and pull the image from one of the following registries: From 97507d28806e7a10cb4ffc9ab4cc64b902b267ef Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Sun, 13 Feb 2022 12:25:19 +0000 Subject: [PATCH 212/445] Remove most env vars from Dockerfile --- Dockerfile | 15 +++------------ 1 file changed, 3 insertions(+), 12 deletions(-) diff --git a/Dockerfile b/Dockerfile index 34a07665..82ee9516 100644 --- a/Dockerfile +++ b/Dockerfile @@ -35,18 +35,9 @@ FROM docker.io/debian:bullseye-slim AS runner # You still need to map the port when using the docker command or docker-compose. 
EXPOSE 6167 -ENV CONDUIT_SERVER_NAME=your.server.name # EDIT THIS -ENV CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit -ENV CONDUIT_DATABASE_BACKEND=rocksdb -ENV CONDUIT_PORT=6167 -ENV CONDUIT_MAX_REQUEST_SIZE=20_000_000 # in bytes, ~20 MB -ENV CONDUIT_ALLOW_REGISTRATION=true -ENV CONDUIT_ALLOW_FEDERATION=true -ENV CONDUIT_TRUSTED_SERVERS=["matrix.org"] -#ENV CONDUIT_MAX_CONCURRENT_REQUESTS=100 -#ENV CONDUIT_LOG=info,rocket=off,_=off,sled=off -ENV CONDUIT_ADDRESS=0.0.0.0 -ENV CONDUIT_CONFIG='' # Ignore this +ENV CONDUIT_PORT=6167 \ + CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit \ + CONDUIT_CONFIG='' # Set no config file to do all configuration with env vars # Conduit needs: # ca-certificates: for https From 0be8500c4fec53d2442da7f3cb98ecc6cbe198da Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Sun, 13 Feb 2022 12:38:13 +0000 Subject: [PATCH 213/445] Set all env vars in docker README --- Dockerfile | 1 + docker/README.md | 12 +++++++++++- 2 files changed, 12 insertions(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 82ee9516..e6cdaf57 100644 --- a/Dockerfile +++ b/Dockerfile @@ -36,6 +36,7 @@ FROM docker.io/debian:bullseye-slim AS runner EXPOSE 6167 ENV CONDUIT_PORT=6167 \ + CONDUIT_ADDRESS="0.0.0.0" \ CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit \ CONDUIT_CONFIG='' # Set no config file to do all configuration with env vars diff --git a/docker/README.md b/docker/README.md index 28ad06f3..f9d94ab2 100644 --- a/docker/README.md +++ b/docker/README.md @@ -24,7 +24,17 @@ which also will tag the resulting image as `matrixconduit/matrix-conduit:latest` After building the image you can simply run it with ```bash -docker run -d -p 8448:6167 -v ~/conduit.toml:/srv/conduit/conduit.toml -v db:/var/lib/matrix-conduit/ matrixconduit/matrix-conduit:latest +docker run -d -p 8448:6167 \ + -v db:/var/lib/matrix-conduit/ \ + -e CONDUIT_SERVER_NAME="your.server.name" \ + -e CONDUIT_DATABASE_BACKEND="rocksdb" \ + -e CONDUIT_ALLOW_REGISTRATION=true \ + -e CONDUIT_ALLOW_FEDERATION=true \ + -e CONDUIT_MAX_REQUEST_SIZE="20_000_000" \ + -e CONDUIT_TRUSTED_SERVERS="[\"matrix.org\"]" \ + -e CONDUIT_MAX_CONCURRENT_REQUESTS="100" \ + -e CONDUIT_LOG="info,rocket=off,_=off,sled=off" \ + --name conduit matrixconduit/matrix-conduit:latest ``` or you can skip the build step and pull the image from one of the following registries: From 98b67da649c602574b4c4b304b3c52fdd0450641 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Wed, 16 Feb 2022 13:04:45 +0000 Subject: [PATCH 214/445] fix: Docker syntax --- Dockerfile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index e6cdaf57..49c32244 100644 --- a/Dockerfile +++ b/Dockerfile @@ -38,7 +38,8 @@ EXPOSE 6167 ENV CONDUIT_PORT=6167 \ CONDUIT_ADDRESS="0.0.0.0" \ CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit \ - CONDUIT_CONFIG='' # Set no config file to do all configuration with env vars + CONDUIT_CONFIG='' +# └─> Set no config file to do all configuration with env vars # Conduit needs: # ca-certificates: for https From b4225cb0fca88636e0a4d6213cfcea30c800ec1e Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Wed, 16 Feb 2022 15:04:32 +0100 Subject: [PATCH 215/445] fix(docker): use user 1000 and standard db path --- Dockerfile | 15 ++++---- docker/ci-binaries-packaging.Dockerfile | 48 +++++++++++++------------ 2 files changed, 34 insertions(+), 29 deletions(-) diff --git a/Dockerfile b/Dockerfile index 49c32244..76d10ea9 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,7 +3,8 @@ FROM docker.io/rust:1.58-bullseye AS builder 
WORKDIR /usr/src/conduit # Install required packages to build Conduit and it's dependencies -RUN apt update && apt -y install libclang-dev +RUN apt-get update && \ + apt-get -y --no-install-recommends install libclang-dev=1:11.0-51+nmu5 # == Build dependencies without our own code separately for caching == # @@ -44,7 +45,7 @@ ENV CONDUIT_PORT=6167 \ # Conduit needs: # ca-certificates: for https # iproute2 & wget: for the healthcheck script -RUN apt update && apt -y install \ +RUN apt-get update && apt-get -y --no-install-recommends install \ ca-certificates \ iproute2 \ wget \ @@ -61,12 +62,12 @@ HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh COPY --from=builder /usr/src/conduit/target/release/conduit /srv/conduit/conduit # Improve security: Don't run stuff as root, that does not need to run as root -# Add 'conduit' user and group (100:82). The UID:GID choice is to be compatible -# with previous, Alpine-based containers, where the user and group were both -# named 'www-data'. +# Most distros also use 1000:1000 for the first real user, so this should resolve volume mounting problems. +ARG USER_ID=1000 +ARG GROUP_ID=1000 RUN set -x ; \ - groupadd -r -g 82 conduit ; \ - useradd -r -M -d /srv/conduit -o -u 100 -g conduit conduit && exit 0 ; exit 1 + groupadd -r -g ${GROUP_ID} conduit ; \ + useradd -l -r -M -d /srv/conduit -o -u ${USER_ID} -g conduit conduit && exit 0 ; exit 1 # Change ownership of Conduit files to conduit user and group and make the healthcheck executable: RUN chown -cR conduit:conduit /srv/conduit && \ diff --git a/docker/ci-binaries-packaging.Dockerfile b/docker/ci-binaries-packaging.Dockerfile index 3731bac1..ee1ca4ca 100644 --- a/docker/ci-binaries-packaging.Dockerfile +++ b/docker/ci-binaries-packaging.Dockerfile @@ -14,9 +14,14 @@ FROM docker.io/alpine:3.15.0 AS runner # You still need to map the port when using the docker command or docker-compose. EXPOSE 6167 -# Note from @jfowl: I would like to remove the config file in the future and just have the Docker version be configured with envs. -ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" \ - CONDUIT_PORT=6167 +# Users are expected to mount a volume to this directory: +ARG DEFAULT_DB_PATH=/var/lib/matrix-conduit + +ENV CONDUIT_PORT=6167 \ + CONDUIT_ADDRESS="0.0.0.0" \ + CONDUIT_DATABASE_PATH=${DEFAULT_DB_PATH} \ + CONDUIT_CONFIG='' +# └─> Set no config file to do all configuration with env vars # Conduit needs: # ca-certificates: for https @@ -25,7 +30,6 @@ RUN apk add --no-cache \ ca-certificates \ iproute2 - ARG CREATED ARG VERSION ARG GIT_REF @@ -45,36 +49,36 @@ LABEL org.opencontainers.image.created=${CREATED} \ org.opencontainers.image.ref.name="" # Created directory for the database and media files -RUN mkdir -p /srv/conduit/.local/share/conduit +RUN mkdir -p ${DEFAULT_DB_PATH} # Test if Conduit is still alive, uses the same endpoint as Element COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh - -# Depending on the target platform (e.g. "linux/arm/v7", "linux/arm64/v8", or "linux/amd64") -# copy the matching binary into this docker image -ARG TARGETPLATFORM -COPY ./$TARGETPLATFORM /srv/conduit/conduit - - # Improve security: Don't run stuff as root, that does not need to run as root: -# Add www-data user and group with UID 82, as used by alpine -# https://git.alpinelinux.org/aports/tree/main/nginx/nginx.pre-install +# Most distros also use 1000:1000 for the first real user, so this should resolve volume mounting problems. 
+ARG USER_ID=1000 +ARG GROUP_ID=1000 RUN set -x ; \ - addgroup -Sg 82 www-data 2>/dev/null ; \ - adduser -S -D -H -h /srv/conduit -G www-data -g www-data www-data 2>/dev/null ; \ - addgroup www-data www-data 2>/dev/null && exit 0 ; exit 1 + deluser --remove-home www-data ; \ + addgroup -S -g ${GROUP_ID} conduit 2>/dev/null ; \ + adduser -S -u ${USER_ID} -D -H -h /srv/conduit -G conduit -g conduit conduit 2>/dev/null ; \ + addgroup conduit conduit 2>/dev/null && exit 0 ; exit 1 -# Change ownership of Conduit files to www-data user and group -RUN chown -cR www-data:www-data /srv/conduit -RUN chmod +x /srv/conduit/healthcheck.sh +# Change ownership of Conduit files to conduit user and group +RUN chown -cR conduit:conduit /srv/conduit && \ + chmod +x /srv/conduit/healthcheck.sh -# Change user to www-data -USER www-data +# Change user to conduit +USER conduit # Set container home directory WORKDIR /srv/conduit # Run Conduit and print backtraces on panics ENV RUST_BACKTRACE=1 ENTRYPOINT [ "/srv/conduit/conduit" ] + +# Depending on the target platform (e.g. "linux/arm/v7", "linux/arm64/v8", or "linux/amd64") +# copy the matching binary into this docker image +ARG TARGETPLATFORM +COPY --chown=conduit:conduit ./$TARGETPLATFORM /srv/conduit/conduit From bcd6c0bf532930b31873431ebdf601f4699d7d69 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Thu, 17 Feb 2022 11:14:50 +0000 Subject: [PATCH 216/445] feat: Provide sane defaults for vscode developing This includes some extensions and a debug profile --- .vscode/extensions.json | 11 +++++++++++ .vscode/launch.json | 35 +++++++++++++++++++++++++++++++++++ .vscode/settings.json | 2 +- 3 files changed, 47 insertions(+), 1 deletion(-) create mode 100644 .vscode/extensions.json create mode 100644 .vscode/launch.json diff --git a/.vscode/extensions.json b/.vscode/extensions.json new file mode 100644 index 00000000..7963e9d4 --- /dev/null +++ b/.vscode/extensions.json @@ -0,0 +1,11 @@ +{ + "recommendations": [ + "matklad.rust-analyzer", + "bungcip.better-toml", + "ms-azuretools.vscode-docker", + "eamodio.gitlens", + "serayuzgur.crates", + "vadimcn.vscode-lldb", + "timonwong.shellcheck" + ] +} \ No newline at end of file diff --git a/.vscode/launch.json b/.vscode/launch.json new file mode 100644 index 00000000..da521604 --- /dev/null +++ b/.vscode/launch.json @@ -0,0 +1,35 @@ +{ + // Use IntelliSense to learn about possible attributes. + // Hover to view descriptions of existing attributes. 
+ // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 + "version": "0.2.0", + "configurations": [ + { + "type": "lldb", + "request": "launch", + "name": "Debug conduit", + "sourceLanguages": ["rust"], + "cargo": { + "args": [ + "build", + "--bin=conduit", + "--package=conduit" + ], + "filter": { + "name": "conduit", + "kind": "bin" + } + }, + "args": [], + "env": { + "RUST_BACKTRACE": "1", + "CONDUIT_CONFIG": "", + "CONDUIT_SERVER_NAME": "localhost", + "CONDUIT_DATABASE_PATH": "/tmp", + "CONDUIT_ADDRESS": "0.0.0.0", + "CONDUIT_PORT": "6167" + }, + "cwd": "${workspaceFolder}" + } + ] +} \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json index c3f66054..95294d48 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,3 +1,3 @@ { - "rust-analyzer.procMacro.enable": true + "rust-analyzer.procMacro.enable": true, } \ No newline at end of file From e57cd437d4cfc55757220ffa02a3f6312a792567 Mon Sep 17 00:00:00 2001 From: TomZ Date: Thu, 17 Feb 2022 21:59:55 +0100 Subject: [PATCH 217/445] Slight clarification Which version it started being beta in is quite irrelevant here. --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 45b16fd5..730b2512 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ HQ. #### What is the current status? -As of 2021-09-01, Conduit is Beta, meaning you can join and participate in most +Conduit is Beta, meaning you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time. From 27692a2f149c010dc08a610599a9c1035c815f91 Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Fri, 18 Feb 2022 11:52:00 +0100 Subject: [PATCH 218/445] Remove useless serde roundtrips --- src/client_server/directory.rs | 14 +------------- src/server_server.rs | 26 ++------------------------ 2 files changed, 3 insertions(+), 37 deletions(-) diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 75601fe7..62bf566a 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -149,19 +149,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( .await?; return Ok(get_public_rooms_filtered::Response { - chunk: response - .chunk - .into_iter() - .map(|c| { - // Convert ruma::api::federation::directory::get_public_rooms::v1::PublicRoomsChunk - // to ruma::api::client::r0::directory::PublicRoomsChunk - serde_json::from_str( - &serde_json::to_string(&c) - .expect("PublicRoomsChunk::to_string always works"), - ) - .expect("federation and client-server PublicRoomsChunk are the same type") - }) - .collect(), + chunk: response.chunk, prev_batch: response.prev_batch, next_batch: response.next_batch, total_room_count_estimate: response.total_room_count_estimate, diff --git a/src/server_server.rs b/src/server_server.rs index 39210555..372a76f8 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -591,18 +591,7 @@ pub async fn get_public_rooms_filtered_route( .await?; Ok(get_public_rooms_filtered::v1::Response { - chunk: response - .chunk - .into_iter() - .map(|c| { - // Convert ruma::api::federation::directory::get_public_rooms::v1::PublicRoomsChunk - // to ruma::api::client::r0::directory::PublicRoomsChunk - serde_json::from_str( - &serde_json::to_string(&c).expect("PublicRoomsChunk::to_string always works"), - ) - .expect("federation and client-server PublicRoomsChunk are the same type") - }) - .collect(), + chunk: response.chunk, prev_batch: 
response.prev_batch, next_batch: response.next_batch, total_room_count_estimate: response.total_room_count_estimate, @@ -631,18 +620,7 @@ pub async fn get_public_rooms_route( .await?; Ok(get_public_rooms::v1::Response { - chunk: response - .chunk - .into_iter() - .map(|c| { - // Convert ruma::api::federation::directory::get_public_rooms::v1::PublicRoomsChunk - // to ruma::api::client::r0::directory::PublicRoomsChunk - serde_json::from_str( - &serde_json::to_string(&c).expect("PublicRoomsChunk::to_string always works"), - ) - .expect("federation and client-server PublicRoomsChunk are the same type") - }) - .collect(), + chunk: response.chunk, prev_batch: response.prev_batch, next_batch: response.next_batch, total_room_count_estimate: response.total_room_count_estimate, From e9f87e1952b8ae1588347d26d146986df623afe2 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Fri, 18 Feb 2022 15:33:14 +0100 Subject: [PATCH 219/445] update ruma --- Cargo.lock | 38 ++++++------- Cargo.toml | 2 +- src/client_server/account.rs | 48 ++++++++-------- src/client_server/alias.rs | 27 +++++---- src/client_server/backup.rs | 88 ++++++++++++++--------------- src/client_server/capabilities.rs | 8 +-- src/client_server/config.rs | 28 ++++----- src/client_server/context.rs | 11 ++-- src/client_server/device.rs | 36 ++++++------ src/client_server/directory.rs | 41 +++++++------- src/client_server/filter.rs | 14 ++--- src/client_server/keys.rs | 56 +++++++++--------- src/client_server/media.rs | 40 ++++++------- src/client_server/membership.rs | 76 ++++++++++++------------- src/client_server/message.rs | 22 ++++---- src/client_server/presence.rs | 14 ++--- src/client_server/profile.rs | 38 ++++++------- src/client_server/push.rs | 62 ++++++++++---------- src/client_server/read_marker.rs | 17 +++--- src/client_server/redact.rs | 8 +-- src/client_server/report.rs | 8 +-- src/client_server/room.rs | 40 ++++++------- src/client_server/search.rs | 15 +++-- src/client_server/session.rs | 36 ++++++------ src/client_server/state.rs | 32 +++++------ src/client_server/sync.rs | 62 +++++++++++--------- src/client_server/tag.rs | 20 +++---- src/client_server/thirdparty.rs | 8 +-- src/client_server/to_device.rs | 10 ++-- src/client_server/typing.rs | 11 ++-- src/client_server/unversioned.rs | 2 +- src/client_server/user_directory.rs | 10 ++-- src/client_server/voip.rs | 8 +-- src/database/globals.rs | 6 +- src/database/key_backups.rs | 2 +- src/database/pusher.rs | 12 ++-- src/database/rooms.rs | 2 +- src/database/uiaa.rs | 2 +- src/database/users.rs | 5 +- src/error.rs | 2 +- src/ruma_wrapper.rs | 2 +- src/server_server.rs | 45 +++++++++------ 42 files changed, 514 insertions(+), 500 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index e7ffe5bd..997cedc6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2081,7 +2081,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" dependencies = [ "assign", "js_int", @@ -2102,7 +2102,7 @@ dependencies = [ [[package]] name = "ruma-api" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" 
dependencies = [ "bytes", "http", @@ -2119,7 +2119,7 @@ dependencies = [ [[package]] name = "ruma-api-macros" version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2130,7 +2130,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" dependencies = [ "ruma-api", "ruma-common", @@ -2144,7 +2144,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.12.3" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" dependencies = [ "assign", "bytes", @@ -2164,7 +2164,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" dependencies = [ "indexmap", "js_int", @@ -2179,7 +2179,7 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" dependencies = [ "indoc", "js_int", @@ -2196,7 +2196,7 @@ dependencies = [ [[package]] name = "ruma-events-macros" version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2207,7 +2207,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" dependencies = [ "js_int", "ruma-api", @@ -2222,7 +2222,7 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" dependencies = [ "percent-encoding", "rand 0.8.4", @@ -2237,7 +2237,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-macros" version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = 
"git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2247,7 +2247,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" dependencies = [ "thiserror", ] @@ -2255,7 +2255,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" dependencies = [ "js_int", "ruma-api", @@ -2268,7 +2268,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" dependencies = [ "js_int", "ruma-api", @@ -2283,12 +2283,12 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" dependencies = [ "base64 0.13.0", "bytes", "form_urlencoded", - "itoa 0.4.8", + "itoa 1.0.1", "js_int", "ruma-serde-macros", "serde", @@ -2298,7 +2298,7 @@ dependencies = [ [[package]] name = "ruma-serde-macros" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2309,7 +2309,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2326,7 +2326,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.4.1" -source = "git+https://github.com/ruma/ruma?rev=aed09886946f8817a478981cae1b6b8b5d4e7b7d#aed09886946f8817a478981cae1b6b8b5d4e7b7d" +source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" dependencies = [ "itertools", "js_int", diff --git a/Cargo.toml b/Cargo.toml index ab7b47d0..b9affa76 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ tower-http = { version = "0.2.1", features = ["add-extension", "cors", "compress # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = 
"https://github.com/ruma/ruma", rev = "aed09886946f8817a478981cae1b6b8b5d4e7b7d", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/client_server/account.rs b/src/client_server/account.rs index c15d820e..1ff0fa08 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -8,14 +8,12 @@ use crate::{ }; use ruma::{ api::client::{ - error::ErrorKind, - r0::{ - account::{ - change_password, deactivate, get_3pids, get_username_availability, register, - whoami, ThirdPartyIdRemovalStatus, - }, - uiaa::{AuthFlow, AuthType, UiaaInfo}, + account::{ + change_password, deactivate, get_3pids, get_username_availability, register, whoami, + ThirdPartyIdRemovalStatus, }, + error::ErrorKind, + uiaa::{AuthFlow, AuthType, UiaaInfo}, }, events::{ room::member::{MembershipState, RoomMemberEventContent}, @@ -42,8 +40,8 @@ const GUEST_NAME_LENGTH: usize = 10; /// Note: This will not reserve the username, so the username might become invalid when trying to register pub async fn get_register_available_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { // Validate user id let user_id = UserId::parse_with_server_name(body.username.to_lowercase(), db.globals.server_name()) @@ -67,7 +65,7 @@ pub async fn get_register_available_route( // TODO add check for appservice namespaces // If no if check is true we have an username that's available to be used. 
- Ok(get_username_availability::Response { available: true }) + Ok(get_username_availability::v3::Response { available: true }) } /// # `POST /_matrix/client/r0/register` @@ -85,8 +83,8 @@ pub async fn get_register_available_route( /// - If `inhibit_login` is false: Creates a device and returns device id and access_token pub async fn register_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { if !db.globals.allow_registration() && !body.from_appservice { return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -206,7 +204,7 @@ pub async fn register_route( // Inhibit login does not work for guests if !is_guest && body.inhibit_login { - return Ok(register::Response { + return Ok(register::v3::Response { access_token: None, user_id, device_id: None, @@ -244,7 +242,7 @@ pub async fn register_route( db.flush()?; - Ok(register::Response { + Ok(register::v3::Response { access_token: Some(token), user_id, device_id: Some(device_id), @@ -267,8 +265,8 @@ pub async fn register_route( /// - Triggers device list updates pub async fn change_password_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -321,7 +319,7 @@ pub async fn change_password_route( db.flush()?; - Ok(change_password::Response {}) + Ok(change_password::v3::Response {}) } /// # `GET _matrix/client/r0/account/whoami` @@ -329,9 +327,9 @@ pub async fn change_password_route( /// Get user_id of the sender user. /// /// Note: Also works for Application Services -pub async fn whoami_route(body: Ruma) -> Result { +pub async fn whoami_route(body: Ruma) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - Ok(whoami::Response { + Ok(whoami::v3::Response { user_id: sender_user.clone(), }) } @@ -348,8 +346,8 @@ pub async fn whoami_route(body: Ruma) -> Result>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -442,7 +440,7 @@ pub async fn deactivate_route( db.flush()?; - Ok(deactivate::Response { + Ok(deactivate::v3::Response { id_server_unbind_result: ThirdPartyIdRemovalStatus::NoSupport, }) } @@ -452,8 +450,10 @@ pub async fn deactivate_route( /// Get a list of third party identifiers associated with this account. /// /// - Currently always returns empty list -pub async fn third_party_route(body: Ruma) -> Result { +pub async fn third_party_route( + body: Ruma, +) -> Result { let _sender_user = body.sender_user.as_ref().expect("user is authenticated"); - Ok(get_3pids::Response::new(Vec::new())) + Ok(get_3pids::v3::Response::new(Vec::new())) } diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index 509372c4..75cf85e5 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -4,8 +4,8 @@ use ruma::{ api::{ appservice, client::{ + alias::{create_alias, delete_alias, get_alias}, error::ErrorKind, - r0::alias::{create_alias, delete_alias, get_alias}, }, federation, }, @@ -17,8 +17,8 @@ use ruma::{ /// Creates a new room alias on this server. 
pub async fn create_alias_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { if body.room_alias.server_name() != db.globals.server_name() { return Err(Error::BadRequest( ErrorKind::InvalidParam, @@ -35,7 +35,7 @@ pub async fn create_alias_route( db.flush()?; - Ok(create_alias::Response::new()) + Ok(create_alias::v3::Response::new()) } /// # `DELETE /_matrix/client/r0/directory/room/{roomAlias}` @@ -46,8 +46,8 @@ pub async fn create_alias_route( /// - TODO: Update canonical alias event pub async fn delete_alias_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { if body.room_alias.server_name() != db.globals.server_name() { return Err(Error::BadRequest( ErrorKind::InvalidParam, @@ -61,7 +61,7 @@ pub async fn delete_alias_route( db.flush()?; - Ok(delete_alias::Response::new()) + Ok(delete_alias::v3::Response::new()) } /// # `GET /_matrix/client/r0/directory/room/{roomAlias}` @@ -71,15 +71,15 @@ pub async fn delete_alias_route( /// - TODO: Suggest more servers to join via pub async fn get_alias_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { get_alias_helper(&db, &body.room_alias).await } pub(crate) async fn get_alias_helper( db: &Database, room_alias: &RoomAliasId, -) -> Result { +) -> Result { if room_alias.server_name() != db.globals.server_name() { let response = db .sending @@ -90,7 +90,10 @@ pub(crate) async fn get_alias_helper( ) .await?; - return Ok(get_alias::Response::new(response.room_id, response.servers)); + return Ok(get_alias::v3::Response::new( + response.room_id, + response.servers, + )); } let mut room_id = None; @@ -141,7 +144,7 @@ pub(crate) async fn get_alias_helper( } }; - Ok(get_alias::Response::new( + Ok(get_alias::v3::Response::new( room_id, vec![db.globals.server_name().to_owned()], )) diff --git a/src/client_server/backup.rs b/src/client_server/backup.rs index 14c239b1..808d8868 100644 --- a/src/client_server/backup.rs +++ b/src/client_server/backup.rs @@ -1,12 +1,12 @@ use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::api::client::{ - error::ErrorKind, - r0::backup::{ + backup::{ add_backup_key_session, add_backup_key_sessions, add_backup_keys, create_backup, delete_backup, delete_backup_key_session, delete_backup_key_sessions, delete_backup_keys, get_backup, get_backup_key_session, get_backup_key_sessions, get_backup_keys, get_latest_backup, update_backup, }, + error::ErrorKind, }; /// # `POST /_matrix/client/r0/room_keys/version` @@ -14,8 +14,8 @@ use ruma::api::client::{ /// Creates a new backup. pub async fn create_backup_route( db: DatabaseGuard, - body: Ruma, -) -> Result { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let version = db .key_backups @@ -23,7 +23,7 @@ pub async fn create_backup_route( db.flush()?; - Ok(create_backup::Response { version }) + Ok(create_backup::v3::Response { version }) } /// # `PUT /_matrix/client/r0/room_keys/version/{version}` @@ -31,15 +31,15 @@ pub async fn create_backup_route( /// Update information about an existing backup. Only `auth_data` can be modified. 
pub async fn update_backup_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups .update_backup(sender_user, &body.version, &body.algorithm, &db.globals)?; db.flush()?; - Ok(update_backup::Response {}) + Ok(update_backup::v3::Response {}) } /// # `GET /_matrix/client/r0/room_keys/version` @@ -47,8 +47,8 @@ pub async fn update_backup_route( /// Get information about the latest backup version. pub async fn get_latest_backup_route( db: DatabaseGuard, - body: Ruma, -) -> Result { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let (version, algorithm) = @@ -59,7 +59,7 @@ pub async fn get_latest_backup_route( "Key backup does not exist.", ))?; - Ok(get_latest_backup::Response { + Ok(get_latest_backup::v3::Response { algorithm, count: (db.key_backups.count_keys(sender_user, &version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &version)?, @@ -72,8 +72,8 @@ pub async fn get_latest_backup_route( /// Get information about an existing backup. pub async fn get_backup_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let algorithm = db .key_backups @@ -83,7 +83,7 @@ pub async fn get_backup_route( "Key backup does not exist.", ))?; - Ok(get_backup::Response { + Ok(get_backup::v3::Response { algorithm, count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, @@ -98,15 +98,15 @@ pub async fn get_backup_route( /// - Deletes both information about the backup, as well as all key data related to the backup pub async fn delete_backup_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups.delete_backup(sender_user, &body.version)?; db.flush()?; - Ok(delete_backup::Response {}) + Ok(delete_backup::v3::Response {}) } /// # `PUT /_matrix/client/r0/room_keys/keys` @@ -118,8 +118,8 @@ pub async fn delete_backup_route( /// - Returns the new number of keys in this backup and the etag pub async fn add_backup_keys_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if Some(&body.version) @@ -149,7 +149,7 @@ pub async fn add_backup_keys_route( db.flush()?; - Ok(add_backup_keys::Response { + Ok(add_backup_keys::v3::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, }) @@ -164,8 +164,8 @@ pub async fn add_backup_keys_route( /// - Returns the new number of keys in this backup and the etag pub async fn add_backup_key_sessions_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if Some(&body.version) @@ -193,7 +193,7 @@ pub async fn add_backup_key_sessions_route( db.flush()?; - Ok(add_backup_key_sessions::Response { + Ok(add_backup_key_sessions::v3::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? 
as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, }) @@ -208,8 +208,8 @@ pub async fn add_backup_key_sessions_route( /// - Returns the new number of keys in this backup and the etag pub async fn add_backup_key_session_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if Some(&body.version) @@ -235,7 +235,7 @@ pub async fn add_backup_key_session_route( db.flush()?; - Ok(add_backup_key_session::Response { + Ok(add_backup_key_session::v3::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, }) @@ -246,13 +246,13 @@ pub async fn add_backup_key_session_route( /// Retrieves all keys from the backup. pub async fn get_backup_keys_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let rooms = db.key_backups.get_all(sender_user, &body.version)?; - Ok(get_backup_keys::Response { rooms }) + Ok(get_backup_keys::v3::Response { rooms }) } /// # `GET /_matrix/client/r0/room_keys/keys/{roomId}` @@ -260,15 +260,15 @@ pub async fn get_backup_keys_route( /// Retrieves all keys from the backup for a given room. pub async fn get_backup_key_sessions_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sessions = db .key_backups .get_room(sender_user, &body.version, &body.room_id)?; - Ok(get_backup_key_sessions::Response { sessions }) + Ok(get_backup_key_sessions::v3::Response { sessions }) } /// # `GET /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` @@ -276,8 +276,8 @@ pub async fn get_backup_key_sessions_route( /// Retrieves a key from the backup. pub async fn get_backup_key_session_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let key_data = db @@ -288,7 +288,7 @@ pub async fn get_backup_key_session_route( "Backup key not found for this user's session.", ))?; - Ok(get_backup_key_session::Response { key_data }) + Ok(get_backup_key_session::v3::Response { key_data }) } /// # `DELETE /_matrix/client/r0/room_keys/keys` @@ -296,15 +296,15 @@ pub async fn get_backup_key_session_route( /// Delete the keys from the backup. pub async fn delete_backup_keys_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups.delete_all_keys(sender_user, &body.version)?; db.flush()?; - Ok(delete_backup_keys::Response { + Ok(delete_backup_keys::v3::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, }) @@ -315,8 +315,8 @@ pub async fn delete_backup_keys_route( /// Delete the keys from the backup for a given room. 
pub async fn delete_backup_key_sessions_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups @@ -324,7 +324,7 @@ pub async fn delete_backup_key_sessions_route( db.flush()?; - Ok(delete_backup_key_sessions::Response { + Ok(delete_backup_key_sessions::v3::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, }) @@ -335,8 +335,8 @@ pub async fn delete_backup_key_sessions_route( /// Delete a key from the backup. pub async fn delete_backup_key_session_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups @@ -344,7 +344,7 @@ pub async fn delete_backup_key_session_route( db.flush()?; - Ok(delete_backup_key_session::Response { + Ok(delete_backup_key_session::v3::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, }) diff --git a/src/client_server/capabilities.rs b/src/client_server/capabilities.rs index b1e072e7..ac2e59f6 100644 --- a/src/client_server/capabilities.rs +++ b/src/client_server/capabilities.rs @@ -1,6 +1,6 @@ use crate::{Result, Ruma}; use ruma::{ - api::client::r0::capabilities::{ + api::client::capabilities::{ get_capabilities, Capabilities, RoomVersionStability, RoomVersionsCapability, }, RoomVersionId, @@ -11,8 +11,8 @@ use std::collections::BTreeMap; /// /// Get information on the supported feature set and other relevent capabilities of this server. pub async fn get_capabilities_route( - _body: Ruma, -) -> Result { + _body: Ruma, +) -> Result { let mut available = BTreeMap::new(); available.insert(RoomVersionId::V5, RoomVersionStability::Stable); available.insert(RoomVersionId::V6, RoomVersionStability::Stable); @@ -23,5 +23,5 @@ pub async fn get_capabilities_route( available, }; - Ok(get_capabilities::Response { capabilities }) + Ok(get_capabilities::v3::Response { capabilities }) } diff --git a/src/client_server/config.rs b/src/client_server/config.rs index 83bb7a59..a9a2fb14 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -1,11 +1,11 @@ use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::{ api::client::{ - error::ErrorKind, - r0::config::{ + config::{ get_global_account_data, get_room_account_data, set_global_account_data, set_room_account_data, }, + error::ErrorKind, }, events::{AnyGlobalAccountDataEventContent, AnyRoomAccountDataEventContent}, serde::Raw, @@ -18,8 +18,8 @@ use serde_json::{json, value::RawValue as RawJsonValue}; /// Sets some account data for the sender user. pub async fn set_global_account_data_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let data: serde_json::Value = serde_json::from_str(body.data.get()) @@ -40,7 +40,7 @@ pub async fn set_global_account_data_route( db.flush()?; - Ok(set_global_account_data::Response {}) + Ok(set_global_account_data::v3::Response {}) } /// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}` @@ -48,8 +48,8 @@ pub async fn set_global_account_data_route( /// Sets some room account data for the sender user. 
pub async fn set_room_account_data_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let data: serde_json::Value = serde_json::from_str(body.data.get()) @@ -70,7 +70,7 @@ pub async fn set_room_account_data_route( db.flush()?; - Ok(set_room_account_data::Response {}) + Ok(set_room_account_data::v3::Response {}) } /// # `GET /_matrix/client/r0/user/{userId}/account_data/{type}` @@ -78,8 +78,8 @@ pub async fn set_room_account_data_route( /// Gets some account data for the sender user. pub async fn get_global_account_data_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let event: Box = db @@ -91,7 +91,7 @@ pub async fn get_global_account_data_route( .map_err(|_| Error::bad_database("Invalid account data event in db."))? .content; - Ok(get_global_account_data::Response { account_data }) + Ok(get_global_account_data::v3::Response { account_data }) } /// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}` @@ -99,8 +99,8 @@ pub async fn get_global_account_data_route( /// Gets some room account data for the sender user. pub async fn get_room_account_data_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let event: Box = db @@ -116,7 +116,7 @@ pub async fn get_room_account_data_route( .map_err(|_| Error::bad_database("Invalid account data event in db."))? .content; - Ok(get_room_account_data::Response { account_data }) + Ok(get_room_account_data::v3::Response { account_data }) } #[derive(Deserialize)] diff --git a/src/client_server/context.rs b/src/client_server/context.rs index 167d0cc5..2f6a2eac 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -1,9 +1,6 @@ use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::{ - api::client::{ - error::ErrorKind, - r0::{context::get_context, filter::LazyLoadOptions}, - }, + api::client::{context::get_context, error::ErrorKind, filter::LazyLoadOptions}, events::EventType, }; use std::{collections::HashSet, convert::TryFrom}; @@ -17,8 +14,8 @@ use tracing::error; /// joined, depending on history_visibility) pub async fn get_context_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -178,7 +175,7 @@ pub async fn get_context_route( } } - let resp = get_context::Response { + let resp = get_context::v3::Response { start: start_token, end: end_token, events_before, diff --git a/src/client_server/device.rs b/src/client_server/device.rs index 76172d21..09c94064 100644 --- a/src/client_server/device.rs +++ b/src/client_server/device.rs @@ -1,10 +1,8 @@ use crate::{database::DatabaseGuard, utils, Error, Result, Ruma}; use ruma::api::client::{ + device::{self, delete_device, delete_devices, get_device, get_devices, update_device}, error::ErrorKind, - r0::{ - device::{self, delete_device, delete_devices, get_device, get_devices, update_device}, - uiaa::{AuthFlow, AuthType, UiaaInfo}, - }, + uiaa::{AuthFlow, AuthType, UiaaInfo}, }; use super::SESSION_ID_LENGTH; @@ -14,8 +12,8 @@ use super::SESSION_ID_LENGTH; /// Get metadata on all devices of the sender user. 
pub async fn get_devices_route( db: DatabaseGuard, - body: Ruma, -) -> Result { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let devices: Vec = db @@ -24,7 +22,7 @@ pub async fn get_devices_route( .filter_map(|r| r.ok()) // Filter out buggy devices .collect(); - Ok(get_devices::Response { devices }) + Ok(get_devices::v3::Response { devices }) } /// # `GET /_matrix/client/r0/devices/{deviceId}` @@ -32,8 +30,8 @@ pub async fn get_devices_route( /// Get metadata on a single device of the sender user. pub async fn get_device_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let device = db @@ -41,7 +39,7 @@ pub async fn get_device_route( .get_device_metadata(sender_user, &body.body.device_id)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?; - Ok(get_device::Response { device }) + Ok(get_device::v3::Response { device }) } /// # `PUT /_matrix/client/r0/devices/{deviceId}` @@ -49,8 +47,8 @@ pub async fn get_device_route( /// Updates the metadata on a given device of the sender user. pub async fn update_device_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut device = db @@ -65,7 +63,7 @@ pub async fn update_device_route( db.flush()?; - Ok(update_device::Response {}) + Ok(update_device::v3::Response {}) } /// # `DELETE /_matrix/client/r0/devices/{deviceId}` @@ -79,8 +77,8 @@ pub async fn update_device_route( /// - Triggers device list updates pub async fn delete_device_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -121,7 +119,7 @@ pub async fn delete_device_route( db.flush()?; - Ok(delete_device::Response {}) + Ok(delete_device::v3::Response {}) } /// # `PUT /_matrix/client/r0/devices/{deviceId}` @@ -137,8 +135,8 @@ pub async fn delete_device_route( /// - Triggers device list updates pub async fn delete_devices_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -181,5 +179,5 @@ pub async fn delete_devices_route( db.flush()?; - Ok(delete_devices::Response {}) + Ok(delete_devices::v3::Response {}) } diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index 62bf566a..ad88254e 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -2,14 +2,12 @@ use crate::{database::DatabaseGuard, Database, Error, Result, Ruma}; use ruma::{ api::{ client::{ - error::ErrorKind, - r0::{ - directory::{ - get_public_rooms, get_public_rooms_filtered, get_room_visibility, - set_room_visibility, - }, - room, + directory::{ + get_public_rooms, get_public_rooms_filtered, get_room_visibility, + set_room_visibility, }, + error::ErrorKind, + room, }, federation, }, @@ -36,8 +34,8 @@ use tracing::{info, warn}; /// - Rooms are ordered by the number of joined members pub async fn get_public_rooms_filtered_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { get_public_rooms_filtered_helper( &db, body.server.as_deref(), @@ -56,8 +54,8 @@ pub async 
fn get_public_rooms_filtered_route( /// - Rooms are ordered by the number of joined members pub async fn get_public_rooms_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let response = get_public_rooms_filtered_helper( &db, body.server.as_deref(), @@ -68,7 +66,7 @@ pub async fn get_public_rooms_route( ) .await?; - Ok(get_public_rooms::Response { + Ok(get_public_rooms::v3::Response { chunk: response.chunk, prev_batch: response.prev_batch, next_batch: response.next_batch, @@ -83,8 +81,8 @@ pub async fn get_public_rooms_route( /// - TODO: Access control checks pub async fn set_room_visibility_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); match &body.visibility { @@ -103,7 +101,7 @@ pub async fn set_room_visibility_route( db.flush()?; - Ok(set_room_visibility::Response {}) + Ok(set_room_visibility::v3::Response {}) } /// # `GET /_matrix/client/r0/directory/list/room/{roomId}` @@ -111,9 +109,9 @@ pub async fn set_room_visibility_route( /// Gets the visibility of a given room in the room directory. pub async fn get_room_visibility_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { - Ok(get_room_visibility::Response { + body: Ruma>, +) -> Result { + Ok(get_room_visibility::v3::Response { visibility: if db.rooms.is_public_room(&body.room_id)? { room::Visibility::Public } else { @@ -129,7 +127,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( since: Option<&str>, filter: &IncomingFilter, _network: &IncomingRoomNetwork, -) -> Result { +) -> Result { if let Some(other_server) = server.filter(|server| *server != db.globals.server_name().as_str()) { let response = db @@ -148,7 +146,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( ) .await?; - return Ok(get_public_rooms_filtered::Response { + return Ok(get_public_rooms_filtered::v3::Response { chunk: response.chunk, prev_batch: response.prev_batch, next_batch: response.next_batch, @@ -189,7 +187,6 @@ pub(crate) async fn get_public_rooms_filtered_helper( let room_id = room_id?; let chunk = PublicRoomsChunk { - aliases: Vec::new(), canonical_alias: db .rooms .room_state_get(&room_id, &EventType::RoomCanonicalAlias, "")? @@ -328,7 +325,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( Some(format!("n{}", num_since + limit)) }; - Ok(get_public_rooms_filtered::Response { + Ok(get_public_rooms_filtered::v3::Response { chunk, prev_batch, next_batch, diff --git a/src/client_server/filter.rs b/src/client_server/filter.rs index a606aeb4..379950f4 100644 --- a/src/client_server/filter.rs +++ b/src/client_server/filter.rs @@ -1,7 +1,7 @@ use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::api::client::{ error::ErrorKind, - r0::filter::{create_filter, get_filter}, + filter::{create_filter, get_filter}, }; /// # `GET /_matrix/client/r0/user/{userId}/filter/{filterId}` @@ -11,15 +11,15 @@ use ruma::api::client::{ /// - A user can only access their own filters pub async fn get_filter_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let filter = match db.users.get_filter(sender_user, &body.filter_id)? 
{ Some(filter) => filter, None => return Err(Error::BadRequest(ErrorKind::NotFound, "Filter not found.")), }; - Ok(get_filter::Response::new(filter)) + Ok(get_filter::v3::Response::new(filter)) } /// # `PUT /_matrix/client/r0/user/{userId}/filter` @@ -27,10 +27,10 @@ pub async fn get_filter_route( /// Creates a new filter to be used by other endpoints. pub async fn create_filter_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - Ok(create_filter::Response::new( + Ok(create_filter::v3::Response::new( db.users.create_filter(sender_user, &body.filter)?, )) } diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index 2ea62a87..525c7790 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -5,13 +5,11 @@ use ruma::{ api::{ client::{ error::ErrorKind, - r0::{ - keys::{ - claim_keys, get_key_changes, get_keys, upload_keys, upload_signatures, - upload_signing_keys, - }, - uiaa::{AuthFlow, AuthType, UiaaInfo}, + keys::{ + claim_keys, get_key_changes, get_keys, upload_keys, upload_signatures, + upload_signing_keys, }, + uiaa::{AuthFlow, AuthType, UiaaInfo}, }, federation, }, @@ -29,8 +27,8 @@ use std::collections::{BTreeMap, HashMap, HashSet}; /// - If there are no device keys yet: Adds device keys (TODO: merge with existing keys?) pub async fn upload_keys_route( db: DatabaseGuard, - body: Ruma, -) -> Result { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -59,7 +57,7 @@ pub async fn upload_keys_route( db.flush()?; - Ok(upload_keys::Response { + Ok(upload_keys::v3::Response { one_time_key_counts: db.users.count_one_time_keys(sender_user, sender_device)?, }) } @@ -73,8 +71,8 @@ pub async fn upload_keys_route( /// - The master and self-signing keys contain signatures that the user is allowed to see pub async fn get_keys_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let response = get_keys_helper( @@ -93,8 +91,8 @@ pub async fn get_keys_route( /// Claims one-time keys pub async fn claim_keys_route( db: DatabaseGuard, - body: Ruma, -) -> Result { + body: Ruma, +) -> Result { let response = claim_keys_helper(&body.one_time_keys, &db).await?; db.flush()?; @@ -109,8 +107,8 @@ pub async fn claim_keys_route( /// - Requires UIAA to verify password pub async fn upload_signing_keys_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -160,7 +158,7 @@ pub async fn upload_signing_keys_route( db.flush()?; - Ok(upload_signing_keys::Response {}) + Ok(upload_signing_keys::v3::Response {}) } /// # `POST /_matrix/client/r0/keys/signatures/upload` @@ -168,12 +166,14 @@ pub async fn upload_signing_keys_route( /// Uploads end-to-end key signatures from the sender user. 
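In the signature-upload hunk below, each entry of signed_keys is first run through serde_json::to_value before its "signatures" object is read, which suggests the reworked ruma request now hands the handler typed values rather than raw JSON; the v3 response also gains a failures map. A small self-contained sketch of the JSON nesting that the .get("signatures") walk relies on (the user ID, key ID, and signature strings here are invented for illustration):

use serde_json::json;

fn example_signature_walk() {
    // Hypothetical value shaped like one signed key from `signed_keys`.
    let signed_key = json!({
        "keys": { "ed25519:ABCDEF": "base64+public+key" },
        "signatures": {
            "@alice:example.org": { "ed25519:ABCDEF": "base64+signature" }
        }
    });

    // signatures -> (user id -> (key id -> signature))
    if let Some(signatures) = signed_key.get("signatures").and_then(|v| v.as_object()) {
        for (user_id, keys) in signatures {
            for (key_id, signature) in keys.as_object().into_iter().flatten() {
                println!("{user_id} signed {key_id}: {signature}");
            }
        }
    }
}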
pub async fn upload_signatures_route( db: DatabaseGuard, - body: Ruma, -) -> Result { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); for (user_id, signed_keys) in &body.signed_keys { for (key_id, signed_key) in signed_keys { + let signed_key = serde_json::to_value(signed_key).unwrap(); + for signature in signed_key .get("signatures") .ok_or(Error::BadRequest( @@ -219,7 +219,9 @@ pub async fn upload_signatures_route( db.flush()?; - Ok(upload_signatures::Response {}) + Ok(upload_signatures::v3::Response { + failures: BTreeMap::new(), // TODO: integrate + }) } /// # `POST /_matrix/client/r0/keys/changes` @@ -229,8 +231,8 @@ pub async fn upload_signatures_route( /// - TODO: left users pub async fn get_key_changes_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut device_list_updates = HashSet::new(); @@ -266,7 +268,7 @@ pub async fn get_key_changes_route( .filter_map(|r| r.ok()), ); } - Ok(get_key_changes::Response { + Ok(get_key_changes::v3::Response { changed: device_list_updates.into_iter().collect(), left: Vec::new(), // TODO }) @@ -277,7 +279,7 @@ pub(crate) async fn get_keys_helper bool>( device_keys_input: &BTreeMap, Vec>>, allowed_signatures: F, db: &Database, -) -> Result { +) -> Result { let mut master_keys = BTreeMap::new(); let mut self_signing_keys = BTreeMap::new(); let mut user_signing_keys = BTreeMap::new(); @@ -386,7 +388,7 @@ pub(crate) async fn get_keys_helper bool>( } } - Ok(get_keys::Response { + Ok(get_keys::v3::Response { master_keys, self_signing_keys, user_signing_keys, @@ -397,7 +399,7 @@ pub(crate) async fn get_keys_helper bool>( fn add_unsigned_device_display_name( keys: &mut Raw, - metadata: ruma::api::client::r0::device::Device, + metadata: ruma::api::client::device::Device, ) -> serde_json::Result<()> { if let Some(display_name) = metadata.display_name { let mut object = keys.deserialize_as::>()?; @@ -416,7 +418,7 @@ fn add_unsigned_device_display_name( pub(crate) async fn claim_keys_helper( one_time_keys_input: &BTreeMap, BTreeMap, DeviceKeyAlgorithm>>, db: &Database, -) -> Result { +) -> Result { let mut one_time_keys = BTreeMap::new(); let mut get_over_federation = BTreeMap::new(); @@ -468,7 +470,7 @@ pub(crate) async fn claim_keys_helper( } } - Ok(claim_keys::Response { + Ok(claim_keys::v3::Response { failures, one_time_keys, }) diff --git a/src/client_server/media.rs b/src/client_server/media.rs index dcdea05a..71dbed68 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -4,7 +4,7 @@ use crate::{ }; use ruma::api::client::{ error::ErrorKind, - r0::media::{ + media::{ create_content, get_content, get_content_as_filename, get_content_thumbnail, get_media_config, }, @@ -17,9 +17,9 @@ const MXC_LENGTH: usize = 32; /// Returns max upload size. 
pub async fn get_media_config_route( db: DatabaseGuard, - _body: Ruma, -) -> Result { - Ok(get_media_config::Response { + _body: Ruma, +) -> Result { + Ok(get_media_config::v3::Response { upload_size: db.globals.max_request_size().into(), }) } @@ -32,8 +32,8 @@ pub async fn get_media_config_route( /// - Media will be saved in the media/ directory pub async fn create_content_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let mxc = format!( "mxc://{}/{}", db.globals.server_name(), @@ -56,7 +56,7 @@ pub async fn create_content_route( db.flush()?; - Ok(create_content::Response { + Ok(create_content::v3::Response { content_uri: mxc.try_into().expect("Invalid mxc:// URI"), blurhash: None, }) @@ -67,13 +67,13 @@ pub async fn get_remote_content( mxc: &str, server_name: &ruma::ServerName, media_id: &str, -) -> Result { +) -> Result { let content_response = db .sending .send_federation_request( &db.globals, server_name, - get_content::Request { + get_content::v3::Request { allow_remote: false, server_name, media_id, @@ -101,8 +101,8 @@ pub async fn get_remote_content( /// - Only allows federation if `allow_remote` is true pub async fn get_content_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); if let Some(FileMeta { @@ -111,7 +111,7 @@ pub async fn get_content_route( file, }) = db.media.get(&db.globals, &mxc).await? { - Ok(get_content::Response { + Ok(get_content::v3::Response { file, content_type, content_disposition, @@ -132,8 +132,8 @@ pub async fn get_content_route( /// - Only allows federation if `allow_remote` is true pub async fn get_content_as_filename_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); if let Some(FileMeta { @@ -142,7 +142,7 @@ pub async fn get_content_as_filename_route( file, }) = db.media.get(&db.globals, &mxc).await? { - Ok(get_content_as_filename::Response { + Ok(get_content_as_filename::v3::Response { file, content_type, content_disposition: Some(format!("inline; filename={}", body.filename)), @@ -151,7 +151,7 @@ pub async fn get_content_as_filename_route( let remote_content_response = get_remote_content(&db, &mxc, &body.server_name, &body.media_id).await?; - Ok(get_content_as_filename::Response { + Ok(get_content_as_filename::v3::Response { content_disposition: Some(format!("inline: filename={}", body.filename)), content_type: remote_content_response.content_type, file: remote_content_response.file, @@ -168,8 +168,8 @@ pub async fn get_content_as_filename_route( /// - Only allows federation if `allow_remote` is true pub async fn get_content_thumbnail_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); if let Some(FileMeta { @@ -188,14 +188,14 @@ pub async fn get_content_thumbnail_route( ) .await? 
{ - Ok(get_content_thumbnail::Response { file, content_type }) + Ok(get_content_thumbnail::v3::Response { file, content_type }) } else if &*body.server_name != db.globals.server_name() && body.allow_remote { let get_thumbnail_response = db .sending .send_federation_request( &db.globals, &body.server_name, - get_content_thumbnail::Request { + get_content_thumbnail::v3::Request { allow_remote: false, height: body.height, width: body.width, diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 447f829e..0f5e7c2c 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -8,7 +8,7 @@ use ruma::{ api::{ client::{ error::ErrorKind, - r0::membership::{ + membership::{ ban_user, forget_room, get_member_events, invite_user, join_room_by_id, join_room_by_id_or_alias, joined_members, joined_rooms, kick_user, leave_room, unban_user, IncomingThirdPartySigned, @@ -44,8 +44,8 @@ use tracing::{debug, error, warn}; /// - If the server does not know about the room: asks other servers over federation pub async fn join_room_by_id_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut servers: HashSet<_> = db @@ -84,8 +84,8 @@ pub async fn join_room_by_id_route( /// - If the server does not know about the room: asks other servers over federation pub async fn join_room_by_id_or_alias_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_deref().expect("user is authenticated"); let body = body.body; @@ -124,7 +124,7 @@ pub async fn join_room_by_id_or_alias_route( db.flush()?; - Ok(join_room_by_id_or_alias::Response { + Ok(join_room_by_id_or_alias::v3::Response { room_id: join_room_response.room_id, }) } @@ -136,15 +136,15 @@ pub async fn join_room_by_id_or_alias_route( /// - This should always work if the user is currently joined. pub async fn leave_room_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.rooms.leave_room(sender_user, &body.room_id, &db).await?; db.flush()?; - Ok(leave_room::Response::new()) + Ok(leave_room::v3::Response::new()) } /// # `POST /_matrix/client/r0/rooms/{roomId}/invite` @@ -152,14 +152,14 @@ pub async fn leave_room_route( /// Tries to send an invite event into the room. pub async fn invite_user_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if let invite_user::IncomingInvitationRecipient::UserId { user_id } = &body.recipient { + if let invite_user::v3::IncomingInvitationRecipient::UserId { user_id } = &body.recipient { invite_helper(sender_user, user_id, &body.room_id, &db, false).await?; db.flush()?; - Ok(invite_user::Response {}) + Ok(invite_user::v3::Response {}) } else { Err(Error::BadRequest(ErrorKind::NotFound, "User not found.")) } @@ -170,8 +170,8 @@ pub async fn invite_user_route( /// Tries to send a kick event into the room. 
pub async fn kick_user_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut event: RoomMemberEventContent = serde_json::from_str( @@ -221,7 +221,7 @@ pub async fn kick_user_route( db.flush()?; - Ok(kick_user::Response::new()) + Ok(kick_user::v3::Response::new()) } /// # `POST /_matrix/client/r0/rooms/{roomId}/ban` @@ -229,8 +229,8 @@ pub async fn kick_user_route( /// Tries to send a ban event into the room. pub async fn ban_user_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // TODO: reason @@ -291,7 +291,7 @@ pub async fn ban_user_route( db.flush()?; - Ok(ban_user::Response::new()) + Ok(ban_user::v3::Response::new()) } /// # `POST /_matrix/client/r0/rooms/{roomId}/unban` @@ -299,8 +299,8 @@ pub async fn ban_user_route( /// Tries to send an unban event into the room. pub async fn unban_user_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut event: RoomMemberEventContent = serde_json::from_str( @@ -349,7 +349,7 @@ pub async fn unban_user_route( db.flush()?; - Ok(unban_user::Response::new()) + Ok(unban_user::v3::Response::new()) } /// # `POST /_matrix/client/r0/rooms/{roomId}/forget` @@ -362,15 +362,15 @@ pub async fn unban_user_route( /// be called from every device pub async fn forget_room_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.rooms.forget(&body.room_id, sender_user)?; db.flush()?; - Ok(forget_room::Response::new()) + Ok(forget_room::v3::Response::new()) } /// # `POST /_matrix/client/r0/joined_rooms` @@ -378,11 +378,11 @@ pub async fn forget_room_route( /// Lists all rooms the user has joined. pub async fn joined_rooms_route( db: DatabaseGuard, - body: Ruma, -) -> Result { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - Ok(joined_rooms::Response { + Ok(joined_rooms::v3::Response { joined_rooms: db .rooms .rooms_joined(sender_user) @@ -398,8 +398,8 @@ pub async fn joined_rooms_route( /// - Only works if the user is currently joined pub async fn get_member_events_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // TODO: check history visibility? @@ -410,7 +410,7 @@ pub async fn get_member_events_route( )); } - Ok(get_member_events::Response { + Ok(get_member_events::v3::Response { chunk: db .rooms .room_state_full(&body.room_id)? @@ -429,8 +429,8 @@ pub async fn get_member_events_route( /// - TODO: An appservice just needs a puppet joined pub async fn joined_members_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if !db.rooms.is_joined(sender_user, &body.room_id)? 
{ @@ -447,14 +447,14 @@ pub async fn joined_members_route( joined.insert( user_id, - joined_members::RoomMember { + joined_members::v3::RoomMember { display_name, avatar_url, }, ); } - Ok(joined_members::Response { joined }) + Ok(joined_members::v3::Response { joined }) } #[tracing::instrument(skip(db))] @@ -464,7 +464,7 @@ async fn join_room_by_id_helper( room_id: &RoomId, servers: &HashSet>, _third_party_signed: Option<&IncomingThirdPartySigned>, -) -> Result { +) -> Result { let sender_user = sender_user.expect("user is authenticated"); let mutex_state = Arc::clone( @@ -489,7 +489,7 @@ async fn join_room_by_id_helper( .send_federation_request( &db.globals, remote_server, - federation::membership::create_join_event_template::v1::Request { + federation::membership::prepare_join_event::v1::Request { room_id, user_id: sender_user, ver: &[RoomVersionId::V5, RoomVersionId::V6], @@ -720,7 +720,7 @@ async fn join_room_by_id_helper( db.flush()?; - Ok(join_room_by_id::Response::new(room_id.to_owned())) + Ok(join_room_by_id::v3::Response::new(room_id.to_owned())) } fn validate_and_add_event_id( diff --git a/src/client_server/message.rs b/src/client_server/message.rs index 93d5b3bb..b5c41490 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -2,7 +2,7 @@ use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, Error, Result, Ruma use ruma::{ api::client::{ error::ErrorKind, - r0::message::{get_message_events, send_message_event}, + message::{get_message_events, send_message_event}, }, events::EventType, }; @@ -20,8 +20,8 @@ use std::{ /// - Tries to send the event into the room, auth rules will determine if it is allowed pub async fn send_message_event_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_deref(); @@ -61,7 +61,7 @@ pub async fn send_message_event_route( .map_err(|_| Error::bad_database("Invalid txnid bytes in database."))? .try_into() .map_err(|_| Error::bad_database("Invalid event id in txnid data."))?; - return Ok(send_message_event::Response { event_id }); + return Ok(send_message_event::v3::Response { event_id }); } let mut unsigned = BTreeMap::new(); @@ -93,7 +93,9 @@ pub async fn send_message_event_route( db.flush()?; - Ok(send_message_event::Response::new((*event_id).to_owned())) + Ok(send_message_event::v3::Response::new( + (*event_id).to_owned(), + )) } /// # `GET /_matrix/client/r0/rooms/{roomId}/messages` @@ -104,8 +106,8 @@ pub async fn send_message_event_route( /// joined, depending on history_visibility) pub async fn get_message_events_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -132,12 +134,12 @@ pub async fn get_message_events_route( let next_token; - let mut resp = get_message_events::Response::new(); + let mut resp = get_message_events::v3::Response::new(); let mut lazy_loaded = HashSet::new(); match body.dir { - get_message_events::Direction::Forward => { + get_message_events::v3::Direction::Forward => { let events_after: Vec<_> = db .rooms .pdus_after(sender_user, &body.room_id, from)? 
@@ -174,7 +176,7 @@ pub async fn get_message_events_route( resp.end = next_token.map(|count| count.to_string()); resp.chunk = events_after; } - get_message_events::Direction::Backward => { + get_message_events::v3::Direction::Backward => { let events_before: Vec<_> = db .rooms .pdus_until(sender_user, &body.room_id, from)? diff --git a/src/client_server/presence.rs b/src/client_server/presence.rs index 7549b1a7..9e6ce0b8 100644 --- a/src/client_server/presence.rs +++ b/src/client_server/presence.rs @@ -1,5 +1,5 @@ use crate::{database::DatabaseGuard, utils, Result, Ruma}; -use ruma::api::client::r0::presence::{get_presence, set_presence}; +use ruma::api::client::presence::{get_presence, set_presence}; use std::time::Duration; /// # `PUT /_matrix/client/r0/presence/{userId}/status` @@ -7,8 +7,8 @@ use std::time::Duration; /// Sets the presence state of the sender user. pub async fn set_presence_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); for room_id in db.rooms.rooms_joined(sender_user) { @@ -38,7 +38,7 @@ pub async fn set_presence_route( db.flush()?; - Ok(set_presence::Response {}) + Ok(set_presence::v3::Response {}) } /// # `GET /_matrix/client/r0/presence/{userId}/status` @@ -48,8 +48,8 @@ pub async fn set_presence_route( /// - Only works if you share a room with the user pub async fn get_presence_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut presence_event = None; @@ -71,7 +71,7 @@ pub async fn get_presence_route( } if let Some(presence) = presence_event { - Ok(get_presence::Response { + Ok(get_presence::v3::Response { // TODO: Should ruma just use the presenceeventcontent type here? 
status_msg: presence.content.status_msg, currently_active: presence.content.currently_active, diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 33bfbb5c..30000272 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -3,7 +3,7 @@ use ruma::{ api::{ client::{ error::ErrorKind, - r0::profile::{ + profile::{ get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name, }, }, @@ -21,8 +21,8 @@ use std::sync::Arc; /// - Also makes sure other users receive the update using presence EDUs pub async fn set_displayname_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.users @@ -108,7 +108,7 @@ pub async fn set_displayname_route( db.flush()?; - Ok(set_display_name::Response {}) + Ok(set_display_name::v3::Response {}) } /// # `GET /_matrix/client/r0/profile/{userId}/displayname` @@ -118,8 +118,8 @@ pub async fn set_displayname_route( /// - If user is on another server: Fetches displayname over federation pub async fn get_displayname_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { if body.user_id.server_name() != db.globals.server_name() { let response = db .sending @@ -133,12 +133,12 @@ pub async fn get_displayname_route( ) .await?; - return Ok(get_display_name::Response { + return Ok(get_display_name::v3::Response { displayname: response.displayname, }); } - Ok(get_display_name::Response { + Ok(get_display_name::v3::Response { displayname: db.users.displayname(&body.user_id)?, }) } @@ -150,8 +150,8 @@ pub async fn get_displayname_route( /// - Also makes sure other users receive the update using presence EDUs pub async fn set_avatar_url_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.users @@ -239,7 +239,7 @@ pub async fn set_avatar_url_route( db.flush()?; - Ok(set_avatar_url::Response {}) + Ok(set_avatar_url::v3::Response {}) } /// # `GET /_matrix/client/r0/profile/{userId}/avatar_url` @@ -249,8 +249,8 @@ pub async fn set_avatar_url_route( /// - If user is on another server: Fetches avatar_url and blurhash over federation pub async fn get_avatar_url_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { if body.user_id.server_name() != db.globals.server_name() { let response = db .sending @@ -264,13 +264,13 @@ pub async fn get_avatar_url_route( ) .await?; - return Ok(get_avatar_url::Response { + return Ok(get_avatar_url::v3::Response { avatar_url: response.avatar_url, blurhash: response.blurhash, }); } - Ok(get_avatar_url::Response { + Ok(get_avatar_url::v3::Response { avatar_url: db.users.avatar_url(&body.user_id)?, blurhash: db.users.blurhash(&body.user_id)?, }) @@ -283,8 +283,8 @@ pub async fn get_avatar_url_route( /// - If user is on another server: Fetches profile over federation pub async fn get_profile_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { if body.user_id.server_name() != db.globals.server_name() { let response = db .sending @@ -298,7 +298,7 @@ pub async fn get_profile_route( ) .await?; - return Ok(get_profile::Response { + return Ok(get_profile::v3::Response { displayname: response.displayname, avatar_url: response.avatar_url, blurhash: response.blurhash, @@ -313,7 +313,7 @@ pub async fn get_profile_route( )); } - Ok(get_profile::Response { + Ok(get_profile::v3::Response { 
avatar_url: db.users.avatar_url(&body.user_id)?, blurhash: db.users.blurhash(&body.user_id)?, displayname: db.users.displayname(&body.user_id)?, diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 67b70d28..90f4e028 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -2,7 +2,7 @@ use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::{ api::client::{ error::ErrorKind, - r0::push::{ + push::{ delete_pushrule, get_pushers, get_pushrule, get_pushrule_actions, get_pushrule_enabled, get_pushrules_all, set_pusher, set_pushrule, set_pushrule_actions, set_pushrule_enabled, RuleKind, @@ -17,8 +17,8 @@ use ruma::{ /// Retrieves the push rules event for this user. pub async fn get_pushrules_all_route( db: DatabaseGuard, - body: Ruma, -) -> Result { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let event: PushRulesEvent = db @@ -29,7 +29,7 @@ pub async fn get_pushrules_all_route( "PushRules event not found.", ))?; - Ok(get_pushrules_all::Response { + Ok(get_pushrules_all::v3::Response { global: event.content.global, }) } @@ -39,8 +39,8 @@ pub async fn get_pushrules_all_route( /// Retrieves a single specified push rule for this user. pub async fn get_pushrule_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let event: PushRulesEvent = db @@ -77,7 +77,7 @@ pub async fn get_pushrule_route( }; if let Some(rule) = rule { - Ok(get_pushrule::Response { rule }) + Ok(get_pushrule::v3::Response { rule }) } else { Err(Error::BadRequest( ErrorKind::NotFound, @@ -91,8 +91,8 @@ pub async fn get_pushrule_route( /// Creates a single specified push rule for this user. pub async fn set_pushrule_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let body = body.body; @@ -179,7 +179,7 @@ pub async fn set_pushrule_route( db.flush()?; - Ok(set_pushrule::Response {}) + Ok(set_pushrule::v3::Response {}) } /// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions` @@ -187,8 +187,8 @@ pub async fn set_pushrule_route( /// Gets the actions of a single specified push rule for this user. pub async fn get_pushrule_actions_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if body.scope != "global" { @@ -233,7 +233,7 @@ pub async fn get_pushrule_actions_route( db.flush()?; - Ok(get_pushrule_actions::Response { + Ok(get_pushrule_actions::v3::Response { actions: actions.unwrap_or_default(), }) } @@ -243,8 +243,8 @@ pub async fn get_pushrule_actions_route( /// Sets the actions of a single specified push rule for this user. pub async fn set_pushrule_actions_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if body.scope != "global" { @@ -302,7 +302,7 @@ pub async fn set_pushrule_actions_route( db.flush()?; - Ok(set_pushrule_actions::Response {}) + Ok(set_pushrule_actions::v3::Response {}) } /// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled` @@ -310,8 +310,8 @@ pub async fn set_pushrule_actions_route( /// Gets the enabled status of a single specified push rule for this user. 
pub async fn get_pushrule_enabled_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if body.scope != "global" { @@ -361,7 +361,7 @@ pub async fn get_pushrule_enabled_route( db.flush()?; - Ok(get_pushrule_enabled::Response { enabled }) + Ok(get_pushrule_enabled::v3::Response { enabled }) } /// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled` @@ -369,8 +369,8 @@ pub async fn get_pushrule_enabled_route( /// Sets the enabled status of a single specified push rule for this user. pub async fn set_pushrule_enabled_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if body.scope != "global" { @@ -433,7 +433,7 @@ pub async fn set_pushrule_enabled_route( db.flush()?; - Ok(set_pushrule_enabled::Response {}) + Ok(set_pushrule_enabled::v3::Response {}) } /// # `DELETE /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` @@ -441,8 +441,8 @@ pub async fn set_pushrule_enabled_route( /// Deletes a single specified push rule for this user. pub async fn delete_pushrule_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if body.scope != "global" { @@ -495,7 +495,7 @@ pub async fn delete_pushrule_route( db.flush()?; - Ok(delete_pushrule::Response {}) + Ok(delete_pushrule::v3::Response {}) } /// # `GET /_matrix/client/r0/pushers` @@ -503,11 +503,11 @@ pub async fn delete_pushrule_route( /// Gets all currently active pushers for the sender user. pub async fn get_pushers_route( db: DatabaseGuard, - body: Ruma, -) -> Result { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - Ok(get_pushers::Response { + Ok(get_pushers::v3::Response { pushers: db.pusher.get_pushers(sender_user)?, }) } @@ -519,8 +519,8 @@ pub async fn get_pushers_route( /// - TODO: Handle `append` pub async fn set_pushers_route( db: DatabaseGuard, - body: Ruma, -) -> Result { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let pusher = body.pusher.clone(); @@ -528,5 +528,5 @@ pub async fn set_pushers_route( db.flush()?; - Ok(set_pusher::Response::default()) + Ok(set_pusher::v3::Response::default()) } diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs index cc6928d1..9422f218 100644 --- a/src/client_server/read_marker.rs +++ b/src/client_server/read_marker.rs @@ -1,9 +1,6 @@ use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::{ - api::client::{ - error::ErrorKind, - r0::{read_marker::set_read_marker, receipt::create_receipt}, - }, + api::client::{error::ErrorKind, read_marker::set_read_marker, receipt::create_receipt}, events::EventType, receipt::ReceiptType, MilliSecondsSinceUnixEpoch, @@ -18,8 +15,8 @@ use std::collections::BTreeMap; /// - If `read_receipt` is set: Update private marker and public read receipt EDU pub async fn set_read_marker_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let fully_read_event = ruma::events::fully_read::FullyReadEvent { @@ -75,7 +72,7 @@ pub async fn set_read_marker_route( db.flush()?; - Ok(set_read_marker::Response {}) + Ok(set_read_marker::v3::Response {}) } /// # `POST 
/_matrix/client/r0/rooms/{roomId}/receipt/{receiptType}/{eventId}` @@ -83,8 +80,8 @@ pub async fn set_read_marker_route( /// Sets private read marker and public read receipt EDU. pub async fn create_receipt_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.rooms.edus.private_read_set( @@ -126,5 +123,5 @@ pub async fn create_receipt_route( db.flush()?; - Ok(create_receipt::Response {}) + Ok(create_receipt::v3::Response {}) } diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index 1e05bfe2..4843993a 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use crate::{database::DatabaseGuard, pdu::PduBuilder, Result, Ruma}; use ruma::{ - api::client::r0::redact::redact_event, + api::client::redact::redact_event, events::{room::redaction::RoomRedactionEventContent, EventType}, }; @@ -15,8 +15,8 @@ use serde_json::value::to_raw_value; /// - TODO: Handle txn id pub async fn redact_event_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let body = body.body; @@ -52,5 +52,5 @@ pub async fn redact_event_route( db.flush()?; let event_id = (*event_id).to_owned(); - Ok(redact_event::Response { event_id }) + Ok(redact_event::v3::Response { event_id }) } diff --git a/src/client_server/report.rs b/src/client_server/report.rs index 6274172c..1e47792e 100644 --- a/src/client_server/report.rs +++ b/src/client_server/report.rs @@ -1,6 +1,6 @@ use crate::{database::DatabaseGuard, utils::HtmlEscape, Error, Result, Ruma}; use ruma::{ - api::client::{error::ErrorKind, r0::room::report_content}, + api::client::{error::ErrorKind, room::report_content}, events::room::message, int, }; @@ -11,8 +11,8 @@ use ruma::{ /// pub async fn report_event_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let pdu = match db.rooms.get_pdu(&body.event_id)? 
{ @@ -68,5 +68,5 @@ pub async fn report_event_route( db.flush()?; - Ok(report_content::Response {}) + Ok(report_content::v3::Response {}) } diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 54559e26..99838ceb 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -4,7 +4,7 @@ use crate::{ use ruma::{ api::client::{ error::ErrorKind, - r0::room::{self, aliases, create_room, get_room_event, upgrade_room}, + room::{self, aliases, create_room, get_room_event, upgrade_room}, }, events::{ room::{ @@ -47,8 +47,10 @@ use tracing::{info, warn}; /// - Send invite events pub async fn create_room_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { + use create_room::v3::RoomPreset; + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let room_id = RoomId::new(db.globals.server_name()); @@ -207,15 +209,15 @@ pub async fn create_room_route( .preset .clone() .unwrap_or_else(|| match &body.visibility { - room::Visibility::Private => create_room::RoomPreset::PrivateChat, - room::Visibility::Public => create_room::RoomPreset::PublicChat, - _ => create_room::RoomPreset::PrivateChat, // Room visibility should not be custom + room::Visibility::Private => RoomPreset::PrivateChat, + room::Visibility::Public => RoomPreset::PublicChat, + _ => RoomPreset::PrivateChat, // Room visibility should not be custom }); let mut users = BTreeMap::new(); users.insert(sender_user.clone(), int!(100)); - if preset == create_room::RoomPreset::TrustedPrivateChat { + if preset == RoomPreset::TrustedPrivateChat { for invite_ in &body.invite { users.insert(invite_.clone(), int!(100)); } @@ -281,7 +283,7 @@ pub async fn create_room_route( PduBuilder { event_type: EventType::RoomJoinRules, content: to_raw_value(&RoomJoinRulesEventContent::new(match preset { - create_room::RoomPreset::PublicChat => JoinRule::Public, + RoomPreset::PublicChat => JoinRule::Public, // according to spec "invite" is the default _ => JoinRule::Invite, })) @@ -319,7 +321,7 @@ pub async fn create_room_route( PduBuilder { event_type: EventType::RoomGuestAccess, content: to_raw_value(&RoomGuestAccessEventContent::new(match preset { - create_room::RoomPreset::PublicChat => GuestAccess::Forbidden, + RoomPreset::PublicChat => GuestAccess::Forbidden, _ => GuestAccess::CanJoin, })) .expect("event is valid, we just created it"), @@ -408,7 +410,7 @@ pub async fn create_room_route( db.flush()?; - Ok(create_room::Response::new(room_id)) + Ok(create_room::v3::Response::new(room_id)) } /// # `GET /_matrix/client/r0/rooms/{roomId}/event/{eventId}` @@ -418,8 +420,8 @@ pub async fn create_room_route( /// - You have to currently be joined to the room (TODO: Respect history visibility) pub async fn get_room_event_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if !db.rooms.is_joined(sender_user, &body.room_id)? { @@ -429,7 +431,7 @@ pub async fn get_room_event_route( )); } - Ok(get_room_event::Response { + Ok(get_room_event::v3::Response { event: db .rooms .get_pdu(&body.event_id)? 
@@ -445,8 +447,8 @@ pub async fn get_room_event_route( /// - Only users joined to the room are allowed to call this TODO: Allow any user to call it if history_visibility is world readable pub async fn get_room_aliases_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if !db.rooms.is_joined(sender_user, &body.room_id)? { @@ -456,7 +458,7 @@ pub async fn get_room_aliases_route( )); } - Ok(aliases::Response { + Ok(aliases::v3::Response { aliases: db .rooms .room_aliases(&body.room_id) @@ -477,8 +479,8 @@ pub async fn get_room_aliases_route( /// - Modifies old room power levels to prevent users from speaking pub async fn upgrade_room_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if !matches!(body.new_version, RoomVersionId::V5 | RoomVersionId::V6) { @@ -702,5 +704,5 @@ pub async fn upgrade_room_route( db.flush()?; // Return the replacement room id - Ok(upgrade_room::Response { replacement_room }) + Ok(upgrade_room::v3::Response { replacement_room }) } diff --git a/src/client_server/search.rs b/src/client_server/search.rs index 5860484e..c83ff2c0 100644 --- a/src/client_server/search.rs +++ b/src/client_server/search.rs @@ -1,7 +1,12 @@ use crate::{database::DatabaseGuard, Error, Result, Ruma}; -use ruma::api::client::{error::ErrorKind, r0::search::search_events}; +use ruma::api::client::{ + error::ErrorKind, + search::search_events::v3::{ + self as search_events_v3, EventContextResult, ResultCategories, ResultRoomEvents, + SearchResult, + }, +}; -use search_events::{EventContextResult, ResultCategories, ResultRoomEvents, SearchResult}; use std::collections::BTreeMap; /// # `POST /_matrix/client/r0/search` @@ -11,8 +16,8 @@ use std::collections::BTreeMap; /// - Only works if the user is currently joined to the room (TODO: Respect history visibility) pub async fn search_events_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let search_criteria = body.search_categories.room_events.as_ref().unwrap(); @@ -97,7 +102,7 @@ pub async fn search_events_route( Some((skip + limit).to_string()) }; - Ok(search_events::Response::new(ResultCategories { + Ok(search_events_v3::Response::new(ResultCategories { room_events: ResultRoomEvents { count: Some((results.len() as u32).into()), // TODO: set this to none. Element shouldn't depend on it groups: BTreeMap::new(), // TODO diff --git a/src/client_server/session.rs b/src/client_server/session.rs index c2259c26..2e1ed544 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -3,10 +3,8 @@ use crate::{database::DatabaseGuard, utils, Error, Result, Ruma}; use ruma::{ api::client::{ error::ErrorKind, - r0::{ - session::{get_login_types, login, logout, logout_all}, - uiaa::IncomingUserIdentifier, - }, + session::{get_login_types, login, logout, logout_all}, + uiaa::IncomingUserIdentifier, }, UserId, }; @@ -24,10 +22,10 @@ struct Claims { /// Get the supported login types of this server. One of these should be used as the `type` field /// when logging in. 
pub async fn get_login_types_route( - _body: Ruma, -) -> Result { - Ok(get_login_types::Response::new(vec![ - get_login_types::LoginType::Password(Default::default()), + _body: Ruma, +) -> Result { + Ok(get_login_types::v3::Response::new(vec![ + get_login_types::v3::LoginType::Password(Default::default()), ])) } @@ -44,12 +42,12 @@ pub async fn get_login_types_route( /// supported login types. pub async fn login_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { // Validate login method // TODO: Other login methods let user_id = match &body.login_info { - login::IncomingLoginInfo::Password(login::IncomingPassword { + login::v3::IncomingLoginInfo::Password(login::v3::IncomingPassword { identifier, password, }) => { @@ -86,7 +84,7 @@ pub async fn login_route( user_id } - login::IncomingLoginInfo::Token(login::IncomingToken { token }) => { + login::v3::IncomingLoginInfo::Token(login::v3::IncomingToken { token }) => { if let Some(jwt_decoding_key) = db.globals.jwt_decoding_key() { let token = jsonwebtoken::decode::( token, @@ -144,7 +142,7 @@ pub async fn login_route( db.flush()?; - Ok(login::Response { + Ok(login::v3::Response { user_id, access_token: token, home_server: Some(db.globals.server_name().to_owned()), @@ -163,8 +161,8 @@ pub async fn login_route( /// - Triggers device list updates pub async fn logout_route( db: DatabaseGuard, - body: Ruma, -) -> Result { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -172,7 +170,7 @@ pub async fn logout_route( db.flush()?; - Ok(logout::Response::new()) + Ok(logout::v3::Response::new()) } /// # `POST /_matrix/client/r0/logout/all` @@ -188,8 +186,8 @@ pub async fn logout_route( /// from each device of this user. 
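The longer versioned paths also explain the function-local imports added elsewhere in this patch: create_room_route gains use create_room::v3::RoomPreset; and sync_helper pulls the sync_events::v3 types into scope once, so the bodies can keep the short names. A sketch of that idiom with a placeholder module standing in for ruma (nothing below is taken from the patch beyond the pattern itself):

// Placeholder module imitating an endpoint module with a versioned submodule.
mod sync_events {
    pub mod v3 {
        pub struct Timeline {
            pub limited: bool,
            pub events: Vec<String>,
        }
    }
}

fn build_timeline() -> sync_events::v3::Timeline {
    // A function-local `use` keeps the struct literals below short instead of
    // repeating the full `sync_events::v3::` prefix on every field-heavy line.
    use sync_events::v3::Timeline;

    Timeline {
        limited: false,
        events: Vec::new(),
    }
}

fn main() {
    let timeline = build_timeline();
    assert!(!timeline.limited);
}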
pub async fn logout_all_route( db: DatabaseGuard, - body: Ruma, -) -> Result { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); for device_id in db.users.all_device_ids(sender_user).flatten() { @@ -198,5 +196,5 @@ pub async fn logout_all_route( db.flush()?; - Ok(logout_all::Response::new()) + Ok(logout_all::v3::Response::new()) } diff --git a/src/client_server/state.rs b/src/client_server/state.rs index e334e7de..a97b1872 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -6,7 +6,7 @@ use crate::{ use ruma::{ api::client::{ error::ErrorKind, - r0::state::{get_state_events, get_state_events_for_key, send_state_event}, + state::{get_state_events, get_state_events_for_key, send_state_event}, }, events::{ room::{ @@ -28,8 +28,8 @@ use ruma::{ /// - If event is new canonical_alias: Rejects if alias is incorrect pub async fn send_state_event_for_key_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let event_id = send_state_event_for_key_helper( @@ -45,7 +45,7 @@ pub async fn send_state_event_for_key_route( db.flush()?; let event_id = (*event_id).to_owned(); - Ok(send_state_event::Response { event_id }) + Ok(send_state_event::v3::Response { event_id }) } /// # `PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}` @@ -57,8 +57,8 @@ pub async fn send_state_event_for_key_route( /// - If event is new canonical_alias: Rejects if alias is incorrect pub async fn send_state_event_for_empty_key_route( db: DatabaseGuard, - body: Ruma>, -) -> Result> { + body: Ruma>, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // Forbid m.room.encryption if encryption is disabled @@ -82,7 +82,7 @@ pub async fn send_state_event_for_empty_key_route( db.flush()?; let event_id = (*event_id).to_owned(); - Ok(send_state_event::Response { event_id }.into()) + Ok(send_state_event::v3::Response { event_id }.into()) } /// # `GET /_matrix/client/r0/rooms/{roomid}/state` @@ -92,8 +92,8 @@ pub async fn send_state_event_for_empty_key_route( /// - If not joined: Only works if current room history visibility is world readable pub async fn get_state_events_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); #[allow(clippy::blocks_in_if_conditions)] @@ -121,7 +121,7 @@ pub async fn get_state_events_route( )); } - Ok(get_state_events::Response { + Ok(get_state_events::v3::Response { room_state: db .rooms .room_state_full(&body.room_id)? 
@@ -138,8 +138,8 @@ pub async fn get_state_events_route( /// - If not joined: Only works if current room history visibility is world readable pub async fn get_state_events_for_key_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); #[allow(clippy::blocks_in_if_conditions)] @@ -175,7 +175,7 @@ pub async fn get_state_events_for_key_route( "State event not found.", ))?; - Ok(get_state_events_for_key::Response { + Ok(get_state_events_for_key::v3::Response { content: serde_json::from_str(event.content.get()) .map_err(|_| Error::bad_database("Invalid event content in database"))?, }) @@ -188,8 +188,8 @@ pub async fn get_state_events_for_key_route( /// - If not joined: Only works if current room history visibility is world readable pub async fn get_state_events_for_empty_key_route( db: DatabaseGuard, - body: Ruma>, -) -> Result> { + body: Ruma>, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); #[allow(clippy::blocks_in_if_conditions)] @@ -225,7 +225,7 @@ pub async fn get_state_events_for_empty_key_route( "State event not found.", ))?; - Ok(get_state_events_for_key::Response { + Ok(get_state_events_for_key::v3::Response { content: serde_json::from_str(event.content.get()) .map_err(|_| Error::bad_database("Invalid event content in database"))?, } diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index eef65da4..eec4cf6d 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -1,6 +1,6 @@ use crate::{database::DatabaseGuard, Database, Error, Result, Ruma, RumaResponse}; use ruma::{ - api::client::r0::{ + api::client::{ filter::{IncomingFilterDefinition, LazyLoadOptions}, sync::sync_events, uiaa::UiaaResponse, @@ -56,8 +56,8 @@ use tracing::error; /// `since` will be cached pub async fn sync_events_route( db: DatabaseGuard, - body: Ruma>, -) -> Result> { + body: Ruma>, +) -> Result> { let sender_user = body.sender_user.expect("user is authenticated"); let sender_device = body.sender_device.expect("user is authenticated"); let body = body.body; @@ -130,8 +130,8 @@ async fn sync_helper_wrapper( db: Arc, sender_user: Box, sender_device: Box, - body: sync_events::IncomingRequest, - tx: Sender>>, + body: sync_events::v3::IncomingRequest, + tx: Sender>>, ) { let since = body.since.clone(); @@ -172,9 +172,15 @@ async fn sync_helper( db: Arc, sender_user: Box, sender_device: Box, - body: sync_events::IncomingRequest, + body: sync_events::v3::IncomingRequest, // bool = caching allowed -) -> Result<(sync_events::Response, bool), Error> { +) -> Result<(sync_events::v3::Response, bool), Error> { + use sync_events::v3::{ + DeviceLists, Ephemeral, GlobalAccountData, IncomingFilter, InviteState, InvitedRoom, + JoinedRoom, LeftRoom, Presence, RoomAccountData, RoomSummary, Rooms, State, Timeline, + ToDevice, UnreadNotificationsCount, + }; + // TODO: match body.set_presence { db.rooms.edus.ping_presence(&sender_user)?; @@ -187,8 +193,8 @@ async fn sync_helper( // Load filter let filter = match body.filter { None => IncomingFilterDefinition::default(), - Some(sync_events::IncomingFilter::FilterDefinition(filter)) => filter, - Some(sync_events::IncomingFilter::FilterId(filter_id)) => db + Some(IncomingFilter::FilterDefinition(filter)) => filter, + Some(IncomingFilter::FilterId(filter_id)) => db .users .get_filter(&sender_user, &filter_id)? 
.unwrap_or_default(), @@ -666,8 +672,8 @@ async fn sync_helper( db.rooms .associate_token_shortstatehash(&room_id, next_batch, current_shortstatehash)?; - let joined_room = sync_events::JoinedRoom { - account_data: sync_events::RoomAccountData { + let joined_room = JoinedRoom { + account_data: RoomAccountData { events: db .account_data .changes_since(Some(&room_id), &sender_user, since)? @@ -679,27 +685,27 @@ async fn sync_helper( }) .collect(), }, - summary: sync_events::RoomSummary { + summary: RoomSummary { heroes, joined_member_count: joined_member_count.map(|n| (n as u32).into()), invited_member_count: invited_member_count.map(|n| (n as u32).into()), }, - unread_notifications: sync_events::UnreadNotificationsCount { + unread_notifications: UnreadNotificationsCount { highlight_count, notification_count, }, - timeline: sync_events::Timeline { + timeline: Timeline { limited: limited || joined_since_last_sync, prev_batch, events: room_events, }, - state: sync_events::State { + state: State { events: state_events .iter() .map(|pdu| pdu.to_sync_state_event()) .collect(), }, - ephemeral: sync_events::Ephemeral { events: edus }, + ephemeral: Ephemeral { events: edus }, }; if !joined_room.is_empty() { @@ -767,14 +773,14 @@ async fn sync_helper( left_rooms.insert( room_id.clone(), - sync_events::LeftRoom { - account_data: sync_events::RoomAccountData { events: Vec::new() }, - timeline: sync_events::Timeline { + LeftRoom { + account_data: RoomAccountData { events: Vec::new() }, + timeline: Timeline { limited: false, prev_batch: Some(next_batch_string.clone()), events: Vec::new(), }, - state: sync_events::State { + state: State { events: left_state_events, }, }, @@ -807,8 +813,8 @@ async fn sync_helper( invited_rooms.insert( room_id.clone(), - sync_events::InvitedRoom { - invite_state: sync_events::InviteState { + InvitedRoom { + invite_state: InviteState { events: invite_state_events, }, }, @@ -840,21 +846,21 @@ async fn sync_helper( db.users .remove_to_device_events(&sender_user, &sender_device, since)?; - let response = sync_events::Response { + let response = sync_events::v3::Response { next_batch: next_batch_string, - rooms: sync_events::Rooms { + rooms: Rooms { leave: left_rooms, join: joined_rooms, invite: invited_rooms, knock: BTreeMap::new(), // TODO }, - presence: sync_events::Presence { + presence: Presence { events: presence_updates .into_iter() .map(|(_, v)| Raw::new(&v).expect("PresenceEvent always serializes successfully")) .collect(), }, - account_data: sync_events::GlobalAccountData { + account_data: GlobalAccountData { events: db .account_data .changes_since(None, &sender_user, since)? 
@@ -866,12 +872,12 @@ async fn sync_helper( }) .collect(), }, - device_lists: sync_events::DeviceLists { + device_lists: DeviceLists { changed: device_list_updates.into_iter().collect(), left: device_list_left.into_iter().collect(), }, device_one_time_keys_count: db.users.count_one_time_keys(&sender_user, &sender_device)?, - to_device: sync_events::ToDevice { + to_device: ToDevice { events: db .users .get_to_device_events(&sender_user, &sender_device)?, diff --git a/src/client_server/tag.rs b/src/client_server/tag.rs index 29bd9a0b..21cff0bb 100644 --- a/src/client_server/tag.rs +++ b/src/client_server/tag.rs @@ -1,6 +1,6 @@ use crate::{database::DatabaseGuard, Result, Ruma}; use ruma::{ - api::client::r0::tag::{create_tag, delete_tag, get_tags}, + api::client::tag::{create_tag, delete_tag, get_tags}, events::{ tag::{TagEvent, TagEventContent}, EventType, @@ -15,8 +15,8 @@ use std::collections::BTreeMap; /// - Inserts the tag into the tag event of the room account data. pub async fn update_tag_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut tags_event = db @@ -42,7 +42,7 @@ pub async fn update_tag_route( db.flush()?; - Ok(create_tag::Response {}) + Ok(create_tag::v3::Response {}) } /// # `DELETE /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}` @@ -52,8 +52,8 @@ pub async fn update_tag_route( /// - Removes the tag from the tag event of the room account data. pub async fn delete_tag_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut tags_event = db @@ -76,7 +76,7 @@ pub async fn delete_tag_route( db.flush()?; - Ok(delete_tag::Response {}) + Ok(delete_tag::v3::Response {}) } /// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags` @@ -86,11 +86,11 @@ pub async fn delete_tag_route( /// - Gets the tag event of the room account data. pub async fn get_tags_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - Ok(get_tags::Response { + Ok(get_tags::v3::Response { tags: db .account_data .get(Some(&body.room_id), sender_user, EventType::Tag)? diff --git a/src/client_server/thirdparty.rs b/src/client_server/thirdparty.rs index 524f3bad..c2c1adfd 100644 --- a/src/client_server/thirdparty.rs +++ b/src/client_server/thirdparty.rs @@ -1,5 +1,5 @@ use crate::{Result, Ruma}; -use ruma::api::client::r0::thirdparty::get_protocols; +use ruma::api::client::thirdparty::get_protocols; use std::collections::BTreeMap; @@ -7,10 +7,10 @@ use std::collections::BTreeMap; /// /// TODO: Fetches all metadata about protocols supported by the homeserver. 
pub async fn get_protocols_route( - _body: Ruma, -) -> Result { + _body: Ruma, +) -> Result { // TODO - Ok(get_protocols::Response { + Ok(get_protocols::v3::Response { protocols: BTreeMap::new(), }) } diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index e57998f6..6d4fc0ca 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -3,7 +3,7 @@ use std::collections::BTreeMap; use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::{ api::{ - client::{error::ErrorKind, r0::to_device::send_event_to_device}, + client::{error::ErrorKind, to_device::send_event_to_device}, federation::{self, transactions::edu::DirectDeviceContent}, }, events::EventType, @@ -15,8 +15,8 @@ use ruma::{ /// Send a to-device event to a set of client devices. pub async fn send_event_to_device_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_deref(); @@ -28,7 +28,7 @@ pub async fn send_event_to_device_route( .existing_txnid(sender_user, sender_device, &body.txn_id)? .is_some() { - return Ok(send_event_to_device::Response.into()); + return Ok(send_event_to_device::v3::Response.into()); } */ @@ -93,5 +93,5 @@ pub async fn send_event_to_device_route( db.flush()?; - Ok(send_event_to_device::Response {}) + Ok(send_event_to_device::v3::Response {}) } diff --git a/src/client_server/typing.rs b/src/client_server/typing.rs index bbc852d2..9d4ba6f8 100644 --- a/src/client_server/typing.rs +++ b/src/client_server/typing.rs @@ -1,14 +1,15 @@ use crate::{database::DatabaseGuard, utils, Result, Ruma}; -use create_typing_event::Typing; -use ruma::api::client::r0::typing::create_typing_event; +use ruma::api::client::typing::create_typing_event; /// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}` /// /// Sets the typing state of the sender user. pub async fn create_typing_event_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { + use create_typing_event::v3::Typing; + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if let Typing::Yes(duration) = body.state { @@ -24,5 +25,5 @@ pub async fn create_typing_event_route( .typing_remove(sender_user, &body.room_id, &db.globals)?; } - Ok(create_typing_event::Response {}) + Ok(create_typing_event::v3::Response {}) } diff --git a/src/client_server/unversioned.rs b/src/client_server/unversioned.rs index 168f172a..84ac355e 100644 --- a/src/client_server/unversioned.rs +++ b/src/client_server/unversioned.rs @@ -1,7 +1,7 @@ use std::{collections::BTreeMap, iter::FromIterator}; use crate::{Result, Ruma}; -use ruma::api::client::unversioned::get_supported_versions; +use ruma::api::client::discover::get_supported_versions; /// # `GET /_matrix/client/versions` /// diff --git a/src/client_server/user_directory.rs b/src/client_server/user_directory.rs index cecba7f2..d641848f 100644 --- a/src/client_server/user_directory.rs +++ b/src/client_server/user_directory.rs @@ -1,5 +1,5 @@ use crate::{database::DatabaseGuard, Result, Ruma}; -use ruma::api::client::r0::user_directory::search_users; +use ruma::api::client::user_directory::search_users; /// # `POST /_matrix/client/r0/user_directory/search` /// @@ -8,15 +8,15 @@ use ruma::api::client::r0::user_directory::search_users; /// - TODO: Hide users that are not in any public rooms? 
pub async fn search_users_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let limit = u64::from(body.limit) as usize; let mut users = db.users.iter().filter_map(|user_id| { // Filter out buggy users (they should not exist, but you never know...) let user_id = user_id.ok()?; - let user = search_users::User { + let user = search_users::v3::User { user_id: user_id.clone(), display_name: db.users.displayname(&user_id).ok()?, avatar_url: db.users.avatar_url(&user_id).ok()?, @@ -47,5 +47,5 @@ pub async fn search_users_route( let results = users.by_ref().take(limit).collect(); let limited = users.next().is_some(); - Ok(search_users::Response { results, limited }) + Ok(search_users::v3::Response { results, limited }) } diff --git a/src/client_server/voip.rs b/src/client_server/voip.rs index e9a553a9..6281744b 100644 --- a/src/client_server/voip.rs +++ b/src/client_server/voip.rs @@ -1,6 +1,6 @@ use crate::{database::DatabaseGuard, Result, Ruma}; use hmac::{Hmac, Mac, NewMac}; -use ruma::{api::client::r0::voip::get_turn_server_info, SecondsSinceUnixEpoch}; +use ruma::{api::client::voip::get_turn_server_info, SecondsSinceUnixEpoch}; use sha1::Sha1; use std::time::{Duration, SystemTime}; @@ -11,8 +11,8 @@ type HmacSha1 = Hmac; /// TODO: Returns information about the recommended turn server. pub async fn turn_server_route( db: DatabaseGuard, - body: Ruma, -) -> Result { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let turn_secret = db.globals.turn_secret(); @@ -39,7 +39,7 @@ pub async fn turn_server_route( ) }; - Ok(get_turn_server_info::Response { + Ok(get_turn_server_info::v3::Response { username, password, uris: db.globals.turn_uris().to_vec(), diff --git a/src/database/globals.rs b/src/database/globals.rs index c5b2b779..7bc300d2 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -1,7 +1,7 @@ use crate::{database::Config, server_server::FedDest, utils, Error, Result}; use ruma::{ api::{ - client::r0::sync::sync_events, + client::sync::sync_events, federation::discovery::{ServerSigningKeys, VerifyKey}, }, DeviceId, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, ServerSigningKeyId, UserId, @@ -27,8 +27,8 @@ type WellKnownMap = HashMap, (FedDest, String)>; type TlsNameMap = HashMap, u16)>; type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries type SyncHandle = ( - Option, // since - Receiver>>, // rx + Option, // since + Receiver>>, // rx ); pub struct Globals { diff --git a/src/database/key_backups.rs b/src/database/key_backups.rs index 2eefe481..10443f6b 100644 --- a/src/database/key_backups.rs +++ b/src/database/key_backups.rs @@ -1,8 +1,8 @@ use crate::{utils, Error, Result}; use ruma::{ api::client::{ + backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, error::ErrorKind, - r0::backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, }, serde::Raw, RoomId, UserId, diff --git a/src/database/pusher.rs b/src/database/pusher.rs index bc7017b0..36f8454e 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -2,7 +2,7 @@ use crate::{Database, Error, PduEvent, Result}; use bytes::BytesMut; use ruma::{ api::{ - client::r0::push::{get_pushers, set_pusher, PusherKind}, + client::push::{get_pushers, set_pusher, PusherKind}, push_gateway::send_event_notification::{ self, v1::{Device, Notification, NotificationCounts, NotificationPriority}, @@ -30,7 +30,7 @@ pub struct PushData { impl PushData { #[tracing::instrument(skip(self, 
sender, pusher))] - pub fn set_pusher(&self, sender: &UserId, pusher: set_pusher::Pusher) -> Result<()> { + pub fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> { let mut key = sender.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(pusher.pushkey.as_bytes()); @@ -53,7 +53,7 @@ impl PushData { } #[tracing::instrument(skip(self, senderkey))] - pub fn get_pusher(&self, senderkey: &[u8]) -> Result> { + pub fn get_pusher(&self, senderkey: &[u8]) -> Result> { self.senderkey_pusher .get(senderkey)? .map(|push| { @@ -64,7 +64,7 @@ impl PushData { } #[tracing::instrument(skip(self, sender))] - pub fn get_pushers(&self, sender: &UserId) -> Result> { + pub fn get_pushers(&self, sender: &UserId) -> Result> { let mut prefix = sender.as_bytes().to_vec(); prefix.push(0xff); @@ -171,7 +171,7 @@ where pub async fn send_push_notice( user: &UserId, unread: UInt, - pusher: &get_pushers::Pusher, + pusher: &get_pushers::v3::Pusher, ruleset: Ruleset, pdu: &PduEvent, db: &Database, @@ -251,7 +251,7 @@ pub fn get_actions<'a>( #[tracing::instrument(skip(unread, pusher, tweaks, event, db))] async fn send_notice( unread: UInt, - pusher: &get_pushers::Pusher, + pusher: &get_pushers::v3::Pusher, tweaks: Vec, event: &PduEvent, db: &Database, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index c751167a..3a71a3b5 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -2606,7 +2606,7 @@ impl Rooms { .send_federation_request( &db.globals, &remote_server, - federation::membership::get_leave_event::v1::Request { room_id, user_id }, + federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, ) .await; diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index b2244b5d..6b15d721 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -7,7 +7,7 @@ use crate::{client_server::SESSION_ID_LENGTH, utils, Error, Result}; use ruma::{ api::client::{ error::ErrorKind, - r0::uiaa::{ + uiaa::{ AuthType, IncomingAuthData, IncomingPassword, IncomingUserIdentifier::MatrixId, UiaaInfo, }, diff --git a/src/database/users.rs b/src/database/users.rs index 681ee284..a66fa93a 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -1,9 +1,6 @@ use crate::{utils, Error, Result}; use ruma::{ - api::client::{ - error::ErrorKind, - r0::{device::Device, filter::IncomingFilterDefinition}, - }, + api::client::{device::Device, error::ErrorKind, filter::IncomingFilterDefinition}, encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::{AnyToDeviceEvent, EventType}, identifiers::MxcUri, diff --git a/src/error.rs b/src/error.rs index a16a3abd..206a055f 100644 --- a/src/error.rs +++ b/src/error.rs @@ -4,7 +4,7 @@ use http::StatusCode; use ruma::{ api::client::{ error::{Error as RumaError, ErrorKind}, - r0::uiaa::{UiaaInfo, UiaaResponse}, + uiaa::{UiaaInfo, UiaaResponse}, }, ServerName, }; diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index ee89cc28..119c3ea8 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -1,6 +1,6 @@ use crate::Error; use ruma::{ - api::client::r0::uiaa::UiaaResponse, + api::client::uiaa::UiaaResponse, identifiers::{DeviceId, UserId}, signatures::CanonicalJsonValue, Outgoing, ServerName, diff --git a/src/server_server.rs b/src/server_server.rs index a4442f05..9dc26170 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -26,7 +26,7 @@ use ruma::{ membership::{ create_invite, create_join_event::{self, RoomState}, - create_join_event_template, + prepare_join_event, }, 
query::{get_profile_information, get_room_information}, transactions::{ @@ -49,7 +49,7 @@ use ruma::{ }, int, receipt::ReceiptType, - serde::{Base64, JsonObject}, + serde::{Base64, JsonObject, Raw}, signatures::{CanonicalJsonObject, CanonicalJsonValue}, state_res::{self, RoomVersion, StateMap}, to_device::DeviceIdOrAllDevices, @@ -532,7 +532,7 @@ pub async fn get_server_keys_route(db: DatabaseGuard) -> Result Result>() .unwrap() @@ -1981,24 +1982,23 @@ pub(crate) async fn fetch_signing_keys( debug!("Fetching signing keys for {} over federation", origin); - if let Ok(get_keys_response) = db + if let Some(server_key) = db .sending .send_federation_request(&db.globals, origin, get_server_keys::v2::Request::new()) .await + .ok() + .and_then(|resp| resp.server_key.deserialize().ok()) { - db.globals - .add_signing_key(origin, get_keys_response.server_key.clone())?; + db.globals.add_signing_key(origin, server_key.clone())?; result.extend( - get_keys_response - .server_key + server_key .verify_keys .into_iter() .map(|(k, v)| (k.to_string(), v.key)), ); result.extend( - get_keys_response - .server_key + server_key .old_verify_keys .into_iter() .map(|(k, v)| (k.to_string(), v.key)), @@ -2011,7 +2011,7 @@ pub(crate) async fn fetch_signing_keys( for server in db.globals.trusted_servers() { debug!("Asking {} for {}'s signing key", server, origin); - if let Ok(keys) = db + if let Some(server_keys) = db .sending .send_federation_request( &db.globals, @@ -2027,9 +2027,16 @@ pub(crate) async fn fetch_signing_keys( ), ) .await + .ok() + .map(|resp| { + resp.server_keys + .into_iter() + .filter_map(|e| e.deserialize().ok()) + .collect::>() + }) { - trace!("Got signing keys: {:?}", keys); - for k in keys.server_keys { + trace!("Got signing keys: {:?}", server_keys); + for k in server_keys { db.globals.add_signing_key(origin, k.clone())?; result.extend( k.verify_keys @@ -2538,8 +2545,8 @@ pub async fn get_room_state_ids_route( /// Creates a join template. pub async fn create_join_event_template_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -2701,7 +2708,7 @@ pub async fn create_join_event_template_route( CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), ); - Ok(create_join_event_template::v1::Response { + Ok(prepare_join_event::v1::Response { room_version: Some(room_version_id), event: to_raw_value(&pdu_json).expect("CanonicalJson can be serialized to JSON"), }) @@ -3293,6 +3300,8 @@ pub(crate) async fn fetch_join_signing_keys( .write() .map_err(|_| Error::bad_database("RwLock is poisoned."))?; for k in keys.server_keys { + let k = k.deserialize().unwrap(); + // TODO: Check signature from trusted server? servers.remove(&k.server_name); @@ -3332,7 +3341,7 @@ pub(crate) async fn fetch_join_signing_keys( if let (Ok(get_keys_response), origin) = result { let result: BTreeMap<_, _> = db .globals - .add_signing_key(&origin, get_keys_response.server_key.clone())? + .add_signing_key(&origin, get_keys_response.server_key.deserialize().unwrap())? 
.into_iter() .map(|(k, v)| (k.to_string(), v.key)) .collect(); From 557d119bee75d4d506067249b996cad54c00a66c Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Fri, 18 Feb 2022 19:54:26 +0100 Subject: [PATCH 220/445] change search_events_v3 to search_events::v3 --- src/client_server/search.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/client_server/search.rs b/src/client_server/search.rs index c83ff2c0..753669a2 100644 --- a/src/client_server/search.rs +++ b/src/client_server/search.rs @@ -1,9 +1,9 @@ use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::api::client::{ error::ErrorKind, - search::search_events::v3::{ - self as search_events_v3, EventContextResult, ResultCategories, ResultRoomEvents, - SearchResult, + search::search_events::{ + self, + v3::{EventContextResult, ResultCategories, ResultRoomEvents, SearchResult}, }, }; @@ -16,8 +16,8 @@ use std::collections::BTreeMap; /// - Only works if the user is currently joined to the room (TODO: Respect history visibility) pub async fn search_events_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let search_criteria = body.search_categories.room_events.as_ref().unwrap(); @@ -102,7 +102,7 @@ pub async fn search_events_route( Some((skip + limit).to_string()) }; - Ok(search_events_v3::Response::new(ResultCategories { + Ok(search_events::v3::Response::new(ResultCategories { room_events: ResultRoomEvents { count: Some((results.len() as u32).into()), // TODO: set this to none. Element shouldn't depend on it groups: BTreeMap::new(), // TODO From 8f063c99d51deef5d35296555b7b1028a610fc1c Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Fri, 18 Feb 2022 22:29:55 +0100 Subject: [PATCH 221/445] chore(ci): Split up tests --- .gitlab-ci.yml | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index bd4ce791..8660f4ee 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -238,7 +238,7 @@ docker:tags:dockerhub: # Run tests # # --------------------------------------------------------------------- # -test:cargo: +.test-shared-settings: stage: "test" needs: [] image: "registry.gitlab.com/jfowl/conduit-containers/rust-with-tools:latest" @@ -246,13 +246,15 @@ test:cargo: variables: CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow interruptible: true + +test:cargo: + extends: .test-shared-settings before_script: - - rustup component add clippy rustfmt + - rustup component add clippy # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: - if [ -n "${SCCACHE_ENDPOINT}" ]; then export RUSTC_WRAPPER=/usr/local/cargo/bin/sccache; fi script: - rustc --version && cargo --version # Print version info for debugging - - cargo fmt --all -- --check - "cargo test --color always --workspace --verbose --locked --no-fail-fast -- -Z unstable-options --format json | gitlab-report -p test > $CI_PROJECT_DIR/report.xml" - "cargo clippy --color always --verbose --message-format=json | gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json" artifacts: @@ -261,6 +263,13 @@ test:cargo: junit: report.xml codequality: gl-code-quality-report.json +test:format: + extends: .test-shared-settings + before_script: + - rustup component add rustfmt + script: + - cargo fmt --all -- --check + test:sytest: stage: "test" allow_failure: true 
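
For reference, the split jobs correspond to commands that can also be run locally before pushing — a rough equivalent, assuming a Rust toolchain with the clippy and rustfmt components installed (the CI-specific `gitlab-report` piping is omitted):

```bash
# Approximate local equivalents of the test:format and test:cargo CI jobs.
rustup component add clippy rustfmt

# test:format – formatting check only
cargo fmt --all -- --check

# test:cargo – unit tests and clippy lints
cargo test --workspace --locked --no-fail-fast
cargo clippy --workspace
```
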
From ad6eb92bbd38889c196d02a5af15313679e7d7cb Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Fri, 18 Feb 2022 22:30:02 +0100 Subject: [PATCH 222/445] feat(ci): Add dependency audit to CI tests --- .gitlab-ci.yml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 8660f4ee..3d321b4a 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -270,6 +270,17 @@ test:format: script: - cargo fmt --all -- --check +test:audit: + extends: .test-shared-settings + allow_failure: true + script: + - cargo audit --color always || true + - cargo audit --stale --json | gitlab-report -p audit > gl-sast-report.json + artifacts: + when: always + reports: + sast: gl-sast-report.json + test:sytest: stage: "test" allow_failure: true From 94573a3a610556bf3871689fb0fd749521071580 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Sat, 19 Feb 2022 17:06:06 +0100 Subject: [PATCH 223/445] improve docker documentation some --- docker/README.md | 37 +++++-- ...fik.yml => docker-compose.for-traefik.yml} | 0 ...raefik.yml => docker-compose.override.yml} | 0 docker/docker-compose.with-traefik.yml | 97 +++++++++++++++++++ 4 files changed, 124 insertions(+), 10 deletions(-) rename docker/{docker-compose.traefik.yml => docker-compose.for-traefik.yml} (100%) rename docker/{docker-compose.override.traefik.yml => docker-compose.override.yml} (100%) create mode 100644 docker/docker-compose.with-traefik.yml diff --git a/docker/README.md b/docker/README.md index d8867385..0a5981df 100644 --- a/docker/README.md +++ b/docker/README.md @@ -38,16 +38,28 @@ or you can skip the build step and pull the image from one of the following regi [gl]: https://gitlab.com/famedly/conduit/container_registry/2497937 [shield]: https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest -The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` config file, an example can be found [here](../conduit-example.toml). -You can pass in different env vars to change config values on the fly. You can even configure Conduit completely by using env vars, but for that you need -to pass `-e CONDUIT_CONFIG=""` into your container. For an overview of possible values, please take a look at the `docker-compose.yml` file. +The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` +config file, an example can be found [here](../conduit-example.toml). You can pass in different env +vars to change config values on the fly. You can even configure Conduit completely by using env +vars, but for that you need to pass `-e CONDUIT_CONFIG=""` into your container. For an overview of +possible values, please take a look at the `docker-compose.yml` file. If you just want to test Conduit for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it. ## Docker-compose -If the docker command is not for you or your setup, you can also use one of the provided `docker-compose` files. Depending on your proxy setup, use the [`docker-compose.traefik.yml`](docker-compose.traefik.yml) and [`docker-compose.override.traefik.yml`](docker-compose.override.traefik.yml) for Traefik (don't forget to remove `.traefik` from the filenames) or the normal [`docker-compose.yml`](../docker-compose.yml) for every other reverse proxy. Additional info about deploying -Conduit can be found [here](../DEPLOY.md). 
+If the `docker run` command is not for you or your setup, you can also use one of the provided `docker-compose` files. + +Depending on your proxy setup, you can use one of the following files; +- If you already have a `traefik` instance set up, use [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) +- If you don't have a `traefik` instance set up (or any other reverse proxy), use [`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml) +- For any other reverse proxy, use [`docker-compose.yml`](docker-compose.yml) + +When picking the traefik-related compose file, rename it so it matches `docker-compose.yml`, and +rename the override file to `docker-compose.override.yml`. Edit the latter with the values you want +for your server. + +Additional info about deploying Conduit can be found [here](../DEPLOY.md). ### Build @@ -71,11 +83,16 @@ docker-compose up -d ### Use Traefik as Proxy -As a container user, you probably know about Traefik. It is a easy to use reverse proxy for making containerized app and services available through the web. With the -two provided files, [`docker-compose.traefik.yml`](docker-compose.traefik.yml) and [`docker-compose.override.traefik.yml`](docker-compose.override.traefik.yml), it is -equally easy to deploy and use Conduit, with a little caveat. If you already took a look at the files, then you should have seen the `well-known` service, and that is -the little caveat. Traefik is simply a proxy and loadbalancer and is not able to serve any kind of content, but for Conduit to federate, we need to either expose ports -`443` and `8448` or serve two endpoints `.well-known/matrix/client` and `.well-known/matrix/server`. +As a container user, you probably know about Traefik. It is a easy to use reverse proxy for making +containerized app and services available through the web. With the two provided files, +[`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or +[`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and +[`docker-compose.override.yml`](docker-compose.override.traefik.yml), it is equally easy to deploy +and use Conduit, with a little caveat. If you already took a look at the files, then you should have +seen the `well-known` service, and that is the little caveat. Traefik is simply a proxy and +loadbalancer and is not able to serve any kind of content, but for Conduit to federate, we need to +either expose ports `443` and `8448` or serve two endpoints `.well-known/matrix/client` and +`.well-known/matrix/server`. With the service `well-known` we use a single `nginx` container that will serve those two files. diff --git a/docker/docker-compose.traefik.yml b/docker/docker-compose.for-traefik.yml similarity index 100% rename from docker/docker-compose.traefik.yml rename to docker/docker-compose.for-traefik.yml diff --git a/docker/docker-compose.override.traefik.yml b/docker/docker-compose.override.yml similarity index 100% rename from docker/docker-compose.override.traefik.yml rename to docker/docker-compose.override.yml diff --git a/docker/docker-compose.with-traefik.yml b/docker/docker-compose.with-traefik.yml new file mode 100644 index 00000000..6d46827f --- /dev/null +++ b/docker/docker-compose.with-traefik.yml @@ -0,0 +1,97 @@ +# Conduit - Behind Traefik Reverse Proxy +version: '3' + +services: + homeserver: + ### If you already built the Conduit image with 'docker build' or want to use the Docker Hub image, + ### then you are ready to go. 
+ image: matrixconduit/matrix-conduit:latest + ### If you want to build a fresh image from the sources, then comment the image line and uncomment the + ### build lines. If you want meaningful labels in your built Conduit image, you should run docker-compose like this: + ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d + # build: + # context: . + # args: + # CREATED: '2021-03-16T08:18:27Z' + # VERSION: '0.1.0' + # LOCAL: 'false' + # GIT_REF: origin/master + restart: unless-stopped + volumes: + - db:/srv/conduit/.local/share/conduit + ### Uncomment if you want to use conduit.toml to configure Conduit + ### Note: Set env vars will override conduit.toml values + # - ./conduit.toml:/srv/conduit/conduit.toml + networks: + - proxy + environment: + CONDUIT_SERVER_NAME: localhost:6167 # replace with your own name + CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' + CONDUIT_ALLOW_REGISTRATION : 'true' + ### Uncomment and change values as desired + # CONDUIT_ADDRESS: 0.0.0.0 + # CONDUIT_PORT: 6167 + # CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string '' + # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging + # CONDUIT_LOG: info # default is: "info,_=off,sled=off" + # CONDUIT_ALLOW_JAEGER: 'false' + # CONDUIT_ALLOW_ENCRYPTION: 'false' + # CONDUIT_ALLOW_FEDERATION: 'false' + # CONDUIT_DATABASE_PATH: /srv/conduit/.local/share/conduit + # CONDUIT_WORKERS: 10 + # CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB + + # We need some way to server the client and server .well-known json. The simplest way is to use a nginx container + # to serve those two as static files. If you want to use a different way, delete or comment the below service, here + # and in the docker-compose override file. + well-known: + image: nginx:latest + restart: unless-stopped + volumes: + - ./nginx/matrix.conf:/etc/nginx/conf.d/matrix.conf # the config to serve the .well-known/matrix files + - ./nginx/www:/var/www/ # location of the client and server .well-known-files + + ### Uncomment if you want to use your own Element-Web App. 
+ ### Note: You need to provide a config.json for Element and you also need a second + ### Domain or Subdomain for the communication between Element and Conduit + ### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md + # element-web: + # image: vectorim/element-web:latest + # restart: unless-stopped + # volumes: + # - ./element_config.json:/app/config.json + # networks: + # - proxy + # depends_on: + # - homeserver + + traefik: + image: "traefik:latest" + container_name: "traefik" + restart: "unless-stopped" + ports: + - "80:80" + - "443:443" + volumes: + - "/var/run/docker.sock:/var/run/docker.sock" + # - "./traefik_config:/etc/traefik" + - "acme:/etc/traefik/acme" + labels: + - "traefik.enable=true" + + # middleware redirect + - "traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https" + # global redirect to https + - "traefik.http.routers.redirs.rule=hostregexp(`{host:.+}`)" + - "traefik.http.routers.redirs.entrypoints=http" + - "traefik.http.routers.redirs.middlewares=redirect-to-https" + + networks: + - proxy + +volumes: + db: + acme: + +networks: + proxy: \ No newline at end of file From cc1472788815d9daadb53b085dd969bcd7e39741 Mon Sep 17 00:00:00 2001 From: Jonathan de Jong Date: Sun, 20 Feb 2022 10:55:17 +0100 Subject: [PATCH 224/445] revert reflow --- docker/README.md | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/docker/README.md b/docker/README.md index 0a5981df..1c9a03d1 100644 --- a/docker/README.md +++ b/docker/README.md @@ -38,11 +38,9 @@ or you can skip the build step and pull the image from one of the following regi [gl]: https://gitlab.com/famedly/conduit/container_registry/2497937 [shield]: https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest -The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` -config file, an example can be found [here](../conduit-example.toml). You can pass in different env -vars to change config values on the fly. You can even configure Conduit completely by using env -vars, but for that you need to pass `-e CONDUIT_CONFIG=""` into your container. For an overview of -possible values, please take a look at the `docker-compose.yml` file. +The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` config file, an example can be found [here](../conduit-example.toml). +You can pass in different env vars to change config values on the fly. You can even configure Conduit completely by using env vars, but for that you need +to pass `-e CONDUIT_CONFIG=""` into your container. For an overview of possible values, please take a look at the `docker-compose.yml` file. If you just want to test Conduit for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it. From 5a80507006eb27403a0dcf9d42607d9fa781a8fc Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Sun, 20 Feb 2022 11:12:49 +0100 Subject: [PATCH 225/445] chore(docs): Remove the now obsolete cross readme --- cross/README.md | 37 ------------------------------------- 1 file changed, 37 deletions(-) delete mode 100644 cross/README.md diff --git a/cross/README.md b/cross/README.md deleted file mode 100644 index 2829d239..00000000 --- a/cross/README.md +++ /dev/null @@ -1,37 +0,0 @@ -## Cross compilation - -The `cross` folder contains a set of convenience scripts (`build.sh` and `test.sh`) for cross-compiling Conduit. 
- -Currently supported targets are - -- aarch64-unknown-linux-musl -- arm-unknown-linux-musleabihf -- armv7-unknown-linux-musleabihf -- x86\_64-unknown-linux-musl - -### Install prerequisites -#### Docker -[Installation guide](https://docs.docker.com/get-docker/). -```sh -$ sudo apt install docker -$ sudo systemctl start docker -$ sudo usermod -aG docker $USER -$ newgrp docker -``` - -#### Cross -[Installation guide](https://github.com/rust-embedded/cross/#installation). -```sh -$ cargo install cross -``` - -### Buiding Conduit -```sh -$ TARGET=armv7-unknown-linux-musleabihf ./cross/build.sh --release -``` -The cross-compiled binary is at `target/armv7-unknown-linux-musleabihf/release/conduit` - -### Testing Conduit -```sh -$ TARGET=armv7-unknown-linux-musleabihf ./cross/test.sh --release -``` From 196c83939c38cce47bba054e533b8ebee0ac6310 Mon Sep 17 00:00:00 2001 From: Andrej Kacian Date: Sun, 6 Feb 2022 20:23:22 +0100 Subject: [PATCH 226/445] Add show-config admin room command --- src/config.rs | 92 +++++++++++++++++++++++++++++++++++++++++ src/database/admin.rs | 7 ++++ src/database/globals.rs | 2 +- 3 files changed, 100 insertions(+), 1 deletion(-) diff --git a/src/config.rs b/src/config.rs index 155704b7..a6ab63e3 100644 --- a/src/config.rs +++ b/src/config.rs @@ -1,5 +1,6 @@ use std::{ collections::BTreeMap, + fmt, net::{IpAddr, Ipv4Addr}, }; @@ -97,6 +98,97 @@ impl Config { } } +impl fmt::Display for Config { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // Prepare a list of config values to show + let lines = [ + ("Server name", self.server_name.host()), + ("Database backend", &self.database_backend), + ("Database path", &self.database_path), + ( + "Database cache capacity (MB)", + &self.db_cache_capacity_mb.to_string(), + ), + ( + "Cache capacity modifier", + &self.conduit_cache_capacity_modifier.to_string(), + ), + #[cfg(feature = "rocksdb")] + ( + "Maximum open files for RocksDB", + &self.rocksdb_max_open_files.to_string(), + ), + ("PDU cache capacity", &self.pdu_cache_capacity.to_string()), + ( + "Cleanup interval in seconds", + &self.cleanup_second_interval.to_string(), + ), + ("Maximum request size", &self.max_request_size.to_string()), + ( + "Maximum concurrent requests", + &self.max_concurrent_requests.to_string(), + ), + ("Allow registration", &self.allow_registration.to_string()), + ("Allow encryption", &self.allow_encryption.to_string()), + ("Allow federation", &self.allow_federation.to_string()), + ("Allow room creation", &self.allow_room_creation.to_string()), + ( + "JWT secret", + match self.jwt_secret { + Some(_) => "set", + None => "not set", + }, + ), + ("Trusted servers", { + let mut lst = vec![]; + for server in &self.trusted_servers { + lst.push(server.host()); + } + &lst.join(", ") + }), + ( + "TURN username", + if self.turn_username.is_empty() { + "not set" + } else { + &self.turn_username + }, + ), + ("TURN password", { + if self.turn_password.is_empty() { + "not set" + } else { + "set" + } + }), + ("TURN secret", { + if self.turn_secret.is_empty() { + "not set" + } else { + "set" + } + }), + ("Turn TTL", &self.turn_ttl.to_string()), + ("Turn URIs", { + let mut lst = vec![]; + for item in self.turn_uris.to_vec().into_iter().enumerate() { + let (_, uri): (usize, String) = item; + lst.push(uri); + } + &lst.join(", ") + }), + ]; + + let mut msg: String = "Active config values:\n\n".to_string(); + + for line in lines.into_iter().enumerate() { + msg += &format!("{}: {}\n", line.1 .0, line.1 .1); + } + + write!(f, "{}", msg) + } +} + fn false_fn() 
-> bool { false } diff --git a/src/database/admin.rs b/src/database/admin.rs index f9d4f425..f2e66e43 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -259,6 +259,9 @@ enum AdminCommand { /// Print database memory usage statistics DatabaseMemoryUsage, + + /// Show configuration values + ShowConfig, } fn process_admin_command( @@ -428,6 +431,10 @@ fn process_admin_command( e )), }, + AdminCommand::ShowConfig => { + // Construct and send the response + RoomMessageEventContent::text_plain(format!("{}", db.globals.config)) + } }; Ok(reply_message_content) diff --git a/src/database/globals.rs b/src/database/globals.rs index 7bc300d2..9a9163be 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -35,7 +35,7 @@ pub struct Globals { pub actual_destination_cache: Arc>, // actual_destination, host pub tls_name_override: Arc>, pub(super) globals: Arc, - config: Config, + pub config: Config, keypair: Arc, dns_resolver: TokioAsyncResolver, jwt_decoding_key: Option>, From 949f2523f900604e688129563d952f03fc6e0bb3 Mon Sep 17 00:00:00 2001 From: TomZ Date: Mon, 21 Feb 2022 22:35:08 +0100 Subject: [PATCH 227/445] Fix permissions The text just sets the ownership and ignores that defaults on unix are to have newly created dirs be readable by everyone. This closes the database to unauthorized users on multi-user systems. --- DEPLOY.md | 1 + 1 file changed, 1 insertion(+) diff --git a/DEPLOY.md b/DEPLOY.md index a28218d7..3d1b780e 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -150,6 +150,7 @@ If you use the default database path you also need to run this: ```bash sudo mkdir -p /var/lib/matrix-conduit/ sudo chown -R conduit:nogroup /var/lib/matrix-conduit/ +sudo chmod 700 /var/lib/matrix-conduit/ ``` ## Setting up the Reverse Proxy From 3b2b35aab738875fe07b155d9d97a6fbefca6757 Mon Sep 17 00:00:00 2001 From: Andrej Kacian Date: Tue, 22 Feb 2022 00:26:53 +0100 Subject: [PATCH 228/445] Log caught Ctrl+C or SIGTERM for operator feedback --- src/main.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/main.rs b/src/main.rs index 6aa08704..c49c5ea7 100644 --- a/src/main.rs +++ b/src/main.rs @@ -37,6 +37,7 @@ use tower_http::{ trace::TraceLayer, ServiceBuilderExt as _, }; +use tracing::warn; use tracing_subscriber::{prelude::*, EnvFilter}; pub use conduit::*; // Re-export everything from the library crate @@ -346,11 +347,14 @@ async fn shutdown_signal(handle: ServerHandle) { #[cfg(not(unix))] let terminate = std::future::pending::<()>(); + let sig: &str; + tokio::select! { - _ = ctrl_c => {}, - _ = terminate => {}, + _ = ctrl_c => { sig = "Ctrl+C"; }, + _ = terminate => { sig = "SIGTERM"; }, } + warn!("Received {}, shutting down...", sig); handle.graceful_shutdown(Some(Duration::from_secs(30))); } From a5bb6786c8688134c5f6df3ed3c02ef85eb9f14e Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Tue, 22 Feb 2022 15:22:53 +0000 Subject: [PATCH 229/445] fix(docker): Make conduit own default db path MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When a user mounts a volume into the default volume path, it uses the permissions and ownership from the host volume. In most cases, this is 1000:1000, which it also uses on the inside. If you don't mount a volume though (e.g., for testing), conduit cries: “The database couldn't be loaded or created.” This fix chowns the default db dir to remedy this. 
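
For operators who bind-mount a host directory instead of using a named volume, the same ownership concern applies on the host side. A minimal sketch, assuming the container runs Conduit as UID/GID 1000 and keeps its database under `/srv/conduit/.local/share/conduit` as in the compose examples above:

```bash
# Hypothetical host-side preparation for a bind-mounted database directory;
# adjust the host path, UID/GID, and published port to match your deployment.
mkdir -p /opt/conduit-db
chown 1000:1000 /opt/conduit-db
chmod 700 /opt/conduit-db

docker run -d \
  -v /opt/conduit-db:/srv/conduit/.local/share/conduit \
  -e CONDUIT_SERVER_NAME="your.server.name" \
  -e CONDUIT_ALLOW_REGISTRATION=true \
  -p 6167:6167 \
  matrixconduit/matrix-conduit:latest
```

Without any `-v` flag, the image now creates and owns the default database path itself, so short-lived test containers work out of the box.
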
--- docker/ci-binaries-packaging.Dockerfile | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docker/ci-binaries-packaging.Dockerfile b/docker/ci-binaries-packaging.Dockerfile index ee1ca4ca..6defc3d1 100644 --- a/docker/ci-binaries-packaging.Dockerfile +++ b/docker/ci-binaries-packaging.Dockerfile @@ -48,8 +48,6 @@ LABEL org.opencontainers.image.created=${CREATED} \ org.opencontainers.image.documentation="https://gitlab.com/famedly/conduit" \ org.opencontainers.image.ref.name="" -# Created directory for the database and media files -RUN mkdir -p ${DEFAULT_DB_PATH} # Test if Conduit is still alive, uses the same endpoint as Element COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh @@ -67,7 +65,9 @@ RUN set -x ; \ # Change ownership of Conduit files to conduit user and group RUN chown -cR conduit:conduit /srv/conduit && \ - chmod +x /srv/conduit/healthcheck.sh + chmod +x /srv/conduit/healthcheck.sh && \ + mkdir -p ${DEFAULT_DB_PATH} && \ + chown -cR conduit:conduit ${DEFAULT_DB_PATH} # Change user to conduit USER conduit From 65fa4b2ca4c2524cad8c11bbc9a33b193e267c57 Mon Sep 17 00:00:00 2001 From: Andrej Kacian Date: Tue, 22 Feb 2022 22:31:34 +0100 Subject: [PATCH 230/445] Fix proxy config examples in config/proxy.rs --- src/config/proxy.rs | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/config/proxy.rs b/src/config/proxy.rs index fb0387c9..dcf304e9 100644 --- a/src/config/proxy.rs +++ b/src/config/proxy.rs @@ -10,13 +10,13 @@ use crate::Result; /// ``` /// - Global proxy /// ```toml -/// [proxy] +/// [global.proxy] /// global = { url = "socks5h://localhost:9050" } /// ``` /// - Proxy some domains /// ```toml -/// [proxy] -/// [[proxy.by_domain]] +/// [global.proxy] +/// [[global.proxy.by_domain]] /// url = "socks5h://localhost:9050" /// include = ["*.onion", "matrix.myspecial.onion"] /// exclude = ["*.myspecial.onion"] From 5c6c6f272cdb805c2253a6957a142e6dcaa1fa56 Mon Sep 17 00:00:00 2001 From: TomZ Date: Mon, 21 Feb 2022 22:28:13 +0100 Subject: [PATCH 231/445] Fix security issue. The docs state that you need to make the config file _readable_ and then proceeds to make the file writable. This changes it to make the file to be owned by root and readable by anyone. This is the default for unix / linux and suggested practice for files in /etc. --- DEPLOY.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/DEPLOY.md b/DEPLOY.md index a28218d7..60634825 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -142,7 +142,8 @@ As we are using a Conduit specific user we need to allow it to read the config. 
Debian: ```bash -sudo chown -R conduit:nogroup /etc/matrix-conduit +sudo chown -R root:root /etc/matrix-conduit +sudo chmod 755 /etc/matrix-conduit ``` If you use the default database path you also need to run this: From 9f059ad4c3a0ec0edcc6a086b5a755a8df6826a1 Mon Sep 17 00:00:00 2001 From: reti4 Date: Tue, 1 Mar 2022 21:03:55 +0000 Subject: [PATCH 232/445] make username login case insensitive --- src/client_server/session.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client_server/session.rs b/src/client_server/session.rs index 2e1ed544..c4a71071 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -57,7 +57,7 @@ pub async fn login_route( return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type.")); }; let user_id = - UserId::parse_with_server_name(username.to_owned(), db.globals.server_name()) + UserId::parse_with_server_name(username.to_lowercase().to_owned(), db.globals.server_name()) .map_err(|_| { Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") })?; From 9385ea0e7c7b7e87ae49ddb17cd75d84080e91ca Mon Sep 17 00:00:00 2001 From: reti4 Date: Tue, 1 Mar 2022 21:23:34 +0000 Subject: [PATCH 233/445] fmt fix --- src/client_server/session.rs | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/src/client_server/session.rs b/src/client_server/session.rs index c4a71071..0bbae145 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -56,11 +56,11 @@ pub async fn login_route( } else { return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type.")); }; - let user_id = - UserId::parse_with_server_name(username.to_lowercase().to_owned(), db.globals.server_name()) - .map_err(|_| { - Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") - })?; + let user_id = UserId::parse_with_server_name( + username.to_lowercase().to_owned(), + db.globals.server_name(), + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; let hash = db.users.password_hash(&user_id)?.ok_or(Error::BadRequest( ErrorKind::Forbidden, "Wrong username or password.", From 8bafdc4623edeb72496511c2e561ab213f46db80 Mon Sep 17 00:00:00 2001 From: reti4 Date: Wed, 2 Mar 2022 02:25:15 +0000 Subject: [PATCH 234/445] fixed location of lowercase fn --- src/client_server/session.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/client_server/session.rs b/src/client_server/session.rs index 0bbae145..c78f6001 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -52,15 +52,15 @@ pub async fn login_route( password, }) => { let username = if let IncomingUserIdentifier::MatrixId(matrix_id) = identifier { - matrix_id + matrix_id.to_lowercase() } else { return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type.")); }; - let user_id = UserId::parse_with_server_name( - username.to_lowercase().to_owned(), - db.globals.server_name(), - ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; + let user_id = + UserId::parse_with_server_name(username.to_owned(), db.globals.server_name()) + .map_err(|_| { + Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") + })?; let hash = db.users.password_hash(&user_id)?.ok_or(Error::BadRequest( ErrorKind::Forbidden, "Wrong username or password.", From 5695121f38af06f7af7acb84299705dc9a2f1c43 Mon Sep 17 00:00:00 2001 From: chenyuqide Date: Wed, 2 Mar 2022 23:48:01 +0800 Subject: [PATCH 235/445] Fix wrong 
associated type in OutgoingKind::Appservice --- src/database/sending.rs | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/database/sending.rs b/src/database/sending.rs index 2d64be15..b7d62c12 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -38,7 +38,7 @@ use super::abstraction::Tree; #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum OutgoingKind { - Appservice(Box), + Appservice(String), Push(Vec, Vec), // user and pushkey Normal(Box), } @@ -505,7 +505,7 @@ impl Sending { let db = db.read().await; match &kind { - OutgoingKind::Appservice(server) => { + OutgoingKind::Appservice(id) => { let mut pdu_jsons = Vec::new(); for event in &events { @@ -535,7 +535,7 @@ impl Sending { let response = appservice_server::send_request( &db.globals, db.appservice - .get_registration(server.as_str()) + .get_registration(&id) .map_err(|e| (kind.clone(), e))? .ok_or_else(|| { ( @@ -756,9 +756,7 @@ impl Sending { })?; ( - OutgoingKind::Appservice(ServerName::parse(server).map_err(|_| { - Error::bad_database("Invalid server string in server_currenttransaction") - })?), + OutgoingKind::Appservice(server), if value.is_empty() { SendingEventType::Pdu(event.to_vec()) } else { From 5a9462c9ab5a9d7ffe48644bb17689be4df56020 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Tue, 8 Mar 2022 21:31:54 +0000 Subject: [PATCH 236/445] fix(ci): Fix musl builds This pins the image to use for cross to a working image's sha256 --- .gitlab-ci.yml | 2 +- Cross.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 3d321b4a..bf68e25c 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -26,7 +26,7 @@ variables: - if: "$CI_COMMIT_TAG" - if: '($CI_MERGE_REQUEST_APPROVED == "true") || $BUILD_EVERYTHING' # Once MR is approved, test all builds. Or if BUILD_EVERYTHING is set. 
interruptible: true - image: "registry.gitlab.com/jfowl/conduit-containers/rust-with-tools:latest" + image: "registry.gitlab.com/jfowl/conduit-containers/rust-with-tools@sha256:69ab327974aef4cc0daf4273579253bf7ae5e379a6c52729b83137e4caa9d093" tags: ["docker"] services: ["docker:dind"] variables: diff --git a/Cross.toml b/Cross.toml index a1387b43..5d99a358 100644 --- a/Cross.toml +++ b/Cross.toml @@ -20,4 +20,4 @@ image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-arm-unknown-lin image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-armv7-unknown-linux-musleabihf:latest" [target.x86_64-unknown-linux-musl] -image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-x86_64-unknown-linux-musl:latest" +image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-x86_64-unknown-linux-musl@sha256:b6d689e42f0236c8a38b961bca2a12086018b85ed20e0826310421daf182e2bb" From 194a85d4c5b4872e412c1bd4d93c9d4a85053bc5 Mon Sep 17 00:00:00 2001 From: Andrej Kacian Date: Sat, 12 Mar 2022 15:42:18 +0100 Subject: [PATCH 237/445] Use native root CA certificates for reqwest --- Cargo.lock | 78 +++++++++++++++++++++++++++++++++++++++++++++++------- Cargo.toml | 2 +- 2 files changed, 69 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 997cedc6..c45fa264 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -449,6 +449,22 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" +[[package]] +name = "core-foundation" +version = "0.9.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" +dependencies = [ + "core-foundation-sys", + "libc", +] + +[[package]] +name = "core-foundation-sys" +version = "0.8.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" + [[package]] name = "cpufeatures" version = "0.2.1" @@ -1577,6 +1593,12 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" +[[package]] +name = "openssl-probe" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" + [[package]] name = "opentelemetry" version = "0.16.0" @@ -2028,6 +2050,7 @@ dependencies = [ "percent-encoding", "pin-project-lite", "rustls", + "rustls-native-certs", "rustls-pemfile", "serde", "serde_json", @@ -2039,7 +2062,6 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "webpki-roots", "winreg 0.7.0", ] @@ -2385,6 +2407,18 @@ dependencies = [ "webpki", ] +[[package]] +name = "rustls-native-certs" +version = "0.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5ca9ebdfa27d3fc180e42879037b5338ab1c040c06affd00d8338598e7800943" +dependencies = [ + "openssl-probe", + "rustls-pemfile", + "schannel", + "security-framework", +] + [[package]] name = "rustls-pemfile" version = "0.2.1" @@ -2400,6 +2434,16 @@ version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" +[[package]] +name = "schannel" +version = "0.1.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" +dependencies = [ + "lazy_static", + "winapi", +] + [[package]] name = "scopeguard" version = "1.1.0" @@ -2416,6 +2460,29 @@ dependencies = [ "untrusted", ] +[[package]] +name = "security-framework" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dc14f172faf8a0194a3aded622712b0de276821addc574fa54fc0a1167e10dc" +dependencies = [ + "bitflags", + "core-foundation", + "core-foundation-sys", + "libc", + "security-framework-sys", +] + +[[package]] +name = "security-framework-sys" +version = "2.6.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0160a13a177a45bfb43ce71c01580998474f556ad854dcbca936dd2841a5c556" +dependencies = [ + "core-foundation-sys", + "libc", +] + [[package]] name = "serde" version = "1.0.134" @@ -3238,15 +3305,6 @@ dependencies = [ "untrusted", ] -[[package]] -name = "webpki-roots" -version = "0.22.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "552ceb903e957524388c4d3475725ff2c8b7960922063af6ce53c9a43da07449" -dependencies = [ - "webpki", -] - [[package]] name = "weezl" version = "0.1.5" diff --git a/Cargo.toml b/Cargo.toml index b9affa76..c24c7cc4 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -48,7 +48,7 @@ rand = "0.8.4" # Used to hash passwords rust-argon2 = "0.8.3" # Used to send requests -reqwest = { default-features = false, features = ["rustls-tls", "socks"], git = "https://github.com/timokoesters/reqwest", rev = "57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" } +reqwest = { default-features = false, features = ["rustls-tls-native-roots", "socks"], git = "https://github.com/timokoesters/reqwest", rev = "57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" } # Used for conduit::Error type thiserror = "1.0.28" # Used to generate thumbnails for images From b5b81818516555f23ea77a00efb6cc0b7e5f3b81 Mon Sep 17 00:00:00 2001 From: Andrej Kacian Date: Tue, 22 Feb 2022 00:02:01 +0100 Subject: [PATCH 238/445] Notify admin room for user registrations, deactivations and password changes --- src/client_server/account.rs | 22 ++++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 1ff0fa08..32488f22 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -17,6 +17,7 @@ use ruma::{ }, events::{ room::member::{MembershipState, RoomMemberEventContent}, + room::message::RoomMessageEventContent, EventType, }, push, UserId, @@ -230,7 +231,12 @@ pub async fn register_route( body.initial_device_display_name.clone(), )?; - info!("{} registered on this server", user_id); + info!("New user {} registered on this server.", user_id); + db.admin + .send_message(RoomMessageEventContent::notice_plain(format!( + "New user {} registered on this server.", + user_id + ))); // If this is the first real user, grant them admin privileges // Note: the server user, @conduit:servername, is generated first @@ -319,6 +325,13 @@ pub async fn change_password_route( db.flush()?; + info!("User {} changed their password.", sender_user); + db.admin + .send_message(RoomMessageEventContent::notice_plain(format!( + "User {} changed their password.", + sender_user + ))); + Ok(change_password::v3::Response {}) } @@ -436,7 +449,12 @@ pub async fn deactivate_route( // Remove devices and mark account as deactivated db.users.deactivate_account(sender_user)?; - info!("{} deactivated their account", sender_user); + info!("User {} deactivated their 
account.", sender_user); + db.admin + .send_message(RoomMessageEventContent::notice_plain(format!( + "User {} deactivated their account.", + sender_user + ))); db.flush()?; From 61277452af96aa2c9a50bbd0ea206d1856b53918 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Fri, 18 Mar 2022 18:05:16 +0100 Subject: [PATCH 239/445] chore(docker): Bump alpine (base image) version --- .gitlab-ci.yml | 3 ++- docker/ci-binaries-packaging.Dockerfile | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index bf68e25c..380332b1 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -316,9 +316,10 @@ test:sytest: test:dockerlint: stage: "test" needs: [] - image: "ghcr.io/hadolint/hadolint:latest-alpine" + image: "ghcr.io/hadolint/hadolint@sha256:6c4b7c23f96339489dd35f21a711996d7ce63047467a9a562287748a03ad5242" # 2.8.0-alpine interruptible: true script: + - hadolint --version # First pass: Print for CI log: - > hadolint diff --git a/docker/ci-binaries-packaging.Dockerfile b/docker/ci-binaries-packaging.Dockerfile index 6defc3d1..1a318714 100644 --- a/docker/ci-binaries-packaging.Dockerfile +++ b/docker/ci-binaries-packaging.Dockerfile @@ -7,7 +7,7 @@ # Credit's for the original Dockerfile: Weasy666. # --------------------------------------------------------------------------------------------------------- -FROM docker.io/alpine:3.15.0 AS runner +FROM docker.io/alpine:3.15.1 AS runner # Standard port on which Conduit launches. From 1ebf417c1191df984850c2208d4baa871b82f5cb Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Wed, 30 Mar 2022 20:23:04 +0000 Subject: [PATCH 240/445] chore: Bump alpine version for CI generated docker --- docker/ci-binaries-packaging.Dockerfile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docker/ci-binaries-packaging.Dockerfile b/docker/ci-binaries-packaging.Dockerfile index 1a318714..6964a02f 100644 --- a/docker/ci-binaries-packaging.Dockerfile +++ b/docker/ci-binaries-packaging.Dockerfile @@ -7,7 +7,8 @@ # Credit's for the original Dockerfile: Weasy666. # --------------------------------------------------------------------------------------------------------- -FROM docker.io/alpine:3.15.1 AS runner +FROM docker.io/alpine@sha256:b66bccf2e0cca8e5fb79f7d3c573dd76c4787d1d883f5afe6c9d136a260bba07 AS runner +# = alpine:3.15.3 # Standard port on which Conduit launches. From 4a12a7cbc882375fc66df49e046b503f047573b9 Mon Sep 17 00:00:00 2001 From: LordMZTE Date: Thu, 31 Mar 2022 20:59:59 +0200 Subject: [PATCH 241/445] Fix crash when a bad user ID is in the database To my understanding, a bad user ID can sometimes make it into the database, which lead to a panic prior to this change. 
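As an illustration of the pattern this change applies, here is a minimal, self-contained sketch (not the actual sync.rs code; the collect_valid_user_ids helper, its iterator parameter and its HashSet<Box<UserId>> return type are made up for the example):

    use std::collections::HashSet;

    use ruma::UserId;

    // Collect only the state keys that parse as valid user IDs, skipping any
    // malformed entries instead of panicking on them with `.expect(...)`.
    fn collect_valid_user_ids<'a>(
        state_keys: impl Iterator<Item = &'a str>,
    ) -> HashSet<Box<UserId>> {
        let mut lazy_loaded = HashSet::new();
        for state_key in state_keys {
            // A bad user ID stored in the database is ignored here rather
            // than aborting the whole sync response.
            if let Ok(uid) = UserId::parse(state_key) {
                lazy_loaded.insert(uid);
            }
        }
        lazy_loaded
    }
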
--- src/client_server/sync.rs | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index eec4cf6d..5f34fa6b 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -405,10 +405,11 @@ async fn sync_helper( continue; } }; - lazy_loaded.insert( - UserId::parse(state_key.as_ref()) - .expect("they are in timeline_users, so they should be correct"), - ); + + // This check is in case a bad user ID made it into the database + if let Ok(uid) = UserId::parse(state_key.as_ref()) { + lazy_loaded.insert(uid); + } state_events.push(pdu); } } From db0659cb3db588f3ef08aad866e9f0e631bf8dcb Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 31 Mar 2022 22:50:17 +0200 Subject: [PATCH 242/445] Upgrade axum to 0.5 --- Cargo.lock | 14 +++++++------- Cargo.toml | 2 +- src/ruma_wrapper/axum.rs | 4 ++-- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c45fa264..1a60e655 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -124,9 +124,9 @@ checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" [[package]] name = "axum" -version = "0.4.4" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "310a147401c66e79fc78636e4db63ac68cd6acb9ece056de806ea173a15bce32" +checksum = "5611d4977882c5af1c0f7a34d51b5d87f784f86912bb543986b014ea4995ef93" dependencies = [ "async-trait", "axum-core", @@ -137,6 +137,7 @@ dependencies = [ "http", "http-body", "hyper", + "itoa 1.0.1", "matchit", "memchr", "mime", @@ -147,7 +148,6 @@ dependencies = [ "serde_urlencoded", "sync_wrapper", "tokio", - "tokio-util", "tower", "tower-http", "tower-layer", @@ -156,9 +156,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.1.1" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ca6c0b218388a7ed6a8d25e94f7dea5498daaa4fd8c711fb3ff166041b06fda" +checksum = "95cd109b3e93c9541dcce5b0219dcf89169dcc58c1bebed65082808324258afb" dependencies = [ "async-trait", "bytes", @@ -1427,9 +1427,9 @@ checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" [[package]] name = "matchit" -version = "0.4.4" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58b6f41fdfbec185dd3dff58b51e323f5bc61692c0de38419a957b0dcfccca3c" +checksum = "73cbba799671b762df5a175adf59ce145165747bb891505c43d09aefbbf38beb" [[package]] name = "memchr" diff --git a/Cargo.toml b/Cargo.toml index c24c7cc4..1a1bb667 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,7 +14,7 @@ edition = "2021" [dependencies] # Web framework -axum = { version = "0.4.4", features = ["headers"], optional = true } +axum = { version = "0.5.0", features = ["headers"], optional = true } axum-server = { version = "0.3.3", features = ["tls-rustls"] } tower = { version = "0.4.11", features = ["util"] } tower-http = { version = "0.2.1", features = ["add-extension", "cors", "compression-full", "sensitive-headers", "trace", "util"] } diff --git a/src/ruma_wrapper/axum.rs b/src/ruma_wrapper/axum.rs index d8e7f51a..c779e335 100644 --- a/src/ruma_wrapper/axum.rs +++ b/src/ruma_wrapper/axum.rs @@ -154,6 +154,7 @@ where TypedHeaderRejectionReason::Error(_) => { "Invalid X-Matrix signatures." 
} + _ => "Unknown header-related error", }; Error::BadRequest(ErrorKind::Forbidden, msg) @@ -247,8 +248,7 @@ where }; let mut http_request = http::Request::builder().uri(req.uri()).method(req.method()); - *http_request.headers_mut().unwrap() = - req.headers().expect("Headers already extracted").clone(); + *http_request.headers_mut().unwrap() = req.headers().clone(); if let Some(CanonicalJsonValue::Object(json_body)) = &mut json_body { let user_id = sender_user.clone().unwrap_or_else(|| { From 3933bd9a8e73b408bcba44579ba60e3ee35dae8b Mon Sep 17 00:00:00 2001 From: Jonas Platte Date: Thu, 31 Mar 2022 22:52:16 +0200 Subject: [PATCH 243/445] Update axum feature set used --- Cargo.lock | 1 - Cargo.toml | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 1a60e655..66daf5e6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -145,7 +145,6 @@ dependencies = [ "pin-project-lite", "serde", "serde_json", - "serde_urlencoded", "sync_wrapper", "tokio", "tower", diff --git a/Cargo.toml b/Cargo.toml index 1a1bb667..627829f0 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,7 +14,7 @@ edition = "2021" [dependencies] # Web framework -axum = { version = "0.5.0", features = ["headers"], optional = true } +axum = { version = "0.5.0", default-features = false, features = ["headers", "http1", "http2", "json", "matched-path"], optional = true } axum-server = { version = "0.3.3", features = ["tls-rustls"] } tower = { version = "0.4.11", features = ["util"] } tower-http = { version = "0.2.1", features = ["add-extension", "cors", "compression-full", "sensitive-headers", "trace", "util"] } From a5465dfd3eb57c50e5707d90351cf73120986eff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 1 Apr 2022 16:00:04 +0200 Subject: [PATCH 244/445] fix: allow trailing slashes for /state// again --- src/main.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/src/main.rs b/src/main.rs index 6aa08704..beb24361 100644 --- a/src/main.rs +++ b/src/main.rs @@ -273,6 +273,22 @@ fn routes() -> Router { get(client_server::get_state_events_for_empty_key_route) .put(client_server::send_state_event_for_empty_key_route), ) + .route( + "/_matrix/client/v3/rooms/:room_id/state/:event_type", + get(client_server::get_state_events_for_empty_key_route) + .put(client_server::send_state_event_for_empty_key_route), + ) + // These two endpoints allow trailing slashes + .route( + "/_matrix/client/r0/rooms/:room_id/state/:event_type/", + get(client_server::get_state_events_for_empty_key_route) + .put(client_server::send_state_event_for_empty_key_route), + ) + .route( + "/_matrix/client/v3/rooms/:room_id/state/:event_type/", + get(client_server::get_state_events_for_empty_key_route) + .put(client_server::send_state_event_for_empty_key_route), + ) .ruma_route(client_server::sync_events_route) .ruma_route(client_server::get_context_route) .ruma_route(client_server::get_message_events_route) From 9046223e7f649314671ab6f18cf606d5442f36f1 Mon Sep 17 00:00:00 2001 From: Andrej Kacian Date: Sun, 20 Mar 2022 15:21:03 +0100 Subject: [PATCH 245/445] Send PDU to appservice if state_key is their user ID Fixes #110. --- src/database/rooms.rs | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 3a71a3b5..ef544744 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1970,6 +1970,30 @@ impl Rooms { continue; } + // If the RoomMember event has a non-empty state_key, it is targeted at someone. 
+ // If it is our appservice user, we send this PDU to it. + if pdu.kind == EventType::RoomMember { + if let Some(state_key_uid) = &pdu + .state_key + .as_ref() + .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) + { + if let Some(appservice_uid) = appservice + .1 + .get("sender_localpart") + .and_then(|string| string.as_str()) + .and_then(|string| { + UserId::parse_with_server_name(string, db.globals.server_name()).ok() + }) + { + if state_key_uid == &appservice_uid { + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + continue; + } + } + } + } + if let Some(namespaces) = appservice.1.get("namespaces") { let users = namespaces .get("users") From 1712e63e069de10a8997b46a3547562d9c33b2b7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Sun, 3 Apr 2022 18:58:45 +0200 Subject: [PATCH 246/445] fix: fix kick and ban events over federation Fix the scenario where a MembershipState change event was not sent to the server of a user kicked/banned from a room on a Conduit instance if there were not any other users from that server in the room. --- src/database/rooms.rs | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 3a71a3b5..5b86b2f4 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1957,12 +1957,24 @@ impl Rooms { // where events in the current room state do not exist self.set_room_state(room_id, statehashid)?; - let servers = self + let mut servers: HashSet> = self .room_servers(room_id) .filter_map(|r| r.ok()) - .filter(|server| &**server != db.globals.server_name()); + .filter(|server| &**server != db.globals.server_name()) + .collect(); + + // In case we are kicking or banning a user, we need to inform their server of the change + if pdu.kind == EventType::RoomMember { + if let Some(state_key_uid) = &pdu + .state_key + .as_ref() + .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) + { + servers.insert(Box::from(state_key_uid.server_name())); + } + } - db.sending.send_pdu(servers, &pdu_id)?; + db.sending.send_pdu(servers.into_iter(), &pdu_id)?; for appservice in db.appservice.all()? { if self.appservice_in_room(room_id, &appservice, db)? { From 414c7c40c4f215cae20917310ae69a4df66152b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Sun, 3 Apr 2022 19:19:57 +0200 Subject: [PATCH 247/445] fix: remove our server from the list of servers to send the event PDU to --- src/database/rooms.rs | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 5b86b2f4..bf34cdc2 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1957,11 +1957,8 @@ impl Rooms { // where events in the current room state do not exist self.set_room_state(room_id, statehashid)?; - let mut servers: HashSet> = self - .room_servers(room_id) - .filter_map(|r| r.ok()) - .filter(|server| &**server != db.globals.server_name()) - .collect(); + let mut servers: HashSet> = + self.room_servers(room_id).filter_map(|r| r.ok()).collect(); // In case we are kicking or banning a user, we need to inform their server of the change if pdu.kind == EventType::RoomMember { @@ -1974,6 +1971,8 @@ impl Rooms { } } + servers.remove(db.globals.server_name()); + db.sending.send_pdu(servers.into_iter(), &pdu_id)?; for appservice in db.appservice.all()? 
{ From a08c667230ff5bb6f93b15743230111045273f76 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Sun, 3 Apr 2022 19:27:48 +0200 Subject: [PATCH 248/445] docs: add comments for clarification of recent changes --- src/database/rooms.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index bf34cdc2..5cbe56a9 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1971,6 +1971,7 @@ impl Rooms { } } + // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above servers.remove(db.globals.server_name()); db.sending.send_pdu(servers.into_iter(), &pdu_id)?; From 21bc099ccf08197a72878ca6e1a7dd5db1c71346 Mon Sep 17 00:00:00 2001 From: chenyuqide Date: Fri, 4 Mar 2022 08:08:32 +0800 Subject: [PATCH 249/445] Update ruma --- Cargo.lock | 64 +++++++++++++++++++----------------- Cargo.toml | 2 +- src/client_server/session.rs | 4 +-- src/database/uiaa.rs | 4 +-- 4 files changed, 38 insertions(+), 36 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 66daf5e6..c48fdea6 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2102,7 +2102,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" dependencies = [ "assign", "js_int", @@ -2122,8 +2122,8 @@ dependencies = [ [[package]] name = "ruma-api" -version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +version = "0.20.0" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" dependencies = [ "bytes", "http", @@ -2139,8 +2139,8 @@ dependencies = [ [[package]] name = "ruma-api-macros" -version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +version = "0.20.0" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2151,7 +2151,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" dependencies = [ "ruma-api", "ruma-common", @@ -2164,8 +2164,8 @@ dependencies = [ [[package]] name = "ruma-client-api" -version = "0.12.3" -source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +version = "0.13.0" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" dependencies = [ "assign", "bytes", @@ -2184,8 +2184,8 @@ dependencies = [ [[package]] name = "ruma-common" -version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +version = "0.8.0" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" 
dependencies = [ "indexmap", "js_int", @@ -2199,8 +2199,8 @@ dependencies = [ [[package]] name = "ruma-events" -version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +version = "0.26.0" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" dependencies = [ "indoc", "js_int", @@ -2216,8 +2216,8 @@ dependencies = [ [[package]] name = "ruma-events-macros" -version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +version = "0.26.0" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2228,7 +2228,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" dependencies = [ "js_int", "ruma-api", @@ -2242,8 +2242,8 @@ dependencies = [ [[package]] name = "ruma-identifiers" -version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +version = "0.22.0" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" dependencies = [ "percent-encoding", "rand 0.8.4", @@ -2252,13 +2252,14 @@ dependencies = [ "ruma-serde", "ruma-serde-macros", "serde", + "url", "uuid", ] [[package]] name = "ruma-identifiers-macros" -version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +version = "0.22.0" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" dependencies = [ "quote", "ruma-identifiers-validation", @@ -2267,16 +2268,17 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" -version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +version = "0.7.0" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" dependencies = [ "thiserror", + "url", ] [[package]] name = "ruma-identity-service-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" dependencies = [ "js_int", "ruma-api", @@ -2289,7 +2291,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" dependencies = [ "js_int", "ruma-api", @@ -2303,8 +2305,8 @@ dependencies = [ [[package]] name = "ruma-serde" -version = "0.5.0" -source = 
"git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +version = "0.6.0" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" dependencies = [ "base64 0.13.0", "bytes", @@ -2318,8 +2320,8 @@ dependencies = [ [[package]] name = "ruma-serde-macros" -version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +version = "0.6.0" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2329,8 +2331,8 @@ dependencies = [ [[package]] name = "ruma-signatures" -version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +version = "0.10.0" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2346,8 +2348,8 @@ dependencies = [ [[package]] name = "ruma-state-res" -version = "0.4.1" -source = "git+https://github.com/ruma/ruma?rev=fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2#fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2" +version = "0.5.0" +source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" dependencies = [ "itertools", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 627829f0..67c05536 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ tower-http = { version = "0.2.1", features = ["add-extension", "cors", "compress # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "fa2e3662a456bd8957b3e1293c1dfaf66e85c2f2", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "95fdb303c82e257eee18f5064b87ed4e2ed01ac0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/client_server/session.rs b/src/client_server/session.rs index c78f6001..c0fcb379 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -51,8 +51,8 @@ pub async fn login_route( identifier, password, }) => { - let username = if let IncomingUserIdentifier::MatrixId(matrix_id) = identifier { - matrix_id.to_lowercase() + let username = if let IncomingUserIdentifier::UserIdOrLocalpart(user_id) = identifier { + 
user_id.to_lowercase() } else { return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type.")); }; diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index 6b15d721..2c610649 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -8,7 +8,7 @@ use ruma::{ api::client::{ error::ErrorKind, uiaa::{ - AuthType, IncomingAuthData, IncomingPassword, IncomingUserIdentifier::MatrixId, + AuthType, IncomingAuthData, IncomingPassword, IncomingUserIdentifier::UserIdOrLocalpart, UiaaInfo, }, }, @@ -74,7 +74,7 @@ impl Uiaa { .. }) => { let username = match identifier { - MatrixId(username) => username, + UserIdOrLocalpart(username) => username, _ => { return Err(Error::BadRequest( ErrorKind::Unrecognized, From ee96a03d60f39fa24bba2643d4c363972fd1df81 Mon Sep 17 00:00:00 2001 From: chenyuqide Date: Sat, 5 Mar 2022 10:16:21 +0800 Subject: [PATCH 250/445] Update ruma --- Cargo.lock | 128 ++++++++++----------------------- Cargo.toml | 2 +- src/client_server/account.rs | 4 ++ src/client_server/backup.rs | 75 +++++++++---------- src/client_server/config.rs | 4 +- src/client_server/directory.rs | 25 ++++++- src/client_server/report.rs | 26 +++---- src/server_server.rs | 24 ++++++- 8 files changed, 144 insertions(+), 144 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c48fdea6..8dba0bfa 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2101,12 +2101,11 @@ dependencies = [ [[package]] name = "ruma" -version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" +version = "0.5.0" +source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" dependencies = [ "assign", "js_int", - "ruma-api", "ruma-appservice-api", "ruma-client-api", "ruma-common", @@ -2120,40 +2119,11 @@ dependencies = [ "ruma-state-res", ] -[[package]] -name = "ruma-api" -version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" -dependencies = [ - "bytes", - "http", - "percent-encoding", - "ruma-api-macros", - "ruma-identifiers", - "ruma-serde", - "serde", - "serde_json", - "thiserror", - "tracing", -] - -[[package]] -name = "ruma-api-macros" -version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "ruma-appservice-api" -version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" +version = "0.5.0" +source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" dependencies = [ - "ruma-api", "ruma-common", "ruma-events", "ruma-identifiers", @@ -2165,7 +2135,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.13.0" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" +source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" dependencies = [ "assign", "bytes", @@ -2173,7 +2143,6 @@ dependencies = [ "js_int", "maplit", "percent-encoding", - "ruma-api", "ruma-common", "ruma-events", "ruma-identifiers", @@ -2185,14 +2154,19 @@ dependencies = [ [[package]] name = 
"ruma-common" version = "0.8.0" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" +source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" dependencies = [ + "bytes", + "http", "indexmap", "js_int", + "percent-encoding", "ruma-identifiers", + "ruma-macros", "ruma-serde", "serde", "serde_json", + "thiserror", "tracing", "wildmatch", ] @@ -2200,13 +2174,13 @@ dependencies = [ [[package]] name = "ruma-events" version = "0.26.0" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" +source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" dependencies = [ "indoc", "js_int", "ruma-common", - "ruma-events-macros", "ruma-identifiers", + "ruma-macros", "ruma-serde", "serde", "serde_json", @@ -2214,24 +2188,12 @@ dependencies = [ "wildmatch", ] -[[package]] -name = "ruma-events-macros" -version = "0.26.0" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "ruma-federation-api" -version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" +version = "0.4.0" +source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" dependencies = [ "js_int", - "ruma-api", "ruma-common", "ruma-events", "ruma-identifiers", @@ -2243,33 +2205,22 @@ dependencies = [ [[package]] name = "ruma-identifiers" version = "0.22.0" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" +source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" dependencies = [ "percent-encoding", "rand 0.8.4", - "ruma-identifiers-macros", "ruma-identifiers-validation", + "ruma-macros", "ruma-serde", - "ruma-serde-macros", "serde", "url", "uuid", ] -[[package]] -name = "ruma-identifiers-macros" -version = "0.22.0" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" -dependencies = [ - "quote", - "ruma-identifiers-validation", - "syn", -] - [[package]] name = "ruma-identifiers-validation" version = "0.7.0" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" +source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" dependencies = [ "thiserror", "url", @@ -2277,24 +2228,34 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" -version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" +version = "0.4.0" +source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" dependencies = [ "js_int", - "ruma-api", "ruma-common", "ruma-identifiers", "ruma-serde", "serde", ] +[[package]] +name = "ruma-macros" +version = "0.1.0" +source = 
"git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" +dependencies = [ + "proc-macro-crate", + "proc-macro2", + "quote", + "ruma-identifiers-validation", + "syn", +] + [[package]] name = "ruma-push-gateway-api" -version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" +version = "0.4.0" +source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" dependencies = [ "js_int", - "ruma-api", "ruma-common", "ruma-events", "ruma-identifiers", @@ -2306,33 +2267,22 @@ dependencies = [ [[package]] name = "ruma-serde" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" +source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" dependencies = [ "base64 0.13.0", "bytes", "form_urlencoded", "itoa 1.0.1", "js_int", - "ruma-serde-macros", + "ruma-macros", "serde", "serde_json", ] -[[package]] -name = "ruma-serde-macros" -version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "ruma-signatures" version = "0.10.0" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" +source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" dependencies = [ "base64 0.13.0", "ed25519-dalek", @@ -2348,8 +2298,8 @@ dependencies = [ [[package]] name = "ruma-state-res" -version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=95fdb303c82e257eee18f5064b87ed4e2ed01ac0#95fdb303c82e257eee18f5064b87ed4e2ed01ac0" +version = "0.6.0" +source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" dependencies = [ "itertools", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 67c05536..17f158d7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ tower-http = { version = "0.2.1", features = ["add-extension", "cors", "compress # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "95fdb303c82e257eee18f5064b87ed4e2ed01ac0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "588fe9c006eb140264160e68f4a21ea1fb28af18", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = 
"../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 32488f22..4c2dff9f 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -342,8 +342,12 @@ pub async fn change_password_route( /// Note: Also works for Application Services pub async fn whoami_route(body: Ruma) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let device_id = body.sender_device.as_ref().cloned(); + let is_guest = device_id.is_none(); Ok(whoami::v3::Response { user_id: sender_user.clone(), + device_id, + is_guest, }) } diff --git a/src/client_server/backup.rs b/src/client_server/backup.rs index 808d8868..2e449d10 100644 --- a/src/client_server/backup.rs +++ b/src/client_server/backup.rs @@ -1,10 +1,11 @@ use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::api::client::{ backup::{ - add_backup_key_session, add_backup_key_sessions, add_backup_keys, create_backup, - delete_backup, delete_backup_key_session, delete_backup_key_sessions, delete_backup_keys, - get_backup, get_backup_key_session, get_backup_key_sessions, get_backup_keys, - get_latest_backup, update_backup, + add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session, + create_backup_version, delete_backup_keys, delete_backup_keys_for_room, + delete_backup_keys_for_session, delete_backup_version, get_backup_info, get_backup_keys, + get_backup_keys_for_room, get_backup_keys_for_session, get_latest_backup_info, + update_backup_version, }, error::ErrorKind, }; @@ -14,8 +15,8 @@ use ruma::api::client::{ /// Creates a new backup. pub async fn create_backup_route( db: DatabaseGuard, - body: Ruma, -) -> Result { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let version = db .key_backups @@ -23,7 +24,7 @@ pub async fn create_backup_route( db.flush()?; - Ok(create_backup::v3::Response { version }) + Ok(create_backup_version::v3::Response { version }) } /// # `PUT /_matrix/client/r0/room_keys/version/{version}` @@ -31,15 +32,15 @@ pub async fn create_backup_route( /// Update information about an existing backup. Only `auth_data` can be modified. pub async fn update_backup_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups .update_backup(sender_user, &body.version, &body.algorithm, &db.globals)?; db.flush()?; - Ok(update_backup::v3::Response {}) + Ok(update_backup_version::v3::Response {}) } /// # `GET /_matrix/client/r0/room_keys/version` @@ -47,8 +48,8 @@ pub async fn update_backup_route( /// Get information about the latest backup version. pub async fn get_latest_backup_route( db: DatabaseGuard, - body: Ruma, -) -> Result { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let (version, algorithm) = @@ -59,7 +60,7 @@ pub async fn get_latest_backup_route( "Key backup does not exist.", ))?; - Ok(get_latest_backup::v3::Response { + Ok(get_latest_backup_info::v3::Response { algorithm, count: (db.key_backups.count_keys(sender_user, &version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &version)?, @@ -72,8 +73,8 @@ pub async fn get_latest_backup_route( /// Get information about an existing backup. 
pub async fn get_backup_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let algorithm = db .key_backups @@ -83,7 +84,7 @@ pub async fn get_backup_route( "Key backup does not exist.", ))?; - Ok(get_backup::v3::Response { + Ok(get_backup_info::v3::Response { algorithm, count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, @@ -98,15 +99,15 @@ pub async fn get_backup_route( /// - Deletes both information about the backup, as well as all key data related to the backup pub async fn delete_backup_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups.delete_backup(sender_user, &body.version)?; db.flush()?; - Ok(delete_backup::v3::Response {}) + Ok(delete_backup_version::v3::Response {}) } /// # `PUT /_matrix/client/r0/room_keys/keys` @@ -164,8 +165,8 @@ pub async fn add_backup_keys_route( /// - Returns the new number of keys in this backup and the etag pub async fn add_backup_key_sessions_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if Some(&body.version) @@ -193,7 +194,7 @@ pub async fn add_backup_key_sessions_route( db.flush()?; - Ok(add_backup_key_sessions::v3::Response { + Ok(add_backup_keys_for_room::v3::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, }) @@ -208,8 +209,8 @@ pub async fn add_backup_key_sessions_route( /// - Returns the new number of keys in this backup and the etag pub async fn add_backup_key_session_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if Some(&body.version) @@ -235,7 +236,7 @@ pub async fn add_backup_key_session_route( db.flush()?; - Ok(add_backup_key_session::v3::Response { + Ok(add_backup_keys_for_session::v3::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, }) @@ -260,15 +261,15 @@ pub async fn get_backup_keys_route( /// Retrieves all keys from the backup for a given room. pub async fn get_backup_key_sessions_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sessions = db .key_backups .get_room(sender_user, &body.version, &body.room_id)?; - Ok(get_backup_key_sessions::v3::Response { sessions }) + Ok(get_backup_keys_for_room::v3::Response { sessions }) } /// # `GET /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` @@ -276,8 +277,8 @@ pub async fn get_backup_key_sessions_route( /// Retrieves a key from the backup. 
pub async fn get_backup_key_session_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let key_data = db @@ -288,7 +289,7 @@ pub async fn get_backup_key_session_route( "Backup key not found for this user's session.", ))?; - Ok(get_backup_key_session::v3::Response { key_data }) + Ok(get_backup_keys_for_session::v3::Response { key_data }) } /// # `DELETE /_matrix/client/r0/room_keys/keys` @@ -315,8 +316,8 @@ pub async fn delete_backup_keys_route( /// Delete the keys from the backup for a given room. pub async fn delete_backup_key_sessions_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups @@ -324,7 +325,7 @@ pub async fn delete_backup_key_sessions_route( db.flush()?; - Ok(delete_backup_key_sessions::v3::Response { + Ok(delete_backup_keys_for_room::v3::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, }) @@ -335,8 +336,8 @@ pub async fn delete_backup_key_sessions_route( /// Delete a key from the backup. pub async fn delete_backup_key_session_route( db: DatabaseGuard, - body: Ruma>, -) -> Result { + body: Ruma>, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups @@ -344,7 +345,7 @@ pub async fn delete_backup_key_session_route( db.flush()?; - Ok(delete_backup_key_session::v3::Response { + Ok(delete_backup_keys_for_session::v3::Response { count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), etag: db.key_backups.get_etag(sender_user, &body.version)?, }) diff --git a/src/client_server/config.rs b/src/client_server/config.rs index a9a2fb14..d39f8b69 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -22,7 +22,7 @@ pub async fn set_global_account_data_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let data: serde_json::Value = serde_json::from_str(body.data.get()) + let data: serde_json::Value = serde_json::from_str(body.data.json().get()) .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?; let event_type = body.event_type.to_string(); @@ -52,7 +52,7 @@ pub async fn set_room_account_data_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let data: serde_json::Value = serde_json::from_str(body.data.get()) + let data: serde_json::Value = serde_json::from_str(body.data.json().get()) .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?; let event_type = body.event_type.to_string(); diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index ad88254e..f26df879 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -11,13 +11,17 @@ use ruma::{ }, federation, }, - directory::{Filter, IncomingFilter, IncomingRoomNetwork, PublicRoomsChunk, RoomNetwork}, + directory::{ + Filter, IncomingFilter, IncomingRoomNetwork, PublicRoomJoinRule, PublicRoomsChunk, + RoomNetwork, + }, events::{ room::{ avatar::RoomAvatarEventContent, canonical_alias::RoomCanonicalAliasEventContent, guest_access::{GuestAccess, RoomGuestAccessEventContent}, history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, + join_rules::{JoinRule, RoomJoinRulesEventContent}, 
name::RoomNameEventContent, topic::RoomTopicEventContent, }, @@ -265,6 +269,25 @@ pub(crate) async fn get_public_rooms_filtered_helper( .transpose()? // url is now an Option so we must flatten .flatten(), + join_rule: db + .rooms + .room_state_get(&room_id, &EventType::RoomJoinRules, "")? + .map(|s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomJoinRulesEventContent| match c.join_rule { + JoinRule::Public => Some(PublicRoomJoinRule::Public), + JoinRule::Knock => Some(PublicRoomJoinRule::Knock), + _ => None, + }) + .map_err(|_| { + Error::bad_database("Invalid room join rule event in database.") + }) + }) + .transpose()? + .flatten() + .ok_or(Error::bad_database( + "Invalid room join rule event in database.", + ))?, room_id, }; Ok(chunk) diff --git a/src/client_server/report.rs b/src/client_server/report.rs index 1e47792e..8c51e9ca 100644 --- a/src/client_server/report.rs +++ b/src/client_server/report.rs @@ -25,14 +25,14 @@ pub async fn report_event_route( } }; - if body.score > int!(0) || body.score < int!(-100) { + if let Some(true) = body.score.map(|s| s > int!(0) || s < int!(-100)) { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Invalid score, must be within 0 to -100", )); }; - if body.reason.chars().count() > 250 { + if let Some(true) = body.reason.clone().map(|s| s.chars().count() > 250) { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Reason too long, should be 250 characters or fewer", @@ -43,26 +43,26 @@ pub async fn report_event_route( .send_message(message::RoomMessageEventContent::text_html( format!( "Report received from: {}\n\n\ - Event ID: {}\n\ - Room ID: {}\n\ - Sent By: {}\n\n\ - Report Score: {}\n\ - Report Reason: {}", + Event ID: {:?}\n\ + Room ID: {:?}\n\ + Sent By: {:?}\n\n\ + Report Score: {:?}\n\ + Report Reason: {:?}", sender_user, pdu.event_id, pdu.room_id, pdu.sender, body.score, body.reason ), format!( - "
<details><summary>Report received from: <a href=\"https://matrix.to/#/{0}\">{0}\
-                </a></summary><ul><li>Event Info<ul><li>Event ID: <code>{1}</code>\
-                <a href=\"https://matrix.to/#/{2}/{1}\">🔗</a></li><li>Room ID: <code>{2}</code>\
-                </li><li>Sent By: <a href=\"https://matrix.to/#/{3}\">{3}</a></li></ul></li><li>\
-                Report Info<ul><li>Report Score: {4}</li><li>Report Reason: {5}</li></ul></li>\
-                </ul></details>",
+                "<details><summary>Report received from: <a href=\"https://matrix.to/#/{0:?}\">{0:?}\
+                </a></summary><ul><li>Event Info<ul><li>Event ID: <code>{1:?}</code>\
+                <a href=\"https://matrix.to/#/{2:?}/{1:?}\">🔗</a></li><li>Room ID: <code>{2:?}</code>\
+                </li><li>Sent By: <a href=\"https://matrix.to/#/{3:?}\">{3:?}</a></li></ul></li><li>\
+                Report Info<ul><li>Report Score: {4:?}</li><li>Report Reason: {5}</li></ul></li>\
+                </ul></details>
                ", sender_user, pdu.event_id, pdu.room_id, pdu.sender, body.score, - HtmlEscape(&body.reason) + HtmlEscape(&body.reason.clone().unwrap_or(String::new())) ), )); diff --git a/src/server_server.rs b/src/server_server.rs index 9dc26170..56f5b9df 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -30,7 +30,7 @@ use ruma::{ }, query::{get_profile_information, get_room_information}, transactions::{ - edu::{DeviceListUpdateContent, DirectDeviceContent, Edu}, + edu::{DeviceListUpdateContent, DirectDeviceContent, Edu, SigningKeyUpdateContent}, send_transaction_message, }, }, @@ -840,6 +840,22 @@ pub async fn send_transaction_message_route( db.transaction_ids .add_txnid(&sender, None, &message_id, &[])?; } + Edu::SigningKeyUpdate(SigningKeyUpdateContent { + user_id, + master_key, + self_signing_key, + }) => { + if let Some(master_key) = master_key { + db.users.add_cross_signing_keys( + &user_id, + &master_key, + &self_signing_key, + &None, + &db.rooms, + &db.globals, + )?; + } + } Edu::_Custom(_) => {} } } @@ -2998,6 +3014,12 @@ pub async fn get_devices_route( }) }) .collect(), + master_key: db + .users + .get_master_key(&body.user_id, |u| u == &body.user_id)?, + self_signing_key: db + .users + .get_self_signing_key(&body.user_id, |u| u == &body.user_id)?, }) } From 566833111c2b18055320989845d17c72c3420675 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 6 Apr 2022 18:49:46 +0200 Subject: [PATCH 251/445] refactor: small improvements --- src/client_server/account.rs | 9 ++++++--- src/client_server/report.rs | 2 +- src/database/uiaa.rs | 4 ++-- src/server_server.rs | 9 +++++++-- 4 files changed, 16 insertions(+), 8 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 4c2dff9f..fcdf5514 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -340,14 +340,17 @@ pub async fn change_password_route( /// Get user_id of the sender user. 
/// /// Note: Also works for Application Services -pub async fn whoami_route(body: Ruma) -> Result { +pub async fn whoami_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let device_id = body.sender_device.as_ref().cloned(); - let is_guest = device_id.is_none(); + Ok(whoami::v3::Response { user_id: sender_user.clone(), device_id, - is_guest, + is_guest: db.users.is_deactivated(&sender_user)?, }) } diff --git a/src/client_server/report.rs b/src/client_server/report.rs index 8c51e9ca..e60da692 100644 --- a/src/client_server/report.rs +++ b/src/client_server/report.rs @@ -62,7 +62,7 @@ pub async fn report_event_route( pdu.room_id, pdu.sender, body.score, - HtmlEscape(&body.reason.clone().unwrap_or(String::new())) + HtmlEscape(body.reason.as_deref().unwrap_or("")) ), )); diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs index 2c610649..12373139 100644 --- a/src/database/uiaa.rs +++ b/src/database/uiaa.rs @@ -8,8 +8,8 @@ use ruma::{ api::client::{ error::ErrorKind, uiaa::{ - AuthType, IncomingAuthData, IncomingPassword, IncomingUserIdentifier::UserIdOrLocalpart, - UiaaInfo, + AuthType, IncomingAuthData, IncomingPassword, + IncomingUserIdentifier::UserIdOrLocalpart, UiaaInfo, }, }, signatures::CanonicalJsonValue, diff --git a/src/server_server.rs b/src/server_server.rs index 56f5b9df..d68ded82 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -2991,6 +2991,11 @@ pub async fn get_devices_route( return Err(Error::bad_config("Federation is disabled.")); } + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + Ok(get_devices::v1::Response { user_id: body.user_id.clone(), stream_id: db @@ -3016,10 +3021,10 @@ pub async fn get_devices_route( .collect(), master_key: db .users - .get_master_key(&body.user_id, |u| u == &body.user_id)?, + .get_master_key(&body.user_id, |u| u.server_name() == sender_servername)?, self_signing_key: db .users - .get_self_signing_key(&body.user_id, |u| u == &body.user_id)?, + .get_self_signing_key(&body.user_id, |u| u.server_name() == sender_servername)?, }) } From b8411ae2fd359e890b3805116b3b32f9aed16e74 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 6 Apr 2022 19:01:16 +0200 Subject: [PATCH 252/445] refactor: rename endpoints to match ruma --- src/client_server/backup.rs | 22 +++++++++++----------- src/main.rs | 22 +++++++++++----------- 2 files changed, 22 insertions(+), 22 deletions(-) diff --git a/src/client_server/backup.rs b/src/client_server/backup.rs index 2e449d10..b48343fc 100644 --- a/src/client_server/backup.rs +++ b/src/client_server/backup.rs @@ -13,7 +13,7 @@ use ruma::api::client::{ /// # `POST /_matrix/client/r0/room_keys/version` /// /// Creates a new backup. -pub async fn create_backup_route( +pub async fn create_backup_version_route( db: DatabaseGuard, body: Ruma, ) -> Result { @@ -30,7 +30,7 @@ pub async fn create_backup_route( /// # `PUT /_matrix/client/r0/room_keys/version/{version}` /// /// Update information about an existing backup. Only `auth_data` can be modified. -pub async fn update_backup_route( +pub async fn update_backup_version_route( db: DatabaseGuard, body: Ruma>, ) -> Result { @@ -46,7 +46,7 @@ pub async fn update_backup_route( /// # `GET /_matrix/client/r0/room_keys/version` /// /// Get information about the latest backup version. 
-pub async fn get_latest_backup_route( +pub async fn get_latest_backup_info_route( db: DatabaseGuard, body: Ruma, ) -> Result { @@ -71,7 +71,7 @@ pub async fn get_latest_backup_route( /// # `GET /_matrix/client/r0/room_keys/version` /// /// Get information about an existing backup. -pub async fn get_backup_route( +pub async fn get_backup_info_route( db: DatabaseGuard, body: Ruma>, ) -> Result { @@ -97,7 +97,7 @@ pub async fn get_backup_route( /// Delete an existing key backup. /// /// - Deletes both information about the backup, as well as all key data related to the backup -pub async fn delete_backup_route( +pub async fn delete_backup_version_route( db: DatabaseGuard, body: Ruma>, ) -> Result { @@ -163,7 +163,7 @@ pub async fn add_backup_keys_route( /// - Only manipulating the most recently created version of the backup is allowed /// - Adds the keys to the backup /// - Returns the new number of keys in this backup and the etag -pub async fn add_backup_key_sessions_route( +pub async fn add_backup_keys_for_room_route( db: DatabaseGuard, body: Ruma>, ) -> Result { @@ -207,7 +207,7 @@ pub async fn add_backup_key_sessions_route( /// - Only manipulating the most recently created version of the backup is allowed /// - Adds the keys to the backup /// - Returns the new number of keys in this backup and the etag -pub async fn add_backup_key_session_route( +pub async fn add_backup_keys_for_session_route( db: DatabaseGuard, body: Ruma>, ) -> Result { @@ -259,7 +259,7 @@ pub async fn get_backup_keys_route( /// # `GET /_matrix/client/r0/room_keys/keys/{roomId}` /// /// Retrieves all keys from the backup for a given room. -pub async fn get_backup_key_sessions_route( +pub async fn get_backup_keys_for_room_route( db: DatabaseGuard, body: Ruma>, ) -> Result { @@ -275,7 +275,7 @@ pub async fn get_backup_key_sessions_route( /// # `GET /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` /// /// Retrieves a key from the backup. -pub async fn get_backup_key_session_route( +pub async fn get_backup_keys_for_session_route( db: DatabaseGuard, body: Ruma>, ) -> Result { @@ -314,7 +314,7 @@ pub async fn delete_backup_keys_route( /// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}` /// /// Delete the keys from the backup for a given room. -pub async fn delete_backup_key_sessions_route( +pub async fn delete_backup_keys_for_room_route( db: DatabaseGuard, body: Ruma>, ) -> Result { @@ -334,7 +334,7 @@ pub async fn delete_backup_key_sessions_route( /// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` /// /// Delete a key from the backup. 
-pub async fn delete_backup_key_session_route( +pub async fn delete_backup_keys_for_session_route( db: DatabaseGuard, body: Ruma>, ) -> Result { diff --git a/src/main.rs b/src/main.rs index fae33804..a9047ecb 100644 --- a/src/main.rs +++ b/src/main.rs @@ -224,18 +224,18 @@ fn routes() -> Router { .ruma_route(client_server::upload_keys_route) .ruma_route(client_server::get_keys_route) .ruma_route(client_server::claim_keys_route) - .ruma_route(client_server::create_backup_route) - .ruma_route(client_server::update_backup_route) - .ruma_route(client_server::delete_backup_route) - .ruma_route(client_server::get_latest_backup_route) - .ruma_route(client_server::get_backup_route) - .ruma_route(client_server::add_backup_key_sessions_route) - .ruma_route(client_server::add_backup_keys_route) - .ruma_route(client_server::delete_backup_key_session_route) - .ruma_route(client_server::delete_backup_key_sessions_route) + .ruma_route(client_server::create_backup_version_route) + .ruma_route(client_server::update_backup_version_route) + .ruma_route(client_server::delete_backup_version_route) + .ruma_route(client_server::get_latest_backup_info_route) + .ruma_route(client_server::get_backup_info_route) + .ruma_route(client_server::add_backup_keys_for_room_route) + .ruma_route(client_server::add_backup_keys_for_session_route) + .ruma_route(client_server::delete_backup_keys_for_room_route) + .ruma_route(client_server::delete_backup_keys_for_session_route) .ruma_route(client_server::delete_backup_keys_route) - .ruma_route(client_server::get_backup_key_session_route) - .ruma_route(client_server::get_backup_key_sessions_route) + .ruma_route(client_server::get_backup_keys_for_room_route) + .ruma_route(client_server::get_backup_keys_for_session_route) .ruma_route(client_server::get_backup_keys_route) .ruma_route(client_server::set_read_marker_route) .ruma_route(client_server::create_receipt_route) From 17ad5f0595c3b91683ef620aa8d3a400479136da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 6 Apr 2022 19:08:23 +0200 Subject: [PATCH 253/445] fix: checks for incoming cross signing changes --- src/server_server.rs | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/src/server_server.rs b/src/server_server.rs index d68ded82..371f2979 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -639,6 +639,11 @@ pub async fn send_transaction_message_route( return Err(Error::bad_config("Federation is disabled.")); } + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + let mut resolved_map = BTreeMap::new(); let pub_key_map = RwLock::new(BTreeMap::new()); @@ -674,7 +679,7 @@ pub async fn send_transaction_message_route( } }; - acl_check(&body.origin, &room_id, &db)?; + acl_check(&sender_servername, &room_id, &db)?; let mutex = Arc::clone( db.globals @@ -689,7 +694,7 @@ pub async fn send_transaction_message_route( resolved_map.insert( event_id.clone(), handle_incoming_pdu( - &body.origin, + &sender_servername, &event_id, &room_id, value, @@ -845,6 +850,9 @@ pub async fn send_transaction_message_route( master_key, self_signing_key, }) => { + if user_id.server_name() != sender_servername { + continue; + } if let Some(master_key) = master_key { db.users.add_cross_signing_keys( &user_id, From 2808dd2000f331c9ef90d152afefe7c04e3b1e92 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 6 Apr 2022 21:31:29 +0200 Subject: [PATCH 254/445] Ruma upgrade --- Cargo.lock | 102 +++++------------------- 
Cargo.toml | 2 +- src/client_server/account.rs | 14 ++-- src/client_server/alias.rs | 6 +- src/client_server/backup.rs | 24 +++--- src/client_server/capabilities.rs | 6 +- src/client_server/config.rs | 8 +- src/client_server/context.rs | 6 +- src/client_server/device.rs | 8 +- src/client_server/directory.rs | 24 +++--- src/client_server/filter.rs | 4 +- src/client_server/keys.rs | 6 +- src/client_server/media.rs | 8 +- src/client_server/membership.rs | 63 +++++++-------- src/client_server/message.rs | 33 +++++--- src/client_server/presence.rs | 4 +- src/client_server/profile.rs | 20 ++--- src/client_server/push.rs | 102 ++++++++++++++++++------ src/client_server/read_marker.rs | 8 +- src/client_server/redact.rs | 6 +- src/client_server/report.rs | 2 +- src/client_server/room.rs | 63 +++++++-------- src/client_server/search.rs | 2 +- src/client_server/session.rs | 4 +- src/client_server/state.rs | 28 +++---- src/client_server/sync.rs | 28 +++---- src/client_server/tag.rs | 30 ++++--- src/client_server/thirdparty.rs | 2 +- src/client_server/to_device.rs | 9 ++- src/client_server/typing.rs | 2 +- src/client_server/unversioned.rs | 5 +- src/client_server/user_directory.rs | 2 +- src/client_server/voip.rs | 2 +- src/database/account_data.rs | 16 ++-- src/database/admin.rs | 32 ++++---- src/database/pusher.rs | 10 +-- src/database/rooms.rs | 118 ++++++++++++++++------------ src/database/sending.rs | 11 ++- src/database/transaction_ids.rs | 2 +- src/database/users.rs | 9 +-- src/main.rs | 12 +-- src/pdu.rs | 18 ++--- src/ruma_wrapper.rs | 13 ++- src/ruma_wrapper/axum.rs | 14 ++-- src/server_server.rs | 106 +++++++++++++++---------- 45 files changed, 524 insertions(+), 470 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 8dba0bfa..cd518259 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2102,19 +2102,16 @@ dependencies = [ [[package]] name = "ruma" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" +source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" dependencies = [ "assign", "js_int", "ruma-appservice-api", "ruma-client-api", "ruma-common", - "ruma-events", "ruma-federation-api", - "ruma-identifiers", "ruma-identity-service-api", "ruma-push-gateway-api", - "ruma-serde", "ruma-signatures", "ruma-state-res", ] @@ -2122,12 +2119,9 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" +source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" dependencies = [ "ruma-common", - "ruma-events", - "ruma-identifiers", - "ruma-serde", "serde", "serde_json", ] @@ -2135,7 +2129,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.13.0" -source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" +source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" dependencies = [ "assign", "bytes", @@ -2144,9 +2138,6 @@ dependencies = [ "maplit", "percent-encoding", "ruma-common", - "ruma-events", - "ruma-identifiers", - "ruma-serde", "serde", "serde_json", ] @@ -2154,73 +2145,44 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.8.0" -source = 
"git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" +source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" dependencies = [ + "base64 0.13.0", "bytes", + "form_urlencoded", "http", "indexmap", + "indoc", + "itoa 1.0.1", "js_int", "percent-encoding", - "ruma-identifiers", + "rand 0.8.4", + "ruma-identifiers-validation", "ruma-macros", - "ruma-serde", "serde", "serde_json", "thiserror", "tracing", - "wildmatch", -] - -[[package]] -name = "ruma-events" -version = "0.26.0" -source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" -dependencies = [ - "indoc", - "js_int", - "ruma-common", - "ruma-identifiers", - "ruma-macros", - "ruma-serde", - "serde", - "serde_json", - "thiserror", + "url", + "uuid", "wildmatch", ] [[package]] name = "ruma-federation-api" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" +source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" dependencies = [ "js_int", "ruma-common", - "ruma-events", - "ruma-identifiers", - "ruma-serde", "serde", "serde_json", ] -[[package]] -name = "ruma-identifiers" -version = "0.22.0" -source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" -dependencies = [ - "percent-encoding", - "rand 0.8.4", - "ruma-identifiers-validation", - "ruma-macros", - "ruma-serde", - "serde", - "url", - "uuid", -] - [[package]] name = "ruma-identifiers-validation" version = "0.7.0" -source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" +source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" dependencies = [ "thiserror", "url", @@ -2229,19 +2191,17 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" +source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" dependencies = [ "js_int", "ruma-common", - "ruma-identifiers", - "ruma-serde", "serde", ] [[package]] name = "ruma-macros" version = "0.1.0" -source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" +source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" dependencies = [ "proc-macro-crate", "proc-macro2", @@ -2253,28 +2213,10 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" +source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" dependencies = [ "js_int", "ruma-common", - "ruma-events", - "ruma-identifiers", - "ruma-serde", - "serde", - "serde_json", -] - -[[package]] -name = "ruma-serde" -version = "0.6.0" -source = 
"git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" -dependencies = [ - "base64 0.13.0", - "bytes", - "form_urlencoded", - "itoa 1.0.1", - "js_int", - "ruma-macros", "serde", "serde_json", ] @@ -2282,14 +2224,13 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.10.0" -source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" +source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" dependencies = [ "base64 0.13.0", "ed25519-dalek", "pkcs8", "rand 0.7.3", - "ruma-identifiers", - "ruma-serde", + "ruma-common", "serde_json", "sha2", "thiserror", @@ -2299,14 +2240,11 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=588fe9c006eb140264160e68f4a21ea1fb28af18#588fe9c006eb140264160e68f4a21ea1fb28af18" +source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" dependencies = [ "itertools", "js_int", "ruma-common", - "ruma-events", - "ruma-identifiers", - "ruma-serde", "serde", "serde_json", "thiserror", diff --git a/Cargo.toml b/Cargo.toml index 17f158d7..64b7a233 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ tower-http = { version = "0.2.1", features = ["add-extension", "cors", "compress # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "588fe9c006eb140264160e68f4a21ea1fb28af18", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "d614ad1422d6c4b3437ebc318ca8514ae338fd6d", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/client_server/account.rs b/src/client_server/account.rs index fcdf5514..be14b926 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -18,7 +18,7 @@ use ruma::{ events::{ room::member::{MembershipState, RoomMemberEventContent}, room::message::RoomMessageEventContent, - EventType, + GlobalAccountDataEventType, RoomAccountDataEventType, RoomEventType, }, push, UserId, }; @@ -41,7 +41,7 @@ const GUEST_NAME_LENGTH: usize = 10; /// Note: This will not reserve the username, so the username might become invalid when trying to register pub async fn get_register_available_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { // Validate user id let user_id = @@ -84,7 +84,7 @@ pub async fn 
get_register_available_route( /// - If `inhibit_login` is false: Creates a device and returns device id and access_token pub async fn register_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if !db.globals.allow_registration() && !body.from_appservice { return Err(Error::BadRequest( @@ -194,7 +194,7 @@ pub async fn register_route( db.account_data.update( None, &user_id, - EventType::PushRules, + GlobalAccountDataEventType::PushRules.to_string().into(), &ruma::events::push_rules::PushRulesEvent { content: ruma::events::push_rules::PushRulesEventContent { global: push::Ruleset::server_default(&user_id), @@ -271,7 +271,7 @@ pub async fn register_route( /// - Triggers device list updates pub async fn change_password_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -366,7 +366,7 @@ pub async fn whoami_route( /// - Removes ability to log in again pub async fn deactivate_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -440,7 +440,7 @@ pub async fn deactivate_route( db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), unsigned: None, state_key: Some(sender_user.to_string()), diff --git a/src/client_server/alias.rs b/src/client_server/alias.rs index 75cf85e5..90e9d2c3 100644 --- a/src/client_server/alias.rs +++ b/src/client_server/alias.rs @@ -17,7 +17,7 @@ use ruma::{ /// Creates a new room alias on this server. pub async fn create_alias_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if body.room_alias.server_name() != db.globals.server_name() { return Err(Error::BadRequest( @@ -46,7 +46,7 @@ pub async fn create_alias_route( /// - TODO: Update canonical alias event pub async fn delete_alias_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if body.room_alias.server_name() != db.globals.server_name() { return Err(Error::BadRequest( @@ -71,7 +71,7 @@ pub async fn delete_alias_route( /// - TODO: Suggest more servers to join via pub async fn get_alias_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { get_alias_helper(&db, &body.room_alias).await } diff --git a/src/client_server/backup.rs b/src/client_server/backup.rs index b48343fc..067f20cd 100644 --- a/src/client_server/backup.rs +++ b/src/client_server/backup.rs @@ -32,7 +32,7 @@ pub async fn create_backup_version_route( /// Update information about an existing backup. Only `auth_data` can be modified. pub async fn update_backup_version_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); db.key_backups @@ -73,7 +73,7 @@ pub async fn get_latest_backup_info_route( /// Get information about an existing backup. 
pub async fn get_backup_info_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let algorithm = db @@ -99,7 +99,7 @@ pub async fn get_backup_info_route( /// - Deletes both information about the backup, as well as all key data related to the backup pub async fn delete_backup_version_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -119,7 +119,7 @@ pub async fn delete_backup_version_route( /// - Returns the new number of keys in this backup and the etag pub async fn add_backup_keys_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -165,7 +165,7 @@ pub async fn add_backup_keys_route( /// - Returns the new number of keys in this backup and the etag pub async fn add_backup_keys_for_room_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -209,7 +209,7 @@ pub async fn add_backup_keys_for_room_route( /// - Returns the new number of keys in this backup and the etag pub async fn add_backup_keys_for_session_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -247,7 +247,7 @@ pub async fn add_backup_keys_for_session_route( /// Retrieves all keys from the backup. pub async fn get_backup_keys_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -261,7 +261,7 @@ pub async fn get_backup_keys_route( /// Retrieves all keys from the backup for a given room. pub async fn get_backup_keys_for_room_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -277,7 +277,7 @@ pub async fn get_backup_keys_for_room_route( /// Retrieves a key from the backup. pub async fn get_backup_keys_for_session_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -297,7 +297,7 @@ pub async fn get_backup_keys_for_session_route( /// Delete the keys from the backup. pub async fn delete_backup_keys_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -316,7 +316,7 @@ pub async fn delete_backup_keys_route( /// Delete the keys from the backup for a given room. pub async fn delete_backup_keys_for_room_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -336,7 +336,7 @@ pub async fn delete_backup_keys_for_room_route( /// Delete a key from the backup. 
pub async fn delete_backup_keys_for_session_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/capabilities.rs b/src/client_server/capabilities.rs index ac2e59f6..952db581 100644 --- a/src/client_server/capabilities.rs +++ b/src/client_server/capabilities.rs @@ -1,7 +1,7 @@ use crate::{Result, Ruma}; use ruma::{ - api::client::capabilities::{ - get_capabilities, Capabilities, RoomVersionStability, RoomVersionsCapability, + api::client::discovery::get_capabilities::{ + self, Capabilities, RoomVersionStability, RoomVersionsCapability, }, RoomVersionId, }; @@ -11,7 +11,7 @@ use std::collections::BTreeMap; /// /// Get information on the supported feature set and other relevent capabilities of this server. pub async fn get_capabilities_route( - _body: Ruma, + _body: Ruma, ) -> Result { let mut available = BTreeMap::new(); available.insert(RoomVersionId::V5, RoomVersionStability::Stable); diff --git a/src/client_server/config.rs b/src/client_server/config.rs index d39f8b69..6184e0bc 100644 --- a/src/client_server/config.rs +++ b/src/client_server/config.rs @@ -18,7 +18,7 @@ use serde_json::{json, value::RawValue as RawJsonValue}; /// Sets some account data for the sender user. pub async fn set_global_account_data_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -48,7 +48,7 @@ pub async fn set_global_account_data_route( /// Sets some room account data for the sender user. pub async fn set_room_account_data_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -78,7 +78,7 @@ pub async fn set_room_account_data_route( /// Gets some account data for the sender user. pub async fn get_global_account_data_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -99,7 +99,7 @@ pub async fn get_global_account_data_route( /// Gets some room account data for the sender user. pub async fn get_room_account_data_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/context.rs b/src/client_server/context.rs index 2f6a2eac..8ecd6ecf 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -1,7 +1,7 @@ use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::{ api::client::{context::get_context, error::ErrorKind, filter::LazyLoadOptions}, - events::EventType, + events::{EventType, StateEventType}, }; use std::{collections::HashSet, convert::TryFrom}; use tracing::error; @@ -14,7 +14,7 @@ use tracing::error; /// joined, depending on history_visibility) pub async fn get_context_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -154,7 +154,7 @@ pub async fn get_context_route( for (shortstatekey, id) in state_ids { let (event_type, state_key) = db.rooms.get_statekey_from_short(shortstatekey)?; - if event_type != EventType::RoomMember { + if event_type != StateEventType::RoomMember { let pdu = match db.rooms.get_pdu(&id)? 
{ Some(pdu) => pdu, None => { diff --git a/src/client_server/device.rs b/src/client_server/device.rs index 09c94064..b100bf22 100644 --- a/src/client_server/device.rs +++ b/src/client_server/device.rs @@ -30,7 +30,7 @@ pub async fn get_devices_route( /// Get metadata on a single device of the sender user. pub async fn get_device_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -47,7 +47,7 @@ pub async fn get_device_route( /// Updates the metadata on a given device of the sender user. pub async fn update_device_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -77,7 +77,7 @@ pub async fn update_device_route( /// - Triggers device list updates pub async fn delete_device_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -135,7 +135,7 @@ pub async fn delete_device_route( /// - Triggers device list updates pub async fn delete_devices_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); diff --git a/src/client_server/directory.rs b/src/client_server/directory.rs index f26df879..4e4a3225 100644 --- a/src/client_server/directory.rs +++ b/src/client_server/directory.rs @@ -25,7 +25,7 @@ use ruma::{ name::RoomNameEventContent, topic::RoomTopicEventContent, }, - EventType, + StateEventType, }, ServerName, UInt, }; @@ -38,7 +38,7 @@ use tracing::{info, warn}; /// - Rooms are ordered by the number of joined members pub async fn get_public_rooms_filtered_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { get_public_rooms_filtered_helper( &db, @@ -58,7 +58,7 @@ pub async fn get_public_rooms_filtered_route( /// - Rooms are ordered by the number of joined members pub async fn get_public_rooms_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let response = get_public_rooms_filtered_helper( &db, @@ -85,7 +85,7 @@ pub async fn get_public_rooms_route( /// - TODO: Access control checks pub async fn set_room_visibility_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -113,7 +113,7 @@ pub async fn set_room_visibility_route( /// Gets the visibility of a given room in the room directory. pub async fn get_room_visibility_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { Ok(get_room_visibility::v3::Response { visibility: if db.rooms.is_public_room(&body.room_id)? { @@ -193,7 +193,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( let chunk = PublicRoomsChunk { canonical_alias: db .rooms - .room_state_get(&room_id, &EventType::RoomCanonicalAlias, "")? + .room_state_get(&room_id, &StateEventType::RoomCanonicalAlias, "")? .map_or(Ok(None), |s| { serde_json::from_str(s.content.get()) .map(|c: RoomCanonicalAliasEventContent| c.alias) @@ -203,7 +203,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( })?, name: db .rooms - .room_state_get(&room_id, &EventType::RoomName, "")? + .room_state_get(&room_id, &StateEventType::RoomName, "")? 
.map_or(Ok(None), |s| { serde_json::from_str(s.content.get()) .map(|c: RoomNameEventContent| c.name) @@ -222,7 +222,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( .expect("user count should not be that big"), topic: db .rooms - .room_state_get(&room_id, &EventType::RoomTopic, "")? + .room_state_get(&room_id, &StateEventType::RoomTopic, "")? .map_or(Ok(None), |s| { serde_json::from_str(s.content.get()) .map(|c: RoomTopicEventContent| Some(c.topic)) @@ -232,7 +232,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( })?, world_readable: db .rooms - .room_state_get(&room_id, &EventType::RoomHistoryVisibility, "")? + .room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")? .map_or(Ok(false), |s| { serde_json::from_str(s.content.get()) .map(|c: RoomHistoryVisibilityEventContent| { @@ -246,7 +246,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( })?, guest_can_join: db .rooms - .room_state_get(&room_id, &EventType::RoomGuestAccess, "")? + .room_state_get(&room_id, &StateEventType::RoomGuestAccess, "")? .map_or(Ok(false), |s| { serde_json::from_str(s.content.get()) .map(|c: RoomGuestAccessEventContent| { @@ -258,7 +258,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( })?, avatar_url: db .rooms - .room_state_get(&room_id, &EventType::RoomAvatar, "")? + .room_state_get(&room_id, &StateEventType::RoomAvatar, "")? .map(|s| { serde_json::from_str(s.content.get()) .map(|c: RoomAvatarEventContent| c.url) @@ -271,7 +271,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( .flatten(), join_rule: db .rooms - .room_state_get(&room_id, &EventType::RoomJoinRules, "")? + .room_state_get(&room_id, &StateEventType::RoomJoinRules, "")? .map(|s| { serde_json::from_str(s.content.get()) .map(|c: RoomJoinRulesEventContent| match c.join_rule { diff --git a/src/client_server/filter.rs b/src/client_server/filter.rs index 379950f4..6522c900 100644 --- a/src/client_server/filter.rs +++ b/src/client_server/filter.rs @@ -11,7 +11,7 @@ use ruma::api::client::{ /// - A user can only access their own filters pub async fn get_filter_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let filter = match db.users.get_filter(sender_user, &body.filter_id)? { @@ -27,7 +27,7 @@ pub async fn get_filter_route( /// Creates a new filter to be used by other endpoints. 
pub async fn create_filter_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(create_filter::v3::Response::new( diff --git a/src/client_server/keys.rs b/src/client_server/keys.rs index 525c7790..c4f91cb2 100644 --- a/src/client_server/keys.rs +++ b/src/client_server/keys.rs @@ -71,7 +71,7 @@ pub async fn upload_keys_route( /// - The master and self-signing keys contain signatures that the user is allowed to see pub async fn get_keys_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -107,7 +107,7 @@ pub async fn claim_keys_route( /// - Requires UIAA to verify password pub async fn upload_signing_keys_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -231,7 +231,7 @@ pub async fn upload_signatures_route( /// - TODO: left users pub async fn get_key_changes_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/media.rs b/src/client_server/media.rs index 71dbed68..a9a6d6cd 100644 --- a/src/client_server/media.rs +++ b/src/client_server/media.rs @@ -32,7 +32,7 @@ pub async fn get_media_config_route( /// - Media will be saved in the media/ directory pub async fn create_content_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let mxc = format!( "mxc://{}/{}", @@ -101,7 +101,7 @@ pub async fn get_remote_content( /// - Only allows federation if `allow_remote` is true pub async fn get_content_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); @@ -132,7 +132,7 @@ pub async fn get_content_route( /// - Only allows federation if `allow_remote` is true pub async fn get_content_as_filename_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); @@ -168,7 +168,7 @@ pub async fn get_content_as_filename_route( /// - Only allows federation if `allow_remote` is true pub async fn get_content_thumbnail_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 0f5e7c2c..8fb2fec0 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -21,7 +21,7 @@ use ruma::{ create::RoomCreateEventContent, member::{MembershipState, RoomMemberEventContent}, }, - EventType, + RoomEventType, StateEventType, }, serde::{to_canonical_value, Base64, CanonicalJsonObject, CanonicalJsonValue}, state_res::{self, RoomVersion}, @@ -44,7 +44,7 @@ use tracing::{debug, error, warn}; /// - If the server does not know about the room: asks other servers over federation pub async fn join_room_by_id_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -84,7 +84,7 @@ pub async fn join_room_by_id_route( /// - If the server does not know about the room: asks other servers over federation pub async fn join_room_by_id_or_alias_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let 
sender_user = body.sender_user.as_deref().expect("user is authenticated"); let body = body.body; @@ -136,7 +136,7 @@ pub async fn join_room_by_id_or_alias_route( /// - This should always work if the user is currently joined. pub async fn leave_room_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -152,7 +152,7 @@ pub async fn leave_room_route( /// Tries to send an invite event into the room. pub async fn invite_user_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -170,7 +170,7 @@ pub async fn invite_user_route( /// Tries to send a kick event into the room. pub async fn kick_user_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -178,7 +178,7 @@ pub async fn kick_user_route( db.rooms .room_state_get( &body.room_id, - &EventType::RoomMember, + &StateEventType::RoomMember, &body.user_id.to_string(), )? .ok_or(Error::BadRequest( @@ -205,7 +205,7 @@ pub async fn kick_user_route( db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), unsigned: None, state_key: Some(body.user_id.to_string()), @@ -229,7 +229,7 @@ pub async fn kick_user_route( /// Tries to send a ban event into the room. pub async fn ban_user_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -239,7 +239,7 @@ pub async fn ban_user_route( .rooms .room_state_get( &body.room_id, - &EventType::RoomMember, + &StateEventType::RoomMember, &body.user_id.to_string(), )? .map_or( @@ -275,7 +275,7 @@ pub async fn ban_user_route( db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), unsigned: None, state_key: Some(body.user_id.to_string()), @@ -299,7 +299,7 @@ pub async fn ban_user_route( /// Tries to send an unban event into the room. pub async fn unban_user_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -307,7 +307,7 @@ pub async fn unban_user_route( db.rooms .room_state_get( &body.room_id, - &EventType::RoomMember, + &StateEventType::RoomMember, &body.user_id.to_string(), )? 
.ok_or(Error::BadRequest( @@ -333,7 +333,7 @@ pub async fn unban_user_route( db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), unsigned: None, state_key: Some(body.user_id.to_string()), @@ -362,7 +362,7 @@ pub async fn unban_user_route( /// be called from every device pub async fn forget_room_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -398,7 +398,7 @@ pub async fn joined_rooms_route( /// - Only works if the user is currently joined pub async fn get_member_events_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -415,8 +415,8 @@ pub async fn get_member_events_route( .rooms .room_state_full(&body.room_id)? .iter() - .filter(|(key, _)| key.0 == EventType::RoomMember) - .map(|(_, pdu)| pdu.to_member_event()) + .filter(|(key, _)| key.0 == StateEventType::RoomMember) + .map(|(_, pdu)| pdu.to_member_event().into()) .collect(), }) } @@ -429,7 +429,7 @@ pub async fn get_member_events_route( /// - TODO: An appservice just needs a puppet joined pub async fn joined_members_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -625,15 +625,17 @@ async fn join_room_by_id_helper( db.rooms.add_pdu_outlier(&event_id, &value)?; if let Some(state_key) = &pdu.state_key { - let shortstatekey = - db.rooms - .get_or_create_shortstatekey(&pdu.kind, state_key, &db.globals)?; + let shortstatekey = db.rooms.get_or_create_shortstatekey( + &pdu.kind.to_string().into(), + state_key, + &db.globals, + )?; state.insert(shortstatekey, pdu.event_id.clone()); } } let incoming_shortstatekey = db.rooms.get_or_create_shortstatekey( - &parsed_pdu.kind, + &parsed_pdu.kind.to_string().into(), parsed_pdu .state_key .as_ref() @@ -645,7 +647,7 @@ async fn join_room_by_id_helper( let create_shortstatekey = db .rooms - .get_shortstatekey(&EventType::RoomCreate, "")? + .get_shortstatekey(&StateEventType::RoomCreate, "")? 
.expect("Room exists"); if state.get(&create_shortstatekey).is_none() { @@ -703,7 +705,7 @@ async fn join_room_by_id_helper( db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), unsigned: None, state_key: Some(sender_user.to_string()), @@ -814,7 +816,7 @@ pub(crate) async fn invite_helper<'a>( let create_event = db .rooms - .room_state_get(room_id, &EventType::RoomCreate, "")?; + .room_state_get(room_id, &StateEventType::RoomCreate, "")?; let create_event_content: Option = create_event .as_ref() @@ -853,11 +855,11 @@ pub(crate) async fn invite_helper<'a>( .expect("member event is valid value"); let state_key = user_id.to_string(); - let kind = EventType::RoomMember; + let kind = StateEventType::RoomMember; let auth_events = db.rooms.get_auth_events( room_id, - &kind, + &kind.to_string().into(), sender_user, Some(&state_key), &content, @@ -888,7 +890,7 @@ pub(crate) async fn invite_helper<'a>( origin_server_ts: utils::millis_since_unix_epoch() .try_into() .expect("time is valid"), - kind, + kind: kind.to_string().into(), content, state_key: Some(state_key), prev_events, @@ -912,7 +914,6 @@ pub(crate) async fn invite_helper<'a>( let auth_check = state_res::auth_check( &room_version, &pdu, - create_prev_event, None::, // TODO: third_party_invite |k, s| auth_events.get(&(k.clone(), s.to_owned())), ) @@ -1051,7 +1052,7 @@ pub(crate) async fn invite_helper<'a>( db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Invite, displayname: db.users.displayname(user_id)?, diff --git a/src/client_server/message.rs b/src/client_server/message.rs index b5c41490..1348132f 100644 --- a/src/client_server/message.rs +++ b/src/client_server/message.rs @@ -4,7 +4,7 @@ use ruma::{ error::ErrorKind, message::{get_message_events, send_message_event}, }, - events::EventType, + events::{RoomEventType, StateEventType}, }; use std::{ collections::{BTreeMap, HashSet}, @@ -20,7 +20,7 @@ use std::{ /// - Tries to send the event into the room, auth rules will determine if it is allowed pub async fn send_message_event_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_deref(); @@ -36,7 +36,9 @@ pub async fn send_message_event_route( let state_lock = mutex_state.lock().await; // Forbid m.room.encrypted if encryption is disabled - if &body.event_type == "m.room.encrypted" && !db.globals.allow_encryption() { + if RoomEventType::RoomEncrypted == body.event_type.to_string().into() + && !db.globals.allow_encryption() + { return Err(Error::BadRequest( ErrorKind::Forbidden, "Encryption has been disabled", @@ -69,7 +71,7 @@ pub async fn send_message_event_route( let event_id = db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::from(&*body.event_type), + event_type: body.event_type.to_string().into(), content: serde_json::from_str(body.body.body.json().get()) .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?, unsigned: Some(unsigned), @@ -106,7 +108,7 @@ pub async fn send_message_event_route( /// joined, depending on history_visibility) pub async fn get_message_events_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = 
body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -118,11 +120,16 @@ pub async fn get_message_events_route( )); } - let from = body - .from - .clone() - .parse() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from` value."))?; + let from = match body.from.clone() { + Some(from) => from + .parse() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from` value."))?, + + None => match body.dir { + get_message_events::v3::Direction::Forward => 0, + get_message_events::v3::Direction::Backward => u64::MAX, + }, + }; let to = body.to.as_ref().map(|t| t.parse()); @@ -172,7 +179,7 @@ pub async fn get_message_events_route( .map(|(_, pdu)| pdu.to_room_event()) .collect(); - resp.start = body.from.to_owned(); + resp.start = from.to_string(); resp.end = next_token.map(|count| count.to_string()); resp.chunk = events_after; } @@ -209,7 +216,7 @@ pub async fn get_message_events_route( .map(|(_, pdu)| pdu.to_room_event()) .collect(); - resp.start = body.from.to_owned(); + resp.start = from.to_string(); resp.end = next_token.map(|count| count.to_string()); resp.chunk = events_before; } @@ -219,7 +226,7 @@ pub async fn get_message_events_route( for ll_id in &lazy_loaded { if let Some(member_event) = db.rooms - .room_state_get(&body.room_id, &EventType::RoomMember, ll_id.as_str())? + .room_state_get(&body.room_id, &StateEventType::RoomMember, ll_id.as_str())? { resp.state.push(member_event.to_state_event()); } diff --git a/src/client_server/presence.rs b/src/client_server/presence.rs index 9e6ce0b8..773fef47 100644 --- a/src/client_server/presence.rs +++ b/src/client_server/presence.rs @@ -7,7 +7,7 @@ use std::time::Duration; /// Sets the presence state of the sender user. pub async fn set_presence_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -48,7 +48,7 @@ pub async fn set_presence_route( /// - Only works if you share a room with the user pub async fn get_presence_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/profile.rs b/src/client_server/profile.rs index 30000272..acea19f0 100644 --- a/src/client_server/profile.rs +++ b/src/client_server/profile.rs @@ -9,7 +9,7 @@ use ruma::{ }, federation::{self, query::get_profile_information::v1::ProfileField}, }, - events::{room::member::RoomMemberEventContent, EventType}, + events::{room::member::RoomMemberEventContent, RoomEventType, StateEventType}, }; use serde_json::value::to_raw_value; use std::sync::Arc; @@ -21,7 +21,7 @@ use std::sync::Arc; /// - Also makes sure other users receive the update using presence EDUs pub async fn set_displayname_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -36,14 +36,14 @@ pub async fn set_displayname_route( .map(|room_id| { Ok::<_, Error>(( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { displayname: body.displayname.clone(), ..serde_json::from_str( db.rooms .room_state_get( &room_id, - &EventType::RoomMember, + &StateEventType::RoomMember, sender_user.as_str(), )? 
.ok_or_else(|| { @@ -118,7 +118,7 @@ pub async fn set_displayname_route( /// - If user is on another server: Fetches displayname over federation pub async fn get_displayname_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if body.user_id.server_name() != db.globals.server_name() { let response = db @@ -150,7 +150,7 @@ pub async fn get_displayname_route( /// - Also makes sure other users receive the update using presence EDUs pub async fn set_avatar_url_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -167,14 +167,14 @@ pub async fn set_avatar_url_route( .map(|room_id| { Ok::<_, Error>(( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { avatar_url: body.avatar_url.clone(), ..serde_json::from_str( db.rooms .room_state_get( &room_id, - &EventType::RoomMember, + &StateEventType::RoomMember, sender_user.as_str(), )? .ok_or_else(|| { @@ -249,7 +249,7 @@ pub async fn set_avatar_url_route( /// - If user is on another server: Fetches avatar_url and blurhash over federation pub async fn get_avatar_url_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if body.user_id.server_name() != db.globals.server_name() { let response = db @@ -283,7 +283,7 @@ pub async fn get_avatar_url_route( /// - If user is on another server: Fetches profile over federation pub async fn get_profile_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if body.user_id.server_name() != db.globals.server_name() { let response = db diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 90f4e028..5169b8bf 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -8,7 +8,9 @@ use ruma::{ set_pushrule_enabled, RuleKind, }, }, - events::{push_rules::PushRulesEvent, EventType}, + events::{ + push_rules::PushRulesEvent, EventType, GlobalAccountDataEventType, RoomAccountDataEventType, + }, push::{ConditionalPushRuleInit, PatternedPushRuleInit, SimplePushRuleInit}, }; @@ -23,7 +25,11 @@ pub async fn get_pushrules_all_route( let event: PushRulesEvent = db .account_data - .get(None, sender_user, EventType::PushRules)? + .get( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + )? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -39,13 +45,17 @@ pub async fn get_pushrules_all_route( /// Retrieves a single specified push rule for this user. pub async fn get_pushrule_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let event: PushRulesEvent = db .account_data - .get(None, sender_user, EventType::PushRules)? + .get( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + )? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -91,7 +101,7 @@ pub async fn get_pushrule_route( /// Creates a single specified push rule for this user. pub async fn set_pushrule_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let body = body.body; @@ -105,7 +115,11 @@ pub async fn set_pushrule_route( let mut event: PushRulesEvent = db .account_data - .get(None, sender_user, EventType::PushRules)? 
+ .get( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + )? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -174,8 +188,13 @@ pub async fn set_pushrule_route( _ => {} } - db.account_data - .update(None, sender_user, EventType::PushRules, &event, &db.globals)?; + db.account_data.update( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &event, + &db.globals, + )?; db.flush()?; @@ -187,7 +206,7 @@ pub async fn set_pushrule_route( /// Gets the actions of a single specified push rule for this user. pub async fn get_pushrule_actions_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -200,7 +219,11 @@ pub async fn get_pushrule_actions_route( let mut event: PushRulesEvent = db .account_data - .get(None, sender_user, EventType::PushRules)? + .get( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + )? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -243,7 +266,7 @@ pub async fn get_pushrule_actions_route( /// Sets the actions of a single specified push rule for this user. pub async fn set_pushrule_actions_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -256,7 +279,11 @@ pub async fn set_pushrule_actions_route( let mut event: PushRulesEvent = db .account_data - .get(None, sender_user, EventType::PushRules)? + .get( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + )? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -297,8 +324,13 @@ pub async fn set_pushrule_actions_route( _ => {} }; - db.account_data - .update(None, sender_user, EventType::PushRules, &event, &db.globals)?; + db.account_data.update( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &event, + &db.globals, + )?; db.flush()?; @@ -310,7 +342,7 @@ pub async fn set_pushrule_actions_route( /// Gets the enabled status of a single specified push rule for this user. pub async fn get_pushrule_enabled_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -323,7 +355,11 @@ pub async fn get_pushrule_enabled_route( let mut event: PushRulesEvent = db .account_data - .get(None, sender_user, EventType::PushRules)? + .get( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + )? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -369,7 +405,7 @@ pub async fn get_pushrule_enabled_route( /// Sets the enabled status of a single specified push rule for this user. pub async fn set_pushrule_enabled_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -382,7 +418,11 @@ pub async fn set_pushrule_enabled_route( let mut event: PushRulesEvent = db .account_data - .get(None, sender_user, EventType::PushRules)? + .get( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + )? 
.ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -428,8 +468,13 @@ pub async fn set_pushrule_enabled_route( _ => {} } - db.account_data - .update(None, sender_user, EventType::PushRules, &event, &db.globals)?; + db.account_data.update( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &event, + &db.globals, + )?; db.flush()?; @@ -441,7 +486,7 @@ pub async fn set_pushrule_enabled_route( /// Deletes a single specified push rule for this user. pub async fn delete_pushrule_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -454,7 +499,11 @@ pub async fn delete_pushrule_route( let mut event: PushRulesEvent = db .account_data - .get(None, sender_user, EventType::PushRules)? + .get( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + )? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", @@ -490,8 +539,13 @@ pub async fn delete_pushrule_route( _ => {} } - db.account_data - .update(None, sender_user, EventType::PushRules, &event, &db.globals)?; + db.account_data.update( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &event, + &db.globals, + )?; db.flush()?; diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs index 9422f218..91988a47 100644 --- a/src/client_server/read_marker.rs +++ b/src/client_server/read_marker.rs @@ -1,7 +1,7 @@ use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::{ api::client::{error::ErrorKind, read_marker::set_read_marker, receipt::create_receipt}, - events::EventType, + events::RoomAccountDataEventType, receipt::ReceiptType, MilliSecondsSinceUnixEpoch, }; @@ -15,7 +15,7 @@ use std::collections::BTreeMap; /// - If `read_receipt` is set: Update private marker and public read receipt EDU pub async fn set_read_marker_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -27,7 +27,7 @@ pub async fn set_read_marker_route( db.account_data.update( Some(&body.room_id), sender_user, - EventType::FullyRead, + RoomAccountDataEventType::FullyRead, &fully_read_event, &db.globals, )?; @@ -80,7 +80,7 @@ pub async fn set_read_marker_route( /// Sets private read marker and public read receipt EDU. 
pub async fn create_receipt_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/redact.rs b/src/client_server/redact.rs index 4843993a..059e0f52 100644 --- a/src/client_server/redact.rs +++ b/src/client_server/redact.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use crate::{database::DatabaseGuard, pdu::PduBuilder, Result, Ruma}; use ruma::{ api::client::redact::redact_event, - events::{room::redaction::RoomRedactionEventContent, EventType}, + events::{room::redaction::RoomRedactionEventContent, RoomEventType}, }; use serde_json::value::to_raw_value; @@ -15,7 +15,7 @@ use serde_json::value::to_raw_value; /// - TODO: Handle txn id pub async fn redact_event_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let body = body.body; @@ -32,7 +32,7 @@ pub async fn redact_event_route( let event_id = db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomRedaction, + event_type: RoomEventType::RoomRedaction, content: to_raw_value(&RoomRedactionEventContent { reason: body.reason.clone(), }) diff --git a/src/client_server/report.rs b/src/client_server/report.rs index e60da692..14768e1c 100644 --- a/src/client_server/report.rs +++ b/src/client_server/report.rs @@ -11,7 +11,7 @@ use ruma::{ /// pub async fn report_event_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 99838ceb..1b3b8409 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -19,7 +19,7 @@ use ruma::{ tombstone::RoomTombstoneEventContent, topic::RoomTopicEventContent, }, - EventType, + RoomEventType, StateEventType, }, int, serde::{CanonicalJsonObject, JsonObject}, @@ -47,7 +47,7 @@ use tracing::{info, warn}; /// - Send invite events pub async fn create_room_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { use create_room::v3::RoomPreset; @@ -165,7 +165,7 @@ pub async fn create_room_route( // 1. The room create event db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomCreate, + event_type: RoomEventType::RoomCreate, content: to_raw_value(&content).expect("event is valid, we just created it"), unsigned: None, state_key: Some("".to_owned()), @@ -180,7 +180,7 @@ pub async fn create_room_route( // 2. 
Let the room creator join db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Join, displayname: db.users.displayname(sender_user)?, @@ -242,7 +242,7 @@ pub async fn create_room_route( db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomPowerLevels, + event_type: RoomEventType::RoomPowerLevels, content: to_raw_value(&power_levels_content) .expect("to_raw_value always works on serde_json::Value"), unsigned: None, @@ -259,7 +259,7 @@ pub async fn create_room_route( if let Some(room_alias_id) = &alias { db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomCanonicalAlias, + event_type: RoomEventType::RoomCanonicalAlias, content: to_raw_value(&RoomCanonicalAliasEventContent { alias: Some(room_alias_id.to_owned()), alt_aliases: vec![], @@ -281,7 +281,7 @@ pub async fn create_room_route( // 5.1 Join Rules db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomJoinRules, + event_type: RoomEventType::RoomJoinRules, content: to_raw_value(&RoomJoinRulesEventContent::new(match preset { RoomPreset::PublicChat => JoinRule::Public, // according to spec "invite" is the default @@ -301,7 +301,7 @@ pub async fn create_room_route( // 5.2 History Visibility db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomHistoryVisibility, + event_type: RoomEventType::RoomHistoryVisibility, content: to_raw_value(&RoomHistoryVisibilityEventContent::new( HistoryVisibility::Shared, )) @@ -319,7 +319,7 @@ pub async fn create_room_route( // 5.3 Guest Access db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomGuestAccess, + event_type: RoomEventType::RoomGuestAccess, content: to_raw_value(&RoomGuestAccessEventContent::new(match preset { RoomPreset::PublicChat => GuestAccess::Forbidden, _ => GuestAccess::CanJoin, @@ -346,7 +346,8 @@ pub async fn create_room_route( pdu_builder.state_key.get_or_insert_with(|| "".to_owned()); // Silently skip encryption events if they are not allowed - if pdu_builder.event_type == EventType::RoomEncryption && !db.globals.allow_encryption() { + if pdu_builder.event_type == RoomEventType::RoomEncryption && !db.globals.allow_encryption() + { continue; } @@ -358,7 +359,7 @@ pub async fn create_room_route( if let Some(name) = &body.name { db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomName, + event_type: RoomEventType::RoomName, content: to_raw_value(&RoomNameEventContent::new(Some(name.clone()))) .expect("event is valid, we just created it"), unsigned: None, @@ -375,7 +376,7 @@ pub async fn create_room_route( if let Some(topic) = &body.topic { db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomTopic, + event_type: RoomEventType::RoomTopic, content: to_raw_value(&RoomTopicEventContent { topic: topic.clone(), }) @@ -420,7 +421,7 @@ pub async fn create_room_route( /// - You have to currently be joined to the room (TODO: Respect history visibility) pub async fn get_room_event_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -447,7 +448,7 @@ pub async fn get_room_event_route( /// - Only users joined to the room are allowed to call this TODO: Allow any user to call it if history_visibility is world readable pub async fn get_room_aliases_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user 
= body.sender_user.as_ref().expect("user is authenticated"); @@ -479,7 +480,7 @@ pub async fn get_room_aliases_route( /// - Modifies old room power levels to prevent users from speaking pub async fn upgrade_room_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -509,7 +510,7 @@ pub async fn upgrade_room_route( // Fail if the sender does not have the required permissions let tombstone_event_id = db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomTombstone, + event_type: RoomEventType::RoomTombstone, content: to_raw_value(&RoomTombstoneEventContent { body: "This room has been replaced".to_owned(), replacement_room: replacement_room.clone(), @@ -540,7 +541,7 @@ pub async fn upgrade_room_route( // Get the old room creation event let mut create_event_content = serde_json::from_str::( db.rooms - .room_state_get(&body.room_id, &EventType::RoomCreate, "")? + .room_state_get(&body.room_id, &StateEventType::RoomCreate, "")? .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? .content .get(), @@ -589,7 +590,7 @@ pub async fn upgrade_room_route( db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomCreate, + event_type: RoomEventType::RoomCreate, content: to_raw_value(&create_event_content) .expect("event is valid, we just created it"), unsigned: None, @@ -605,7 +606,7 @@ pub async fn upgrade_room_route( // Join the new room db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Join, displayname: db.users.displayname(sender_user)?, @@ -629,15 +630,15 @@ pub async fn upgrade_room_route( // Recommended transferable state events list from the specs let transferable_state_events = vec![ - EventType::RoomServerAcl, - EventType::RoomEncryption, - EventType::RoomName, - EventType::RoomAvatar, - EventType::RoomTopic, - EventType::RoomGuestAccess, - EventType::RoomHistoryVisibility, - EventType::RoomJoinRules, - EventType::RoomPowerLevels, + StateEventType::RoomServerAcl, + StateEventType::RoomEncryption, + StateEventType::RoomName, + StateEventType::RoomAvatar, + StateEventType::RoomTopic, + StateEventType::RoomGuestAccess, + StateEventType::RoomHistoryVisibility, + StateEventType::RoomJoinRules, + StateEventType::RoomPowerLevels, ]; // Replicate transferable state events to the new room @@ -649,7 +650,7 @@ pub async fn upgrade_room_route( db.rooms.build_and_append_pdu( PduBuilder { - event_type, + event_type: event_type.to_string().into(), content: event_content, unsigned: None, state_key: Some("".to_owned()), @@ -671,7 +672,7 @@ pub async fn upgrade_room_route( // Get the old room power levels let mut power_levels_event_content: RoomPowerLevelsEventContent = serde_json::from_str( db.rooms - .room_state_get(&body.room_id, &EventType::RoomPowerLevels, "")? + .room_state_get(&body.room_id, &StateEventType::RoomPowerLevels, "")? .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? 
.content .get(), @@ -686,7 +687,7 @@ pub async fn upgrade_room_route( // Modify the power levels in the old room to prevent sending of events and inviting new users let _ = db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomPowerLevels, + event_type: RoomEventType::RoomPowerLevels, content: to_raw_value(&power_levels_event_content) .expect("event is valid, we just created it"), unsigned: None, diff --git a/src/client_server/search.rs b/src/client_server/search.rs index 753669a2..686e3b5e 100644 --- a/src/client_server/search.rs +++ b/src/client_server/search.rs @@ -16,7 +16,7 @@ use std::collections::BTreeMap; /// - Only works if the user is currently joined to the room (TODO: Respect history visibility) pub async fn search_events_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/client_server/session.rs b/src/client_server/session.rs index c0fcb379..c31636db 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -22,7 +22,7 @@ struct Claims { /// Get the supported login types of this server. One of these should be used as the `type` field /// when logging in. pub async fn get_login_types_route( - _body: Ruma, + _body: Ruma, ) -> Result { Ok(get_login_types::v3::Response::new(vec![ get_login_types::v3::LoginType::Password(Default::default()), @@ -42,7 +42,7 @@ pub async fn get_login_types_route( /// supported login types. pub async fn login_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { // Validate login method // TODO: Other login methods diff --git a/src/client_server/state.rs b/src/client_server/state.rs index a97b1872..c0fbf734 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -13,7 +13,7 @@ use ruma::{ canonical_alias::RoomCanonicalAliasEventContent, history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, }, - AnyStateEventContent, EventType, + AnyStateEventContent, EventType, RoomEventType, StateEventType, }, serde::Raw, EventId, RoomId, UserId, @@ -28,7 +28,7 @@ use ruma::{ /// - If event is new canonical_alias: Rejects if alias is incorrect pub async fn send_state_event_for_key_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -36,7 +36,7 @@ pub async fn send_state_event_for_key_route( &db, sender_user, &body.room_id, - EventType::from(&*body.event_type), + &body.event_type, &body.body.body, // Yes, I hate it too body.state_key.to_owned(), ) @@ -57,12 +57,12 @@ pub async fn send_state_event_for_key_route( /// - If event is new canonical_alias: Rejects if alias is incorrect pub async fn send_state_event_for_empty_key_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // Forbid m.room.encryption if encryption is disabled - if &body.event_type == "m.room.encryption" && !db.globals.allow_encryption() { + if body.event_type == StateEventType::RoomEncryption && !db.globals.allow_encryption() { return Err(Error::BadRequest( ErrorKind::Forbidden, "Encryption has been disabled", @@ -73,7 +73,7 @@ pub async fn send_state_event_for_empty_key_route( &db, sender_user, &body.room_id, - EventType::from(&*body.event_type), + &body.event_type.to_string().into(), &body.body.body, body.state_key.to_owned(), ) @@ -92,7 +92,7 @@ pub async fn send_state_event_for_empty_key_route( /// - If 
not joined: Only works if current room history visibility is world readable pub async fn get_state_events_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -102,7 +102,7 @@ pub async fn get_state_events_route( if !db.rooms.is_joined(sender_user, &body.room_id)? && !matches!( db.rooms - .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? + .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? .map(|event| { serde_json::from_str(event.content.get()) .map(|e: RoomHistoryVisibilityEventContent| e.history_visibility) @@ -138,7 +138,7 @@ pub async fn get_state_events_route( /// - If not joined: Only works if current room history visibility is world readable pub async fn get_state_events_for_key_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -148,7 +148,7 @@ pub async fn get_state_events_for_key_route( if !db.rooms.is_joined(sender_user, &body.room_id)? && !matches!( db.rooms - .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? + .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? .map(|event| { serde_json::from_str(event.content.get()) .map(|e: RoomHistoryVisibilityEventContent| e.history_visibility) @@ -188,7 +188,7 @@ pub async fn get_state_events_for_key_route( /// - If not joined: Only works if current room history visibility is world readable pub async fn get_state_events_for_empty_key_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -198,7 +198,7 @@ pub async fn get_state_events_for_empty_key_route( if !db.rooms.is_joined(sender_user, &body.room_id)? && !matches!( db.rooms - .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? + .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? 
.map(|event| { serde_json::from_str(event.content.get()) .map(|e: RoomHistoryVisibilityEventContent| e.history_visibility) @@ -236,7 +236,7 @@ async fn send_state_event_for_key_helper( db: &Database, sender: &UserId, room_id: &RoomId, - event_type: EventType, + event_type: &StateEventType, json: &Raw, state_key: String, ) -> Result> { @@ -282,7 +282,7 @@ async fn send_state_event_for_key_helper( let event_id = db.rooms.build_and_append_pdu( PduBuilder { - event_type, + event_type: event_type.to_string().into(), content: serde_json::from_str(json.json().get()).expect("content is valid json"), unsigned: None, state_key: Some(state_key), diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index 5f34fa6b..de6a45a9 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -7,7 +7,7 @@ use ruma::{ }, events::{ room::member::{MembershipState, RoomMemberEventContent}, - EventType, + EventType, RoomEventType, StateEventType, }, serde::Raw, DeviceId, RoomId, UserId, @@ -56,7 +56,7 @@ use tracing::error; /// `since` will be cached pub async fn sync_events_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result> { let sender_user = body.sender_user.expect("user is authenticated"); let sender_device = body.sender_device.expect("user is authenticated"); @@ -74,7 +74,7 @@ pub async fn sync_events_route( Entry::Vacant(v) => { let (tx, rx) = tokio::sync::watch::channel(None); - v.insert((body.since.clone(), rx.clone())); + v.insert((body.since.to_owned(), rx.clone())); tokio::spawn(sync_helper_wrapper( Arc::clone(&arc_db), @@ -319,7 +319,7 @@ async fn sync_helper( .rooms .all_pdus(&sender_user, &room_id)? .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus - .filter(|(_, pdu)| pdu.kind == EventType::RoomMember) + .filter(|(_, pdu)| pdu.kind == RoomEventType::RoomMember) .map(|(_, pdu)| { let content: RoomMemberEventContent = serde_json::from_str(pdu.content.get()).map_err(|_| { @@ -385,7 +385,7 @@ async fn sync_helper( for (shortstatekey, id) in current_state_ids { let (event_type, state_key) = db.rooms.get_statekey_from_short(shortstatekey)?; - if event_type != EventType::RoomMember { + if event_type != StateEventType::RoomMember { let pdu = match db.rooms.get_pdu(&id)? { Some(pdu) => pdu, None => { @@ -446,7 +446,7 @@ async fn sync_helper( .rooms .state_get( since_shortstatehash, - &EventType::RoomMember, + &StateEventType::RoomMember, sender_user.as_str(), )? .and_then(|pdu| { @@ -475,7 +475,7 @@ async fn sync_helper( } }; - if pdu.kind == EventType::RoomMember { + if pdu.kind == RoomEventType::RoomMember { match UserId::parse( pdu.state_key .as_ref() @@ -508,7 +508,7 @@ async fn sync_helper( { if let Some(member_event) = db.rooms.room_state_get( &room_id, - &EventType::RoomMember, + &StateEventType::RoomMember, event.sender.as_str(), )? { lazy_loaded.insert(event.sender.clone()); @@ -527,23 +527,23 @@ async fn sync_helper( let encrypted_room = db .rooms - .state_get(current_shortstatehash, &EventType::RoomEncryption, "")? + .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")? 
.is_some(); let since_encryption = db.rooms - .state_get(since_shortstatehash, &EventType::RoomEncryption, "")?; + .state_get(since_shortstatehash, &StateEventType::RoomEncryption, "")?; // Calculations: let new_encrypted_room = encrypted_room && since_encryption.is_none(); let send_member_count = state_events .iter() - .any(|event| event.kind == EventType::RoomMember); + .any(|event| event.kind == RoomEventType::RoomMember); if encrypted_room { for state_event in &state_events { - if state_event.kind != EventType::RoomMember { + if state_event.kind != RoomEventType::RoomMember { continue; } @@ -830,7 +830,7 @@ async fn sync_helper( .filter_map(|other_room_id| { Some( db.rooms - .room_state_get(&other_room_id, &EventType::RoomEncryption, "") + .room_state_get(&other_room_id, &StateEventType::RoomEncryption, "") .ok()? .is_some(), ) @@ -923,7 +923,7 @@ fn share_encrypted_room( .filter_map(|other_room_id| { Some( db.rooms - .room_state_get(&other_room_id, &EventType::RoomEncryption, "") + .room_state_get(&other_room_id, &StateEventType::RoomEncryption, "") .ok()? .is_some(), ) diff --git a/src/client_server/tag.rs b/src/client_server/tag.rs index 21cff0bb..03408862 100644 --- a/src/client_server/tag.rs +++ b/src/client_server/tag.rs @@ -3,7 +3,7 @@ use ruma::{ api::client::tag::{create_tag, delete_tag, get_tags}, events::{ tag::{TagEvent, TagEventContent}, - EventType, + EventType, RoomAccountDataEventType, }, }; use std::collections::BTreeMap; @@ -15,13 +15,17 @@ use std::collections::BTreeMap; /// - Inserts the tag into the tag event of the room account data. pub async fn update_tag_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut tags_event = db .account_data - .get(Some(&body.room_id), sender_user, EventType::Tag)? + .get( + Some(&body.room_id), + sender_user, + RoomAccountDataEventType::Tag, + )? .unwrap_or_else(|| TagEvent { content: TagEventContent { tags: BTreeMap::new(), @@ -35,7 +39,7 @@ pub async fn update_tag_route( db.account_data.update( Some(&body.room_id), sender_user, - EventType::Tag, + RoomAccountDataEventType::Tag, &tags_event, &db.globals, )?; @@ -52,13 +56,17 @@ pub async fn update_tag_route( /// - Removes the tag from the tag event of the room account data. pub async fn delete_tag_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut tags_event = db .account_data - .get(Some(&body.room_id), sender_user, EventType::Tag)? + .get( + Some(&body.room_id), + sender_user, + RoomAccountDataEventType::Tag, + )? .unwrap_or_else(|| TagEvent { content: TagEventContent { tags: BTreeMap::new(), @@ -69,7 +77,7 @@ pub async fn delete_tag_route( db.account_data.update( Some(&body.room_id), sender_user, - EventType::Tag, + RoomAccountDataEventType::Tag, &tags_event, &db.globals, )?; @@ -86,14 +94,18 @@ pub async fn delete_tag_route( /// - Gets the tag event of the room account data. pub async fn get_tags_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(get_tags::v3::Response { tags: db .account_data - .get(Some(&body.room_id), sender_user, EventType::Tag)? + .get( + Some(&body.room_id), + sender_user, + RoomAccountDataEventType::Tag, + )? 
.unwrap_or_else(|| TagEvent { content: TagEventContent { tags: BTreeMap::new(), diff --git a/src/client_server/thirdparty.rs b/src/client_server/thirdparty.rs index c2c1adfd..5665ad6c 100644 --- a/src/client_server/thirdparty.rs +++ b/src/client_server/thirdparty.rs @@ -7,7 +7,7 @@ use std::collections::BTreeMap; /// /// TODO: Fetches all metadata about protocols supported by the homeserver. pub async fn get_protocols_route( - _body: Ruma, + _body: Ruma, ) -> Result { // TODO Ok(get_protocols::v3::Response { diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index 6d4fc0ca..42364f56 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -1,3 +1,4 @@ +use ruma::events::ToDeviceEventType; use std::collections::BTreeMap; use crate::{database::DatabaseGuard, Error, Result, Ruma}; @@ -15,7 +16,7 @@ use ruma::{ /// Send a to-device event to a set of client devices. pub async fn send_event_to_device_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_deref(); @@ -45,8 +46,8 @@ pub async fn send_event_to_device_route( serde_json::to_vec(&federation::transactions::edu::Edu::DirectToDevice( DirectDeviceContent { sender: sender_user.clone(), - ev_type: EventType::from(&*body.event_type), - message_id: body.txn_id.clone(), + ev_type: ToDeviceEventType::from(&*body.event_type), + message_id: body.txn_id.to_owned(), messages, }, )) @@ -61,7 +62,7 @@ pub async fn send_event_to_device_route( DeviceIdOrAllDevices::DeviceId(target_device_id) => db.users.add_to_device_event( sender_user, target_user_id, - target_device_id, + &target_device_id, &body.event_type, event.deserialize_as().map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid") diff --git a/src/client_server/typing.rs b/src/client_server/typing.rs index 9d4ba6f8..60fc1cc4 100644 --- a/src/client_server/typing.rs +++ b/src/client_server/typing.rs @@ -6,7 +6,7 @@ use ruma::api::client::typing::create_typing_event; /// Sets the typing state of the sender user. pub async fn create_typing_event_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { use create_typing_event::v3::Typing; diff --git a/src/client_server/unversioned.rs b/src/client_server/unversioned.rs index 84ac355e..294c7536 100644 --- a/src/client_server/unversioned.rs +++ b/src/client_server/unversioned.rs @@ -1,7 +1,8 @@ use std::{collections::BTreeMap, iter::FromIterator}; +use ruma::api::client::discovery::get_supported_versions; + use crate::{Result, Ruma}; -use ruma::api::client::discover::get_supported_versions; /// # `GET /_matrix/client/versions` /// @@ -14,7 +15,7 @@ use ruma::api::client::discover::get_supported_versions; /// Note: Unstable features are used while developing new features. Clients should avoid using /// unstable features in their stable releases pub async fn get_supported_versions_route( - _body: Ruma, + _body: Ruma, ) -> Result { let resp = get_supported_versions::Response { versions: vec!["r0.5.0".to_owned(), "r0.6.0".to_owned()], diff --git a/src/client_server/user_directory.rs b/src/client_server/user_directory.rs index d641848f..7c0bcc16 100644 --- a/src/client_server/user_directory.rs +++ b/src/client_server/user_directory.rs @@ -8,7 +8,7 @@ use ruma::api::client::user_directory::search_users; /// - TODO: Hide users that are not in any public rooms? 
pub async fn search_users_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let limit = u64::from(body.limit) as usize; diff --git a/src/client_server/voip.rs b/src/client_server/voip.rs index 6281744b..7e9de31e 100644 --- a/src/client_server/voip.rs +++ b/src/client_server/voip.rs @@ -11,7 +11,7 @@ type HmacSha1 = Hmac; /// TODO: Returns information about the recommended turn server. pub async fn turn_server_route( db: DatabaseGuard, - body: Ruma, + body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); diff --git a/src/database/account_data.rs b/src/database/account_data.rs index ec9d09e8..d85918f6 100644 --- a/src/database/account_data.rs +++ b/src/database/account_data.rs @@ -1,7 +1,7 @@ use crate::{utils, Error, Result}; use ruma::{ api::client::error::ErrorKind, - events::{AnyEphemeralRoomEvent, EventType}, + events::{AnyEphemeralRoomEvent, RoomAccountDataEventType}, serde::Raw, RoomId, UserId, }; @@ -22,7 +22,7 @@ impl AccountData { &self, room_id: Option<&RoomId>, user_id: &UserId, - event_type: EventType, + event_type: RoomAccountDataEventType, data: &T, globals: &super::globals::Globals, ) -> Result<()> { @@ -38,10 +38,10 @@ impl AccountData { let mut roomuserdataid = prefix.clone(); roomuserdataid.extend_from_slice(&globals.next_count()?.to_be_bytes()); roomuserdataid.push(0xff); - roomuserdataid.extend_from_slice(event_type.as_bytes()); + roomuserdataid.extend_from_slice(event_type.to_string().as_bytes()); let mut key = prefix; - key.extend_from_slice(event_type.as_bytes()); + key.extend_from_slice(event_type.to_string().as_bytes()); let json = serde_json::to_value(data).expect("all types here can be serialized"); // TODO: maybe add error handling if json.get("type").is_none() || json.get("content").is_none() { @@ -75,7 +75,7 @@ impl AccountData { &self, room_id: Option<&RoomId>, user_id: &UserId, - kind: EventType, + kind: RoomAccountDataEventType, ) -> Result> { let mut key = room_id .map(|r| r.to_string()) @@ -85,7 +85,7 @@ impl AccountData { key.push(0xff); key.extend_from_slice(user_id.as_bytes()); key.push(0xff); - key.extend_from_slice(kind.as_ref().as_bytes()); + key.extend_from_slice(kind.to_string().as_bytes()); self.roomusertype_roomuserdataid .get(&key)? @@ -109,7 +109,7 @@ impl AccountData { room_id: Option<&RoomId>, user_id: &UserId, since: u64, - ) -> Result>> { + ) -> Result>> { let mut userdata = HashMap::new(); let mut prefix = room_id @@ -131,7 +131,7 @@ impl AccountData { .take_while(move |(k, _)| k.starts_with(&prefix)) .map(|(k, v)| { Ok::<_, Error>(( - EventType::try_from( + RoomAccountDataEventType::try_from( utils::string_from_bytes(k.rsplit(|&b| b == 0xff).next().ok_or_else( || Error::bad_database("RoomUserData ID in db is invalid."), )?) 
diff --git a/src/database/admin.rs b/src/database/admin.rs index f2e66e43..4238c5f1 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -28,9 +28,9 @@ use ruma::{ power_levels::RoomPowerLevelsEventContent, topic::RoomTopicEventContent, }, - EventType, + RoomEventType, }, - identifiers::{EventId, RoomAliasId, RoomId, RoomName, RoomVersionId, ServerName, UserId}, + EventId, RoomAliasId, RoomId, RoomName, RoomVersionId, ServerName, UserId, }; use serde_json::value::to_raw_value; use tokio::sync::{mpsc, MutexGuard, RwLock, RwLockReadGuard}; @@ -81,7 +81,7 @@ impl Admin { .rooms .build_and_append_pdu( PduBuilder { - event_type: EventType::RoomMessage, + event_type: RoomEventType::RoomMessage, content: to_raw_value(&message) .expect("event is valid, we just created it"), unsigned: None, @@ -553,7 +553,7 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { // 1. The room create event db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomCreate, + event_type: RoomEventType::RoomCreate, content: to_raw_value(&content).expect("event is valid, we just created it"), unsigned: None, state_key: Some("".to_owned()), @@ -568,7 +568,7 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { // 2. Make conduit bot join db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Join, displayname: None, @@ -596,7 +596,7 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomPowerLevels, + event_type: RoomEventType::RoomPowerLevels, content: to_raw_value(&RoomPowerLevelsEventContent { users, ..Default::default() @@ -615,7 +615,7 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { // 4.1 Join Rules db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomJoinRules, + event_type: RoomEventType::RoomJoinRules, content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite)) .expect("event is valid, we just created it"), unsigned: None, @@ -631,7 +631,7 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { // 4.2 History Visibility db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomHistoryVisibility, + event_type: RoomEventType::RoomHistoryVisibility, content: to_raw_value(&RoomHistoryVisibilityEventContent::new( HistoryVisibility::Shared, )) @@ -649,7 +649,7 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { // 4.3 Guest Access db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomGuestAccess, + event_type: RoomEventType::RoomGuestAccess, content: to_raw_value(&RoomGuestAccessEventContent::new(GuestAccess::Forbidden)) .expect("event is valid, we just created it"), unsigned: None, @@ -667,7 +667,7 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { .expect("Room name is valid"); db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomName, + event_type: RoomEventType::RoomName, content: to_raw_value(&RoomNameEventContent::new(Some(room_name))) .expect("event is valid, we just created it"), unsigned: None, @@ -682,7 +682,7 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomTopic, + event_type: RoomEventType::RoomTopic, content: to_raw_value(&RoomTopicEventContent { topic: 
format!("Manage {}", db.globals.server_name()), }) @@ -704,7 +704,7 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomCanonicalAlias, + event_type: RoomEventType::RoomCanonicalAlias, content: to_raw_value(&RoomCanonicalAliasEventContent { alias: Some(alias.clone()), alt_aliases: Vec::new(), @@ -758,7 +758,7 @@ pub(crate) async fn make_user_admin( // Invite and join the real user db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Invite, displayname: None, @@ -781,7 +781,7 @@ pub(crate) async fn make_user_admin( )?; db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Join, displayname: Some(displayname), @@ -810,7 +810,7 @@ pub(crate) async fn make_user_admin( db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomPowerLevels, + event_type: RoomEventType::RoomPowerLevels, content: to_raw_value(&RoomPowerLevelsEventContent { users, ..Default::default() @@ -829,7 +829,7 @@ pub(crate) async fn make_user_admin( // Send welcome message db.rooms.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomMessage, + event_type: RoomEventType::RoomMessage, content: to_raw_value(&RoomMessageEventContent::text_html( format!("## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nFor a list of available commands, send the following message in this room: `@conduit:{}: --help`\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`", db.globals.server_name()).to_owned(), format!("

                <h2>Thank you for trying out Conduit!</h2>\n<p>Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Website: https://conduit.rs<br>Git and Documentation: https://gitlab.com/famedly/conduit<br>Report issues: https://gitlab.com/famedly/conduit/-/issues</p>\n</blockquote>\n<p>For a list of available commands, send the following message in this room: <code>@conduit:{}: --help</code></p>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>Conduit room (Ask questions and get notified on updates):<br><code>/join #conduit:fachschaften.org</code></p>\n<p>Conduit lounge (Off-topic, only Conduit users are allowed to join)<br><code>/join #conduit-lounge:conduit.rs</code></p>
                \n", db.globals.server_name()).to_owned(), diff --git a/src/database/pusher.rs b/src/database/pusher.rs index 36f8454e..410300e1 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -11,7 +11,7 @@ use ruma::{ }, events::{ room::{name::RoomNameEventContent, power_levels::RoomPowerLevelsEventContent}, - AnySyncRoomEvent, EventType, + AnySyncRoomEvent, EventType, RoomEventType, StateEventType, }, push::{Action, PushConditionRoomCtx, PushFormat, Ruleset, Tweak}, serde::Raw, @@ -181,7 +181,7 @@ pub async fn send_push_notice( let power_levels: RoomPowerLevelsEventContent = db .rooms - .room_state_get(&pdu.room_id, &EventType::RoomPowerLevels, "")? + .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? .map(|ev| { serde_json::from_str(ev.content.get()) .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) @@ -293,7 +293,7 @@ async fn send_notice( // TODO: missed calls notifi.counts = NotificationCounts::new(unread, uint!(0)); - if event.kind == EventType::RoomEncrypted + if event.kind == RoomEventType::RoomEncrypted || tweaks .iter() .any(|t| matches!(t, Tweak::Highlight(true) | Tweak::Sound(_))) @@ -314,7 +314,7 @@ async fn send_notice( let content = serde_json::value::to_raw_value(&event.content).ok(); notifi.content = content.as_deref(); - if event.kind == EventType::RoomMember { + if event.kind == RoomEventType::RoomMember { notifi.user_is_target = event.state_key.as_deref() == Some(event.sender.as_str()); } @@ -323,7 +323,7 @@ async fn send_notice( let room_name = if let Some(room_name_pdu) = db.rooms - .room_state_get(&event.room_id, &EventType::RoomName, "")? + .room_state_get(&event.room_id, &StateEventType::RoomName, "")? { serde_json::from_str::(room_name_pdu.content.get()) .map_err(|_| Error::bad_database("Invalid room name event in database."))? diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 88a07295..44f33446 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -21,7 +21,8 @@ use ruma::{ power_levels::RoomPowerLevelsEventContent, }, tag::TagEvent, - AnyStrippedStateEvent, AnySyncStateEvent, EventType, + AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, + RoomAccountDataEventType, RoomEventType, StateEventType, }, push::{Action, Ruleset, Tweak}, serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, @@ -111,8 +112,8 @@ pub struct Rooms { pub(super) shorteventid_cache: Mutex>>, pub(super) auth_chain_cache: Mutex, Arc>>>, pub(super) eventidshort_cache: Mutex, u64>>, - pub(super) statekeyshort_cache: Mutex>, - pub(super) shortstatekey_cache: Mutex>, + pub(super) statekeyshort_cache: Mutex>, + pub(super) shortstatekey_cache: Mutex>, pub(super) our_real_users_cache: RwLock, Arc>>>>, pub(super) appservice_in_room_cache: RwLock, HashMap>>, pub(super) lazy_load_waiting: @@ -151,7 +152,7 @@ impl Rooms { pub fn state_full( &self, shortstatehash: u64, - ) -> Result>> { + ) -> Result>> { let full_state = self .load_shortstatehash_info(shortstatehash)? .pop() @@ -166,7 +167,7 @@ impl Rooms { .map(|pdu| { Ok::<_, Error>(( ( - pdu.kind.clone(), + pdu.kind.to_string().into(), pdu.state_key .as_ref() .ok_or_else(|| Error::bad_database("State event has no state key."))? @@ -184,7 +185,7 @@ impl Rooms { pub fn state_get_id( &self, shortstatehash: u64, - event_type: &EventType, + event_type: &StateEventType, state_key: &str, ) -> Result>> { let shortstatekey = match self.get_shortstatekey(event_type, state_key)? 
{ @@ -211,7 +212,7 @@ impl Rooms { pub fn state_get( &self, shortstatehash: u64, - event_type: &EventType, + event_type: &StateEventType, state_key: &str, ) -> Result>> { self.state_get_id(shortstatehash, event_type, state_key)? @@ -254,7 +255,7 @@ impl Rooms { pub fn get_auth_events( &self, room_id: &RoomId, - kind: &EventType, + kind: &RoomEventType, sender: &UserId, state_key: Option<&str>, content: &serde_json::value::RawValue, @@ -272,7 +273,7 @@ impl Rooms { let mut sauthevents = auth_events .into_iter() .filter_map(|(event_type, state_key)| { - self.get_shortstatekey(&event_type, &state_key) + self.get_shortstatekey(&event_type.to_string().into(), &state_key) .ok() .flatten() .map(|s| (s, (event_type, state_key))) @@ -764,7 +765,7 @@ impl Rooms { #[tracing::instrument(skip(self))] pub fn get_shortstatekey( &self, - event_type: &EventType, + event_type: &StateEventType, state_key: &str, ) -> Result> { if let Some(short) = self @@ -776,7 +777,7 @@ impl Rooms { return Ok(Some(*short)); } - let mut statekey = event_type.as_ref().as_bytes().to_vec(); + let mut statekey = event_type.to_string().as_bytes().to_vec(); statekey.push(0xff); statekey.extend_from_slice(state_key.as_bytes()); @@ -820,7 +821,7 @@ impl Rooms { #[tracing::instrument(skip(self, globals))] pub fn get_or_create_shortstatekey( &self, - event_type: &EventType, + event_type: &StateEventType, state_key: &str, globals: &super::globals::Globals, ) -> Result { @@ -833,7 +834,7 @@ impl Rooms { return Ok(*short); } - let mut statekey = event_type.as_ref().as_bytes().to_vec(); + let mut statekey = event_type.to_string().as_bytes().to_vec(); statekey.push(0xff); statekey.extend_from_slice(state_key.as_bytes()); @@ -888,7 +889,7 @@ impl Rooms { } #[tracing::instrument(skip(self))] - pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(EventType, String)> { + pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { if let Some(id) = self .shortstatekey_cache .lock() @@ -910,7 +911,7 @@ impl Rooms { .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; let event_type = - EventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { + StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") })?) .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; @@ -934,7 +935,7 @@ impl Rooms { pub fn room_state_full( &self, room_id: &RoomId, - ) -> Result>> { + ) -> Result>> { if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { self.state_full(current_shortstatehash) } else { @@ -947,7 +948,7 @@ impl Rooms { pub fn room_state_get_id( &self, room_id: &RoomId, - event_type: &EventType, + event_type: &StateEventType, state_key: &str, ) -> Result>> { if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { @@ -962,7 +963,7 @@ impl Rooms { pub fn room_state_get( &self, room_id: &RoomId, - event_type: &EventType, + event_type: &StateEventType, state_key: &str, ) -> Result>> { if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ @@ -1281,7 +1282,7 @@ impl Rooms { { if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind, state_key) + .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) .unwrap() { unsigned.insert( @@ -1346,7 +1347,7 @@ impl Rooms { // See if the event matches any known pushers let power_levels: RoomPowerLevelsEventContent = db .rooms - .room_state_get(&pdu.room_id, &EventType::RoomPowerLevels, "")? + .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? .map(|ev| { serde_json::from_str(ev.content.get()) .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) @@ -1367,7 +1368,11 @@ impl Rooms { let rules_for_user = db .account_data - .get(None, user, EventType::PushRules)? + .get( + None, + user, + GlobalAccountDataEventType::PushRules.to_string().into(), + )? .map(|ev: PushRulesEvent| ev.content.global) .unwrap_or_else(|| Ruleset::server_default(user)); @@ -1416,12 +1421,12 @@ impl Rooms { .increment_batch(&mut highlights.into_iter())?; match pdu.kind { - EventType::RoomRedaction => { + RoomEventType::RoomRedaction => { if let Some(redact_id) = &pdu.redacts { self.redact_pdu(redact_id, pdu)?; } } - EventType::RoomMember => { + RoomEventType::RoomMember => { if let Some(state_key) = &pdu.state_key { #[derive(Deserialize)] struct ExtractMembership { @@ -1456,7 +1461,7 @@ impl Rooms { )?; } } - EventType::RoomMessage => { + RoomEventType::RoomMessage => { #[derive(Deserialize)] struct ExtractBody<'a> { #[serde(borrow)] @@ -1663,8 +1668,11 @@ impl Rooms { let states_parents = previous_shortstatehash .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - let shortstatekey = - self.get_or_create_shortstatekey(&new_pdu.kind, state_key, globals)?; + let shortstatekey = self.get_or_create_shortstatekey( + &new_pdu.kind.to_string().into(), + state_key, + globals, + )?; let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; @@ -1713,28 +1721,36 @@ impl Rooms { ) -> Result>> { let mut state = Vec::new(); // Add recommended events - if let Some(e) = self.room_state_get(&invite_event.room_id, &EventType::RoomCreate, "")? { - state.push(e.to_stripped_state_event()); - } if let Some(e) = - self.room_state_get(&invite_event.room_id, &EventType::RoomJoinRules, "")? + self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? { state.push(e.to_stripped_state_event()); } if let Some(e) = - self.room_state_get(&invite_event.room_id, &EventType::RoomCanonicalAlias, "")? + self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? { state.push(e.to_stripped_state_event()); } - if let Some(e) = self.room_state_get(&invite_event.room_id, &EventType::RoomAvatar, "")? { + if let Some(e) = self.room_state_get( + &invite_event.room_id, + &StateEventType::RoomCanonicalAlias, + "", + )? { + state.push(e.to_stripped_state_event()); + } + if let Some(e) = + self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? + { state.push(e.to_stripped_state_event()); } - if let Some(e) = self.room_state_get(&invite_event.room_id, &EventType::RoomName, "")? { + if let Some(e) = + self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? + { state.push(e.to_stripped_state_event()); } if let Some(e) = self.room_state_get( &invite_event.room_id, - &EventType::RoomMember, + &StateEventType::RoomMember, invite_event.sender.as_str(), )? 
{ state.push(e.to_stripped_state_event()); @@ -1807,7 +1823,7 @@ impl Rooms { .take(20) .collect::>(); - let create_event = self.room_state_get(room_id, &EventType::RoomCreate, "")?; + let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; let create_event_content: Option = create_event .as_ref() @@ -1845,7 +1861,9 @@ impl Rooms { let mut unsigned = unsigned.unwrap_or_default(); if let Some(state_key) = &state_key { - if let Some(prev_pdu) = self.room_state_get(room_id, &event_type, state_key)? { + if let Some(prev_pdu) = + self.room_state_get(room_id, &event_type.to_string().into(), state_key)? + { unsigned.insert( "prev_content".to_owned(), serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), @@ -1888,7 +1906,6 @@ impl Rooms { let auth_check = state_res::auth_check( &room_version, &pdu, - create_prev_event, None::, // TODO: third_party_invite |k, s| auth_events.get(&(k.clone(), s.to_owned())), ) @@ -2031,7 +2048,7 @@ impl Rooms { let matching_users = |users: &Regex| { users.is_match(pdu.sender.as_str()) - || pdu.kind == EventType::RoomMember + || pdu.kind == RoomEventType::RoomMember && pdu .state_key .as_ref() @@ -2231,7 +2248,7 @@ impl Rooms { // Check if the room has a predecessor if let Some(predecessor) = self - .room_state_get(room_id, &EventType::RoomCreate, "")? + .room_state_get(room_id, &StateEventType::RoomCreate, "")? .and_then(|create| serde_json::from_str(create.content.get()).ok()) .and_then(|content: RoomCreateEventContent| content.predecessor) { @@ -2264,13 +2281,13 @@ impl Rooms { if let Some(tag_event) = db.account_data.get::( Some(&predecessor.room_id), user_id, - EventType::Tag, + RoomAccountDataEventType::Tag, )? { db.account_data .update( Some(room_id), user_id, - EventType::Tag, + RoomAccountDataEventType::Tag, &tag_event, &db.globals, ) @@ -2278,10 +2295,11 @@ impl Rooms { }; // Copy direct chat flag - if let Some(mut direct_event) = - db.account_data - .get::(None, user_id, EventType::Direct)? - { + if let Some(mut direct_event) = db.account_data.get::( + None, + user_id, + GlobalAccountDataEventType::Direct.to_string().into(), + )? { let mut room_ids_updated = false; for room_ids in direct_event.content.0.values_mut() { @@ -2295,7 +2313,7 @@ impl Rooms { db.account_data.update( None, user_id, - EventType::Direct, + GlobalAccountDataEventType::Direct.to_string().into(), &direct_event, &db.globals, )?; @@ -2322,7 +2340,9 @@ impl Rooms { .get::( None, // Ignored users are in global account data user_id, // Receiver - EventType::IgnoredUserList, + GlobalAccountDataEventType::IgnoredUserList + .to_string() + .into(), )? .map_or(false, |ignored| { ignored @@ -2578,7 +2598,7 @@ impl Rooms { let state_lock = mutex_state.lock().await; let mut event: RoomMemberEventContent = serde_json::from_str( - self.room_state_get(room_id, &EventType::RoomMember, user_id.as_str())? + self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? 
.ok_or(Error::BadRequest( ErrorKind::BadState, "Cannot leave a room you are not a member of.", @@ -2592,7 +2612,7 @@ impl Rooms { self.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), unsigned: None, state_key: Some(user_id.to_string()), diff --git a/src/database/sending.rs b/src/database/sending.rs index b7d62c12..1ff2cdfa 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -23,7 +23,10 @@ use ruma::{ OutgoingRequest, }, device_id, - events::{push_rules::PushRulesEvent, AnySyncEphemeralRoomEvent, EventType}, + events::{ + push_rules::PushRulesEvent, AnySyncEphemeralRoomEvent, EventType, + GlobalAccountDataEventType, + }, push, receipt::ReceiptType, uint, MilliSecondsSinceUnixEpoch, ServerName, UInt, UserId, @@ -635,7 +638,11 @@ impl Sending { let rules_for_user = db .account_data - .get(None, &userid, EventType::PushRules) + .get( + None, + &userid, + GlobalAccountDataEventType::PushRules.to_string().into(), + ) .unwrap_or_default() .map(|ev: PushRulesEvent| ev.content.global) .unwrap_or_else(|| push::Ruleset::server_default(&userid)); diff --git a/src/database/transaction_ids.rs b/src/database/transaction_ids.rs index 12b838ba..ed0970d1 100644 --- a/src/database/transaction_ids.rs +++ b/src/database/transaction_ids.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use crate::Result; -use ruma::{identifiers::TransactionId, DeviceId, UserId}; +use ruma::{DeviceId, TransactionId, UserId}; use super::abstraction::Tree; diff --git a/src/database/users.rs b/src/database/users.rs index a66fa93a..7c15f1d8 100644 --- a/src/database/users.rs +++ b/src/database/users.rs @@ -2,11 +2,10 @@ use crate::{utils, Error, Result}; use ruma::{ api::client::{device::Device, error::ErrorKind, filter::IncomingFilterDefinition}, encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, - events::{AnyToDeviceEvent, EventType}, - identifiers::MxcUri, + events::{AnyToDeviceEvent, StateEventType}, serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, RoomAliasId, UInt, - UserId, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, MxcUri, RoomAliasId, + UInt, UserId, }; use std::{collections::BTreeMap, mem, sync::Arc}; use tracing::warn; @@ -754,7 +753,7 @@ impl Users { for room_id in rooms.rooms_joined(user_id).filter_map(|r| r.ok()) { // Don't send key updates to unencrypted rooms if rooms - .room_state_get(&room_id, &EventType::RoomEncryption, "")? + .room_state_get(&room_id, &StateEventType::RoomEncryption, "")? .is_none() { continue; diff --git a/src/main.rs b/src/main.rs index a9047ecb..d20ee752 100644 --- a/src/main.rs +++ b/src/main.rs @@ -26,10 +26,7 @@ use http::{ Method, Uri, }; use opentelemetry::trace::{FutureExt, Tracer}; -use ruma::{ - api::{client::error::ErrorKind, IncomingRequest}, - Outgoing, -}; +use ruma::api::{client::error::ErrorKind, IncomingRequest}; use tokio::{signal, sync::RwLock}; use tower::ServiceBuilder; use tower_http::{ @@ -408,16 +405,15 @@ macro_rules! 
impl_ruma_handler { #[allow(non_snake_case)] impl RumaHandler<($($ty,)* Ruma,)> for F where - Req: Outgoing + 'static, - Req::Incoming: IncomingRequest + Send, + Req: IncomingRequest + Send + 'static, F: FnOnce($($ty,)* Ruma) -> Fut + Clone + Send + 'static, - Fut: Future::OutgoingResponse, E>> + Fut: Future> + Send, E: IntoResponse, $( $ty: FromRequest + Send + 'static, )* { fn add_to_router(self, mut router: Router) -> Router { - let meta = Req::Incoming::METADATA; + let meta = Req::METADATA; let method_filter = method_to_filter(meta.method); for path in IntoIterator::into_iter([meta.unstable_path, meta.r0_path, meta.stable_path]).flatten() { diff --git a/src/pdu.rs b/src/pdu.rs index ec6c961b..aed2575f 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -2,7 +2,7 @@ use crate::Error; use ruma::{ events::{ room::member::RoomMemberEventContent, AnyEphemeralRoomEvent, AnyRoomEvent, AnyStateEvent, - AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, EventType, StateEvent, + AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, RoomEventType, StateEvent, }, serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, state_res, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, UInt, UserId, @@ -29,7 +29,7 @@ pub struct PduEvent { pub sender: Box, pub origin_server_ts: UInt, #[serde(rename = "type")] - pub kind: EventType, + pub kind: RoomEventType, pub content: Box, #[serde(skip_serializing_if = "Option::is_none")] pub state_key: Option, @@ -51,10 +51,10 @@ impl PduEvent { self.unsigned = None; let allowed: &[&str] = match self.kind { - EventType::RoomMember => &["membership"], - EventType::RoomCreate => &["creator"], - EventType::RoomJoinRules => &["join_rule"], - EventType::RoomPowerLevels => &[ + RoomEventType::RoomMember => &["membership"], + RoomEventType::RoomCreate => &["creator"], + RoomEventType::RoomJoinRules => &["join_rule"], + RoomEventType::RoomPowerLevels => &[ "ban", "events", "events_default", @@ -64,7 +64,7 @@ impl PduEvent { "users", "users_default", ], - EventType::RoomHistoryVisibility => &["history_visibility"], + RoomEventType::RoomHistoryVisibility => &["history_visibility"], _ => &[], }; @@ -279,7 +279,7 @@ impl state_res::Event for PduEvent { &self.sender } - fn event_type(&self) -> &EventType { + fn event_type(&self) -> &RoomEventType { &self.kind } @@ -354,7 +354,7 @@ pub(crate) fn gen_event_id_canonical_json( #[derive(Debug, Deserialize)] pub struct PduBuilder { #[serde(rename = "type")] - pub event_type: EventType, + pub event_type: RoomEventType, pub content: Box, pub unsigned: Option>, pub state_key: Option, diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs index 119c3ea8..15360e58 100644 --- a/src/ruma_wrapper.rs +++ b/src/ruma_wrapper.rs @@ -1,9 +1,6 @@ use crate::Error; use ruma::{ - api::client::uiaa::UiaaResponse, - identifiers::{DeviceId, UserId}, - signatures::CanonicalJsonValue, - Outgoing, ServerName, + api::client::uiaa::UiaaResponse, signatures::CanonicalJsonValue, DeviceId, ServerName, UserId, }; use std::ops::Deref; @@ -11,8 +8,8 @@ use std::ops::Deref; mod axum; /// Extractor for Ruma request structs -pub struct Ruma { - pub body: T::Incoming, +pub struct Ruma { + pub body: T, pub sender_user: Option>, pub sender_device: Option>, pub sender_servername: Option>, @@ -21,8 +18,8 @@ pub struct Ruma { pub from_appservice: bool, } -impl Deref for Ruma { - type Target = T::Incoming; +impl Deref for Ruma { + type Target = T; fn deref(&self) -> &Self::Target { &self.body diff --git a/src/ruma_wrapper/axum.rs b/src/ruma_wrapper/axum.rs 
index c779e335..fdb140fe 100644 --- a/src/ruma_wrapper/axum.rs +++ b/src/ruma_wrapper/axum.rs @@ -18,7 +18,7 @@ use http::StatusCode; use ruma::{ api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse}, signatures::CanonicalJsonValue, - DeviceId, Outgoing, ServerName, UserId, + DeviceId, ServerName, UserId, }; use serde::Deserialize; use tracing::{debug, error, warn}; @@ -29,8 +29,7 @@ use crate::{database::DatabaseGuard, server_server, Error, Result}; #[async_trait] impl FromRequest for Ruma where - T: Outgoing, - T::Incoming: IncomingRequest, + T: IncomingRequest, B: HttpBody + Send, B::Data: Send, B::Error: Into, @@ -44,7 +43,7 @@ where user_id: Option, } - let metadata = T::Incoming::METADATA; + let metadata = T::METADATA; let db = DatabaseGuard::from_request(req).await?; let auth_header = Option::>>::from_request(req).await?; let path_params = Path::>::from_request(req).await?; @@ -284,7 +283,7 @@ where debug!("{:?}", http_request); - let body = T::Incoming::try_from_http_request(http_request, &path_params).map_err(|e| { + let body = T::try_from_http_request(http_request, &path_params).map_err(|e| { warn!("{:?}", e); Error::BadRequest(ErrorKind::BadJson, "Failed to deserialize request.") })?; @@ -358,10 +357,7 @@ impl Credentials for XMatrix { } } -impl IntoResponse for RumaResponse -where - T: OutgoingResponse, -{ +impl IntoResponse for RumaResponse { fn into_response(self) -> Response { match self.0.try_into_http_response::() { Ok(res) => res.map(BytesMut::freeze).map(Full::new).into_response(), diff --git a/src/server_server.rs b/src/server_server.rs index 371f2979..e9977f9e 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -45,7 +45,7 @@ use ruma::{ member::{MembershipState, RoomMemberEventContent}, server_acl::RoomServerAclEventContent, }, - EventType, + RoomEventType, StateEventType, }, int, receipt::ReceiptType, @@ -575,7 +575,7 @@ pub async fn get_server_keys_deprecated_route(db: DatabaseGuard) -> impl IntoRes /// Lists the public rooms on this server. pub async fn get_public_rooms_filtered_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -604,7 +604,7 @@ pub async fn get_public_rooms_filtered_route( /// Lists the public rooms on this server. pub async fn get_public_rooms_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -633,7 +633,7 @@ pub async fn get_public_rooms_route( /// Push EDUs and PDUs to this server. pub async fn send_transaction_message_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -924,7 +924,7 @@ pub(crate) async fn handle_incoming_pdu<'a>( let create_event = db .rooms - .room_state_get(room_id, &EventType::RoomCreate, "") + .room_state_get(room_id, &StateEventType::RoomCreate, "") .map_err(|_| "Failed to ask database for event.".to_owned())? 
.ok_or_else(|| "Failed to find create event in db.".to_owned())?; @@ -1174,7 +1174,7 @@ fn handle_outlier_pdu<'a>( }; match auth_events.entry(( - auth_event.kind.clone(), + auth_event.kind.to_string().into(), auth_event .state_key .clone() @@ -1194,7 +1194,7 @@ fn handle_outlier_pdu<'a>( // The original create event must be in the auth events if auth_events - .get(&(EventType::RoomCreate, "".to_owned())) + .get(&(StateEventType::RoomCreate, "".to_owned())) .map(|a| a.as_ref()) != Some(create_event) { @@ -1216,9 +1216,8 @@ fn handle_outlier_pdu<'a>( if !state_res::event_auth::auth_check( &room_version, &incoming_pdu, - previous_create.as_ref(), None::, // TODO: third party invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), + |k, s| auth_events.get(&(k.to_string().into(), s.to_owned())), ) .map_err(|_e| "Auth check failed".to_owned())? { @@ -1297,7 +1296,11 @@ async fn upgrade_outlier_to_timeline_pdu( if let Some(state_key) = &prev_pdu.state_key { let shortstatekey = db .rooms - .get_or_create_shortstatekey(&prev_pdu.kind, state_key, &db.globals) + .get_or_create_shortstatekey( + &prev_pdu.kind.to_string().into(), + state_key, + &db.globals, + ) .map_err(|_| "Failed to create shortstatekey.".to_owned())?; state.insert(shortstatekey, Arc::from(prev_event)); @@ -1342,7 +1345,11 @@ async fn upgrade_outlier_to_timeline_pdu( if let Some(state_key) = &prev_event.state_key { let shortstatekey = db .rooms - .get_or_create_shortstatekey(&prev_event.kind, state_key, &db.globals) + .get_or_create_shortstatekey( + &prev_event.kind.to_string().into(), + state_key, + &db.globals, + ) .map_err(|_| "Failed to create shortstatekey.".to_owned())?; leaf_state.insert(shortstatekey, Arc::from(&*prev_event.event_id)); // Now it's the state after the pdu @@ -1352,8 +1359,10 @@ async fn upgrade_outlier_to_timeline_pdu( let mut starting_events = Vec::with_capacity(leaf_state.len()); for (k, id) in leaf_state { - if let Ok(k) = db.rooms.get_statekey_from_short(k) { - state.insert(k, id.clone()); + if let Ok((ty, st_key)) = db.rooms.get_statekey_from_short(k) { + // FIXME: Undo .to_string().into() when StateMap + // is updated to use StateEventType + state.insert((ty.to_string().into(), st_key), id.clone()); } else { warn!("Failed to get_statekey_from_short."); } @@ -1387,7 +1396,11 @@ async fn upgrade_outlier_to_timeline_pdu( .map(|((event_type, state_key), event_id)| { let shortstatekey = db .rooms - .get_or_create_shortstatekey(&event_type, &state_key, &db.globals) + .get_or_create_shortstatekey( + &event_type.to_string().into(), + &state_key, + &db.globals, + ) .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; Ok((shortstatekey, event_id)) }) @@ -1441,7 +1454,11 @@ async fn upgrade_outlier_to_timeline_pdu( let shortstatekey = db .rooms - .get_or_create_shortstatekey(&pdu.kind, &state_key, &db.globals) + .get_or_create_shortstatekey( + &pdu.kind.to_string().into(), + &state_key, + &db.globals, + ) .map_err(|_| "Failed to create shortstatekey.".to_owned())?; match state.entry(shortstatekey) { @@ -1458,7 +1475,7 @@ async fn upgrade_outlier_to_timeline_pdu( // The original create event must still be in the state let create_shortstatekey = db .rooms - .get_shortstatekey(&EventType::RoomCreate, "") + .get_shortstatekey(&StateEventType::RoomCreate, "") .map_err(|_| "Failed to talk to db.")? 
.expect("Room exists"); @@ -1496,11 +1513,10 @@ async fn upgrade_outlier_to_timeline_pdu( let check_result = state_res::event_auth::auth_check( &room_version, &incoming_pdu, - previous_create.as_ref(), None::, // TODO: third party invite |k, s| { db.rooms - .get_shortstatekey(k, s) + .get_shortstatekey(&k.to_string().into(), s) .ok() .flatten() .and_then(|shortstatekey| state_at_incoming_event.get(&shortstatekey)) @@ -1580,7 +1596,6 @@ async fn upgrade_outlier_to_timeline_pdu( let soft_fail = !state_res::event_auth::auth_check( &room_version, &incoming_pdu, - previous_create.as_ref(), None::, |k, s| auth_events.get(&(k.clone(), s.to_owned())), ) @@ -1655,7 +1670,11 @@ async fn upgrade_outlier_to_timeline_pdu( if let Some(state_key) = &incoming_pdu.state_key { let shortstatekey = db .rooms - .get_or_create_shortstatekey(&incoming_pdu.kind, state_key, &db.globals) + .get_or_create_shortstatekey( + &incoming_pdu.kind.to_string().into(), + state_key, + &db.globals, + ) .map_err(|_| "Failed to create shortstatekey.".to_owned())?; state_after.insert(shortstatekey, Arc::from(&*incoming_pdu.event_id)); @@ -1701,7 +1720,9 @@ async fn upgrade_outlier_to_timeline_pdu( .filter_map(|(k, id)| { db.rooms .get_statekey_from_short(k) - .map(|k| (k, id)) + // FIXME: Undo .to_string().into() when StateMap + // is updated to use StateEventType + .map(|(ty, st_key)| ((ty.to_string().into(), st_key), id)) .map_err(|e| warn!("Failed to get_statekey_from_short: {}", e)) .ok() }) @@ -1732,7 +1753,11 @@ async fn upgrade_outlier_to_timeline_pdu( .map(|((event_type, state_key), event_id)| { let shortstatekey = db .rooms - .get_or_create_shortstatekey(&event_type, &state_key, &db.globals) + .get_or_create_shortstatekey( + &event_type.to_string().into(), + &state_key, + &db.globals, + ) .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; db.rooms .compress_state_event(shortstatekey, &event_id, &db.globals) @@ -2151,7 +2176,7 @@ fn append_incoming_pdu<'a>( let matching_users = |users: &Regex| { users.is_match(pdu.sender.as_str()) - || pdu.kind == EventType::RoomMember + || pdu.kind == RoomEventType::RoomMember && pdu .state_key .as_ref() @@ -2298,7 +2323,7 @@ fn get_auth_chain_inner( /// - Only works if a user of this server is currently invited or joined the room pub async fn get_event_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -2341,7 +2366,7 @@ pub async fn get_event_route( /// Retrieves events that the sender is missing. pub async fn get_missing_events_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -2414,7 +2439,7 @@ pub async fn get_missing_events_route( /// - This does not include the event itself pub async fn get_event_authorization_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -2462,7 +2487,7 @@ pub async fn get_event_authorization_route( /// Retrieves the current state of the room. pub async fn get_room_state_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -2521,7 +2546,7 @@ pub async fn get_room_state_route( /// Retrieves the current state of the room. 
pub async fn get_room_state_ids_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -2569,7 +2594,7 @@ pub async fn get_room_state_ids_route( /// Creates a join template. pub async fn create_join_event_template_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -2598,7 +2623,7 @@ pub async fn create_join_event_template_route( let create_event = db .rooms - .room_state_get(&body.room_id, &EventType::RoomCreate, "")?; + .room_state_get(&body.room_id, &StateEventType::RoomCreate, "")?; let create_event_content: Option = create_event .as_ref() @@ -2645,11 +2670,11 @@ pub async fn create_join_event_template_route( .expect("member event is valid value"); let state_key = body.user_id.to_string(); - let kind = EventType::RoomMember; + let kind = StateEventType::RoomMember; let auth_events = db.rooms.get_auth_events( &body.room_id, - &kind, + &kind.to_string().into(), &body.user_id, Some(&state_key), &content, @@ -2680,7 +2705,7 @@ pub async fn create_join_event_template_route( origin_server_ts: utils::millis_since_unix_epoch() .try_into() .expect("time is valid"), - kind, + kind: kind.to_string().into(), content, state_key: Some(state_key), prev_events, @@ -2704,7 +2729,6 @@ pub async fn create_join_event_template_route( let auth_check = state_res::auth_check( &room_version, &pdu, - create_prev_event, None::, // TODO: third_party_invite |k, s| auth_events.get(&(k.clone(), s.to_owned())), ) @@ -2849,7 +2873,7 @@ async fn create_join_event( /// Submits a signed join event. pub async fn create_join_event_v1_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_servername = body .sender_servername @@ -2866,7 +2890,7 @@ pub async fn create_join_event_v1_route( /// Submits a signed join event. pub async fn create_join_event_v2_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { let sender_servername = body .sender_servername @@ -2883,7 +2907,7 @@ pub async fn create_join_event_v2_route( /// Invites a remote user to a room. pub async fn create_invite_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -2993,7 +3017,7 @@ pub async fn create_invite_route( /// Gets information on all devices of the user. pub async fn get_devices_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -3041,7 +3065,7 @@ pub async fn get_devices_route( /// Resolve a room alias to a room id. pub async fn get_room_information_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -3066,7 +3090,7 @@ pub async fn get_room_information_route( /// Gets information on a profile. pub async fn get_profile_information_route( db: DatabaseGuard, - body: Ruma>, + body: Ruma, ) -> Result { if !db.globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); @@ -3395,7 +3419,7 @@ pub(crate) async fn fetch_join_signing_keys( fn acl_check(server_name: &ServerName, room_id: &RoomId, db: &Database) -> Result<()> { let acl_event = match db .rooms - .room_state_get(room_id, &EventType::RoomServerAcl, "")? 
+ .room_state_get(room_id, &StateEventType::RoomServerAcl, "")? { Some(acl) => acl, None => return Ok(()), From df4c38cb610c2ca8c3dcf09595b20c271ca26d70 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 7 Apr 2022 13:22:10 +0200 Subject: [PATCH 255/445] fix: remove warnings --- src/client_server/account.rs | 2 +- src/client_server/context.rs | 2 +- src/client_server/membership.rs | 8 -------- src/client_server/push.rs | 4 +--- src/client_server/state.rs | 2 +- src/client_server/sync.rs | 2 +- src/client_server/tag.rs | 2 +- src/client_server/to_device.rs | 1 - src/database/abstraction/sqlite.rs | 8 ++++---- src/database/pusher.rs | 2 +- src/database/rooms.rs | 12 ++--------- src/database/sending.rs | 5 +---- src/server_server.rs | 32 ------------------------------ 13 files changed, 14 insertions(+), 68 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index be14b926..820e4f1e 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -18,7 +18,7 @@ use ruma::{ events::{ room::member::{MembershipState, RoomMemberEventContent}, room::message::RoomMessageEventContent, - GlobalAccountDataEventType, RoomAccountDataEventType, RoomEventType, + GlobalAccountDataEventType, RoomEventType, }, push, UserId, }; diff --git a/src/client_server/context.rs b/src/client_server/context.rs index 8ecd6ecf..de7aae93 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -1,7 +1,7 @@ use crate::{database::DatabaseGuard, Error, Result, Ruma}; use ruma::{ api::client::{context::get_context, error::ErrorKind, filter::LazyLoadOptions}, - events::{EventType, StateEventType}, + events::StateEventType, }; use std::{collections::HashSet, convert::TryFrom}; use tracing::error; diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 8fb2fec0..ac0715a4 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -828,14 +828,6 @@ pub(crate) async fn invite_helper<'a>( }) .transpose()?; - let create_prev_event = if prev_events.len() == 1 - && Some(&prev_events[0]) == create_event.as_ref().map(|c| &c.event_id) - { - create_event - } else { - None - }; - // If there was no create event yet, assume we are creating a version 6 room right now let room_version_id = create_event_content .map_or(RoomVersionId::V6, |create_event| create_event.room_version); diff --git a/src/client_server/push.rs b/src/client_server/push.rs index 5169b8bf..dc45ea0b 100644 --- a/src/client_server/push.rs +++ b/src/client_server/push.rs @@ -8,9 +8,7 @@ use ruma::{ set_pushrule_enabled, RuleKind, }, }, - events::{ - push_rules::PushRulesEvent, EventType, GlobalAccountDataEventType, RoomAccountDataEventType, - }, + events::{push_rules::PushRulesEvent, GlobalAccountDataEventType}, push::{ConditionalPushRuleInit, PatternedPushRuleInit, SimplePushRuleInit}, }; diff --git a/src/client_server/state.rs b/src/client_server/state.rs index c0fbf734..50fe9b4f 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -13,7 +13,7 @@ use ruma::{ canonical_alias::RoomCanonicalAliasEventContent, history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, }, - AnyStateEventContent, EventType, RoomEventType, StateEventType, + AnyStateEventContent, StateEventType, }, serde::Raw, EventId, RoomId, UserId, diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index de6a45a9..d61e6894 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -7,7 +7,7 @@ 
use ruma::{ }, events::{ room::member::{MembershipState, RoomMemberEventContent}, - EventType, RoomEventType, StateEventType, + RoomEventType, StateEventType, }, serde::Raw, DeviceId, RoomId, UserId, diff --git a/src/client_server/tag.rs b/src/client_server/tag.rs index 03408862..98d895cd 100644 --- a/src/client_server/tag.rs +++ b/src/client_server/tag.rs @@ -3,7 +3,7 @@ use ruma::{ api::client::tag::{create_tag, delete_tag, get_tags}, events::{ tag::{TagEvent, TagEventContent}, - EventType, RoomAccountDataEventType, + RoomAccountDataEventType, }, }; use std::collections::BTreeMap; diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index 42364f56..5f4ac583 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -7,7 +7,6 @@ use ruma::{ client::{error::ErrorKind, to_device::send_event_to_device}, federation::{self, transactions::edu::DirectDeviceContent}, }, - events::EventType, to_device::DeviceIdOrAllDevices, }; diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index 730c1bca..7cfa81af 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -19,7 +19,7 @@ thread_local! { struct PreparedStatementIterator<'a> { pub iterator: Box + 'a>, - pub statement_ref: NonAliasingBox>, + pub _statement_ref: NonAliasingBox>, } impl Iterator for PreparedStatementIterator<'_> { @@ -184,7 +184,7 @@ impl SqliteTable { Box::new(PreparedStatementIterator { iterator, - statement_ref, + _statement_ref: statement_ref, }) } } @@ -283,7 +283,7 @@ impl Tree for SqliteTable { ); Box::new(PreparedStatementIterator { iterator, - statement_ref, + _statement_ref: statement_ref, }) } else { let statement = Box::leak(Box::new( @@ -309,7 +309,7 @@ impl Tree for SqliteTable { Box::new(PreparedStatementIterator { iterator, - statement_ref, + _statement_ref: statement_ref, }) } } diff --git a/src/database/pusher.rs b/src/database/pusher.rs index 410300e1..6b906c24 100644 --- a/src/database/pusher.rs +++ b/src/database/pusher.rs @@ -11,7 +11,7 @@ use ruma::{ }, events::{ room::{name::RoomNameEventContent, power_levels::RoomPowerLevelsEventContent}, - AnySyncRoomEvent, EventType, RoomEventType, StateEventType, + AnySyncRoomEvent, RoomEventType, StateEventType, }, push::{Action, PushConditionRoomCtx, PushFormat, Ruleset, Tweak}, serde::Raw, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 44f33446..b9d0a874 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1835,14 +1835,6 @@ impl Rooms { }) .transpose()?; - let create_prev_event = if prev_events.len() == 1 - && Some(&prev_events[0]) == create_event.as_ref().map(|c| &c.event_id) - { - create_event - } else { - None - }; - // If there was no create event yet, assume we are creating a version 6 room right now let room_version_id = create_event_content .map_or(RoomVersionId::V6, |create_event| create_event.room_version); @@ -1978,7 +1970,7 @@ impl Rooms { self.room_servers(room_id).filter_map(|r| r.ok()).collect(); // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == EventType::RoomMember { + if pdu.kind == RoomEventType::RoomMember { if let Some(state_key_uid) = &pdu .state_key .as_ref() @@ -2001,7 +1993,7 @@ impl Rooms { // If the RoomMember event has a non-empty state_key, it is targeted at someone. // If it is our appservice user, we send this PDU to it. 
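A note on the hunks above and below, with a sketch that is not part of the diff: the `EventType` cleanup in this commit and the `.to_string().into()` calls in the preceding one are both fallout from the ruma upgrade this series tracks, which splits the old `EventType` enum into `RoomEventType`, `StateEventType`, and the account-data event types. Timeline and state comparisons now name the specific enum, and lookups bridge between the enums through their string form. The helper below is a minimal illustration of that bridge, assuming only the `Display` and `From<String>` impls the diffs themselves rely on; the function name is illustrative, not Conduit API.

    use ruma::events::{RoomEventType, StateEventType};

    /// Round-trips a timeline event type through its string form to obtain
    /// the matching state event type, exactly like the `.to_string().into()`
    /// calls scattered through these diffs.
    fn as_state_event_type(kind: &RoomEventType) -> StateEventType {
        kind.to_string().into()
    }

The string round-trip is a stopgap; the FIXME comments in the earlier hunks note it can be dropped once state-res keys its StateMap by `StateEventType` directly.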
- if pdu.kind == EventType::RoomMember { + if pdu.kind == RoomEventType::RoomMember { if let Some(state_key_uid) = &pdu .state_key .as_ref() diff --git a/src/database/sending.rs b/src/database/sending.rs index 1ff2cdfa..4c830d6f 100644 --- a/src/database/sending.rs +++ b/src/database/sending.rs @@ -23,10 +23,7 @@ use ruma::{ OutgoingRequest, }, device_id, - events::{ - push_rules::PushRulesEvent, AnySyncEphemeralRoomEvent, EventType, - GlobalAccountDataEventType, - }, + events::{push_rules::PushRulesEvent, AnySyncEphemeralRoomEvent, GlobalAccountDataEventType}, push, receipt::ReceiptType, uint, MilliSecondsSinceUnixEpoch, ServerName, UInt, UserId, diff --git a/src/server_server.rs b/src/server_server.rs index e9977f9e..d574c4ee 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -1201,18 +1201,6 @@ fn handle_outlier_pdu<'a>( return Err("Incoming event refers to wrong create event.".to_owned()); } - // If the previous event was the create event special rules apply - let previous_create = if incoming_pdu.auth_events.len() == 1 - && incoming_pdu.prev_events == incoming_pdu.auth_events - { - db.rooms - .get_pdu(&incoming_pdu.auth_events[0]) - .map_err(|e| e.to_string())? - .filter(|maybe_create| **maybe_create == *create_event) - } else { - None - }; - if !state_res::event_auth::auth_check( &room_version, &incoming_pdu, @@ -1498,18 +1486,6 @@ async fn upgrade_outlier_to_timeline_pdu( state_at_incoming_event.expect("we always set this to some above"); // 11. Check the auth of the event passes based on the state of the event - // If the previous event was the create event special rules apply - let previous_create = if incoming_pdu.auth_events.len() == 1 - && incoming_pdu.prev_events == incoming_pdu.auth_events - { - db.rooms - .get_pdu(&incoming_pdu.auth_events[0]) - .map_err(|e| e.to_string())? 
- .filter(|maybe_create| **maybe_create == *create_event) - } else { - None - }; - let check_result = state_res::event_auth::auth_check( &room_version, &incoming_pdu, @@ -2635,14 +2611,6 @@ pub async fn create_join_event_template_route( }) .transpose()?; - let create_prev_event = if prev_events.len() == 1 - && Some(&prev_events[0]) == create_event.as_ref().map(|c| &c.event_id) - { - create_event - } else { - None - }; - // If there was no create event yet, assume we are creating a version 6 room right now let room_version_id = create_event_content.map_or(RoomVersionId::V6, |create_event| create_event.room_version); From ada07de2048950feb106b9bfa9220ed0f85787a3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Thu, 7 Apr 2022 12:11:55 +0000 Subject: [PATCH 256/445] feat: support user password resets --- src/config.rs | 2 ++ src/database.rs | 52 ++++++++++++++++++++++++++++++++++++++++- src/database/admin.rs | 47 ++++++++++++++++++++++++++++++++++++- src/database/globals.rs | 4 ++++ src/database/rooms.rs | 6 ++++- 5 files changed, 108 insertions(+), 3 deletions(-) diff --git a/src/config.rs b/src/config.rs index a6ab63e3..4a3a0544 100644 --- a/src/config.rs +++ b/src/config.rs @@ -68,6 +68,8 @@ pub struct Config { #[serde(default = "default_turn_ttl")] pub turn_ttl: u64, + pub emergency_password: Option, + #[serde(flatten)] pub catchall: BTreeMap, } diff --git a/src/database.rs b/src/database.rs index e0745c54..69cf3fc8 100644 --- a/src/database.rs +++ b/src/database.rs @@ -19,7 +19,14 @@ use abstraction::DatabaseEngine; use directories::ProjectDirs; use futures_util::{stream::FuturesUnordered, StreamExt}; use lru_cache::LruCache; -use ruma::{DeviceId, EventId, RoomId, UserId}; +use ruma::{ + events::{ + push_rules::PushRulesEventContent, room::message::RoomMessageEventContent, EventType, + GlobalAccountDataEvent, + }, + push::Ruleset, + DeviceId, EventId, RoomId, UserId, +}; use std::{ collections::{BTreeMap, HashMap, HashSet}, fs::{self, remove_dir_all}, @@ -747,6 +754,23 @@ impl Database { guard.rooms.edus.presenceid_presence.clear()?; guard.admin.start_handler(Arc::clone(&db), admin_receiver); + + // Set emergency access for the conduit user + match set_emergency_access(&guard) { + Ok(pwd_set) => { + if pwd_set { + warn!("The Conduit account emergency password is set! Please unset it as soon as you finish admin account recovery!"); + guard.admin.send_message(RoomMessageEventContent::text_plain("The Conduit account emergency password is set! 
Please unset it as soon as you finish admin account recovery!")); + } + } + Err(e) => { + error!( + "Could not set the configured emergency password for the conduit user: {}", + e + ) + } + }; + guard .sending .start_handler(Arc::clone(&db), sending_receiver); @@ -928,6 +952,32 @@ impl Database { } } +/// Sets the emergency password and push rules for the @conduit account in case emergency password is set +fn set_emergency_access(db: &Database) -> Result { + let conduit_user = UserId::parse_with_server_name("conduit", db.globals.server_name()) + .expect("@conduit:server_name is a valid UserId"); + + db.users + .set_password(&conduit_user, db.globals.emergency_password().as_deref())?; + + let (ruleset, res) = match db.globals.emergency_password() { + Some(_) => (Ruleset::server_default(&conduit_user), Ok(true)), + None => (Ruleset::new(), Ok(false)), + }; + + db.account_data.update( + None, + &conduit_user, + EventType::PushRules, + &GlobalAccountDataEvent { + content: PushRulesEventContent { global: ruleset }, + }, + &db.globals, + )?; + + res +} + pub struct DatabaseGuard(OwnedRwLockReadGuard); impl Deref for DatabaseGuard { diff --git a/src/database/admin.rs b/src/database/admin.rs index f2e66e43..f5f3ba6d 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -8,7 +8,7 @@ use std::{ use crate::{ error::{Error, Result}, pdu::PduBuilder, - server_server, + server_server, utils, utils::HtmlEscape, Database, PduEvent, }; @@ -262,6 +262,12 @@ enum AdminCommand { /// Show configuration values ShowConfig, + + /// Reset user password + ResetPassword { + /// Username of the user for whom the password should be reset + username: String, + }, } fn process_admin_command( @@ -435,6 +441,45 @@ fn process_admin_command( // Construct and send the response RoomMessageEventContent::text_plain(format!("{}", db.globals.config)) } + AdminCommand::ResetPassword { username } => { + let user_id = match UserId::parse_with_server_name( + username.as_str().to_lowercase(), + db.globals.server_name(), + ) { + Ok(id) => id, + Err(e) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "The supplied username is not a valid username: {}", + e + ))) + } + }; + + // Check if the specified user is valid + if !db.users.exists(&user_id)? + || db.users.is_deactivated(&user_id)? + || user_id + == UserId::parse_with_server_name("conduit", db.globals.server_name()) + .expect("conduit user exists") + { + return Ok(RoomMessageEventContent::text_plain( + "The specified user does not exist or is deactivated!", + )); + } + + let new_password = utils::random_string(20); + + match db.users.set_password(&user_id, Some(new_password.as_str())) { + Ok(()) => RoomMessageEventContent::text_plain(format!( + "Successfully reset the password for user {}: {}", + user_id, new_password + )), + Err(e) => RoomMessageEventContent::text_plain(format!( + "Couldn't reset the password for user {}: {}", + user_id, e + )), + } + } }; Ok(reply_message_content) diff --git a/src/database/globals.rs b/src/database/globals.rs index 9a9163be..ee7db539 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -264,6 +264,10 @@ impl Globals { &self.config.turn_secret } + pub fn emergency_password(&self) -> &Option { + &self.config.emergency_password + } + /// TODO: the key valid until timestamp is only honored in room version > 4 /// Remove the outdated keys and insert the new ones. 
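A note on this commit, with a sketch that is not part of the diff: the hunks above thread the new `emergency_password` config option through `Globals`, and the database/admin changes in the same commit use it to unlock the `@conduit` server account for recovery. When the option is set, the account receives that password plus the server-default push rules, and messages sent as `@conduit` are still processed as admin commands; when it is unset, the account is locked again. A self-contained sketch of that push-rule policy, assuming only the `Ruleset` constructors the patch itself calls; the free function is illustrative, not Conduit API.

    use ruma::{push::Ruleset, UserId};

    /// When an emergency password is configured, the server account gets the
    /// default push rules so the recovering admin actually sees responses;
    /// when it is unset, the account gets an empty ruleset.
    fn emergency_ruleset(conduit_user: &UserId, emergency_password: Option<&str>) -> Ruleset {
        match emergency_password {
            Some(_) => Ruleset::server_default(conduit_user),
            None => Ruleset::new(),
        }
    }

As the warning sent to the admin room says, the option is intended to be unset again as soon as account recovery is finished.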
/// diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 88a07295..7939edc4 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1491,7 +1491,11 @@ impl Rooms { let server_user = format!("@conduit:{}", db.globals.server_name()); let to_conduit = body.starts_with(&format!("{}: ", server_user)); - let from_conduit = pdu.sender == server_user; + + // This will evaluate to false if the emergency password is set up so that + // the administrator can execute commands as conduit + let from_conduit = + pdu.sender == server_user && db.globals.emergency_password().is_none(); if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { db.admin.process_message(body.to_string()); From d81216cad7cbb9cb7d0a91bdccbe9910555b4a8b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 18 Jan 2022 16:53:25 +0100 Subject: [PATCH 257/445] improvement: preparing for room version 9 --- src/database.rs | 6 ++--- src/pdu.rs | 2 +- src/server_server.rs | 55 ++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 59 insertions(+), 4 deletions(-) diff --git a/src/database.rs b/src/database.rs index 69cf3fc8..4a03f18c 100644 --- a/src/database.rs +++ b/src/database.rs @@ -21,8 +21,8 @@ use futures_util::{stream::FuturesUnordered, StreamExt}; use lru_cache::LruCache; use ruma::{ events::{ - push_rules::PushRulesEventContent, room::message::RoomMessageEventContent, EventType, - GlobalAccountDataEvent, + push_rules::PushRulesEventContent, room::message::RoomMessageEventContent, + GlobalAccountDataEvent, GlobalAccountDataEventType, }, push::Ruleset, DeviceId, EventId, RoomId, UserId, @@ -968,7 +968,7 @@ fn set_emergency_access(db: &Database) -> Result { db.account_data.update( None, &conduit_user, - EventType::PushRules, + GlobalAccountDataEventType::PushRules.to_string().into(), &GlobalAccountDataEvent { content: PushRulesEventContent { global: ruleset }, }, diff --git a/src/pdu.rs b/src/pdu.rs index aed2575f..3b905336 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -51,7 +51,7 @@ impl PduEvent { self.unsigned = None; let allowed: &[&str] = match self.kind { - RoomEventType::RoomMember => &["membership"], + RoomEventType::RoomMember => &["join_authorised_via_users_server", "membership"], RoomEventType::RoomCreate => &["creator"], RoomEventType::RoomJoinRules => &["join_rule"], RoomEventType::RoomPowerLevels => &[ diff --git a/src/server_server.rs b/src/server_server.rs index d574c4ee..596a54e2 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -42,6 +42,7 @@ use ruma::{ receipt::{ReceiptEvent, ReceiptEventContent}, room::{ create::RoomCreateEventContent, + join_rules::{JoinRule, RoomJoinRulesEventContent}, member::{MembershipState, RoomMemberEventContent}, server_acl::RoomServerAclEventContent, }, @@ -2590,6 +2591,33 @@ pub async fn create_join_event_template_route( acl_check(sender_servername, &body.room_id, &db)?; + // TODO: Conduit does not implement restricted join rules yet, we always reject + let join_rules_event = db + .rooms + .room_state_get(&body.room_id, &StateEventType::RoomJoinRules, "")?; + + let join_rules_event_content: Option = join_rules_event + .as_ref() + .map(|join_rules_event| { + serde_json::from_str(join_rules_event.content.get()).map_err(|e| { + warn!("Invalid join rules event: {}", e); + Error::bad_database("Invalid join rules event in db.") + }) + }) + .transpose()?; + + if let Some(join_rules_event_content) = join_rules_event_content { + if matches!( + join_rules_event_content.join_rule, + JoinRule::Restricted { .. 
} + ) { + return Err(Error::BadRequest( + ErrorKind::Unknown, + "Conduit does not support restricted rooms yet.", + )); + } + } + let prev_events: Vec<_> = db .rooms .get_pdu_leaves(&body.room_id)? @@ -2749,6 +2777,33 @@ async fn create_join_event( acl_check(sender_servername, room_id, db)?; + // TODO: Conduit does not implement restricted join rules yet, we always reject + let join_rules_event = db + .rooms + .room_state_get(room_id, &StateEventType::RoomJoinRules, "")?; + + let join_rules_event_content: Option = join_rules_event + .as_ref() + .map(|join_rules_event| { + serde_json::from_str(join_rules_event.content.get()).map_err(|e| { + warn!("Invalid join rules event: {}", e); + Error::bad_database("Invalid join rules event in db.") + }) + }) + .transpose()?; + + if let Some(join_rules_event_content) = join_rules_event_content { + if matches!( + join_rules_event_content.join_rule, + JoinRule::Restricted { .. } + ) { + return Err(Error::BadRequest( + ErrorKind::Unknown, + "Conduit does not support restricted rooms yet.", + )); + } + } + // We need to return the state prior to joining, let's keep a reference to that here let shortstatehash = db .rooms From 714873694db0f96a56c50064779db6b48972dca5 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 1 Nov 2021 01:58:26 +0000 Subject: [PATCH 258/445] Refactor room version support, add default room version config --- src/client_server/capabilities.rs | 20 +++++++++++---- src/client_server/membership.rs | 15 ++++++----- src/client_server/room.rs | 8 +++--- src/config.rs | 11 ++++++++- src/database/globals.rs | 41 ++++++++++++++++++++++++++++++- src/database/rooms.rs | 18 ++++++++++---- src/server_server.rs | 11 ++++++--- 7 files changed, 96 insertions(+), 28 deletions(-) diff --git a/src/client_server/capabilities.rs b/src/client_server/capabilities.rs index 952db581..915cd7d2 100644 --- a/src/client_server/capabilities.rs +++ b/src/client_server/capabilities.rs @@ -1,9 +1,8 @@ -use crate::{Result, Ruma}; +use crate::{database::DatabaseGuard, Result, Ruma}; use ruma::{ api::client::discovery::get_capabilities::{ self, Capabilities, RoomVersionStability, RoomVersionsCapability, }, - RoomVersionId, }; use std::collections::BTreeMap; @@ -11,15 +10,26 @@ use std::collections::BTreeMap; /// /// Get information on the supported feature set and other relevent capabilities of this server. 
pub async fn get_capabilities_route( + db: DatabaseGuard, _body: Ruma, ) -> Result { let mut available = BTreeMap::new(); - available.insert(RoomVersionId::V5, RoomVersionStability::Stable); - available.insert(RoomVersionId::V6, RoomVersionStability::Stable); + if db.globals.allow_unstable_room_versions() { + for room_version in &db.globals.unstable_room_versions { + available.insert(room_version.clone(), RoomVersionStability::Stable); + } + } else { + for room_version in &db.globals.unstable_room_versions { + available.insert(room_version.clone(), RoomVersionStability::Unstable); + } + } + for room_version in &db.globals.stable_room_versions { + available.insert(room_version.clone(), RoomVersionStability::Stable); + } let mut capabilities = Capabilities::new(); capabilities.room_versions = RoomVersionsCapability { - default: RoomVersionId::V6, + default: db.globals.default_room_version(), available, }; diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index ac0715a4..0f440f48 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -492,7 +492,7 @@ async fn join_room_by_id_helper( federation::membership::prepare_join_event::v1::Request { room_id, user_id: sender_user, - ver: &[RoomVersionId::V5, RoomVersionId::V6], + ver: &db.globals.supported_room_versions(), }, ) .await; @@ -507,11 +507,7 @@ async fn join_room_by_id_helper( let (make_join_response, remote_server) = make_join_response_and_server?; let room_version = match make_join_response.room_version { - Some(room_version) - if room_version == RoomVersionId::V5 || room_version == RoomVersionId::V6 => - { - room_version - } + Some(room_version) if db.rooms.is_supported_version(&db, &room_version) => room_version, _ => return Err(Error::BadServerResponse("Room version is not supported")), }; @@ -828,9 +824,12 @@ pub(crate) async fn invite_helper<'a>( }) .transpose()?; - // If there was no create event yet, assume we are creating a version 6 room right now + // If there was no create event yet, assume we are creating a room with the default + // version right now let room_version_id = create_event_content - .map_or(RoomVersionId::V6, |create_event| create_event.room_version); + .map_or(db.globals.default_room_version(), |create_event| { + create_event.room_version + }); let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); diff --git a/src/client_server/room.rs b/src/client_server/room.rs index 1b3b8409..a5b79705 100644 --- a/src/client_server/room.rs +++ b/src/client_server/room.rs @@ -23,7 +23,7 @@ use ruma::{ }, int, serde::{CanonicalJsonObject, JsonObject}, - RoomAliasId, RoomId, RoomVersionId, + RoomAliasId, RoomId, }; use serde_json::{json, value::to_raw_value}; use std::{cmp::max, collections::BTreeMap, sync::Arc}; @@ -100,7 +100,7 @@ pub async fn create_room_route( let room_version = match body.room_version.clone() { Some(room_version) => { - if room_version == RoomVersionId::V5 || room_version == RoomVersionId::V6 { + if db.rooms.is_supported_version(&db, &room_version) { room_version } else { return Err(Error::BadRequest( @@ -109,7 +109,7 @@ pub async fn create_room_route( )); } } - None => RoomVersionId::V6, + None => db.globals.default_room_version(), }; let content = match &body.creation_content { @@ -484,7 +484,7 @@ pub async fn upgrade_room_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !matches!(body.new_version, RoomVersionId::V5 | RoomVersionId::V6) { + if 
!db.rooms.is_supported_version(&db, &body.new_version) { return Err(Error::BadRequest( ErrorKind::UnsupportedRoomVersion, "This server does not support that room version.", diff --git a/src/config.rs b/src/config.rs index 4a3a0544..29af8839 100644 --- a/src/config.rs +++ b/src/config.rs @@ -4,7 +4,7 @@ use std::{ net::{IpAddr, Ipv4Addr}, }; -use ruma::ServerName; +use ruma::{RoomVersionId, ServerName}; use serde::{de::IgnoredAny, Deserialize}; use tracing::warn; @@ -46,6 +46,10 @@ pub struct Config { pub allow_federation: bool, #[serde(default = "true_fn")] pub allow_room_creation: bool, + #[serde(default = "true_fn")] + pub allow_unstable_room_versions: bool, + #[serde(default = "default_default_room_version")] + pub default_room_version: RoomVersionId, #[serde(default = "false_fn")] pub allow_jaeger: bool, #[serde(default = "false_fn")] @@ -246,3 +250,8 @@ fn default_log() -> String { fn default_turn_ttl() -> u64 { 60 * 60 * 24 } + +// I know, it's a great name +fn default_default_room_version() -> RoomVersionId { + RoomVersionId::V6 +} diff --git a/src/database/globals.rs b/src/database/globals.rs index ee7db539..a12f4626 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -4,7 +4,8 @@ use ruma::{ client::sync::sync_events, federation::discovery::{ServerSigningKeys, VerifyKey}, }, - DeviceId, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, ServerSigningKeyId, UserId, + DeviceId, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, ServerName, + ServerSigningKeyId, UserId, }; use std::{ collections::{BTreeMap, HashMap}, @@ -41,6 +42,8 @@ pub struct Globals { jwt_decoding_key: Option>, federation_client: reqwest::Client, default_client: reqwest::Client, + pub stable_room_versions: Vec, + pub unstable_room_versions: Vec, pub(super) server_signingkeys: Arc, pub bad_event_ratelimiter: Arc, RateLimitState>>>, pub bad_signature_ratelimiter: Arc, RateLimitState>>>, @@ -145,6 +148,11 @@ impl Globals { }) .build()?; + // Supported and stable room versions + let stable_room_versions = vec![RoomVersionId::V6]; + // Experimental, partially supported room versions + let unstable_room_versions = vec![RoomVersionId::V5]; + let s = Self { globals, config, @@ -162,6 +170,8 @@ impl Globals { default_client, server_signingkeys, jwt_decoding_key, + stable_room_versions, + unstable_room_versions, bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())), bad_signature_ratelimiter: Arc::new(RwLock::new(HashMap::new())), servername_ratelimiter: Arc::new(RwLock::new(HashMap::new())), @@ -232,6 +242,22 @@ impl Globals { self.config.allow_room_creation } + pub fn allow_unstable_room_versions(&self) -> bool { + self.config.allow_unstable_room_versions + } + + pub fn default_room_version(&self) -> RoomVersionId { + if self + .supported_room_versions() + .contains(&self.config.default_room_version.clone()) + { + self.config.default_room_version.clone() + } else { + error!("Room version in config isn't supported, falling back to Version 6"); + RoomVersionId::V6 + } + } + pub fn trusted_servers(&self) -> &[Box] { &self.config.trusted_servers } @@ -268,6 +294,19 @@ impl Globals { &self.config.emergency_password } + pub fn supported_room_versions(&self) -> Vec { + let mut room_versions: Vec = vec![]; + self.stable_room_versions + .iter() + .for_each(|room_version| room_versions.push(room_version.clone())); + if self.allow_unstable_room_versions() { + self.unstable_room_versions + .iter() + .for_each(|room_version| room_versions.push(room_version.clone())); + }; + room_versions + } + 
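A note on the function added just above, with a sketch that is not part of the diff: this is the core of the refactor. Instead of hard-coding `RoomVersionId::V5`/`V6` checks at every call site, `Globals` now keeps `stable_room_versions` and `unstable_room_versions` lists, merges them according to the new `allow_unstable_room_versions` config flag, and exposes a configurable `default_room_version` that falls back to V6 when the configured value is unsupported. The block below is a self-contained sketch of that policy; the struct and method names are illustrative stand-ins, not Conduit API.

    use ruma::RoomVersionId;

    /// Stand-in for the room-version fields this patch adds to `Globals`.
    struct RoomVersionPolicy {
        stable: Vec<RoomVersionId>,
        unstable: Vec<RoomVersionId>,
        allow_unstable: bool,
    }

    impl RoomVersionPolicy {
        /// Mirrors `supported_room_versions()`: stable versions always count,
        /// unstable ones only when the config flag allows them.
        fn supported(&self) -> Vec<RoomVersionId> {
            let mut versions = self.stable.clone();
            if self.allow_unstable {
                versions.extend(self.unstable.clone());
            }
            versions
        }

        /// Mirrors `Rooms::is_supported_version` and the capability, join,
        /// and invite checks changed elsewhere in this commit.
        fn is_supported(&self, version: &RoomVersionId) -> bool {
            self.supported().contains(version)
        }
    }

Callers such as `create_room_route`, `upgrade_room_route`, and the federation join/invite handlers all go through this single check, so enabling an additional room version later only means extending the two lists.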
/// TODO: the key valid until timestamp is only honored in room version > 4 /// Remove the outdated keys and insert the new ones. /// diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 07772e7a..6616305e 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -133,6 +133,12 @@ pub struct Rooms { } impl Rooms { + /// Returns true if a given room version is supported + #[tracing::instrument(skip(self, db))] + pub fn is_supported_version(&self, db: &Database, room_version: &RoomVersionId) -> bool { + db.globals.supported_room_versions().contains(room_version) + } + /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. #[tracing::instrument(skip(self))] @@ -1839,9 +1845,13 @@ impl Rooms { }) .transpose()?; - // If there was no create event yet, assume we are creating a version 6 room right now + + // If there was no create event yet, assume we are creating a room with the default + // version right now let room_version_id = create_event_content - .map_or(RoomVersionId::V6, |create_event| create_event.room_version); + .map_or(db.globals.default_room_version(), |create_event| { + create_event.room_version + }); let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); let auth_events = @@ -2672,9 +2682,7 @@ impl Rooms { let (make_leave_response, remote_server) = make_leave_response_and_server?; let room_version_id = match make_leave_response.room_version { - Some(version) if version == RoomVersionId::V5 || version == RoomVersionId::V6 => { - version - } + Some(version) if self.is_supported_version(&db, &version) => version, _ => return Err(Error::BadServerResponse("Room version is not supported")), }; diff --git a/src/server_server.rs b/src/server_server.rs index 596a54e2..19c95832 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -2639,9 +2639,12 @@ pub async fn create_join_event_template_route( }) .transpose()?; - // If there was no create event yet, assume we are creating a version 6 room right now - let room_version_id = - create_event_content.map_or(RoomVersionId::V6, |create_event| create_event.room_version); + // If there was no create event yet, assume we are creating a room with the default version + // right now + let room_version_id = create_event_content + .map_or(db.globals.default_room_version(), |create_event| { + create_event.room_version + }); let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); if !body.ver.contains(&room_version_id) { @@ -2943,7 +2946,7 @@ pub async fn create_invite_route( acl_check(sender_servername, &body.room_id, &db)?; - if body.room_version != RoomVersionId::V5 && body.room_version != RoomVersionId::V6 { + if !db.rooms.is_supported_version(&db, &body.room_version) { return Err(Error::BadRequest( ErrorKind::IncompatibleRoomVersion { room_version: body.room_version.clone(), From d8a3b257f2b052d199c3e38d9e1d48d6d6c0b6bf Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 1 Nov 2021 02:22:32 +0000 Subject: [PATCH 259/445] Enable room version 4 --- src/client_server/capabilities.rs | 6 ++---- src/database/globals.rs | 2 +- src/database/rooms.rs | 1 - src/server_server.rs | 6 +++--- 4 files changed, 6 insertions(+), 9 deletions(-) diff --git a/src/client_server/capabilities.rs b/src/client_server/capabilities.rs index 915cd7d2..417ad29d 100644 --- a/src/client_server/capabilities.rs +++ b/src/client_server/capabilities.rs @@ -1,8 +1,6 @@ use 
crate::{database::DatabaseGuard, Result, Ruma}; -use ruma::{ - api::client::discovery::get_capabilities::{ - self, Capabilities, RoomVersionStability, RoomVersionsCapability, - }, +use ruma::api::client::discovery::get_capabilities::{ + self, Capabilities, RoomVersionStability, RoomVersionsCapability, }; use std::collections::BTreeMap; diff --git a/src/database/globals.rs b/src/database/globals.rs index a12f4626..c2ce8a5a 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -151,7 +151,7 @@ impl Globals { // Supported and stable room versions let stable_room_versions = vec![RoomVersionId::V6]; // Experimental, partially supported room versions - let unstable_room_versions = vec![RoomVersionId::V5]; + let unstable_room_versions = vec![RoomVersionId::V4, RoomVersionId::V5]; let s = Self { globals, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 6616305e..0bccc84f 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1845,7 +1845,6 @@ impl Rooms { }) .transpose()?; - // If there was no create event yet, assume we are creating a room with the default // version right now let room_version_id = create_event_content diff --git a/src/server_server.rs b/src/server_server.rs index 19c95832..6d589475 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -2592,9 +2592,9 @@ pub async fn create_join_event_template_route( acl_check(sender_servername, &body.room_id, &db)?; // TODO: Conduit does not implement restricted join rules yet, we always reject - let join_rules_event = db - .rooms - .room_state_get(&body.room_id, &StateEventType::RoomJoinRules, "")?; + let join_rules_event = + db.rooms + .room_state_get(&body.room_id, &StateEventType::RoomJoinRules, "")?; let join_rules_event_content: Option = join_rules_event .as_ref() From 4b28146ee7837451511a660cfb83130373ca38d3 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 1 Nov 2021 08:57:27 +0000 Subject: [PATCH 260/445] Support room version 3 --- src/client_server/membership.rs | 3 ++- src/database/globals.rs | 2 +- src/database/rooms.rs | 23 +++++++++++++++++++++++ src/pdu.rs | 16 ++++++++++++---- src/server_server.rs | 6 +++--- 5 files changed, 41 insertions(+), 9 deletions(-) diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 0f440f48..65107a3c 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -975,7 +975,8 @@ pub(crate) async fn invite_helper<'a>( let pub_key_map = RwLock::new(BTreeMap::new()); // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(&response.event) { + let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(&response.event, &db) + { Ok(t) => t, Err(_) => { // Event could not be converted to canonical json diff --git a/src/database/globals.rs b/src/database/globals.rs index c2ce8a5a..b1afd96c 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -151,7 +151,7 @@ impl Globals { // Supported and stable room versions let stable_room_versions = vec![RoomVersionId::V6]; // Experimental, partially supported room versions - let unstable_room_versions = vec![RoomVersionId::V4, RoomVersionId::V5]; + let unstable_room_versions = vec![RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5]; let s = Self { globals, diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 0bccc84f..31333658 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -3437,4 
+3437,27 @@ impl Rooms { Ok(()) } + + /// Returns the room's version. + #[tracing::instrument(skip(self))] + pub fn get_room_version(&self, room_id: &RoomId) -> RoomVersionId { + let create_event = self + .room_state_get(room_id, &StateEventType::RoomCreate, "") + .unwrap(); + + let create_event_content: Option = create_event + .as_ref() + .map(|create_event| { + serde_json::from_str(create_event.content.get()).map_err(|e| { + warn!("Invalid create event: {}", e); + Error::bad_database("Invalid create event in db.") + }) + }) + .transpose() + .unwrap(); + + create_event_content + .map(|create_event| create_event.room_version) + .expect("Invalid room version") + } } diff --git a/src/pdu.rs b/src/pdu.rs index 3b905336..6e2bf5aa 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -1,11 +1,11 @@ -use crate::Error; +use crate::{Database, Error}; use ruma::{ events::{ room::member::RoomMemberEventContent, AnyEphemeralRoomEvent, AnyRoomEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, RoomEventType, StateEvent, }, serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, UInt, UserId, + state_res, EventId, MilliSecondsSinceUnixEpoch, RoomId, UInt, UserId, }; use serde::{Deserialize, Serialize}; use serde_json::{ @@ -332,16 +332,24 @@ impl Ord for PduEvent { /// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. pub(crate) fn gen_event_id_canonical_json( pdu: &RawJsonValue, + db: &Database, ) -> crate::Result<(Box, CanonicalJsonObject)> { - let value = serde_json::from_str(pdu.get()).map_err(|e| { + let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { warn!("Error parsing incoming event {:?}: {:?}", pdu, e); Error::BadServerResponse("Invalid PDU in server response") })?; + let room_id = value + .get("room_id") + .and_then(|id| RoomId::parse(id.as_str()?).ok()) + .expect("Invalid room id in event"); + + let room_version_id = db.rooms.get_room_version(&room_id); + let event_id = format!( "${}", // Anything higher than version3 behaves the same - ruma::signatures::reference_hash(&value, &RoomVersionId::V6) + ruma::signatures::reference_hash(&value, &room_version_id) .expect("ruma can calculate reference hashes") ) .try_into() diff --git a/src/server_server.rs b/src/server_server.rs index 6d589475..e95c4c0f 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -659,7 +659,7 @@ pub async fn send_transaction_message_route( for pdu in &body.pdus { // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu) { + let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu, &db) { Ok(t) => t, Err(_) => { // Event could not be converted to canonical json @@ -1859,7 +1859,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( Ok(res) => { warn!("Got {} over federation", next_id); let (calculated_event_id, value) = - match crate::pdu::gen_event_id_canonical_json(&res.pdu) { + match crate::pdu::gen_event_id_canonical_json(&res.pdu, &db) { Ok(t) => t, Err(_) => { back_off((*next_id).to_owned()); @@ -2820,7 +2820,7 @@ async fn create_join_event( // let mut auth_cache = EventMap::new(); // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu) { + let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu, &db) { Ok(t) => t, Err(_) => 
{ // Event could not be converted to canonical json From d655f4c1bee05e69765f0d4c76c0f605244ed17d Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Fri, 5 Nov 2021 20:47:11 +0000 Subject: [PATCH 261/445] Cleanup rooms.rs, globals.rs, and pdu.rs --- src/database/globals.rs | 28 ++++++++++++---------------- src/database/rooms.rs | 15 ++++++--------- src/pdu.rs | 2 +- 3 files changed, 19 insertions(+), 26 deletions(-) diff --git a/src/database/globals.rs b/src/database/globals.rs index b1afd96c..9909ebd7 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -153,7 +153,7 @@ impl Globals { // Experimental, partially supported room versions let unstable_room_versions = vec![RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5]; - let s = Self { + let mut s = Self { globals, config, keypair: Arc::new(keypair), @@ -184,6 +184,14 @@ impl Globals { fs::create_dir_all(s.get_media_folder())?; + if !s + .supported_room_versions() + .contains(&s.config.default_room_version) + { + error!("Room version in config isn't supported, falling back to Version 6"); + s.config.default_room_version = RoomVersionId::V6; + }; + Ok(s) } @@ -247,15 +255,7 @@ impl Globals { } pub fn default_room_version(&self) -> RoomVersionId { - if self - .supported_room_versions() - .contains(&self.config.default_room_version.clone()) - { - self.config.default_room_version.clone() - } else { - error!("Room version in config isn't supported, falling back to Version 6"); - RoomVersionId::V6 - } + self.config.default_room_version.clone() } pub fn trusted_servers(&self) -> &[Box] { @@ -296,13 +296,9 @@ impl Globals { pub fn supported_room_versions(&self) -> Vec { let mut room_versions: Vec = vec![]; - self.stable_room_versions - .iter() - .for_each(|room_version| room_versions.push(room_version.clone())); + room_versions.extend(self.stable_room_versions.clone()); if self.allow_unstable_room_versions() { - self.unstable_room_versions - .iter() - .for_each(|room_version| room_versions.push(room_version.clone())); + room_versions.extend(self.unstable_room_versions.clone()); }; room_versions } diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 31333658..25337b3b 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -3440,10 +3440,8 @@ impl Rooms { /// Returns the room's version. #[tracing::instrument(skip(self))] - pub fn get_room_version(&self, room_id: &RoomId) -> RoomVersionId { - let create_event = self - .room_state_get(room_id, &StateEventType::RoomCreate, "") - .unwrap(); + pub fn get_room_version(&self, room_id: &RoomId) -> Result { + let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; let create_event_content: Option = create_event .as_ref() @@ -3453,11 +3451,10 @@ impl Rooms { Error::bad_database("Invalid create event in db.") }) }) - .transpose() - .unwrap(); - - create_event_content + .transpose()?; + let room_version = create_event_content .map(|create_event| create_event.room_version) - .expect("Invalid room version") + .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; + Ok(room_version) } } diff --git a/src/pdu.rs b/src/pdu.rs index 6e2bf5aa..e26739f2 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -349,7 +349,7 @@ pub(crate) fn gen_event_id_canonical_json( let event_id = format!( "${}", // Anything higher than version3 behaves the same - ruma::signatures::reference_hash(&value, &room_version_id) + ruma::signatures::reference_hash(&value, &room_version_id?) 
.expect("ruma can calculate reference hashes") ) .try_into() From 686319e2e311970c821e555b383629e1b6cbebe1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 18 Jan 2022 17:24:34 +0100 Subject: [PATCH 262/445] fix: error handling --- src/pdu.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pdu.rs b/src/pdu.rs index e26739f2..20ec01ea 100644 --- a/src/pdu.rs +++ b/src/pdu.rs @@ -342,7 +342,7 @@ pub(crate) fn gen_event_id_canonical_json( let room_id = value .get("room_id") .and_then(|id| RoomId::parse(id.as_str()?).ok()) - .expect("Invalid room id in event"); + .ok_or_else(|| Error::bad_database("PDU in db has invalid room_id."))?; let room_version_id = db.rooms.get_room_version(&room_id); From 0ae39807a478370a769217d01fa33514299a2b35 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 18 Feb 2022 13:39:15 +0100 Subject: [PATCH 263/445] Add V9 to list of allowed versions --- src/database/globals.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/database/globals.rs b/src/database/globals.rs index 9909ebd7..797e5b1d 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -149,7 +149,7 @@ impl Globals { .build()?; // Supported and stable room versions - let stable_room_versions = vec![RoomVersionId::V6]; + let stable_room_versions = vec![RoomVersionId::V6, RoomVersionId::V7, RoomVersionId::V8, RoomVersionId::V9]; // Experimental, partially supported room versions let unstable_room_versions = vec![RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5]; From e4600ccfef51a03029161790d0271c174958942d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Fri, 18 Feb 2022 13:41:37 +0100 Subject: [PATCH 264/445] bump ruma --- Cargo.lock | 498 +++++++++++++++++++++------------------- src/database/globals.rs | 7 +- 2 files changed, 268 insertions(+), 237 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index cd518259..3a251b66 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -20,7 +20,7 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.4", + "getrandom 0.2.6", "once_cell", "version_check", ] @@ -84,9 +84,9 @@ checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" [[package]] name = "async-compression" -version = "0.3.8" +version = "0.3.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5443ccbb270374a2b1055fc72da40e1f237809cd6bb0e97e66d264cd138473a6" +checksum = "f2bf394cfbbe876f0ac67b13b6ca819f9c9f2fb9ec67223cceb1555fbab1c31a" dependencies = [ "brotli", "flate2", @@ -98,9 +98,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.52" +version = "0.1.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "061a7acccaa286c011ddc30970520b98fa40e00c9d644633fb26b5fc63a265e3" +checksum = "ed6aa3524a2dfcf9fe180c51eae2b58738348d819517ceadf95789c51fff7600" dependencies = [ "proc-macro2", "quote", @@ -118,15 +118,15 @@ dependencies = [ [[package]] name = "autocfg" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "5611d4977882c5af1c0f7a34d51b5d87f784f86912bb543986b014ea4995ef93" +checksum = "47594e438a243791dba58124b6669561f5baa14cb12046641d8008bf035e5a25" dependencies = [ "async-trait", "axum-core", @@ -137,7 +137,7 @@ dependencies = [ "http", "http-body", "hyper", - "itoa 1.0.1", + "itoa", "matchit", "memchr", "mime", @@ -155,9 +155,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.2.0" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95cd109b3e93c9541dcce5b0219dcf89169dcc58c1bebed65082808324258afb" +checksum = "9a671c9ae99531afdd5d3ee8340b8da547779430689947144c140fc74a740244" dependencies = [ "async-trait", "bytes", @@ -253,11 +253,20 @@ dependencies = [ "generic-array", ] +[[package]] +name = "block-buffer" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf7fe51849ea569fd452f37822f606a5cabb684dc918707a0193fd4664ff324" +dependencies = [ + "generic-array", +] + [[package]] name = "brotli" -version = "3.3.3" +version = "3.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f838e47a451d5a8fa552371f80024dd6ace9b7acdf25c4c3d0f9bc6816fb1c39" +checksum = "a1a0b1dbcc8ae29329621f8d4f0d835787c1c38bb1401979b49d13b0b305ff68" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -282,9 +291,9 @@ checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899" [[package]] name = "bytemuck" -version = "1.7.3" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "439989e6b8c38d1b6570a384ef1e49c8848128f5a97f3914baef02920842712f" +checksum = "cdead85bdec19c194affaeeb670c0e41fe23de31459efd1c174d049269cf02cc" [[package]] name = "byteorder" @@ -300,9 +309,9 @@ checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" [[package]] name = "cc" -version = "1.0.72" +version = "1.0.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22a9137b95ea06864e018375b72adfb7db6e6f68cfc8df5a04d00288050485ee" +checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" dependencies = [ "jobserver", ] @@ -343,9 +352,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.3.0" +version = "1.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa66045b9cb23c2e9c1520732030608b02ee07e5cfaa5a521ec15ded7fa24c90" +checksum = "4cc00842eed744b858222c4c9faf7243aafc6d33f92f96935263ef4d8a41ce21" dependencies = [ "glob", "libc", @@ -354,9 +363,9 @@ dependencies = [ [[package]] name = "clap" -version = "3.0.10" +version = "3.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a30c3bf9ff12dfe5dae53f0a96e0febcd18420d1c0e7fad77796d9d5c4b5375" +checksum = "71c47df61d9e16dc010b55dba1952a57d8c215dbb533fd13cdd13369aac73b1c" dependencies = [ "bitflags", "clap_derive", @@ -368,11 +377,11 @@ dependencies = [ [[package]] name = "clap_derive" -version = "3.0.6" +version = "3.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "517358c28fcef6607bf6f76108e02afad7e82297d132a6b846dcc1fc3efcd153" +checksum = "a3aab4734e083b809aaf5794e14e756d1c798d2c69c7f7de7a09a2f5214993c1" dependencies = [ - "heck 0.4.0", + "heck", "proc-macro-error", "proc-macro2", "quote", @@ -409,7 +418,7 @@ dependencies = [ "opentelemetry-jaeger", "parking_lot", "persy", - "rand 0.8.4", + "rand 0.8.5", "regex", "reqwest", "ring", @@ -420,7 +429,7 @@ dependencies = [ "serde", "serde_json", "serde_yaml", - "sha-1", + "sha-1 
0.9.8", "sled", "thiserror", "thread_local", @@ -466,9 +475,9 @@ checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" [[package]] name = "cpufeatures" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95059428f66df56b63431fdb4e1947ed2190586af5c5a8a8b71122bdf5a7f469" +checksum = "59a6001667ab124aebae2a495118e11d30984c3a653e99d86d58971708cf5e4b" dependencies = [ "libc", ] @@ -490,9 +499,9 @@ checksum = "ccaeedb56da03b09f598226e25e80088cb4cd25f316e6e4df7d695f0feeb1403" [[package]] name = "crc32fast" -version = "1.3.1" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2209c310e29876f7f0b2721e7e26b84aff178aa3da5d091f9bfbf47669e60e3" +checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ "cfg-if 1.0.0", ] @@ -507,18 +516,18 @@ dependencies = [ "crossbeam-channel", "crossbeam-deque", "crossbeam-epoch", - "crossbeam-queue 0.3.3", - "crossbeam-utils 0.8.6", + "crossbeam-queue 0.3.5", + "crossbeam-utils 0.8.8", ] [[package]] name = "crossbeam-channel" -version = "0.5.2" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e54ea8bc3fb1ee042f5aace6e3c6e025d3874866da222930f70ce62aceba0bfa" +checksum = "5aaa7bd5fb665c6864b5f963dd9097905c54125909c7aa94c9e18507cdbe6c53" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.6", + "crossbeam-utils 0.8.8", ] [[package]] @@ -529,17 +538,18 @@ checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" dependencies = [ "cfg-if 1.0.0", "crossbeam-epoch", - "crossbeam-utils 0.8.6", + "crossbeam-utils 0.8.8", ] [[package]] name = "crossbeam-epoch" -version = "0.9.6" +version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97242a70df9b89a65d0b6df3c4bf5b9ce03c5b7309019777fbde37e7537f8762" +checksum = "1145cf131a2c6ba0615079ab6a638f7e1973ac9c2634fcbeaaad6114246efe8c" dependencies = [ + "autocfg", "cfg-if 1.0.0", - "crossbeam-utils 0.8.6", + "crossbeam-utils 0.8.8", "lazy_static", "memoffset", "scopeguard", @@ -556,12 +566,12 @@ dependencies = [ [[package]] name = "crossbeam-queue" -version = "0.3.3" +version = "0.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b979d76c9fcb84dffc80a73f7290da0f83e4c95773494674cb44b76d13a7a110" +checksum = "1f25d8400f4a7a5778f0e4e52384a48cbd9b5c495d110786187fc750075277a2" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.6", + "crossbeam-utils 0.8.8", ] [[package]] @@ -576,14 +586,24 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.6" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfcae03edb34f947e64acdb1c33ec169824e20657e9ecb61cef6c8c74dcb8120" +checksum = "0bf124c720b7686e3c2663cf54062ab0f68a88af2fb6a030e87e30bf721fcb38" dependencies = [ "cfg-if 1.0.0", "lazy_static", ] +[[package]] +name = "crypto-common" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57952ca27b5e3606ff4dd79b0020231aaf9d6aa76dc05fd30137538c50bd3ce8" +dependencies = [ + "generic-array", + "typenum", +] + [[package]] name = "crypto-mac" version = "0.11.1" @@ -596,12 +616,12 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "3.2.0" +version = "3.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" +checksum = 
"90f9d052967f590a76e62eb387bd0bbb1b000182c3cefe5364db6b7211651bc0" dependencies = [ "byteorder", - "digest", + "digest 0.9.0", "rand_core 0.5.1", "subtle", "zeroize", @@ -641,6 +661,16 @@ dependencies = [ "generic-array", ] +[[package]] +name = "digest" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2fb860ca6fafa5552fb6d0e816a69c8e49f0908bf524e30a90d97c85892d506" +dependencies = [ + "block-buffer 0.10.2", + "crypto-common", +] + [[package]] name = "directories" version = "3.0.2" @@ -652,9 +682,9 @@ dependencies = [ [[package]] name = "dirs-sys" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03d86534ed367a67548dc68113a0f5db55432fdfbb6e6f9d77704397d95d5780" +checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6" dependencies = [ "libc", "redox_users", @@ -663,9 +693,9 @@ dependencies = [ [[package]] name = "ed25519" -version = "1.3.0" +version = "1.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74e1069e39f1454367eb2de793ed062fac4c35c2934b76a81d90dd9abcd28816" +checksum = "3d5c4b5e5959dc2c2b89918d8e2cc40fcdd623cef026ed09d2f0ee05199dc8e4" dependencies = [ "signature", ] @@ -692,20 +722,20 @@ checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" [[package]] name = "encoding_rs" -version = "0.8.30" +version = "0.8.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7896dc8abb250ffdda33912550faa54c88ec8b998dec0b2c55ab224921ce11df" +checksum = "9852635589dc9f9ea1b6fe9f05b50ef208c85c834a562f0c6abb1c475736ec2b" dependencies = [ "cfg-if 1.0.0", ] [[package]] name = "enum-as-inner" -version = "0.3.3" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c5f0096a91d210159eceb2ff5e1c4da18388a170e1e3ce948aac9c8fdbbf595" +checksum = "570d109b813e904becc80d8d5da38376818a143348413f7149f1340fe04754d4" dependencies = [ - "heck 0.3.3", + "heck", "proc-macro2", "quote", "syn", @@ -783,9 +813,9 @@ checksum = "2022715d62ab30faffd124d40b76f4134a550a87792276512b18d63272333394" [[package]] name = "futures" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28560757fe2bb34e79f907794bb6b22ae8b0e5c669b638a1132f2592b19035b4" +checksum = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e" dependencies = [ "futures-channel", "futures-core", @@ -798,9 +828,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba3dda0b6588335f360afc675d0564c17a77a2bda81ca178a4b6081bd86c7f0b" +checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" dependencies = [ "futures-core", "futures-sink", @@ -808,15 +838,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0c8ff0461b82559810cdccfde3215c3f373807f5e5232b71479bff7bb2583d7" +checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3" [[package]] name = "futures-executor" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "29d6d2ff5bb10fb95c85b8ce46538a2e5f5e7fdc755623a7d4529ab8a4ed9d2a" +checksum = "9420b90cfa29e327d0429f19be13e7ddb68fa1cccb09d65e5706b8c7a749b8a6" dependencies = [ 
"futures-core", "futures-task", @@ -825,15 +855,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1f9d34af5a1aac6fb380f735fe510746c38067c5bf16c7fd250280503c971b2" +checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" [[package]] name = "futures-macro" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6dbd947adfffb0efc70599b3ddcf7b5597bb5fa9e245eb99f62b3a5f7bb8bd3c" +checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512" dependencies = [ "proc-macro2", "quote", @@ -842,21 +872,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3055baccb68d74ff6480350f8d6eb8fcfa3aa11bdc1a1ae3afdd0514617d508" +checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868" [[package]] name = "futures-task" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ee7c6485c30167ce4dfb83ac568a849fe53274c831081476ee13e0dce1aad72" +checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" [[package]] name = "futures-util" -version = "0.3.19" +version = "0.3.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b5cf40b47a271f77a8b1bec03ca09044d99d2372c0de244e66430761127164" +checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" dependencies = [ "futures-channel", "futures-core", @@ -902,9 +932,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.4" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "418d37c8b1d42553c93648be529cb70f920d3baf8ef469b74b9638df426e0b4c" +checksum = "9be70c98951c83b8d2f8f60d7065fa6d5146873094452a1008da8c2f1e4205ad" dependencies = [ "cfg-if 1.0.0", "libc", @@ -929,9 +959,9 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "h2" -version = "0.3.10" +version = "0.3.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c9de88456263e249e241fcd211d3954e2c9b0ef7ccfc235a444eb367cae3689" +checksum = "37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57" dependencies = [ "bytes", "fnv", @@ -966,9 +996,9 @@ dependencies = [ [[package]] name = "headers" -version = "0.3.5" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4c4eb0471fcb85846d8b0690695ef354f9afb11cb03cac2e1d7c9253351afb0" +checksum = "4cff78e5788be1e0ab65b04d306b2ed5092c815ec97ec70f4ebd5aee158aa55d" dependencies = [ "base64 0.13.0", "bitflags", @@ -977,7 +1007,7 @@ dependencies = [ "http", "httpdate", "mime", - "sha-1", + "sha-1 0.10.0", ] [[package]] @@ -989,15 +1019,6 @@ dependencies = [ "http", ] -[[package]] -name = "heck" -version = "0.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" -dependencies = [ - "unicode-segmentation", -] - [[package]] name = "heck" version = "0.4.0" @@ -1056,7 +1077,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" dependencies = [ "crypto-mac", - "digest", + "digest 0.9.0", ] [[package]] @@ -1078,7 +1099,7 @@ 
checksum = "31f4c6746584866f0feabcc69893c5b51beef3831656a968ed7ae254cdc4fd03" dependencies = [ "bytes", "fnv", - "itoa 1.0.1", + "itoa", ] [[package]] @@ -1100,9 +1121,9 @@ checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" [[package]] name = "httparse" -version = "1.5.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503" +checksum = "9100414882e15fb7feccb4897e5f0ff0ff1ca7d1a86a23208ada4d7a18e6c6c4" [[package]] name = "httpdate" @@ -1112,9 +1133,9 @@ checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "hyper" -version = "0.14.16" +version = "0.14.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7ec3e62bdc98a2f0393a5048e4c30ef659440ea6e0e572965103e72bd836f55" +checksum = "b26ae0a80afebe130861d90abf98e3814a4f28a4c6ffeb5ab8ebb2be311e0ef2" dependencies = [ "bytes", "futures-channel", @@ -1125,9 +1146,9 @@ dependencies = [ "http-body", "httparse", "httpdate", - "itoa 0.4.8", + "itoa", "pin-project-lite", - "socket2 0.4.3", + "socket2 0.4.4", "tokio", "tower-service", "tracing", @@ -1177,9 +1198,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "282a6247722caba404c065016bbfa522806e51714c34f5dfc3e4a3a46fcb4223" +checksum = "0f647032dfaa1f8b6dc29bd3edb7bbef4861b8b8007ebb118d6db284fd59f6ee" dependencies = [ "autocfg", "hashbrown", @@ -1188,9 +1209,9 @@ dependencies = [ [[package]] name = "indoc" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5a75aeaaef0ce18b58056d306c27b07436fbb34b8816c53094b76dd81803136" +checksum = "e7906a9fababaeacb774f72410e497a1d18de916322e33797bb2cd29baa23c9e" dependencies = [ "unindent", ] @@ -1230,9 +1251,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.3.1" +version = "2.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68f2d64f2edebec4ce84ad108148e67e1064789bee435edc5b60ad398714a3a9" +checksum = "35e70ee094dc02fd9c13fdad4940090f22dbd6ac7c9e7094a46cf0232a50bc7c" [[package]] name = "itertools" @@ -1243,12 +1264,6 @@ dependencies = [ "either", ] -[[package]] -name = "itoa" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" - [[package]] name = "itoa" version = "1.0.1" @@ -1281,9 +1296,9 @@ dependencies = [ [[package]] name = "js_int" -version = "0.2.1" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defaba9bcd19568a4b4b3736b23e368e5b75e3ea126fd4cb3e4ad2ea5af274fd" +checksum = "d937f95470b270ce8b8950207715d71aa8e153c0d44c6684d59397ed4949160a" dependencies = [ "serde", ] @@ -1316,9 +1331,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.113" +version = "0.2.122" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eef78b64d87775463c549fbd80e19249ef436ea3bf1de2a1eb7e717ec7fab1e9" +checksum = "ec647867e2bf0772e28c8bcde4f0d19a9216916e890543b5a03ed8ef27b8f259" [[package]] name = "libloading" @@ -1361,9 +1376,9 @@ checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" [[package]] name = "lmdb-rkv-sys" -version = "0.11.0" +version = "0.11.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b27470ac25167b3afdfb6af8fcd3bc1be67de50ffbdaf4073378cfded6ae24a5" +checksum = "61b9ce6b3be08acefa3003c57b7565377432a89ec24476bbe72e11d101f852fe" dependencies = [ "cc", "libc", @@ -1372,18 +1387,19 @@ dependencies = [ [[package]] name = "lock_api" -version = "0.4.5" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712a4d093c9976e24e7dbca41db895dabcbac38eb5f4045393d17a95bdfb1109" +checksum = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53" dependencies = [ + "autocfg", "scopeguard", ] [[package]] name = "log" -version = "0.4.14" +version = "0.4.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" +checksum = "6389c490849ff5bc16be905ae24bc913a9c8892e19b2341dbc175e14c341c2b8" dependencies = [ "cfg-if 1.0.0", ] @@ -1478,14 +1494,15 @@ dependencies = [ [[package]] name = "mio" -version = "0.7.14" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8067b404fe97c70829f082dec8bcf4f71225d7eaea1d8645349cb76fa06205cc" +checksum = "52da4364ffb0e4fe33a9841a98a3f3014fb964045ce4f7a45a398243c8d6b0c9" dependencies = [ "libc", "log", "miow", "ntapi", + "wasi 0.11.0+wasi-snapshot-preview1", "winapi", ] @@ -1500,20 +1517,19 @@ dependencies = [ [[package]] name = "nom" -version = "7.1.0" +version = "7.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b1d11e1ef389c76fe5b81bcaf2ea32cf88b62bc494e19f493d0b30e7a930109" +checksum = "a8903e5a29a317527874d0402f867152a3d21c908bb0b933e416c65e301d4c36" dependencies = [ "memchr", "minimal-lexical", - "version_check", ] [[package]] name = "ntapi" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" +checksum = "c28774a7fd2fbb4f0babd8237ce554b73af68021b5f695a3cebd6c59bac0980f" dependencies = [ "winapi", ] @@ -1582,9 +1598,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da32515d9f6e6e489d7bc9d84c71b060db7247dc035bbe44eac88cf87486d8d5" +checksum = "87f3e037eac156d1775da914196f0f37741a274155e34a0b7e427c35d2a2ecb9" [[package]] name = "opaque-debug" @@ -1611,7 +1627,7 @@ dependencies = [ "lazy_static", "percent-encoding", "pin-project", - "rand 0.8.4", + "rand 0.8.5", "thiserror", "tokio", "tokio-stream", @@ -1696,9 +1712,9 @@ dependencies = [ [[package]] name = "paste" -version = "1.0.6" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0744126afe1a6dd7f394cb50a716dbe086cb06e255e53d8d0185d82828358fb5" +checksum = "0c520e05135d6e763148b6426a837e239041653ba7becd2e538c076c738025fc" [[package]] name = "pear" @@ -1748,15 +1764,15 @@ checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" [[package]] name = "persy" -version = "1.2.1" +version = "1.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b71907e1dfa6844b657f5ca59e9a076e7d6281efb4885526ba9e235a18e7e3b3" +checksum = "5af61053f1daed3ff0265fad7f924e43ce07642a336c79304f8e5aec205460fb" dependencies = [ "crc", "data-encoding", "fs2", "linked-hash-map", - "rand 0.8.4", + "rand 0.8.5", "thiserror", "unsigned-varint", "zigzag", @@ -1807,9 +1823,9 @@ dependencies = [ [[package]] name = 
"pkg-config" -version = "0.3.24" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58893f751c9b0412871a09abd62ecd2a00298c6c83befa223ef98c52aef40cbe" +checksum = "1df8c4ec4b0627e53bdf214615ad287367e482558cf84b109250b37464dc03ae" [[package]] name = "png" @@ -1831,9 +1847,9 @@ checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" [[package]] name = "proc-macro-crate" -version = "1.1.0" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ebace6889caf889b4d3f76becee12e90353f2b8c7d875534a71e5742f8f6f83" +checksum = "e17d47ce914bf4de440332250b0edd23ce48c005f59fab39d3335866b114f11a" dependencies = [ "thiserror", "toml", @@ -1865,9 +1881,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.36" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7342d5883fbccae1cc37a2353b09c87c9b0f3afd73f5fb9bba687a1f733b029" +checksum = "ec757218438d5fda206afc041538b2f6d889286160d649a86a24d37e1235afd1" dependencies = [ "unicode-xid", ] @@ -1893,9 +1909,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.14" +version = "1.0.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47aa80447ce4daf1717500037052af176af5d38cc3e571d9ec1c7353fc10c87d" +checksum = "632d02bff7f874a36f33ea8bb416cd484b90cc66c1194b1a1110d067a7013f58" dependencies = [ "proc-macro2", ] @@ -1910,19 +1926,18 @@ dependencies = [ "libc", "rand_chacha 0.2.2", "rand_core 0.5.1", - "rand_hc 0.2.0", + "rand_hc", ] [[package]] name = "rand" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha 0.3.1", "rand_core 0.6.3", - "rand_hc 0.3.1", ] [[package]] @@ -1960,7 +1975,7 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" dependencies = [ - "getrandom 0.2.4", + "getrandom 0.2.6", ] [[package]] @@ -1972,39 +1987,31 @@ dependencies = [ "rand_core 0.5.1", ] -[[package]] -name = "rand_hc" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" -dependencies = [ - "rand_core 0.6.3", -] - [[package]] name = "redox_syscall" -version = "0.2.10" +version = "0.2.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" +checksum = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42" dependencies = [ "bitflags", ] [[package]] name = "redox_users" -version = "0.4.0" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64" +checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.4", + "getrandom 0.2.6", "redox_syscall", + "thiserror", ] [[package]] name = "regex" -version = "1.5.4" +version = "1.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461" +checksum = 
"1a11647b6b25ff05a515cb92c365cec08801e83423a235b51e231e1808747286" dependencies = [ "aho-corasick", "memchr", @@ -2153,10 +2160,10 @@ dependencies = [ "http", "indexmap", "indoc", - "itoa 1.0.1", + "itoa", "js_int", "percent-encoding", - "rand 0.8.4", + "rand 0.8.5", "ruma-identifiers-validation", "ruma-macros", "serde", @@ -2275,7 +2282,7 @@ dependencies = [ "base64 0.13.0", "blake2b_simd", "constant_time_eq", - "crossbeam-utils 0.8.6", + "crossbeam-utils 0.8.8", ] [[package]] @@ -2286,9 +2293,9 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustls" -version = "0.20.2" +version = "0.20.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d37e5e2290f3e040b594b1a9e04377c2c671f1a1cfd9bfdef82106ac1c113f84" +checksum = "4fbfeb8d0ddb84706bc597a5574ab8912817c52a397f819e5b614e2265206921" dependencies = [ "log", "ring", @@ -2374,18 +2381,18 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.134" +version = "1.0.136" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96b3c34c1690edf8174f5b289a336ab03f568a4460d8c6df75f2f3a692b3bc6a" +checksum = "ce31e24b01e1e524df96f1c2fdd054405f8d7376249a5110886fb4b658484789" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.134" +version = "1.0.136" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784ed1fbfa13fe191077537b0d70ec8ad1e903cfe04831da608aa36457cb653d" +checksum = "08597e7152fcd306f41838ed3e37be9eaeed2b61c42e2117266a554fab4662f9" dependencies = [ "proc-macro2", "quote", @@ -2394,11 +2401,11 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.75" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c059c05b48c5c0067d4b4b2b4f0732dd65feb52daf7e0ea09cd87e7dadc1af79" +checksum = "8e8d9fa5c3b304765ce1fd9c4c8a3de2c8db365a5b91be52f186efc675681d95" dependencies = [ - "itoa 1.0.1", + "itoa", "ryu", "serde", ] @@ -2410,7 +2417,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", - "itoa 1.0.1", + "itoa", "ryu", "serde", ] @@ -2433,23 +2440,34 @@ version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" dependencies = [ - "block-buffer", + "block-buffer 0.9.0", "cfg-if 1.0.0", "cpufeatures", - "digest", + "digest 0.9.0", "opaque-debug", ] +[[package]] +name = "sha-1" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" +dependencies = [ + "cfg-if 1.0.0", + "cpufeatures", + "digest 0.10.3", +] + [[package]] name = "sha2" version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" dependencies = [ - "block-buffer", + "block-buffer 0.9.0", "cfg-if 1.0.0", "cpufeatures", - "digest", + "digest 0.9.0", "opaque-debug", ] @@ -2496,9 +2514,9 @@ dependencies = [ [[package]] name = "slab" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9def91fd1e018fe007022791f865d0ccc9b3a0d5001e01aabb8b40e46000afb5" +checksum = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32" [[package]] name = "sled" @@ -2508,7 +2526,7 @@ 
checksum = "7f96b4737c2ce5987354855aed3797279def4ebf734436c6aa4552cf8e169935" dependencies = [ "crc32fast", "crossbeam-epoch", - "crossbeam-utils 0.8.6", + "crossbeam-utils 0.8.8", "fs2", "fxhash", "libc", @@ -2536,9 +2554,9 @@ dependencies = [ [[package]] name = "socket2" -version = "0.4.3" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f82496b90c36d70af5fcd482edaa2e0bd16fade569de1330405fecbbdac736b" +checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0" dependencies = [ "libc", "winapi", @@ -2567,9 +2585,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.86" +version = "1.0.91" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a65b3f4ffa0092e9887669db0eae07941f023991ab58ea44da8fe8e2d511c6b" +checksum = "b683b2b825c8eef438b77c36a06dc262294da3d5a5813fac20da149241dcd44d" dependencies = [ "proc-macro2", "quote", @@ -2605,9 +2623,9 @@ dependencies = [ [[package]] name = "textwrap" -version = "0.14.2" +version = "0.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0066c8d12af8b5acd21e00547c3797fde4e8677254a7ee429176ccebbe93dd80" +checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb" [[package]] name = "thiserror" @@ -2631,9 +2649,9 @@ dependencies = [ [[package]] name = "thread_local" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8018d24e04c95ac8790716a5987d0fec4f8b27249ffa0f7d33f1369bdfb88cbd" +checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" dependencies = [ "once_cell", ] @@ -2673,9 +2691,9 @@ dependencies = [ [[package]] name = "tikv-jemalloc-sys" -version = "0.4.2+5.2.1-patched.2" +version = "0.4.3+5.2.1-patched.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5844e429d797c62945a566f8da4e24c7fe3fbd5d6617fd8bf7a0b7dc1ee0f22e" +checksum = "a1792ccb507d955b46af42c123ea8863668fae24d03721e40cad6a41773dbb49" dependencies = [ "cc", "fs_extra", @@ -2684,9 +2702,9 @@ dependencies = [ [[package]] name = "tikv-jemallocator" -version = "0.4.1" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c14a5a604eb8715bc5785018a37d00739b180bcf609916ddf4393d33d49ccdf" +checksum = "a5b7bcecfafe4998587d636f9ae9d55eb9d0499877b88757767c346875067098" dependencies = [ "libc", "tikv-jemalloc-sys", @@ -2719,9 +2737,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.15.0" +version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbbf1c778ec206785635ce8ad57fe52b3009ae9e0c9f574a728f3049d3e55838" +checksum = "2af73ac49756f3f7c01172e34a23e5d0216f6c32333757c2c61feb2bbff5a5ee" dependencies = [ "bytes", "libc", @@ -2731,6 +2749,7 @@ dependencies = [ "once_cell", "pin-project-lite", "signal-hook-registry", + "socket2 0.4.4", "tokio-macros", "winapi", ] @@ -2748,9 +2767,9 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.23.2" +version = "0.23.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a27d5f2b839802bd8267fa19b0530f5a08b9c08cd417976be2a65d130fe1c11b" +checksum = "4151fda0cf2798550ad0b34bcfc9b9dcc2a9d2471c895c68f3a8818e54f2389e" dependencies = [ "rustls", "tokio", @@ -2782,16 +2801,16 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.6.9" 
+version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e99e1983e5d376cd8eb4b66604d2e99e79f5bd988c3055891dcd8c9e2604cc0" +checksum = "0edfdeb067411dba2044da6d1cb2df793dd35add7888d73c16e3381ded401764" dependencies = [ "bytes", "futures-core", "futures-sink", - "log", "pin-project-lite", "tokio", + "tracing", ] [[package]] @@ -2805,9 +2824,9 @@ dependencies = [ [[package]] name = "tower" -version = "0.4.11" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5651b5f6860a99bd1adb59dbfe1db8beb433e73709d9032b413a77e2fb7c066a" +checksum = "9a89fd63ad6adf737582df5db40d286574513c69a11dac5214dc3b5603d6713e" dependencies = [ "futures-core", "futures-util", @@ -2822,9 +2841,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.2.1" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03650267ad175b51c47d02ed9547fc7d4ba2c7e5cb76df0bed67edd1825ae297" +checksum = "aba3f3efabf7fb41fae8534fc20a817013dd1c12cb45441efb6c82e6556b4cd8" dependencies = [ "async-compression", "bitflags", @@ -2857,9 +2876,9 @@ checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" [[package]] name = "tracing" -version = "0.1.29" +version = "0.1.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "375a639232caf30edfc78e8d89b2d4c375515393e7af7e16f01cd96917fb2105" +checksum = "4a1bdf54a7c28a2bbf701e1d2233f6c77f473486b94bee4f9678da5a148dca7f" dependencies = [ "cfg-if 1.0.0", "log", @@ -2870,9 +2889,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.18" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4f480b8f81512e825f337ad51e94c1eb5d3bbdf2b363dcd01e2b19a9ffe3f8e" +checksum = "2e65ce065b4b5c53e73bb28912318cb8c9e9ad3921f1d669eb0e68b4c8143a2b" dependencies = [ "proc-macro2", "quote", @@ -2881,11 +2900,12 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.21" +version = "0.1.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f4ed65637b8390770814083d20756f87bfa2c21bf2f110babdc5438351746e4" +checksum = "90442985ee2f57c9e1b548ee72ae842f4a9a20e3f417cc38dbc5dc684d9bb4ee" dependencies = [ "lazy_static", + "valuable", ] [[package]] @@ -2912,9 +2932,9 @@ dependencies = [ [[package]] name = "tracing-serde" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb65ea441fbb84f9f6748fd496cf7f63ec9af5bca94dd86456978d055e8eb28b" +checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" dependencies = [ "serde", "tracing-core", @@ -2944,9 +2964,9 @@ dependencies = [ [[package]] name = "trust-dns-proto" -version = "0.20.3" +version = "0.20.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0d7f5db438199a6e2609debe3f69f808d074e0a2888ee0bccb45fe234d03f4" +checksum = "ca94d4e9feb6a181c690c4040d7a24ef34018d8313ac5044a61d21222ae24e31" dependencies = [ "async-trait", "cfg-if 1.0.0", @@ -2959,7 +2979,7 @@ dependencies = [ "ipnet", "lazy_static", "log", - "rand 0.8.4", + "rand 0.8.5", "smallvec", "thiserror", "tinyvec", @@ -2969,9 +2989,9 @@ dependencies = [ [[package]] name = "trust-dns-resolver" -version = "0.20.3" +version = "0.20.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ad17b608a64bd0735e67bde16b0636f8aa8591f831a25d18443ed00a699770" +checksum = 
"ecae383baad9995efaa34ce8e57d12c3f305e545887472a492b838f4b5cfb77a" dependencies = [ "cfg-if 1.0.0", "futures-util", @@ -3023,12 +3043,6 @@ dependencies = [ "tinyvec", ] -[[package]] -name = "unicode-segmentation" -version = "1.8.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8895849a949e7845e06bd6dc1aa51731a103c42707010a5b591c0038fb73385b" - [[package]] name = "unicode-xid" version = "0.2.2" @@ -3037,9 +3051,9 @@ checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" [[package]] name = "unindent" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f14ee04d9415b52b3aeab06258a3f07093182b88ba0f9b8d203f211a7a7d41c7" +checksum = "514672a55d7380da379785a4d70ca8386c8883ff7eaae877be4d2081cebe73d8" [[package]] name = "unsigned-varint" @@ -3071,9 +3085,15 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.4", + "getrandom 0.2.6", ] +[[package]] +name = "valuable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + [[package]] name = "vcpkg" version = "0.2.15" @@ -3108,6 +3128,12 @@ version = "0.10.2+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" +[[package]] +name = "wasi" +version = "0.11.0+wasi-snapshot-preview1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" + [[package]] name = "wasm-bindgen" version = "0.2.79" @@ -3263,24 +3289,24 @@ dependencies = [ [[package]] name = "yansi" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fc79f4a1e39857fc00c3f662cbf2651c771f00e9c15fe2abc341806bd46bd71" +checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" [[package]] name = "zeroize" -version = "1.5.0" +version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc222aec311c323c717f56060324f32b82da1ce1dd81d9a09aa6a9030bfe08db" +checksum = "4756f7db3f7b5574938c3eb1c117038b8e07f95ee6718c0efad4ac21508f1efd" dependencies = [ "zeroize_derive", ] [[package]] name = "zeroize_derive" -version = "1.3.1" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81e8f13fef10b63c06356d65d416b070798ddabcadc10d3ece0c5be9b3c7eddb" +checksum = "3f8f187641dad4f680d25c4bfc4225b418165984179f26ca76ec4fb6441d3a17" dependencies = [ "proc-macro2", "quote", diff --git a/src/database/globals.rs b/src/database/globals.rs index 797e5b1d..d363e933 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -149,7 +149,12 @@ impl Globals { .build()?; // Supported and stable room versions - let stable_room_versions = vec![RoomVersionId::V6, RoomVersionId::V7, RoomVersionId::V8, RoomVersionId::V9]; + let stable_room_versions = vec![ + RoomVersionId::V6, + RoomVersionId::V7, + RoomVersionId::V8, + RoomVersionId::V9, + ]; // Experimental, partially supported room versions let unstable_room_versions = vec![RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5]; From 3573d40027926f77617fcce63c762c793e785551 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: 
Sat, 19 Feb 2022 12:53:11 +0100 Subject: [PATCH 265/445] fix warnings --- src/client_server/session.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client_server/session.rs b/src/client_server/session.rs index c31636db..c2a79ca6 100644 --- a/src/client_server/session.rs +++ b/src/client_server/session.rs @@ -14,7 +14,7 @@ use tracing::info; #[derive(Debug, Deserialize)] struct Claims { sub: String, - exp: usize, + //exp: usize, } /// # `GET /_matrix/client/r0/login` From b6b27b66c83c322a9887b7b207788f7e4b2c348f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 23 Mar 2022 11:05:41 +0100 Subject: [PATCH 266/445] fix: don't allow unjoined users to send typing notifications --- src/client_server/typing.rs | 11 +++++++++-- src/server_server.rs | 26 +++++++++++++++----------- 2 files changed, 24 insertions(+), 13 deletions(-) diff --git a/src/client_server/typing.rs b/src/client_server/typing.rs index 60fc1cc4..413c5400 100644 --- a/src/client_server/typing.rs +++ b/src/client_server/typing.rs @@ -1,5 +1,5 @@ -use crate::{database::DatabaseGuard, utils, Result, Ruma}; -use ruma::api::client::typing::create_typing_event; +use crate::{database::DatabaseGuard, Error, utils, Result, Ruma}; +use ruma::api::client::{typing::create_typing_event, error::ErrorKind}; /// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}` /// @@ -12,6 +12,13 @@ pub async fn create_typing_event_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + if !db.rooms.is_joined(sender_user, &body.room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You are not in this room.", + )); + } + if let Typing::Yes(duration) = body.state { db.rooms.edus.typing_add( sender_user, diff --git a/src/server_server.rs b/src/server_server.rs index e95c4c0f..67ad3691 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -770,17 +770,21 @@ pub async fn send_transaction_message_route( } } Edu::Typing(typing) => { - if typing.typing { - db.rooms.edus.typing_add( - &typing.user_id, - &typing.room_id, - 3000 + utils::millis_since_unix_epoch(), - &db.globals, - )?; - } else { - db.rooms - .edus - .typing_remove(&typing.user_id, &typing.room_id, &db.globals)?; + if db.rooms.is_joined(&typing.user_id, &typing.room_id)? { + if typing.typing { + db.rooms.edus.typing_add( + &typing.user_id, + &typing.room_id, + 3000 + utils::millis_since_unix_epoch(), + &db.globals, + )?; + } else { + db.rooms.edus.typing_remove( + &typing.user_id, + &typing.room_id, + &db.globals, + )?; + } } } Edu::DeviceListUpdate(DeviceListUpdateContent { user_id, .. 
}) => { From 00b362b43ba61d0d5a2b43a944e47556730e42c9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 7 Apr 2022 17:09:07 +0200 Subject: [PATCH 267/445] fix: cors warning --- src/client_server/typing.rs | 4 ++-- src/main.rs | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/src/client_server/typing.rs b/src/client_server/typing.rs index 413c5400..cac5a5fd 100644 --- a/src/client_server/typing.rs +++ b/src/client_server/typing.rs @@ -1,5 +1,5 @@ -use crate::{database::DatabaseGuard, Error, utils, Result, Ruma}; -use ruma::api::client::{typing::create_typing_event, error::ErrorKind}; +use crate::{database::DatabaseGuard, utils, Error, Result, Ruma}; +use ruma::api::client::{error::ErrorKind, typing::create_typing_event}; /// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}` /// diff --git a/src/main.rs b/src/main.rs index d20ee752..67ec82e2 100644 --- a/src/main.rs +++ b/src/main.rs @@ -141,7 +141,7 @@ async fn run_server(config: &Config, db: Arc>) -> io::Result<() .compression() .layer( CorsLayer::new() - .allow_origin(cors::any()) + .allow_origin(cors::Any) .allow_methods([ Method::GET, Method::POST, From 07a3a6fa9a9f8c00788fb262dd19139bd2c22192 Mon Sep 17 00:00:00 2001 From: Zeyphros Date: Fri, 8 Apr 2022 22:05:13 +0200 Subject: [PATCH 268/445] Return an error when signing an event fails Prevents the server from crashing/become unresponsive when overly long messages are sent --- src/database/rooms.rs | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 25337b3b..955489b4 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1938,13 +1938,25 @@ impl Rooms { CanonicalJsonValue::String(db.globals.server_name().as_ref().to_owned()), ); - ruma::signatures::hash_and_sign_event( + match ruma::signatures::hash_and_sign_event( db.globals.server_name().as_str(), db.globals.keypair(), &mut pdu_json, &room_version_id, - ) - .expect("event is valid, we just created it"); + ) { + Ok(_) => {} + Err(e) => { + return match e { + ruma::signatures::Error::PduSize => { + Err(Error::BadRequest(ErrorKind::TooLarge, "Message is to long")) + } + _ => Err(Error::BadRequest( + ErrorKind::Unknown, + "Signing event failed", + )), + } + } + } // Generate event id pdu.event_id = EventId::parse_arc(format!( From b10dbc747bed15b7eeffb22c3478869de697d060 Mon Sep 17 00:00:00 2001 From: Paul van Tilburg Date: Sat, 9 Apr 2022 15:13:01 +0200 Subject: [PATCH 269/445] Remove the address override in deb generated config This override was accidentally introduced by commit de6c331. The Debian postinst script will ask for and generate a config with the address set. This should not be overriden by what is set in the default config and is thus a deviation from the standard docs. --- debian/postinst | 3 --- 1 file changed, 3 deletions(-) diff --git a/debian/postinst b/debian/postinst index 378f99ed..aab2480c 100644 --- a/debian/postinst +++ b/debian/postinst @@ -78,9 +78,6 @@ trusted_servers = ["matrix.org"] #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time #log = "info,state_res=warn,rocket=off,_=off,sled=off" - -address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy -#address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it. 
EOF fi ;; From 729d66aa11180b56b8802191852529fda9354c36 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Sun, 10 Apr 2022 14:56:43 +0200 Subject: [PATCH 270/445] feat: register missing add_backup_keys route --- src/main.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/main.rs b/src/main.rs index 67ec82e2..9a0928a0 100644 --- a/src/main.rs +++ b/src/main.rs @@ -226,6 +226,7 @@ fn routes() -> Router { .ruma_route(client_server::delete_backup_version_route) .ruma_route(client_server::get_latest_backup_info_route) .ruma_route(client_server::get_backup_info_route) + .ruma_route(client_server::add_backup_keys_route) .ruma_route(client_server::add_backup_keys_for_room_route) .ruma_route(client_server::add_backup_keys_for_session_route) .ruma_route(client_server::delete_backup_keys_for_room_route) From 090d0fe68420777c71b6c88124c2e04311a2e0be Mon Sep 17 00:00:00 2001 From: Zeyphros Date: Wed, 13 Apr 2022 00:08:55 +0200 Subject: [PATCH 271/445] Fix typo --- src/database/rooms.rs | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 955489b4..c885c960 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -1947,9 +1947,10 @@ impl Rooms { Ok(_) => {} Err(e) => { return match e { - ruma::signatures::Error::PduSize => { - Err(Error::BadRequest(ErrorKind::TooLarge, "Message is to long")) - } + ruma::signatures::Error::PduSize => Err(Error::BadRequest( + ErrorKind::TooLarge, + "Message is too long", + )), _ => Err(Error::BadRequest( ErrorKind::Unknown, "Signing event failed", From efe9d5000e1f85e21477e3a6345d2c756ced6c3c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jan=20Christian=20Gr=C3=BCnhage?= Date: Thu, 14 Apr 2022 16:42:08 +0200 Subject: [PATCH 272/445] enable FedDest doc-test Doc rendering is exactly as before, but it now actually tests the code --- src/server_server.rs | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/src/server_server.rs b/src/server_server.rs index 67ad3691..a227f57c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -79,12 +79,16 @@ use tracing::{debug, error, info, trace, warn}; /// was no port specified to construct a SocketAddr with. /// /// # Examples: -/// ```rust,ignore +/// ```rust +/// # use conduit::server_server::FedDest; +/// # fn main() -> Result<(), std::net::AddrParseError> { /// FedDest::Literal("198.51.100.3:8448".parse()?); /// FedDest::Literal("[2001:db8::4:5]:443".parse()?); /// FedDest::Named("matrix.example.org".to_owned(), "".to_owned()); /// FedDest::Named("matrix.example.org".to_owned(), ":8448".to_owned()); /// FedDest::Named("198.51.100.5".to_owned(), "".to_owned()); +/// # Ok(()) +/// # } /// ``` #[derive(Clone, Debug, PartialEq)] pub enum FedDest { From 23f29d1bda537f8c67d9e6c90a1650c0b8f1d13a Mon Sep 17 00:00:00 2001 From: rmsthebest Date: Sun, 17 Apr 2022 23:08:17 +0000 Subject: [PATCH 273/445] Added Caddy to the web proxy examples --- DEPLOY.md | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/DEPLOY.md b/DEPLOY.md index a711cc90..99ba6410 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -156,7 +156,7 @@ sudo chmod 700 /var/lib/matrix-conduit/ ## Setting up the Reverse Proxy -This depends on whether you use Apache, Nginx or another web server. +This depends on whether you use Apache, Caddy, Nginx or another web server. 
### Apache @@ -182,6 +182,19 @@ ProxyPassReverse /_matrix/ http://127.0.0.1:6167/_matrix/ $ sudo systemctl reload apache2 ``` +### Caddy +Create `/etc/caddy/conf.d/conduit_caddyfile` and enter this (substitute for your server name). +```caddy +your.server.name, your.server.name:8448 { + reverse_proxy /_matrix/* 127.0.0.1:6167 +} +``` +That's it! Just start or enable the service and you're set. +```bash +$ sudo systemctl enable caddy +``` + + ### Nginx If you use Nginx and not Apache, add the following server section inside the http section of `/etc/nginx/nginx.conf` @@ -216,6 +229,8 @@ $ sudo systemctl reload nginx ## SSL Certificate +If you chose Caddy as your web proxy SSL certificates are handled automatically and you can skip this step. + The easiest way to get an SSL certificate, if you don't have one already, is to install `certbot` and run this: ```bash From bb033fe02a64da716f82cf64fc84e0c0b7622322 Mon Sep 17 00:00:00 2001 From: = <11652273+rmsthebest@users.noreply.github.com> Date: Sun, 1 May 2022 17:49:02 +0200 Subject: [PATCH 274/445] added a command to the admin bot to create a new user, even with registration disabled --- src/client_server/mod.rs | 1 + src/database/admin.rs | 65 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 66 insertions(+) diff --git a/src/client_server/mod.rs b/src/client_server/mod.rs index a7241b0d..65b7a100 100644 --- a/src/client_server/mod.rs +++ b/src/client_server/mod.rs @@ -65,3 +65,4 @@ pub use voip::*; pub const DEVICE_ID_LENGTH: usize = 10; pub const TOKEN_LENGTH: usize = 256; pub const SESSION_ID_LENGTH: usize = 256; +pub const AUTO_GEN_PASSWORD_LENGTH: usize = 15; diff --git a/src/database/admin.rs b/src/database/admin.rs index 432bc3a6..dcf09ebc 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -6,6 +6,7 @@ use std::{ }; use crate::{ + client_server::AUTO_GEN_PASSWORD_LENGTH, error::{Error, Result}, pdu::PduBuilder, server_server, utils, @@ -268,6 +269,13 @@ enum AdminCommand { /// Username of the user for whom the password should be reset username: String, }, + /// Create a new user + CreateUser { + /// Username of the new user + username: String, + /// Password of the new user, if unspecified one is generated + password: Option, + }, } fn process_admin_command( @@ -480,6 +488,63 @@ fn process_admin_command( )), } } + AdminCommand::CreateUser { username, password } => { + let password = password.unwrap_or(utils::random_string(AUTO_GEN_PASSWORD_LENGTH)); + // Validate user id + let user_id = match UserId::parse_with_server_name( + username.as_str().to_lowercase(), + db.globals.server_name(), + ) { + Ok(id) => id, + Err(e) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "The supplied username is not a valid username: {}", + e + ))) + } + }; + if user_id.is_historical() { + return Ok(RoomMessageEventContent::text_plain(format!( + "userid {user_id} is not allowed due to historical" + ))); + } + if db.users.exists(&user_id)? 
{ + return Ok(RoomMessageEventContent::text_plain(format!( + "userid {user_id} already exists" + ))); + } + // Create user + db.users.create(&user_id, Some(password.as_str()))?; + + // Default to pretty displayname + let displayname = format!("{} ⚡️", user_id.localpart()); + db.users + .set_displayname(&user_id, Some(displayname.clone()))?; + + // Initial account data + db.account_data.update( + None, + &user_id, + ruma::events::GlobalAccountDataEventType::PushRules + .to_string() + .into(), + &ruma::events::push_rules::PushRulesEvent { + content: ruma::events::push_rules::PushRulesEventContent { + global: ruma::push::Ruleset::server_default(&user_id), + }, + }, + &db.globals, + )?; + + // we dont add a device since we're not the user, just the creator + + db.flush()?; + + // Inhibit login does not work for guests + RoomMessageEventContent::text_plain(format!( + "Created user with user_id: {user_id} and password: {password}" + )) + } }; Ok(reply_message_content) From 8392809eb1ce86b715ab48444cee9104288bb204 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Tue, 10 May 2022 07:26:19 +0000 Subject: [PATCH 275/445] Adjust some files to the AUR patches --- conduit-example.toml | 7 +++++++ debian/matrix-conduit.service | 1 + 2 files changed, 8 insertions(+) diff --git a/conduit-example.toml b/conduit-example.toml index 23c18446..362f7e7e 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -1,3 +1,10 @@ +# ============================================================================= +# This is the official example config for Conduit. +# If you use it for your server, you will need to adjust it to your own needs. +# At the very least, change the server_name field! +# ============================================================================= + + [global] # The server_name is the pretty name of this server. It is used as a suffix for user # and room ids. 
Examples: matrix.org, conduit.rs diff --git a/debian/matrix-conduit.service b/debian/matrix-conduit.service index 7c12d1a7..299f2680 100644 --- a/debian/matrix-conduit.service +++ b/debian/matrix-conduit.service @@ -3,6 +3,7 @@ Description=Conduit Matrix homeserver After=network.target [Service] +DynamicUser=yes User=_matrix-conduit Group=_matrix-conduit Type=simple From d9782c508a6984b808c80abdbdb3579de4cda181 Mon Sep 17 00:00:00 2001 From: Dietrich Date: Mon, 13 Jun 2022 20:03:30 +0200 Subject: [PATCH 276/445] rust-analyzer-extension moved to rust-lang The recommended extension id could not be found as rust-analyzer now has the id `rust-lang.rust-analyzer` --- .vscode/extensions.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.vscode/extensions.json b/.vscode/extensions.json index 7963e9d4..037f20d7 100644 --- a/.vscode/extensions.json +++ b/.vscode/extensions.json @@ -1,6 +1,6 @@ { "recommendations": [ - "matklad.rust-analyzer", + "rust-lang.rust-analyzer", "bungcip.better-toml", "ms-azuretools.vscode-docker", "eamodio.gitlens", From ae8e143fe90b95179ed54668e171d07530b1b162 Mon Sep 17 00:00:00 2001 From: Dietrich Date: Mon, 13 Jun 2022 20:08:18 +0200 Subject: [PATCH 277/445] Add a section to Ports and forwarding --- DEPLOY.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/DEPLOY.md b/DEPLOY.md index a711cc90..b77dd29c 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -58,6 +58,12 @@ In Debian you can use this command to create a Conduit user: sudo adduser --system conduit --no-create-home ``` +## Forwarding Ports in the firewall or the router + +Conduit uses the ports 443 and 8448 both of which need to be open in the firewall. + +If conduit runs behind a router or in a container and has a different public IP address than the host system these public Ports need to be forwarded directly or indirectly to the port mentioned in the config. + ## Setting up a systemd service Now we'll set up a systemd service for Conduit, so it's easy to start/stop Conduit and set it to autostart when your From 58d784aa29b1602a872b1a287eab8c3e59375dce Mon Sep 17 00:00:00 2001 From: Dietrich Date: Mon, 13 Jun 2022 20:23:08 +0200 Subject: [PATCH 278/445] Adding a hint to closed ports in the testing section --- DEPLOY.md | 1 + 1 file changed, 1 insertion(+) diff --git a/DEPLOY.md b/DEPLOY.md index b77dd29c..21fcadf3 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -254,6 +254,7 @@ $ curl https://your.server.name:8448/_matrix/client/versions ``` - To check if your server can talk with other homeservers, you can use the [Matrix Federation Tester](https://federationtester.matrix.org/) + If you can register but cannot join federated rooms check your config again and also check if the port 8448 is open and forwarded correctly. # What's next? From bd3f9e0dbeca8c739741d5d8060401b75dfc1560 Mon Sep 17 00:00:00 2001 From: Dietrich Date: Mon, 13 Jun 2022 20:45:12 +0200 Subject: [PATCH 279/445] Fix spelling. --- DEPLOY.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/DEPLOY.md b/DEPLOY.md index 21fcadf3..7dc25db5 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -58,11 +58,11 @@ In Debian you can use this command to create a Conduit user: sudo adduser --system conduit --no-create-home ``` -## Forwarding Ports in the firewall or the router +## Forwarding ports in the firewall or the router Conduit uses the ports 443 and 8448 both of which need to be open in the firewall. 
-If conduit runs behind a router or in a container and has a different public IP address than the host system these public Ports need to be forwarded directly or indirectly to the port mentioned in the config. +If Conduit runs behind a router or in a container and has a different public IP address than the host system these public ports need to be forwarded directly or indirectly to the port mentioned in the config. ## Setting up a systemd service @@ -253,7 +253,7 @@ $ curl https://your.server.name/_matrix/client/versions $ curl https://your.server.name:8448/_matrix/client/versions ``` -- To check if your server can talk with other homeservers, you can use the [Matrix Federation Tester](https://federationtester.matrix.org/) +- To check if your server can talk with other homeservers, you can use the [Matrix Federation Tester](https://federationtester.matrix.org/). If you can register but cannot join federated rooms check your config again and also check if the port 8448 is open and forwarded correctly. # What's next? From 8a63a2cc6848c8c8b27c7f914d84c4cf37eb918b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Dobo=C5=A1?= Date: Wed, 15 Jun 2022 13:07:07 +0000 Subject: [PATCH 280/445] Fix FluffyChat Compatibility --- src/client_server/unversioned.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/client_server/unversioned.rs b/src/client_server/unversioned.rs index 294c7536..fd0277c6 100644 --- a/src/client_server/unversioned.rs +++ b/src/client_server/unversioned.rs @@ -18,7 +18,7 @@ pub async fn get_supported_versions_route( _body: Ruma, ) -> Result { let resp = get_supported_versions::Response { - versions: vec!["r0.5.0".to_owned(), "r0.6.0".to_owned()], + versions: vec!["r0.5.0".to_owned(), "r0.6.0".to_owned(), "v1.1".to_owned(), "v1.2".to_owned()], unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]), }; From b862283ed9549b1664c4f606cac9f869c5e884e5 Mon Sep 17 00:00:00 2001 From: Miepee Date: Thu, 16 Jun 2022 13:23:45 +0000 Subject: [PATCH 281/445] Mention different databse backends in DEPLOY.md --- DEPLOY.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/DEPLOY.md b/DEPLOY.md index 7dc25db5..930a5582 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -95,7 +95,8 @@ $ sudo systemctl daemon-reload ## Creating the Conduit configuration file Now we need to create the Conduit's config file in `/etc/matrix-conduit/conduit.toml`. Paste this in **and take a moment -to read it. You need to change at least the server name.** +to read it. You need to change at least the server name.** +You can also choose to use a different database backend, but right now only `rocksdb` and `sqlite` are recommended. 
```toml [global] From c3924b566b4b67dd2755a5e5877ab47f7d6041dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Sat, 18 Jun 2022 11:04:16 +0000 Subject: [PATCH 282/445] feat: if txn id exists in the db, skip the event --- src/client_server/to_device.rs | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/src/client_server/to_device.rs b/src/client_server/to_device.rs index 5f4ac583..51441dd4 100644 --- a/src/client_server/to_device.rs +++ b/src/client_server/to_device.rs @@ -20,17 +20,14 @@ pub async fn send_event_to_device_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_deref(); - // TODO: uncomment when https://github.com/vector-im/element-android/issues/3589 is solved // Check if this is a new transaction id - /* if db .transaction_ids .existing_txnid(sender_user, sender_device, &body.txn_id)? .is_some() { - return Ok(send_event_to_device::v3::Response.into()); + return Ok(send_event_to_device::v3::Response {}); } - */ for (target_user_id, map) in &body.messages { for (target_device_id_maybe, event) in map { From e03a2b86364a913e0d016b5962a3312f412e7597 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Sat, 18 Jun 2022 11:05:42 +0000 Subject: [PATCH 283/445] chore(docker): Bump base image to alpine 3.16.0 --- docker/ci-binaries-packaging.Dockerfile | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docker/ci-binaries-packaging.Dockerfile b/docker/ci-binaries-packaging.Dockerfile index 6964a02f..4c1199ed 100644 --- a/docker/ci-binaries-packaging.Dockerfile +++ b/docker/ci-binaries-packaging.Dockerfile @@ -7,8 +7,7 @@ # Credit's for the original Dockerfile: Weasy666. # --------------------------------------------------------------------------------------------------------- -FROM docker.io/alpine@sha256:b66bccf2e0cca8e5fb79f7d3c573dd76c4787d1d883f5afe6c9d136a260bba07 AS runner -# = alpine:3.15.3 +FROM docker.io/alpine:3.16.0@sha256:4ff3ca91275773af45cb4b0834e12b7eb47d1c18f770a0b151381cd227f4c253 AS runner # Standard port on which Conduit launches. 
From 84ec057f6e673e822bcc6e6693b59831602f41de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Radek=20St=C4=99pie=C5=84?= Date: Sat, 18 Jun 2022 11:13:37 +0000 Subject: [PATCH 284/445] Allow registration without username --- src/client_server/account.rs | 77 +++++++++++++++++------------------- 1 file changed, 36 insertions(+), 41 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 820e4f1e..984b1ba2 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -16,8 +16,10 @@ use ruma::{ uiaa::{AuthFlow, AuthType, UiaaInfo}, }, events::{ - room::member::{MembershipState, RoomMemberEventContent}, - room::message::RoomMessageEventContent, + room::{ + member::{MembershipState, RoomMemberEventContent}, + message::RoomMessageEventContent, + }, GlobalAccountDataEventType, RoomEventType, }, push, UserId, @@ -27,7 +29,7 @@ use tracing::{info, warn}; use register::RegistrationKind; -const GUEST_NAME_LENGTH: usize = 10; +const RANDOM_USER_ID_LENGTH: usize = 10; /// # `GET /_matrix/client/r0/register/available` /// @@ -95,38 +97,38 @@ pub async fn register_route( let is_guest = body.kind == RegistrationKind::Guest; - let mut missing_username = false; - - // Validate user id - let user_id = UserId::parse_with_server_name( - if is_guest { - utils::random_string(GUEST_NAME_LENGTH) - } else { - body.username.clone().unwrap_or_else(|| { - // If the user didn't send a username field, that means the client is just trying - // the get an UIAA error to see available flows - missing_username = true; - // Just give the user a random name. He won't be able to register with it anyway. - utils::random_string(GUEST_NAME_LENGTH) - }) + let user_id = match (&body.username, is_guest) { + (Some(username), false) => { + let proposed_user_id = + UserId::parse_with_server_name(username.to_lowercase(), db.globals.server_name()) + .ok() + .filter(|user_id| { + !user_id.is_historical() + && user_id.server_name() == db.globals.server_name() + }) + .ok_or(Error::BadRequest( + ErrorKind::InvalidUsername, + "Username is invalid.", + ))?; + if db.users.exists(&proposed_user_id)? { + return Err(Error::BadRequest( + ErrorKind::UserInUse, + "Desired user ID is already taken.", + )); + } + proposed_user_id } - .to_lowercase(), - db.globals.server_name(), - ) - .ok() - .filter(|user_id| !user_id.is_historical() && user_id.server_name() == db.globals.server_name()) - .ok_or(Error::BadRequest( - ErrorKind::InvalidUsername, - "Username is invalid.", - ))?; - - // Check if username is creative enough - if db.users.exists(&user_id)? { - return Err(Error::BadRequest( - ErrorKind::UserInUse, - "Desired user ID is already taken.", - )); - } + _ => loop { + let proposed_user_id = UserId::parse_with_server_name( + utils::random_string(RANDOM_USER_ID_LENGTH).to_lowercase(), + db.globals.server_name(), + ) + .unwrap(); + if !db.users.exists(&proposed_user_id)? 
{ + break proposed_user_id; + } + }, + }; // UIAA let mut uiaainfo = UiaaInfo { @@ -169,13 +171,6 @@ pub async fn register_route( } } - if missing_username { - return Err(Error::BadRequest( - ErrorKind::MissingParam, - "Missing username field.", - )); - } - let password = if is_guest { None } else { From 7239243163362ede719b05e60aa5a37ccba1766b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Radek=20St=C4=99pie=C5=84?= Date: Sat, 18 Jun 2022 11:17:09 +0000 Subject: [PATCH 285/445] Hide users from user directory if they are only in private rooms and they don't share a room --- src/client_server/user_directory.rs | 46 +++++++++++++++++++++++++++-- tests/sytest/sytest-whitelist | 3 ++ 2 files changed, 46 insertions(+), 3 deletions(-) diff --git a/src/client_server/user_directory.rs b/src/client_server/user_directory.rs index 7c0bcc16..349c1399 100644 --- a/src/client_server/user_directory.rs +++ b/src/client_server/user_directory.rs @@ -1,15 +1,23 @@ use crate::{database::DatabaseGuard, Result, Ruma}; -use ruma::api::client::user_directory::search_users; +use ruma::{ + api::client::user_directory::search_users, + events::{ + room::join_rules::{JoinRule, RoomJoinRulesEventContent}, + StateEventType, + }, +}; /// # `POST /_matrix/client/r0/user_directory/search` /// /// Searches all known users for a match. /// -/// - TODO: Hide users that are not in any public rooms? +/// - Hides any local users that aren't in any public rooms (i.e. those that have the join rule set to public) +/// and don't share a room with the sender pub async fn search_users_route( db: DatabaseGuard, body: Ruma, ) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let limit = u64::from(body.limit) as usize; let mut users = db.users.iter().filter_map(|user_id| { @@ -41,7 +49,39 @@ pub async fn search_users_route( return None; } - Some(user) + let user_is_in_public_rooms = + db.rooms + .rooms_joined(&user_id) + .filter_map(|r| r.ok()) + .any(|room| { + db.rooms + .room_state_get(&room, &StateEventType::RoomJoinRules, "") + .map_or(false, |event| { + event.map_or(false, |event| { + serde_json::from_str(event.content.get()) + .map_or(false, |r: RoomJoinRulesEventContent| { + r.join_rule == JoinRule::Public + }) + }) + }) + }); + + if user_is_in_public_rooms { + return Some(user); + } + + let user_is_in_shared_rooms = db + .rooms + .get_shared_rooms(vec![sender_user.clone(), user_id.clone()]) + .ok()? 
+ .next() + .is_some(); + + if user_is_in_shared_rooms { + return Some(user); + } + + None }); let results = users.by_ref().take(limit).collect(); diff --git a/tests/sytest/sytest-whitelist b/tests/sytest/sytest-whitelist index 5afc3fd9..1c969dba 100644 --- a/tests/sytest/sytest-whitelist +++ b/tests/sytest/sytest-whitelist @@ -445,6 +445,9 @@ Typing notifications don't leak Uninvited users cannot join the room Unprivileged users can set m.room.topic if it only needs level 0 User appears in user directory +User in private room doesn't appear in user directory +User joining then leaving public room appears and dissappears from directory +User in shared private room does appear in user directory until leave User can create and send/receive messages in a room with version 1 User can create and send/receive messages in a room with version 2 User can create and send/receive messages in a room with version 3 From 722e553c6edea297bd44e7a5e715a30496d34faa Mon Sep 17 00:00:00 2001 From: Jim Date: Sat, 18 Jun 2022 14:47:32 +0000 Subject: [PATCH 286/445] Remove rust version requirement from deploy.md --- DEPLOY.md | 1 - 1 file changed, 1 deletion(-) diff --git a/DEPLOY.md b/DEPLOY.md index ddf0aac5..f0990dcf 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -43,7 +43,6 @@ $ sudo apt install libclang-dev build-essential $ cargo build --release ``` -Note that this currently requires Rust 1.50. If you want to cross compile Conduit to another architecture, read the [Cross-Compile Guide](cross/README.md). From 9b898248c7cd5c060fd806db98068c6298f6aac5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 30 May 2022 12:58:43 +0200 Subject: [PATCH 287/445] feat: more admin commands, better logging --- Cargo.toml | 6 +- src/client_server/unversioned.rs | 7 +- src/database.rs | 2 + src/database/admin.rs | 56 +++++++++++++++ src/database/globals.rs | 4 ++ src/database/rooms.rs | 18 +++++ src/ruma_wrapper/axum.rs | 2 +- src/server_server.rs | 119 ++++++++++++++++++++++--------- 8 files changed, 177 insertions(+), 37 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 64b7a233..10be7501 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -30,7 +30,7 @@ tokio = { version = "1.11.0", features = ["fs", "macros", "signal", "sync"] } # Used for storing data permanently sled = { version = "0.34.6", features = ["compression", "no_metrics"], optional = true } #sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] } -persy = { version = "1.2" , optional = true, features=["background_ops"] } +persy = { version = "1.2" , optional = true, features = ["background_ops"] } # Used for the http request / response body type for Ruma endpoints used with reqwest bytes = "1.1.0" @@ -64,7 +64,7 @@ regex = "1.5.4" # jwt jsonwebtokens jsonwebtoken = "7.2.0" # Performance measurements -tracing = { version = "0.1.26", features = ["release_max_level_warn"] } +tracing = { version = "0.1.26", features = [] } tracing-subscriber = "0.2.20" tracing-flame = "0.1.0" opentelemetry = { version = "0.16.0", features = ["rt-tokio"] } @@ -76,7 +76,7 @@ crossbeam = { version = "0.8.1", optional = true } num_cpus = "1.13.0" threadpool = "1.8.1" heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true } -rocksdb = { version = "0.17.0", default-features = false, features = ["multi-threaded-cf", "zstd"], optional = true } +rocksdb = { version = "0.17.0", default-features = true, features = 
["multi-threaded-cf", "zstd"], optional = true } thread_local = "1.1.3" # used for TURN server authentication diff --git a/src/client_server/unversioned.rs b/src/client_server/unversioned.rs index fd0277c6..8a5c3d25 100644 --- a/src/client_server/unversioned.rs +++ b/src/client_server/unversioned.rs @@ -18,7 +18,12 @@ pub async fn get_supported_versions_route( _body: Ruma, ) -> Result { let resp = get_supported_versions::Response { - versions: vec!["r0.5.0".to_owned(), "r0.6.0".to_owned(), "v1.1".to_owned(), "v1.2".to_owned()], + versions: vec![ + "r0.5.0".to_owned(), + "r0.6.0".to_owned(), + "v1.1".to_owned(), + "v1.2".to_owned(), + ], unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]), }; diff --git a/src/database.rs b/src/database.rs index 4a03f18c..a0937c29 100644 --- a/src/database.rs +++ b/src/database.rs @@ -213,6 +213,8 @@ impl Database { userroomid_leftstate: builder.open_tree("userroomid_leftstate")?, roomuserid_leftcount: builder.open_tree("roomuserid_leftcount")?, + disabledroomids: builder.open_tree("disabledroomids")?, + lazyloadedids: builder.open_tree("lazyloadedids")?, userroomid_notificationcount: builder.open_tree("userroomid_notificationcount")?, diff --git a/src/database/admin.rs b/src/database/admin.rs index dcf09ebc..c6ef9a64 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -231,9 +231,15 @@ enum AdminCommand { /// List all the currently registered appservices ListAppservices, + /// List all rooms the server knows about + ListRooms, + /// List users in the database ListLocalUsers, + /// List all rooms we are currently handling an incoming pdu from + IncomingFederation, + /// Get the auth_chain of a PDU GetAuthChain { /// An event ID (the $ character followed by the base64 reference hash) @@ -269,6 +275,7 @@ enum AdminCommand { /// Username of the user for whom the password should be reset username: String, }, + /// Create a new user CreateUser { /// Username of the new user @@ -276,6 +283,11 @@ enum AdminCommand { /// Password of the new user, if unspecified one is generated password: Option, }, + + /// Disables incoming federation handling for a room. + DisableRoom { room_id: Box }, + /// Enables incoming federation handling for a room again. 
+ EnableRoom { room_id: Box }, } fn process_admin_command( @@ -336,6 +348,26 @@ fn process_admin_command( RoomMessageEventContent::text_plain("Failed to get appservices.") } } + AdminCommand::ListRooms => { + let room_ids = db.rooms.iter_ids(); + let output = format!( + "Rooms:\n{}", + room_ids + .filter_map(|r| r.ok()) + .map(|id| id.to_string() + + "\tMembers: " + + &db + .rooms + .room_joined_count(&id) + .ok() + .flatten() + .unwrap_or(0) + .to_string()) + .collect::>() + .join("\n") + ); + RoomMessageEventContent::text_plain(output) + } AdminCommand::ListLocalUsers => match db.users.list_local_users() { Ok(users) => { let mut msg: String = format!("Found {} local user account(s):\n", users.len()); @@ -344,6 +376,22 @@ fn process_admin_command( } Err(e) => RoomMessageEventContent::text_plain(e.to_string()), }, + AdminCommand::IncomingFederation => { + let map = db.globals.roomid_federationhandletime.read().unwrap(); + let mut msg: String = format!("Handling {} incoming pdus:\n", map.len()); + + for (r, (e, i)) in map.iter() { + let elapsed = i.elapsed(); + msg += &format!( + "{} {}: {}m{}s\n", + r, + e, + elapsed.as_secs() / 60, + elapsed.as_secs() % 60 + ); + } + RoomMessageEventContent::text_plain(&msg) + } AdminCommand::GetAuthChain { event_id } => { let event_id = Arc::::from(event_id); if let Some(event) = db.rooms.get_pdu_json(&event_id)? { @@ -545,6 +593,14 @@ fn process_admin_command( "Created user with user_id: {user_id} and password: {password}" )) } + AdminCommand::DisableRoom { room_id } => { + db.rooms.disabledroomids.insert(room_id.as_bytes(), &[])?; + RoomMessageEventContent::text_plain("Room disabled.") + } + AdminCommand::EnableRoom { room_id } => { + db.rooms.disabledroomids.remove(room_id.as_bytes())?; + RoomMessageEventContent::text_plain("Room enabled.") + } }; Ok(reply_message_content) diff --git a/src/database/globals.rs b/src/database/globals.rs index d363e933..7e09128e 100644 --- a/src/database/globals.rs +++ b/src/database/globals.rs @@ -52,6 +52,8 @@ pub struct Globals { pub roomid_mutex_insert: RwLock, Arc>>>, pub roomid_mutex_state: RwLock, Arc>>>, pub roomid_mutex_federation: RwLock, Arc>>>, // this lock will be held longer + pub roomid_federationhandletime: RwLock, (Box, Instant)>>, + pub stateres_mutex: Arc>, pub rotate: RotationHandler, } @@ -183,6 +185,8 @@ impl Globals { roomid_mutex_state: RwLock::new(HashMap::new()), roomid_mutex_insert: RwLock::new(HashMap::new()), roomid_mutex_federation: RwLock::new(HashMap::new()), + roomid_federationhandletime: RwLock::new(HashMap::new()), + stateres_mutex: Arc::new(Mutex::new(())), sync_receivers: RwLock::new(HashMap::new()), rotate: RotationHandler::new(), }; diff --git a/src/database/rooms.rs b/src/database/rooms.rs index c885c960..2c1b8f44 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -76,6 +76,8 @@ pub struct Rooms { pub(super) userroomid_leftstate: Arc, pub(super) roomuserid_leftcount: Arc, + pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled + pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 @@ -2858,6 +2860,18 @@ impl Rooms { Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) } + #[tracing::instrument(skip(self))] + pub fn iter_ids(&self) -> impl Iterator>> + '_ { + self.roomid_shortroomid.iter().map(|(bytes, _)| { + RoomId::parse( + utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Room ID in publicroomids 
is invalid unicode.") + })?, + ) + .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) + }) + } + #[tracing::instrument(skip(self))] pub fn public_rooms(&self) -> impl Iterator>> + '_ { self.publicroomids.iter().map(|(bytes, _)| { @@ -3140,6 +3154,10 @@ impl Rooms { .transpose() } + pub fn is_disabled(&self, room_id: &RoomId) -> Result { + Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) + } + /// Returns an iterator over all rooms this user joined. #[tracing::instrument(skip(self))] pub fn rooms_joined<'a>( diff --git a/src/ruma_wrapper/axum.rs b/src/ruma_wrapper/axum.rs index fdb140fe..45e9d9a8 100644 --- a/src/ruma_wrapper/axum.rs +++ b/src/ruma_wrapper/axum.rs @@ -338,7 +338,7 @@ impl Credentials for XMatrix { "origin" => origin = Some(value.try_into().ok()?), "key" => key = Some(value.to_owned()), "sig" => sig = Some(value.to_owned()), - _ => warn!( + _ => debug!( "Unexpected field `{}` in X-Matrix Authorization header", name ), diff --git a/src/server_server.rs b/src/server_server.rs index a227f57c..7b08cf9b 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -768,7 +768,7 @@ pub async fn send_transaction_message_route( )?; } else { // TODO fetch missing events - debug!("No known event ids in read receipt: {:?}", user_updates); + info!("No known event ids in read receipt: {:?}", user_updates); } } } @@ -926,6 +926,13 @@ pub(crate) async fn handle_incoming_pdu<'a>( } } + match db.rooms.is_disabled(room_id) { + Ok(false) => {} + _ => { + return Err("Federation of this room is currently disabled on this server.".to_owned()); + } + } + // 1. Skip the PDU if we already have it as a timeline event if let Ok(Some(pdu_id)) = db.rooms.get_pdu_id(event_id) { return Ok(Some(pdu_id.to_vec())); @@ -1038,6 +1045,15 @@ pub(crate) async fn handle_incoming_pdu<'a>( let mut errors = 0; for prev_id in dbg!(sorted) { + match db.rooms.is_disabled(room_id) { + Ok(false) => {} + _ => { + return Err( + "Federation of this room is currently disabled on this server.".to_owned(), + ); + } + } + if errors >= 5 { break; } @@ -1047,6 +1063,11 @@ pub(crate) async fn handle_incoming_pdu<'a>( } let start_time = Instant::now(); + db.globals + .roomid_federationhandletime + .write() + .unwrap() + .insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time)); let event_id = pdu.event_id.clone(); if let Err(e) = upgrade_outlier_to_timeline_pdu( pdu, @@ -1063,6 +1084,11 @@ pub(crate) async fn handle_incoming_pdu<'a>( warn!("Prev event {} failed: {}", event_id, e); } let elapsed = start_time.elapsed(); + db.globals + .roomid_federationhandletime + .write() + .unwrap() + .remove(&room_id.to_owned()); warn!( "Handling prev event {} took {}m{}s", event_id, @@ -1072,7 +1098,13 @@ pub(crate) async fn handle_incoming_pdu<'a>( } } - upgrade_outlier_to_timeline_pdu( + let start_time = Instant::now(); + db.globals + .roomid_federationhandletime + .write() + .unwrap() + .insert(room_id.to_owned(), (event_id.to_owned(), start_time)); + let r = upgrade_outlier_to_timeline_pdu( incoming_pdu, val, &create_event, @@ -1081,10 +1113,17 @@ pub(crate) async fn handle_incoming_pdu<'a>( room_id, pub_key_map, ) - .await + .await; + db.globals + .roomid_federationhandletime + .write() + .unwrap() + .remove(&room_id.to_owned()); + + r } -#[tracing::instrument(skip_all)] +#[tracing::instrument(skip(create_event, value, db, pub_key_map))] fn handle_outlier_pdu<'a>( origin: &'a ServerName, create_event: &'a PduEvent, @@ -1166,7 +1205,7 @@ fn handle_outlier_pdu<'a>( .await; // 6. 
Reject "due to auth events" if the event doesn't pass auth based on the auth events - debug!( + info!( "Auth check for {} based on auth events", incoming_pdu.event_id ); @@ -1221,19 +1260,19 @@ fn handle_outlier_pdu<'a>( return Err("Event has failed auth check with auth events.".to_owned()); } - debug!("Validation successful."); + info!("Validation successful."); // 7. Persist the event as an outlier. db.rooms .add_pdu_outlier(&incoming_pdu.event_id, &val) .map_err(|_| "Failed to add pdu as outlier.".to_owned())?; - debug!("Added pdu as outlier."); + info!("Added pdu as outlier."); Ok((Arc::new(incoming_pdu), val)) }) } -#[tracing::instrument(skip_all)] +#[tracing::instrument(skip(incoming_pdu, val, create_event, db, pub_key_map))] async fn upgrade_outlier_to_timeline_pdu( incoming_pdu: Arc, val: BTreeMap, @@ -1255,6 +1294,8 @@ async fn upgrade_outlier_to_timeline_pdu( return Err("Event has been soft failed".into()); } + info!("Upgrading {} to timeline pdu", incoming_pdu.event_id); + let create_event_content: RoomCreateEventContent = serde_json::from_str(create_event.content.get()).map_err(|e| { warn!("Invalid create event: {}", e); @@ -1270,7 +1311,7 @@ async fn upgrade_outlier_to_timeline_pdu( // TODO: if we know the prev_events of the incoming event we can avoid the request and build // the state from a known point and resolve if > 1 prev_event - debug!("Requesting state at event."); + info!("Requesting state at event"); let mut state_at_incoming_event = None; if incoming_pdu.prev_events.len() == 1 { @@ -1284,7 +1325,7 @@ async fn upgrade_outlier_to_timeline_pdu( prev_event_sstatehash.map(|shortstatehash| db.rooms.state_full_ids(shortstatehash)); if let Some(Ok(mut state)) = state { - warn!("Using cached state"); + info!("Using cached state"); let prev_pdu = db.rooms.get_pdu(prev_event).ok().flatten().ok_or_else(|| { "Could not find prev event, but we know the state.".to_owned() @@ -1307,7 +1348,7 @@ async fn upgrade_outlier_to_timeline_pdu( state_at_incoming_event = Some(state); } } else { - warn!("Calculating state at event using state res"); + info!("Calculating state at event using state res"); let mut extremity_sstatehashes = HashMap::new(); let mut okay = true; @@ -1375,18 +1416,18 @@ async fn upgrade_outlier_to_timeline_pdu( fork_states.push(state); } - state_at_incoming_event = match state_res::resolve( - room_version_id, - &fork_states, - auth_chain_sets, - |id| { - let res = db.rooms.get_pdu(id); - if let Err(e) = &res { - error!("LOOK AT ME Failed to fetch event: {}", e); - } - res.ok().flatten() - }, - ) { + let lock = db.globals.stateres_mutex.lock(); + + let result = state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| { + let res = db.rooms.get_pdu(id); + if let Err(e) = &res { + error!("LOOK AT ME Failed to fetch event: {}", e); + } + res.ok().flatten() + }); + drop(lock); + + state_at_incoming_event = match result { Ok(new_state) => Some( new_state .into_iter() @@ -1407,12 +1448,12 @@ async fn upgrade_outlier_to_timeline_pdu( warn!("State resolution on prev events failed, either an event could not be found or deserialization: {}", e); None } - }; + } } } if state_at_incoming_event.is_none() { - warn!("Calling /state_ids"); + info!("Calling /state_ids"); // Call /state_ids to find out what the state at this pdu is. 
We trust the server's // response to some extend, but we still do a lot of checks on the events match db @@ -1428,7 +1469,7 @@ async fn upgrade_outlier_to_timeline_pdu( .await { Ok(res) => { - warn!("Fetching state events at event."); + info!("Fetching state events at event."); let state_vec = fetch_and_handle_outliers( db, origin, @@ -1513,7 +1554,7 @@ async fn upgrade_outlier_to_timeline_pdu( if !check_result { return Err("Event has failed auth check with state at the event.".into()); } - debug!("Auth check succeeded."); + info!("Auth check succeeded."); // We start looking at current room state now, so lets lock the room @@ -1576,7 +1617,7 @@ async fn upgrade_outlier_to_timeline_pdu( .collect::>()?; // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it - debug!("starting soft fail auth check"); + info!("Starting soft fail auth check"); let soft_fail = !state_res::event_auth::auth_check( &room_version, @@ -1610,8 +1651,10 @@ async fn upgrade_outlier_to_timeline_pdu( } if incoming_pdu.state_key.is_some() { + info!("Preparing for stateres to derive new room state"); let mut extremity_sstatehashes = HashMap::new(); + info!("Loading extremities"); for id in dbg!(&extremities) { match db .rooms @@ -1671,6 +1714,7 @@ async fn upgrade_outlier_to_timeline_pdu( let new_room_state = if fork_states.is_empty() { return Err("State is empty.".to_owned()); } else if fork_states.iter().skip(1).all(|f| &fork_states[0] == f) { + info!("State resolution trivial"); // There was only one state, so it has to be the room's current state (because that is // always included) fork_states[0] @@ -1682,6 +1726,7 @@ async fn upgrade_outlier_to_timeline_pdu( }) .collect::>()? } else { + info!("Loading auth chains"); // We do need to force an update to this room's state update_state = true; @@ -1698,6 +1743,8 @@ async fn upgrade_outlier_to_timeline_pdu( ); } + info!("Loading fork states"); + let fork_states: Vec<_> = fork_states .into_iter() .map(|map| { @@ -1715,6 +1762,9 @@ async fn upgrade_outlier_to_timeline_pdu( }) .collect(); + info!("Resolving state"); + + let lock = db.globals.stateres_mutex.lock(); let state = match state_res::resolve( room_version_id, &fork_states, @@ -1733,6 +1783,10 @@ async fn upgrade_outlier_to_timeline_pdu( } }; + drop(lock); + + info!("State resolution done. Compressing state"); + state .into_iter() .map(|((event_type, state_key), event_id)| { @@ -1753,13 +1807,14 @@ async fn upgrade_outlier_to_timeline_pdu( // Set the new room state to the resolved state if update_state { + info!("Forcing new room state"); db.rooms .force_state(room_id, new_room_state, db) .map_err(|_| "Failed to set new room state.".to_owned())?; } - debug!("Updated resolved state"); } + info!("Appending pdu to timeline"); extremities.insert(incoming_pdu.event_id.clone()); // Now that the event has passed all auth it is added into the timeline. 
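// The `stateres_mutex` taken around `state_res::resolve` in the hunk above is
// just a shared `Mutex<()>` that is held for the duration of the resolution,
// so at most one CPU-heavy state resolution runs at a time. A minimal
// standalone sketch of that pattern (illustrative only; `Globals` and
// `heavy_state_resolution` are stand-ins rather than the real Conduit types):

use std::sync::{Arc, Mutex};

struct Globals {
    stateres_mutex: Arc<Mutex<()>>,
}

fn resolve_serialized(globals: &Globals) {
    // Take the guard, do the expensive work, then drop the guard explicitly
    // so resolutions for other rooms can proceed.
    let lock = globals.stateres_mutex.lock().expect("mutex not poisoned");
    heavy_state_resolution();
    drop(lock);
}

fn heavy_state_resolution() {
    // Placeholder for the real state_res::resolve call.
}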
@@ -1780,7 +1835,7 @@ async fn upgrade_outlier_to_timeline_pdu( "Failed to add pdu to db.".to_owned() })?; - debug!("Appended incoming pdu."); + info!("Appended incoming pdu"); // Event has passed all auth/stateres checks drop(state_lock); @@ -1854,7 +1909,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( continue; } - warn!("Fetching {} over federation.", next_id); + info!("Fetching {} over federation.", next_id); match db .sending .send_federation_request( @@ -1865,7 +1920,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>( .await { Ok(res) => { - warn!("Got {} over federation", next_id); + info!("Got {} over federation", next_id); let (calculated_event_id, value) = match crate::pdu::gen_event_id_canonical_json(&res.pdu, &db) { Ok(t) => t, From 0bc03e90a11de7d68d4d17676c9122bc2c6953ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 18 Jun 2022 16:38:41 +0200 Subject: [PATCH 288/445] improvement: make more things async --- src/client_server/context.rs | 2 +- src/client_server/membership.rs | 68 +++++++++------- src/client_server/state.rs | 3 +- src/client_server/sync.rs | 102 ++++++++++++++---------- src/database/admin.rs | 12 +-- src/database/rooms.rs | 75 ++++++++--------- src/server_server.rs | 137 +++++++++++++++++++++++--------- 7 files changed, 244 insertions(+), 155 deletions(-) diff --git a/src/client_server/context.rs b/src/client_server/context.rs index de7aae93..e93f5a5b 100644 --- a/src/client_server/context.rs +++ b/src/client_server/context.rs @@ -137,7 +137,7 @@ pub async fn get_context_route( .expect("All rooms have state"), }; - let state_ids = db.rooms.state_full_ids(shortstatehash)?; + let state_ids = db.rooms.state_full_ids(shortstatehash).await?; let end_token = events_after .last() diff --git a/src/client_server/membership.rs b/src/client_server/membership.rs index 65107a3c..a1b616be 100644 --- a/src/client_server/membership.rs +++ b/src/client_server/membership.rs @@ -29,7 +29,7 @@ use ruma::{ }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ - collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, + collections::{hash_map::Entry, BTreeMap, HashMap}, iter, sync::{Arc, RwLock}, time::{Duration, Instant}, @@ -48,19 +48,20 @@ pub async fn join_room_by_id_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let mut servers: HashSet<_> = db - .rooms - .invite_state(sender_user, &body.room_id)? - .unwrap_or_default() - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); - - servers.insert(body.room_id.server_name().to_owned()); + let mut servers = Vec::new(); // There is no body.server_name for /roomId/join + servers.extend( + db.rooms + .invite_state(sender_user, &body.room_id)? 
+ .unwrap_or_default() + .iter() + .filter_map(|event| serde_json::from_str(event.json().get()).ok()) + .filter_map(|event: serde_json::Value| event.get("sender").cloned()) + .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) + .filter_map(|sender| UserId::parse(sender).ok()) + .map(|user| user.server_name().to_owned()), + ); + + servers.push(body.room_id.server_name().to_owned()); let ret = join_room_by_id_helper( &db, @@ -91,19 +92,20 @@ pub async fn join_room_by_id_or_alias_route( let (servers, room_id) = match Box::::try_from(body.room_id_or_alias) { Ok(room_id) => { - let mut servers: HashSet<_> = db - .rooms - .invite_state(sender_user, &room_id)? - .unwrap_or_default() - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); + let mut servers = body.server_name.clone(); + servers.extend( + db.rooms + .invite_state(sender_user, &room_id)? + .unwrap_or_default() + .iter() + .filter_map(|event| serde_json::from_str(event.json().get()).ok()) + .filter_map(|event: serde_json::Value| event.get("sender").cloned()) + .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) + .filter_map(|sender| UserId::parse(sender).ok()) + .map(|user| user.server_name().to_owned()), + ); - servers.insert(room_id.server_name().to_owned()); + servers.push(room_id.server_name().to_owned()); (servers, room_id) } Err(room_alias) => { @@ -413,7 +415,8 @@ pub async fn get_member_events_route( Ok(get_member_events::v3::Response { chunk: db .rooms - .room_state_full(&body.room_id)? + .room_state_full(&body.room_id) + .await? .iter() .filter(|(key, _)| key.0 == StateEventType::RoomMember) .map(|(_, pdu)| pdu.to_member_event().into()) @@ -462,7 +465,7 @@ async fn join_room_by_id_helper( db: &Database, sender_user: Option<&UserId>, room_id: &RoomId, - servers: &HashSet>, + servers: &[Box], _third_party_signed: Option<&IncomingThirdPartySigned>, ) -> Result { let sender_user = sender_user.expect("user is authenticated"); @@ -478,7 +481,7 @@ async fn join_room_by_id_helper( let state_lock = mutex_state.lock().await; // Ask a remote server if we don't have this room - if !db.rooms.exists(room_id)? && room_id.server_name() != db.globals.server_name() { + if !db.rooms.exists(room_id)? { let mut make_join_response_and_server = Err(Error::BadServerResponse( "No server available to assist in joining.", )); @@ -1032,6 +1035,13 @@ pub(crate) async fn invite_helper<'a>( return Ok(()); } + if !db.rooms.is_joined(sender_user, &room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You don't have permission to view this room.", + )); + } + let mutex_state = Arc::clone( db.globals .roomid_mutex_state diff --git a/src/client_server/state.rs b/src/client_server/state.rs index 50fe9b4f..4df953cf 100644 --- a/src/client_server/state.rs +++ b/src/client_server/state.rs @@ -124,7 +124,8 @@ pub async fn get_state_events_route( Ok(get_state_events::v3::Response { room_state: db .rooms - .room_state_full(&body.room_id)? + .room_state_full(&body.room_id) + .await? 
.values() .map(|pdu| pdu.to_state_event()) .collect(), diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs index d61e6894..0c294b7e 100644 --- a/src/client_server/sync.rs +++ b/src/client_server/sync.rs @@ -230,18 +230,20 @@ async fn sync_helper( for room_id in all_joined_rooms { let room_id = room_id?; - // Get and drop the lock to wait for remaining operations to finish - // This will make sure the we have all events until next_batch - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - drop(insert_lock); + { + // Get and drop the lock to wait for remaining operations to finish + // This will make sure the we have all events until next_batch + let mutex_insert = Arc::clone( + db.globals + .roomid_mutex_insert + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let insert_lock = mutex_insert.lock().unwrap(); + drop(insert_lock); + } let timeline_pdus; let limited; @@ -296,10 +298,12 @@ async fn sync_helper( // Database queries: - let current_shortstatehash = db - .rooms - .current_shortstatehash(&room_id)? - .expect("All rooms have state"); + let current_shortstatehash = if let Some(s) = db.rooms.current_shortstatehash(&room_id)? { + s + } else { + error!("Room {} has no state", room_id); + continue; + }; let since_shortstatehash = db.rooms.get_token_shortstatehash(&room_id, since)?; @@ -377,11 +381,12 @@ async fn sync_helper( let (joined_member_count, invited_member_count, heroes) = calculate_counts()?; - let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?; + let current_state_ids = db.rooms.state_full_ids(current_shortstatehash).await?; let mut state_events = Vec::new(); let mut lazy_loaded = HashSet::new(); + let mut i = 0; for (shortstatekey, id) in current_state_ids { let (event_type, state_key) = db.rooms.get_statekey_from_short(shortstatekey)?; @@ -394,6 +399,11 @@ async fn sync_helper( } }; state_events.push(pdu); + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } } else if !lazy_load_enabled || body.full_state || timeline_users.contains(&state_key) @@ -411,6 +421,11 @@ async fn sync_helper( lazy_loaded.insert(uid); } state_events.push(pdu); + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } } } @@ -462,8 +477,8 @@ async fn sync_helper( let mut lazy_loaded = HashSet::new(); if since_shortstatehash != current_shortstatehash { - let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?; - let since_state_ids = db.rooms.state_full_ids(since_shortstatehash)?; + let current_state_ids = db.rooms.state_full_ids(current_shortstatehash).await?; + let since_state_ids = db.rooms.state_full_ids(since_shortstatehash).await?; for (key, id) in current_state_ids { if body.full_state || since_state_ids.get(&key) != Some(&id) { @@ -490,6 +505,7 @@ async fn sync_helper( } state_events.push(pdu); + tokio::task::yield_now().await; } } } @@ -753,17 +769,19 @@ async fn sync_helper( for result in all_left_rooms { let (room_id, left_state_events) = result?; - // Get and drop the lock to wait for remaining operations to finish - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - drop(insert_lock); + { + // Get and drop the lock to wait for remaining operations to finish + let mutex_insert = Arc::clone( + db.globals + 
.roomid_mutex_insert + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let insert_lock = mutex_insert.lock().unwrap(); + drop(insert_lock); + } let left_count = db.rooms.get_left_count(&room_id, &sender_user)?; @@ -793,17 +811,19 @@ async fn sync_helper( for result in all_invited_rooms { let (room_id, invite_state_events) = result?; - // Get and drop the lock to wait for remaining operations to finish - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - drop(insert_lock); + { + // Get and drop the lock to wait for remaining operations to finish + let mutex_insert = Arc::clone( + db.globals + .roomid_mutex_insert + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let insert_lock = mutex_insert.lock().unwrap(); + drop(insert_lock); + } let invite_count = db.rooms.get_invite_count(&room_id, &sender_user)?; diff --git a/src/database/admin.rs b/src/database/admin.rs index c6ef9a64..3ed1a8a9 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -116,7 +116,7 @@ impl Admin { send_message(content, guard, &state_lock); } AdminRoomEvent::ProcessMessage(room_message) => { - let reply_message = process_admin_message(&*guard, room_message); + let reply_message = process_admin_message(&*guard, room_message).await; send_message(reply_message, guard, &state_lock); } @@ -143,7 +143,7 @@ impl Admin { } // Parse and process a message from the admin room -fn process_admin_message(db: &Database, room_message: String) -> RoomMessageEventContent { +async fn process_admin_message(db: &Database, room_message: String) -> RoomMessageEventContent { let mut lines = room_message.lines(); let command_line = lines.next().expect("each string has at least one line"); let body: Vec<_> = lines.collect(); @@ -161,7 +161,7 @@ fn process_admin_message(db: &Database, room_message: String) -> RoomMessageEven } }; - match process_admin_command(db, admin_command, body) { + match process_admin_command(db, admin_command, body).await { Ok(reply_message) => reply_message, Err(error) => { let markdown_message = format!( @@ -290,7 +290,7 @@ enum AdminCommand { EnableRoom { room_id: Box }, } -fn process_admin_command( +async fn process_admin_command( db: &Database, command: AdminCommand, body: Vec<&str>, @@ -404,7 +404,9 @@ fn process_admin_command( Error::bad_database("Invalid room id field in event in database") })?; let start = Instant::now(); - let count = server_server::get_auth_chain(room_id, vec![event_id], db)?.count(); + let count = server_server::get_auth_chain(room_id, vec![event_id], db) + .await? + .count(); let elapsed = start.elapsed(); RoomMessageEventContent::text_plain(format!( "Loaded auth chain with length {} in {:?}", diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 2c1b8f44..7b3b7506 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -144,20 +144,28 @@ impl Rooms { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. #[tracing::instrument(skip(self))] - pub fn state_full_ids(&self, shortstatehash: u64) -> Result>> { + pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { let full_state = self .load_shortstatehash_info(shortstatehash)? 
.pop() .expect("there is always one layer") .1; - full_state - .into_iter() - .map(|compressed| self.parse_compressed_state_event(compressed)) - .collect() + let mut result = BTreeMap::new(); + let mut i = 0; + for compressed in full_state.into_iter() { + let parsed = self.parse_compressed_state_event(compressed)?; + result.insert(parsed.0, parsed.1); + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + } + Ok(result) } #[tracing::instrument(skip(self))] - pub fn state_full( + pub async fn state_full( &self, shortstatehash: u64, ) -> Result>> { @@ -166,14 +174,13 @@ impl Rooms { .pop() .expect("there is always one layer") .1; - Ok(full_state - .into_iter() - .map(|compressed| self.parse_compressed_state_event(compressed)) - .filter_map(|r| r.ok()) - .map(|(_, eventid)| self.get_pdu(&eventid)) - .filter_map(|r| r.ok().flatten()) - .map(|pdu| { - Ok::<_, Error>(( + + let mut result = HashMap::new(); + let mut i = 0; + for compressed in full_state { + let (_, eventid) = self.parse_compressed_state_event(compressed)?; + if let Some(pdu) = self.get_pdu(&eventid)? { + result.insert( ( pdu.kind.to_string().into(), pdu.state_key @@ -182,10 +189,16 @@ impl Rooms { .clone(), ), pdu, - )) - }) - .filter_map(|r| r.ok()) - .collect()) + ); + } + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + } + + Ok(result) } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). @@ -228,7 +241,6 @@ impl Rooms { } /// Returns the state hash for this pdu. - #[tracing::instrument(skip(self))] pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { self.eventid_shorteventid .get(event_id.as_bytes())? @@ -531,7 +543,6 @@ impl Rooms { } } - #[tracing::instrument(skip(self, globals))] pub fn compress_state_event( &self, shortstatekey: u64, @@ -548,7 +559,6 @@ impl Rooms { } /// Returns shortstatekey, event id - #[tracing::instrument(skip(self, compressed_event))] pub fn parse_compressed_state_event( &self, compressed_event: CompressedStateEvent, @@ -707,7 +717,6 @@ impl Rooms { } /// Returns (shortstatehash, already_existed) - #[tracing::instrument(skip(self, globals))] fn get_or_create_shortstatehash( &self, state_hash: &StateHashId, @@ -728,7 +737,6 @@ impl Rooms { }) } - #[tracing::instrument(skip(self, globals))] pub fn get_or_create_shorteventid( &self, event_id: &EventId, @@ -759,7 +767,6 @@ impl Rooms { Ok(short) } - #[tracing::instrument(skip(self))] pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { self.roomid_shortroomid .get(room_id.as_bytes())? @@ -770,7 +777,6 @@ impl Rooms { .transpose() } - #[tracing::instrument(skip(self))] pub fn get_shortstatekey( &self, event_type: &StateEventType, @@ -808,7 +814,6 @@ impl Rooms { Ok(short) } - #[tracing::instrument(skip(self, globals))] pub fn get_or_create_shortroomid( &self, room_id: &RoomId, @@ -826,7 +831,6 @@ impl Rooms { }) } - #[tracing::instrument(skip(self, globals))] pub fn get_or_create_shortstatekey( &self, event_type: &StateEventType, @@ -867,7 +871,6 @@ impl Rooms { Ok(short) } - #[tracing::instrument(skip(self))] pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { if let Some(id) = self .shorteventid_cache @@ -896,7 +899,6 @@ impl Rooms { Ok(event_id) } - #[tracing::instrument(skip(self))] pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { if let Some(id) = self .shortstatekey_cache @@ -940,12 +942,12 @@ impl Rooms { /// Returns the full room state. 
#[tracing::instrument(skip(self))] - pub fn room_state_full( + pub async fn room_state_full( &self, room_id: &RoomId, ) -> Result>> { if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_full(current_shortstatehash) + self.state_full(current_shortstatehash).await } else { Ok(HashMap::new()) } @@ -982,14 +984,12 @@ impl Rooms { } /// Returns the `count` of this pdu's id. - #[tracing::instrument(skip(self))] pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) } /// Returns the `count` of this pdu's id. - #[tracing::instrument(skip(self))] pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? @@ -1018,7 +1018,6 @@ impl Rooms { } /// Returns the json of a pdu. - #[tracing::instrument(skip(self))] pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? @@ -1037,7 +1036,6 @@ impl Rooms { } /// Returns the json of a pdu. - #[tracing::instrument(skip(self))] pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { self.eventid_outlierpdu .get(event_id.as_bytes())? @@ -1048,7 +1046,6 @@ impl Rooms { } /// Returns the json of a pdu. - #[tracing::instrument(skip(self))] pub fn get_non_outlier_pdu_json( &self, event_id: &EventId, @@ -1068,7 +1065,6 @@ impl Rooms { } /// Returns the pdu's id. - #[tracing::instrument(skip(self))] pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { self.eventid_pduid.get(event_id.as_bytes()) } @@ -1076,7 +1072,6 @@ impl Rooms { /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - #[tracing::instrument(skip(self))] pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? @@ -1095,7 +1090,6 @@ impl Rooms { /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - #[tracing::instrument(skip(self))] pub fn get_pdu(&self, event_id: &EventId) -> Result>> { if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { return Ok(Some(Arc::clone(p))); @@ -1132,7 +1126,6 @@ impl Rooms { /// Returns the pdu. /// /// This does __NOT__ check the outliers `Tree`. - #[tracing::instrument(skip(self))] pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { Ok(Some( @@ -1143,7 +1136,6 @@ impl Rooms { } /// Returns the pdu as a `BTreeMap`. - #[tracing::instrument(skip(self))] pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { Ok(Some( @@ -1232,7 +1224,6 @@ impl Rooms { } /// Returns the pdu from the outlier tree. - #[tracing::instrument(skip(self))] pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { self.eventid_outlierpdu .get(event_id.as_bytes())? 
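The conversion of state_full_ids and state_full to async above relies on periodically handing control back to the tokio executor, so that loading the state of a very large room does not starve other tasks. Roughly, the pattern looks like the following standalone sketch (the function and variable names here are illustrative, not part of the patch):

use std::collections::BTreeMap;

// Insert a large number of items inside an async fn, yielding to the tokio
// scheduler every 100 iterations so other tasks stay responsive.
async fn collect_with_yields(items: Vec<(u64, String)>) -> BTreeMap<u64, String> {
    let mut result = BTreeMap::new();
    for (i, (key, value)) in items.into_iter().enumerate() {
        result.insert(key, value);
        if (i + 1) % 100 == 0 {
            tokio::task::yield_now().await;
        }
    }
    result
}

The same every-100-iterations yield appears again below in get_auth_chain and in fetch_and_handle_outliers.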
diff --git a/src/server_server.rs b/src/server_server.rs index 7b08cf9b..6fa83e4c 100644 --- a/src/server_server.rs +++ b/src/server_server.rs @@ -691,7 +691,7 @@ pub async fn send_transaction_message_route( .roomid_mutex_federation .write() .unwrap() - .entry(room_id.clone()) + .entry(room_id.to_owned()) .or_default(), ); let mutex_lock = mutex.lock().await; @@ -1054,6 +1054,25 @@ pub(crate) async fn handle_incoming_pdu<'a>( } } + if let Some((time, tries)) = db + .globals + .bad_event_ratelimiter + .read() + .unwrap() + .get(&*prev_id) + { + // Exponential backoff + let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + info!("Backing off from {}", prev_id); + continue; + } + } + if errors >= 5 { break; } @@ -1068,7 +1087,6 @@ pub(crate) async fn handle_incoming_pdu<'a>( .write() .unwrap() .insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time)); - let event_id = pdu.event_id.clone(); if let Err(e) = upgrade_outlier_to_timeline_pdu( pdu, json, @@ -1081,7 +1099,21 @@ pub(crate) async fn handle_incoming_pdu<'a>( .await { errors += 1; - warn!("Prev event {} failed: {}", event_id, e); + warn!("Prev event {} failed: {}", prev_id, e); + match db + .globals + .bad_event_ratelimiter + .write() + .unwrap() + .entry((*prev_id).to_owned()) + { + hash_map::Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + } + hash_map::Entry::Occupied(mut e) => { + *e.get_mut() = (Instant::now(), e.get().1 + 1) + } + } } let elapsed = start_time.elapsed(); db.globals @@ -1091,7 +1123,7 @@ pub(crate) async fn handle_incoming_pdu<'a>( .remove(&room_id.to_owned()); warn!( "Handling prev event {} took {}m{}s", - event_id, + prev_id, elapsed.as_secs() / 60, elapsed.as_secs() % 60 ); @@ -1321,8 +1353,11 @@ async fn upgrade_outlier_to_timeline_pdu( .pdu_shortstatehash(prev_event) .map_err(|_| "Failed talking to db".to_owned())?; - let state = - prev_event_sstatehash.map(|shortstatehash| db.rooms.state_full_ids(shortstatehash)); + let state = if let Some(shortstatehash) = prev_event_sstatehash { + Some(db.rooms.state_full_ids(shortstatehash).await) + } else { + None + }; if let Some(Ok(mut state)) = state { info!("Using cached state"); @@ -1378,6 +1413,7 @@ async fn upgrade_outlier_to_timeline_pdu( let mut leaf_state: BTreeMap<_, _> = db .rooms .state_full_ids(sstatehash) + .await .map_err(|_| "Failed to ask db for room state.".to_owned())?; if let Some(state_key) = &prev_event.state_key { @@ -1409,6 +1445,7 @@ async fn upgrade_outlier_to_timeline_pdu( auth_chain_sets.push( get_auth_chain(room_id, starting_events, db) + .await .map_err(|_| "Failed to load auth chain.".to_owned())? .collect(), ); @@ -1535,6 +1572,7 @@ async fn upgrade_outlier_to_timeline_pdu( let state_at_incoming_event = state_at_incoming_event.expect("we always set this to some above"); + info!("Starting auth check"); // 11. 
Check the auth of the event passes based on the state of the event let check_result = state_res::event_auth::auth_check( &room_version, @@ -1554,7 +1592,7 @@ async fn upgrade_outlier_to_timeline_pdu( if !check_result { return Err("Event has failed auth check with state at the event.".into()); } - info!("Auth check succeeded."); + info!("Auth check succeeded"); // We start looking at current room state now, so lets lock the room @@ -1570,6 +1608,7 @@ async fn upgrade_outlier_to_timeline_pdu( // Now we calculate the set of extremities this room has after the incoming event has been // applied. We start with the previous extremities (aka leaves) + info!("Calculating extremities"); let mut extremities = db .rooms .get_pdu_leaves(room_id) @@ -1585,16 +1624,18 @@ async fn upgrade_outlier_to_timeline_pdu( // Only keep those extremities were not referenced yet extremities.retain(|id| !matches!(db.rooms.is_event_referenced(room_id, id), Ok(true))); - let current_sstatehash = db - .rooms - .current_shortstatehash(room_id) - .map_err(|_| "Failed to load current state hash.".to_owned())? - .expect("every room has state"); + info!("Compressing state at event"); + let state_ids_compressed = state_at_incoming_event + .iter() + .map(|(shortstatekey, id)| { + db.rooms + .compress_state_event(*shortstatekey, id, &db.globals) + .map_err(|_| "Failed to compress_state_event".to_owned()) + }) + .collect::>()?; - let current_state_ids = db - .rooms - .state_full_ids(current_sstatehash) - .map_err(|_| "Failed to load room state.")?; + // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it + info!("Starting soft fail auth check"); let auth_events = db .rooms @@ -1607,18 +1648,6 @@ async fn upgrade_outlier_to_timeline_pdu( ) .map_err(|_| "Failed to get_auth_events.".to_owned())?; - let state_ids_compressed = state_at_incoming_event - .iter() - .map(|(shortstatekey, id)| { - db.rooms - .compress_state_event(*shortstatekey, id, &db.globals) - .map_err(|_| "Failed to compress_state_event".to_owned()) - }) - .collect::>()?; - - // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it - info!("Starting soft fail auth check"); - let soft_fail = !state_res::event_auth::auth_check( &room_version, &incoming_pdu, @@ -1651,6 +1680,19 @@ async fn upgrade_outlier_to_timeline_pdu( } if incoming_pdu.state_key.is_some() { + info!("Loading current room state ids"); + let current_sstatehash = db + .rooms + .current_shortstatehash(room_id) + .map_err(|_| "Failed to load current state hash.".to_owned())? + .expect("every room has state"); + + let current_state_ids = db + .rooms + .state_full_ids(current_sstatehash) + .await + .map_err(|_| "Failed to load room state.")?; + info!("Preparing for stateres to derive new room state"); let mut extremity_sstatehashes = HashMap::new(); @@ -1738,6 +1780,7 @@ async fn upgrade_outlier_to_timeline_pdu( state.iter().map(|(_, id)| id.clone()).collect(), db, ) + .await .map_err(|_| "Failed to load auth chain.".to_owned())? 
.collect(), ); @@ -1899,11 +1942,17 @@ pub(crate) fn fetch_and_handle_outliers<'a>( let mut todo_auth_events = vec![Arc::clone(id)]; let mut events_in_reverse_order = Vec::new(); let mut events_all = HashSet::new(); + let mut i = 0; while let Some(next_id) = todo_auth_events.pop() { if events_all.contains(&next_id) { continue; } + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + if let Ok(Some(_)) = db.rooms.get_pdu(&next_id) { trace!("Found {} in db", id); continue; @@ -2242,7 +2291,7 @@ fn append_incoming_pdu<'a>( } #[tracing::instrument(skip(starting_events, db))] -pub(crate) fn get_auth_chain<'a>( +pub(crate) async fn get_auth_chain<'a>( room_id: &RoomId, starting_events: Vec>, db: &'a Database, @@ -2251,10 +2300,15 @@ pub(crate) fn get_auth_chain<'a>( let mut buckets = vec![BTreeSet::new(); NUM_BUCKETS]; + let mut i = 0; for id in starting_events { let short = db.rooms.get_or_create_shorteventid(&id, &db.globals)?; let bucket_id = (short % NUM_BUCKETS as u64) as usize; buckets[bucket_id].insert((short, id.clone())); + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } } let mut full_auth_chain = HashSet::new(); @@ -2277,6 +2331,7 @@ pub(crate) fn get_auth_chain<'a>( let mut chunk_cache = HashSet::new(); let mut hits2 = 0; let mut misses2 = 0; + let mut i = 0; for (sevent_id, event_id) in chunk { if let Some(cached) = db.rooms.get_auth_chain_from_cache(&[sevent_id])? { hits2 += 1; @@ -2292,6 +2347,11 @@ pub(crate) fn get_auth_chain<'a>( auth_chain.len() ); chunk_cache.extend(auth_chain.iter()); + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } }; } println!( @@ -2512,7 +2572,7 @@ pub async fn get_event_authorization_route( let room_id = <&RoomId>::try_from(room_id_str) .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - let auth_chain_ids = get_auth_chain(room_id, vec![Arc::from(&*body.event_id)], &db)?; + let auth_chain_ids = get_auth_chain(room_id, vec![Arc::from(&*body.event_id)], &db).await?; Ok(get_event_authorization::v1::Response { auth_chain: auth_chain_ids @@ -2557,7 +2617,8 @@ pub async fn get_room_state_route( let pdus = db .rooms - .state_full_ids(shortstatehash)? + .state_full_ids(shortstatehash) + .await? .into_iter() .map(|(_, id)| { PduEvent::convert_to_outgoing_federation_event( @@ -2566,7 +2627,8 @@ pub async fn get_room_state_route( }) .collect(); - let auth_chain_ids = get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db)?; + let auth_chain_ids = + get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db).await?; Ok(get_room_state::v1::Response { auth_chain: auth_chain_ids @@ -2616,12 +2678,14 @@ pub async fn get_room_state_ids_route( let pdu_ids = db .rooms - .state_full_ids(shortstatehash)? + .state_full_ids(shortstatehash) + .await? 
.into_iter() .map(|(_, id)| (*id).to_owned()) .collect(); - let auth_chain_ids = get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db)?; + let auth_chain_ids = + get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db).await?; Ok(get_room_state_ids::v1::Response { auth_chain_ids: auth_chain_ids.map(|id| (*id).to_owned()).collect(), @@ -2927,12 +2991,13 @@ async fn create_join_event( ))?; drop(mutex_lock); - let state_ids = db.rooms.state_full_ids(shortstatehash)?; + let state_ids = db.rooms.state_full_ids(shortstatehash).await?; let auth_chain_ids = get_auth_chain( room_id, state_ids.iter().map(|(_, id)| id.clone()).collect(), db, - )?; + ) + .await?; let servers = db .rooms From 7bee9c1c69e8fb3d80039dd7532d4fdd5cfc5d66 Mon Sep 17 00:00:00 2001 From: Dietrich Date: Sun, 19 Jun 2022 06:56:51 +0200 Subject: [PATCH 289/445] Length of passwords consistently use the constant --- src/database/admin.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index 3ed1a8a9..5a0c28a9 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -525,7 +525,7 @@ async fn process_admin_command( )); } - let new_password = utils::random_string(20); + let new_password = utils::random_string(AUTO_GEN_PASSWORD_LENGTH); match db.users.set_password(&user_id, Some(new_password.as_str())) { Ok(()) => RoomMessageEventContent::text_plain(format!( From 0c8e51e1b70fcd8bc6b541614a3a0ff555817ff6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 19 Jun 2022 15:38:21 +0200 Subject: [PATCH 290/445] Upgrade dependencies --- Cargo.lock | 477 +++++++++++++++++++++++++++++------------------------ Cargo.toml | 28 ++-- 2 files changed, 272 insertions(+), 233 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 3a251b66..d8d791f0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -20,7 +20,7 @@ version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.6", + "getrandom 0.2.7", "once_cell", "version_check", ] @@ -84,9 +84,9 @@ checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" [[package]] name = "async-compression" -version = "0.3.12" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2bf394cfbbe876f0ac67b13b6ca819f9c9f2fb9ec67223cceb1555fbab1c31a" +checksum = "345fd392ab01f746c717b1357165b76f0b67a60192007b234058c9045fdcf695" dependencies = [ "brotli", "flate2", @@ -98,9 +98,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.53" +version = "0.1.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed6aa3524a2dfcf9fe180c51eae2b58738348d819517ceadf95789c51fff7600" +checksum = "96cf8829f67d2eab0b2dfa42c5d0ef737e0724e4a82b01b3e292456202b19716" dependencies = [ "proc-macro2", "quote", @@ -124,9 +124,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.5.1" +version = "0.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "47594e438a243791dba58124b6669561f5baa14cb12046641d8008bf035e5a25" +checksum = "8b4d4f9a5ca8b1ab8de59e663e68c6207059239373ca72980f5be7ab81231f74" dependencies = [ "async-trait", "axum-core", @@ -145,6 +145,7 @@ dependencies = [ "pin-project-lite", "serde", "serde_json", + "serde_urlencoded", "sync_wrapper", "tokio", "tower", @@ -155,9 +156,9 @@ dependencies 
= [ [[package]] name = "axum-core" -version = "0.2.1" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a671c9ae99531afdd5d3ee8340b8da547779430689947144c140fc74a740244" +checksum = "cf4d047478b986f14a13edad31a009e2e05cb241f9805d0d75e4cba4e129ad4d" dependencies = [ "async-trait", "bytes", @@ -169,9 +170,9 @@ dependencies = [ [[package]] name = "axum-server" -version = "0.3.3" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f9cfd9dbe28ebde5c0460067ea27c6f3b1d514b699c4e0a5aab0fb63e452a8a8" +checksum = "abf18303ef7e23b045301555bf8a0dfbc1444ea1a37b3c81757a32680ace4d7d" dependencies = [ "arc-swap", "bytes", @@ -181,7 +182,7 @@ dependencies = [ "hyper", "pin-project-lite", "rustls", - "rustls-pemfile", + "rustls-pemfile 1.0.0", "tokio", "tokio-rustls", "tower-service", @@ -285,9 +286,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.9.1" +version = "3.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a45a46ab1f2412e53d3a0ade76ffad2025804294569aae387231a0cd6e0899" +checksum = "37ccbd214614c6783386c1af30caf03192f17891059cecc394b4fb119e363de3" [[package]] name = "bytemuck" @@ -352,9 +353,9 @@ dependencies = [ [[package]] name = "clang-sys" -version = "1.3.1" +version = "1.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cc00842eed744b858222c4c9faf7243aafc6d33f92f96935263ef4d8a41ce21" +checksum = "5a050e2153c5be08febd6734e29298e844fdb0fa21aeddd63b4eb7baa106c69b" dependencies = [ "glob", "libc", @@ -363,23 +364,23 @@ dependencies = [ [[package]] name = "clap" -version = "3.1.8" +version = "3.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71c47df61d9e16dc010b55dba1952a57d8c215dbb533fd13cdd13369aac73b1c" +checksum = "d53da17d37dba964b9b3ecb5c5a1f193a2762c700e6829201e645b9381c99dc7" dependencies = [ "bitflags", "clap_derive", + "clap_lex", "indexmap", - "lazy_static", - "os_str_bytes", + "once_cell", "textwrap", ] [[package]] name = "clap_derive" -version = "3.1.7" +version = "3.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a3aab4734e083b809aaf5794e14e756d1c798d2c69c7f7de7a09a2f5214993c1" +checksum = "c11d40217d16aee8508cc8e5fde8b4ff24639758608e5374e731b53f85749fb9" dependencies = [ "heck", "proc-macro-error", @@ -388,6 +389,15 @@ dependencies = [ "syn", ] +[[package]] +name = "clap_lex" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5538cd660450ebeb4234cfecf8f2284b844ffc4c50531e66d584ad5b91293613" +dependencies = [ + "os_str_bytes", +] + [[package]] name = "color_quant" version = "1.1.0" @@ -517,17 +527,17 @@ dependencies = [ "crossbeam-deque", "crossbeam-epoch", "crossbeam-queue 0.3.5", - "crossbeam-utils 0.8.8", + "crossbeam-utils 0.8.9", ] [[package]] name = "crossbeam-channel" -version = "0.5.4" +version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5aaa7bd5fb665c6864b5f963dd9097905c54125909c7aa94c9e18507cdbe6c53" +checksum = "4c02a4d71819009c192cf4872265391563fd6a84c81ff2c0f2a7026ca4c1d85c" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.8", + "crossbeam-utils 0.8.9", ] [[package]] @@ -538,20 +548,20 @@ checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" dependencies = [ "cfg-if 1.0.0", "crossbeam-epoch", - "crossbeam-utils 0.8.8", + "crossbeam-utils 0.8.9", ] [[package]] name = "crossbeam-epoch" -version = "0.9.8" 
+version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1145cf131a2c6ba0615079ab6a638f7e1973ac9c2634fcbeaaad6114246efe8c" +checksum = "07db9d94cbd326813772c968ccd25999e5f8ae22f4f8d1b11effa37ef6ce281d" dependencies = [ "autocfg", "cfg-if 1.0.0", - "crossbeam-utils 0.8.8", - "lazy_static", + "crossbeam-utils 0.8.9", "memoffset", + "once_cell", "scopeguard", ] @@ -571,7 +581,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1f25d8400f4a7a5778f0e4e52384a48cbd9b5c495d110786187fc750075277a2" dependencies = [ "cfg-if 1.0.0", - "crossbeam-utils 0.8.8", + "crossbeam-utils 0.8.9", ] [[package]] @@ -586,12 +596,12 @@ dependencies = [ [[package]] name = "crossbeam-utils" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf124c720b7686e3c2663cf54062ab0f68a88af2fb6a030e87e30bf721fcb38" +checksum = "8ff1f980957787286a554052d03c7aee98d99cc32e09f6d45f0a814133c87978" dependencies = [ "cfg-if 1.0.0", - "lazy_static", + "once_cell", ] [[package]] @@ -673,9 +683,9 @@ dependencies = [ [[package]] name = "directories" -version = "3.0.2" +version = "4.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e69600ff1703123957937708eb27f7a564e48885c537782722ed0ba3189ce1d7" +checksum = "f51c5d4ddabd36886dd3e1438cb358cdcb0d7c499cb99cb4ac2e38e18b5cb210" dependencies = [ "dirs-sys", ] @@ -693,9 +703,9 @@ dependencies = [ [[package]] name = "ed25519" -version = "1.4.1" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d5c4b5e5959dc2c2b89918d8e2cc40fcdd623cef026ed09d2f0ee05199dc8e4" +checksum = "1e9c280362032ea4203659fc489832d0204ef09f247a0506f170dafcac08c369" dependencies = [ "signature", ] @@ -769,14 +779,12 @@ dependencies = [ [[package]] name = "flate2" -version = "1.0.22" +version = "1.0.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e6988e897c1c9c485f43b47a529cef42fde0547f9d8d41a7062518f1d8fc53f" +checksum = "f82b0f4c27ad9f8bfd1f3208d882da2b09c301bc1c828fd3a00d0216d2fbbff6" dependencies = [ - "cfg-if 1.0.0", "crc32fast", - "libc", - "miniz_oxide 0.4.4", + "miniz_oxide 0.5.3", ] [[package]] @@ -932,13 +940,13 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.6" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9be70c98951c83b8d2f8f60d7065fa6d5146873094452a1008da8c2f1e4205ad" +checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6" dependencies = [ "cfg-if 1.0.0", "libc", - "wasi 0.10.2+wasi-snapshot-preview1", + "wasi 0.11.0+wasi-snapshot-preview1", ] [[package]] @@ -985,13 +993,19 @@ dependencies = [ "ahash", ] +[[package]] +name = "hashbrown" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db0d4cf898abf0081f964436dc980e96670a0f36863e4b83aaacdb65c9d7ccc3" + [[package]] name = "hashlink" version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7249a3129cbc1ffccd74857f81464a323a152173cdb134e0fd81bc803b29facf" dependencies = [ - "hashbrown", + "hashbrown 0.11.2", ] [[package]] @@ -1093,9 +1107,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.6" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "31f4c6746584866f0feabcc69893c5b51beef3831656a968ed7ae254cdc4fd03" +checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" 
dependencies = [ "bytes", "fnv", @@ -1104,9 +1118,9 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.4" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ff4f84919677303da5f147645dbea6b1881f368d03ac84e1dc09031ebd7b2c6" +checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", "http", @@ -1121,9 +1135,9 @@ checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" [[package]] name = "httparse" -version = "1.6.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9100414882e15fb7feccb4897e5f0ff0ff1ca7d1a86a23208ada4d7a18e6c6c4" +checksum = "496ce29bb5a52785b44e0f7ca2847ae0bb839c9bd28f69acac9b99d461c0c04c" [[package]] name = "httpdate" @@ -1133,9 +1147,9 @@ checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "hyper" -version = "0.14.18" +version = "0.14.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b26ae0a80afebe130861d90abf98e3814a4f28a4c6ffeb5ab8ebb2be311e0ef2" +checksum = "42dc3c131584288d375f2d07f822b0cb012d8c6fb899a5b9fdb3cb7eb9b6004f" dependencies = [ "bytes", "futures-channel", @@ -1198,23 +1212,20 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.8.1" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f647032dfaa1f8b6dc29bd3edb7bbef4861b8b8007ebb118d6db284fd59f6ee" +checksum = "6c6392766afd7964e2531940894cffe4bd8d7d17dbc3c1c4857040fd4b33bdb3" dependencies = [ "autocfg", - "hashbrown", + "hashbrown 0.12.1", "serde", ] [[package]] name = "indoc" -version = "1.0.4" +version = "1.0.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7906a9fababaeacb774f72410e497a1d18de916322e33797bb2cd29baa23c9e" -dependencies = [ - "unindent", -] +checksum = "05a0bd019339e5d968b37855180087b7b9d512c5046fbd244cf8c95687927d6e" [[package]] name = "inlinable_string" @@ -1251,9 +1262,9 @@ dependencies = [ [[package]] name = "ipnet" -version = "2.4.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35e70ee094dc02fd9c13fdad4940090f22dbd6ac7c9e7094a46cf0232a50bc7c" +checksum = "879d54834c8c76457ef4293a689b2a8c59b076067ad77b15efafbb05f92a592b" [[package]] name = "itertools" @@ -1266,9 +1277,9 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aab8fc367588b89dcee83ab0fd66b72b50b72fa1904d7095045ace2b0c81c35" +checksum = "112c678d4050afce233f4f2852bb2eb519230b3cf12f33585275537d7e41578d" [[package]] name = "jobserver" @@ -1287,9 +1298,9 @@ checksum = "229d53d58899083193af11e15917b5640cd40b29ff475a1fe4ef725deb02d0f2" [[package]] name = "js-sys" -version = "0.3.56" +version = "0.3.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a38fc24e30fd564ce974c02bf1d337caddff65be6cc4735a1f7eab22a7440f04" +checksum = "c3fac17f7123a73ca62df411b1bf727ccc805daa070338fda671c86dac1bdc27" dependencies = [ "wasm-bindgen", ] @@ -1331,9 +1342,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" [[package]] name = "libc" -version = "0.2.122" +version = "0.2.126" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec647867e2bf0772e28c8bcde4f0d19a9216916e890543b5a03ed8ef27b8f259" +checksum = 
"349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" [[package]] name = "libloading" @@ -1397,9 +1408,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.16" +version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6389c490849ff5bc16be905ae24bc913a9c8892e19b2341dbc175e14c341c2b8" +checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" dependencies = [ "cfg-if 1.0.0", ] @@ -1448,9 +1459,9 @@ checksum = "73cbba799671b762df5a175adf59ce145165747bb891505c43d09aefbbf38beb" [[package]] name = "memchr" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" +checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memoffset" @@ -1484,35 +1495,23 @@ dependencies = [ [[package]] name = "miniz_oxide" -version = "0.4.4" +version = "0.5.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a92518e98c078586bc6c934028adcca4c92a53d6a958196de835170a01d84e4b" +checksum = "6f5c75688da582b8ffc1f1799e9db273f32133c49e048f614d22ec3256773ccc" dependencies = [ "adler", - "autocfg", ] [[package]] name = "mio" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52da4364ffb0e4fe33a9841a98a3f3014fb964045ce4f7a45a398243c8d6b0c9" +checksum = "713d550d9b44d89174e066b7a6217ae06234c10cb47819a88290d2b353c31799" dependencies = [ "libc", "log", - "miow", - "ntapi", "wasi 0.11.0+wasi-snapshot-preview1", - "winapi", -] - -[[package]] -name = "miow" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" -dependencies = [ - "winapi", + "windows-sys", ] [[package]] @@ -1525,15 +1524,6 @@ dependencies = [ "minimal-lexical", ] -[[package]] -name = "ntapi" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c28774a7fd2fbb4f0babd8237ce554b73af68021b5f695a3cebd6c59bac0980f" -dependencies = [ - "winapi", -] - [[package]] name = "num-bigint" version = "0.2.6" @@ -1547,9 +1537,9 @@ dependencies = [ [[package]] name = "num-integer" -version = "0.1.44" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" +checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" dependencies = [ "autocfg", "num-traits", @@ -1557,9 +1547,9 @@ dependencies = [ [[package]] name = "num-iter" -version = "0.1.42" +version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2021c8337a54d21aca0d59a92577a029af9431cb59b909b03252b9c164fad59" +checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" dependencies = [ "autocfg", "num-integer", @@ -1579,9 +1569,9 @@ dependencies = [ [[package]] name = "num-traits" -version = "0.2.14" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" +checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg", ] @@ -1598,9 +1588,9 @@ dependencies = [ [[package]] name = "once_cell" -version = "1.10.0" +version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"87f3e037eac156d1775da914196f0f37741a274155e34a0b7e427c35d2a2ecb9" +checksum = "7709cef83f0c1f58f666e746a08b21e0085f7440fa6a29cc194d68aac97a4225" [[package]] name = "opaque-debug" @@ -1668,12 +1658,9 @@ dependencies = [ [[package]] name = "os_str_bytes" -version = "6.0.0" +version = "6.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e22443d1643a904602595ba1cd8f7d896afe56d26712531c5ff73a15b2fbf64" -dependencies = [ - "memchr", -] +checksum = "21326818e99cfe6ce1e524c2a805c189a99b5ae555a35d19f9a284b427d86afa" [[package]] name = "page_size" @@ -1800,9 +1787,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.2.8" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e280fbe77cc62c91527259e9442153f4688736748d24660126286329742b4c6c" +checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" [[package]] name = "pin-utils" @@ -1881,11 +1868,11 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.37" +version = "1.0.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec757218438d5fda206afc041538b2f6d889286160d649a86a24d37e1235afd1" +checksum = "c54b25569025b7fc9651de43004ae593a75ad88543b17178aa5e1b9c4f15f56f" dependencies = [ - "unicode-xid", + "unicode-ident", ] [[package]] @@ -1909,9 +1896,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.17" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "632d02bff7f874a36f33ea8bb416cd484b90cc66c1194b1a1110d067a7013f58" +checksum = "f53dc8cf16a769a6f677e09e7ff2cd4be1ea0f48754aac39520536962011de0d" dependencies = [ "proc-macro2", ] @@ -1975,7 +1962,7 @@ version = "0.6.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" dependencies = [ - "getrandom 0.2.6", + "getrandom 0.2.7", ] [[package]] @@ -2002,16 +1989,16 @@ version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.6", + "getrandom 0.2.7", "redox_syscall", "thiserror", ] [[package]] name = "regex" -version = "1.5.5" +version = "1.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a11647b6b25ff05a515cb92c365cec08801e83423a235b51e231e1808747286" +checksum = "d83f127d94bdbcda4c8cc2e50f6f84f4b611f69c902699ca385a39c3a75f9ff1" dependencies = [ "aho-corasick", "memchr", @@ -2029,9 +2016,9 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.25" +version = "0.6.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" +checksum = "49b3de9ec5dc0a3417da371aab17d729997c15010e7fd24ff707773a33bddb64" [[package]] name = "reqwest" @@ -2057,7 +2044,7 @@ dependencies = [ "pin-project-lite", "rustls", "rustls-native-certs", - "rustls-pemfile", + "rustls-pemfile 0.2.1", "serde", "serde_json", "serde_urlencoded", @@ -2282,7 +2269,7 @@ dependencies = [ "base64 0.13.0", "blake2b_simd", "constant_time_eq", - "crossbeam-utils 0.8.8", + "crossbeam-utils 0.8.9", ] [[package]] @@ -2293,9 +2280,9 @@ checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustls" -version = "0.20.4" +version = "0.20.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fbfeb8d0ddb84706bc597a5574ab8912817c52a397f819e5b614e2265206921" +checksum = "5aab8ee6c7097ed6057f43c187a62418d0c05a4bd5f18b3571db50ee0f9ce033" dependencies = [ "log", "ring", @@ -2305,12 +2292,12 @@ dependencies = [ [[package]] name = "rustls-native-certs" -version = "0.6.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ca9ebdfa27d3fc180e42879037b5338ab1c040c06affd00d8338598e7800943" +checksum = "0167bac7a9f490495f3c33013e7722b53cb087ecbe082fb0c6387c96f634ea50" dependencies = [ "openssl-probe", - "rustls-pemfile", + "rustls-pemfile 1.0.0", "schannel", "security-framework", ] @@ -2324,20 +2311,29 @@ dependencies = [ "base64 0.13.0", ] +[[package]] +name = "rustls-pemfile" +version = "1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7522c9de787ff061458fe9a829dc790a3f5b22dc571694fc5883f448b94d9a9" +dependencies = [ + "base64 0.13.0", +] + [[package]] name = "ryu" -version = "1.0.9" +version = "1.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73b4b750c782965c211b42f022f59af1fbceabdd026623714f104152f1ec149f" +checksum = "f3f6f92acf49d1b98f7a81226834412ada05458b7364277387724a237f062695" [[package]] name = "schannel" -version = "0.1.19" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" +checksum = "88d6731146462ea25d9244b2ed5fd1d716d25c52e4d54aa4fb0f3c4e9854dbe2" dependencies = [ "lazy_static", - "winapi", + "windows-sys", ] [[package]] @@ -2381,18 +2377,18 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.136" +version = "1.0.137" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce31e24b01e1e524df96f1c2fdd054405f8d7376249a5110886fb4b658484789" +checksum = "61ea8d54c77f8315140a05f4c7237403bf38b72704d031543aa1d16abbf517d1" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.136" +version = "1.0.137" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08597e7152fcd306f41838ed3e37be9eaeed2b61c42e2117266a554fab4662f9" +checksum = "1f26faba0c3959972377d3b2d306ee9f71faee9714294e41bb777f83f88578be" dependencies = [ "proc-macro2", "quote", @@ -2401,9 +2397,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.79" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e8d9fa5c3b304765ce1fd9c4c8a3de2c8db365a5b91be52f186efc675681d95" +checksum = "9b7ce2b32a1aed03c558dc61a5cd328f15aff2dbc17daad8fb8af04d2100e15c" dependencies = [ "itoa", "ryu", @@ -2424,9 +2420,9 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.8.23" +version = "0.8.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4a521f2940385c165a24ee286aa8599633d162077a54bdcae2a6fd5a7bfa7a0" +checksum = "707d15895415db6628332b737c838b88c598522e4dc70647e59b72312924aebc" dependencies = [ "indexmap", "ryu", @@ -2526,7 +2522,7 @@ checksum = "7f96b4737c2ce5987354855aed3797279def4ebf734436c6aa4552cf8e169935" dependencies = [ "crc32fast", "crossbeam-epoch", - "crossbeam-utils 0.8.8", + "crossbeam-utils 0.8.9", "fs2", "fxhash", "libc", @@ -2585,13 +2581,13 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.91" +version = "1.0.98" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b683b2b825c8eef438b77c36a06dc262294da3d5a5813fac20da149241dcd44d" +checksum = "c50aef8a904de4c23c788f104b7dddc7d6f79c647c7c8ce4cc8f73eb0ca773dd" dependencies = [ "proc-macro2", "quote", - "unicode-xid", + "unicode-ident", ] [[package]] @@ -2629,18 +2625,18 @@ checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb" [[package]] name = "thiserror" -version = "1.0.30" +version = "1.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "854babe52e4df1653706b98fcfc05843010039b406875930a70e4d9644e5c417" +checksum = "bd829fe32373d27f76265620b5309d0340cb8550f523c1dda251d6298069069a" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.30" +version = "1.0.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa32fd3f627f367fe16f893e2597ae3c05020f8bba2666a4e6ea73d377e5714b" +checksum = "0396bc89e626244658bef819e22d0cc459e795a5ebe878e6ec336d1674a8d79a" dependencies = [ "proc-macro2", "quote", @@ -2712,19 +2708,20 @@ dependencies = [ [[package]] name = "time" -version = "0.1.43" +version = "0.1.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" +checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" dependencies = [ "libc", + "wasi 0.10.0+wasi-snapshot-preview1", "winapi", ] [[package]] name = "tinyvec" -version = "1.5.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2c1c1d5a42b6245520c249549ec267180beaffcc0615401ac8e31853d4b6d8d2" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" dependencies = [ "tinyvec_macros", ] @@ -2737,9 +2734,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.17.0" +version = "1.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2af73ac49756f3f7c01172e34a23e5d0216f6c32333757c2c61feb2bbff5a5ee" +checksum = "c51a52ed6686dd62c320f9b89299e9dfb46f730c7a48e635c19f21d116cb1439" dependencies = [ "bytes", "libc", @@ -2756,9 +2753,9 @@ dependencies = [ [[package]] name = "tokio-macros" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b557f72f448c511a979e2564e55d74e6c4432fc96ff4f6241bc6bded342643b7" +checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484" dependencies = [ "proc-macro2", "quote", @@ -2767,9 +2764,9 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.23.3" +version = "0.23.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4151fda0cf2798550ad0b34bcfc9b9dcc2a9d2471c895c68f3a8818e54f2389e" +checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ "rustls", "tokio", @@ -2790,9 +2787,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.8" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50145484efff8818b5ccd256697f36863f587da82cf8b409c53adf1e840798e3" +checksum = "df54d54117d6fdc4e4fea40fe1e4e566b3505700e148a6827e59b34b0d2600d9" dependencies = [ "futures-core", "pin-project-lite", @@ -2801,9 +2798,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.1" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum 
= "0edfdeb067411dba2044da6d1cb2df793dd35add7888d73c16e3381ded401764" +checksum = "cc463cd8deddc3770d20f9852143d50bf6094e640b485cb2e189a2099085ff45" dependencies = [ "bytes", "futures-core", @@ -2815,25 +2812,24 @@ dependencies = [ [[package]] name = "toml" -version = "0.5.8" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" +checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7" dependencies = [ "serde", ] [[package]] name = "tower" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a89fd63ad6adf737582df5db40d286574513c69a11dac5214dc3b5603d6713e" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" dependencies = [ "futures-core", "futures-util", "pin-project", "pin-project-lite", "tokio", - "tokio-util", "tower-layer", "tower-service", "tracing", @@ -2841,9 +2837,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.2.5" +version = "0.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aba3f3efabf7fb41fae8534fc20a817013dd1c12cb45441efb6c82e6556b4cd8" +checksum = "3c530c8675c1dbf98facee631536fa116b5fb6382d7dd6dc1b118d970eafe3ba" dependencies = [ "async-compression", "bitflags", @@ -2870,15 +2866,15 @@ checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62" [[package]] name = "tower-service" -version = "0.3.1" +version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.32" +version = "0.1.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a1bdf54a7c28a2bbf701e1d2233f6c77f473486b94bee4f9678da5a148dca7f" +checksum = "a400e31aa60b9d44a52a8ee0343b5b18566b03a8321e0d321f695cf56e940160" dependencies = [ "cfg-if 1.0.0", "log", @@ -2889,9 +2885,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.20" +version = "0.1.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e65ce065b4b5c53e73bb28912318cb8c9e9ad3921f1d669eb0e68b4c8143a2b" +checksum = "cc6b8ad3567499f98a1db7a752b07a7c8c7c7c34c332ec00effb2b0027974b7c" dependencies = [ "proc-macro2", "quote", @@ -2900,11 +2896,11 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.24" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90442985ee2f57c9e1b548ee72ae842f4a9a20e3f417cc38dbc5dc684d9bb4ee" +checksum = "7709595b8878a4965ce5e87ebf880a7d39c9afc6837721b21a5a816a8117d921" dependencies = [ - "lazy_static", + "once_cell", "valuable", ] @@ -2921,9 +2917,9 @@ dependencies = [ [[package]] name = "tracing-log" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6923477a48e41c1951f1999ef8bb5a3023eb723ceadafe78ffb65dc366761e3" +checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" dependencies = [ "lazy_static", "log", @@ -3021,18 +3017,24 @@ checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" [[package]] name = "uncased" -version = "0.9.6" +version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5baeed7327e25054889b9bd4f975f32e5f4c5d434042d59ab6cd4142c0a76ed0" +checksum = "09b01702b0fd0b3fadcf98e098780badda8742d4f4a7676615cad90e8ac73622" dependencies = [ "version_check", ] [[package]] name = "unicode-bidi" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a01404663e3db436ed2746d9fefef640d868edae3cceb81c3b8d5732fda678f" +checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" + +[[package]] +name = "unicode-ident" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5bd2fe26506023ed7b5e1e315add59d6f584c621d037f9368fea9cfb988f368c" [[package]] name = "unicode-normalization" @@ -3045,15 +3047,9 @@ dependencies = [ [[package]] name = "unicode-xid" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" - -[[package]] -name = "unindent" -version = "0.1.8" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "514672a55d7380da379785a4d70ca8386c8883ff7eaae877be4d2081cebe73d8" +checksum = "957e51f3646910546462e67d5f7599b9e4fb8acdd304b087a6494730f9eebf04" [[package]] name = "unsigned-varint" @@ -3085,7 +3081,7 @@ version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" dependencies = [ - "getrandom 0.2.6", + "getrandom 0.2.7", ] [[package]] @@ -3124,9 +3120,9 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasi" -version = "0.10.2+wasi-snapshot-preview1" +version = "0.10.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" +checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" [[package]] name = "wasi" @@ -3136,9 +3132,9 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.79" +version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25f1af7423d8588a3d840681122e72e6a24ddbcb3f0ec385cac0d12d24256c06" +checksum = "7c53b543413a17a202f4be280a7e5c62a1c69345f5de525ee64f8cfdbc954994" dependencies = [ "cfg-if 1.0.0", "wasm-bindgen-macro", @@ -3146,9 +3142,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.79" +version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b21c0df030f5a177f3cba22e9bc4322695ec43e7257d865302900290bcdedca" +checksum = "5491a68ab4500fa6b4d726bd67408630c3dbe9c4fe7bda16d5c82a1fd8c7340a" dependencies = [ "bumpalo", "lazy_static", @@ -3161,9 +3157,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.29" +version = "0.4.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eb6ec270a31b1d3c7e266b999739109abce8b6c87e4b31fcfcd788b65267395" +checksum = "de9a9cec1733468a8c657e57fa2413d2ae2c0129b95e87c5b72b8ace4d13f31f" dependencies = [ "cfg-if 1.0.0", "js-sys", @@ -3173,9 +3169,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.79" +version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f4203d69e40a52ee523b2529a773d5ffc1dc0071801c87b3d270b471b80ed01" +checksum = 
"c441e177922bc58f1e12c022624b6216378e5febc2f0533e41ba443d505b80aa" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3183,9 +3179,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.79" +version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bfa8a30d46208db204854cadbb5d4baf5fcf8071ba5bf48190c3e59937962ebc" +checksum = "7d94ac45fcf608c1f45ef53e748d35660f168490c10b23704c7779ab8f5c3048" dependencies = [ "proc-macro2", "quote", @@ -3196,15 +3192,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.79" +version = "0.2.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d958d035c4438e28c70e4321a2911302f10135ce78a9c7834c0cab4123d06a2" +checksum = "6a89911bd99e5f3659ec4acf9c4d93b0a90fe4a2a11f15328472058edc5261be" [[package]] name = "web-sys" -version = "0.3.56" +version = "0.3.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c060b319f29dd25724f09a2ba1418f142f539b2be99fbf4d2d5a8f7330afb8eb" +checksum = "2fed94beee57daf8dd7d51f2b15dc2bcde92d7a72304cdf662a4371008b71b90" dependencies = [ "js-sys", "wasm-bindgen", @@ -3222,9 +3218,9 @@ dependencies = [ [[package]] name = "weezl" -version = "0.1.5" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8b77fdfd5a253be4ab714e4ffa3c49caf146b4de743e97510c0656cf90f1e8e" +checksum = "9c97e489d8f836838d497091de568cf16b117486d529ec5579233521065bd5e4" [[package]] name = "widestring" @@ -3260,6 +3256,49 @@ version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" +[[package]] +name = "windows-sys" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" +dependencies = [ + "windows_aarch64_msvc", + "windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_msvc", +] + +[[package]] +name = "windows_aarch64_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" + +[[package]] +name = "windows_i686_gnu" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" + +[[package]] +name = "windows_i686_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" + [[package]] name = "winreg" version = "0.6.2" diff --git a/Cargo.toml b/Cargo.toml index 10be7501..f150c4e7 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -14,10 +14,10 @@ edition = "2021" [dependencies] # Web framework -axum = { version = "0.5.0", default-features = false, features = ["headers", "http1", "http2", "json", "matched-path"], optional = true } -axum-server = { version = "0.3.3", features = ["tls-rustls"] } -tower = 
{ version = "0.4.11", features = ["util"] } -tower-http = { version = "0.2.1", features = ["add-extension", "cors", "compression-full", "sensitive-headers", "trace", "util"] } +axum = { version = "0.5.8", default-features = false, features = ["form", "headers", "http1", "http2", "json", "matched-path"], optional = true } +axum-server = { version = "0.4.0", features = ["tls-rustls"] } +tower = { version = "0.4.8", features = ["util"] } +tower-http = { version = "0.3.4", features = ["add-extension", "cors", "compression-full", "sensitive-headers", "trace", "util"] } # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } @@ -28,19 +28,19 @@ ruma = { git = "https://github.com/ruma/ruma", rev = "d614ad1422d6c4b3437ebc318c # Async runtime and utilities tokio = { version = "1.11.0", features = ["fs", "macros", "signal", "sync"] } # Used for storing data permanently -sled = { version = "0.34.6", features = ["compression", "no_metrics"], optional = true } +sled = { version = "0.34.7", features = ["compression", "no_metrics"], optional = true } #sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] } -persy = { version = "1.2" , optional = true, features = ["background_ops"] } +persy = { version = "1.0.0", optional = true, features = ["background_ops"] } # Used for the http request / response body type for Ruma endpoints used with reqwest bytes = "1.1.0" http = "0.2.4" # Used to find data directory for default db path -directories = "3.0.2" +directories = "4.0.0" # Used for ruma wrapper -serde_json = { version = "1.0.70", features = ["raw_value"] } +serde_json = { version = "1.0.68", features = ["raw_value"] } # Used for appservice registration files -serde_yaml = "0.8.20" +serde_yaml = "0.8.21" # Used for pdu definition serde = { version = "1.0.130", features = ["rc"] } # Used for secure identifiers @@ -50,7 +50,7 @@ rust-argon2 = "0.8.3" # Used to send requests reqwest = { default-features = false, features = ["rustls-tls-native-roots", "socks"], git = "https://github.com/timokoesters/reqwest", rev = "57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" } # Used for conduit::Error type -thiserror = "1.0.28" +thiserror = "1.0.29" # Used to generate thumbnails for images image = { version = "0.23.14", default-features = false, features = ["jpeg", "png", "gif"] } # Used to encode server public key @@ -64,8 +64,8 @@ regex = "1.5.4" # jwt jsonwebtokens jsonwebtoken = "7.2.0" # Performance measurements -tracing = { version = "0.1.26", features = [] } -tracing-subscriber = "0.2.20" +tracing = { version = "0.1.27", features = [] } +tracing-subscriber = "0.2.22" tracing-flame = "0.1.0" opentelemetry = { version = "0.16.0", features = ["rt-tokio"] } opentelemetry-jaeger = { version = "0.15.0", features = ["rt-tokio"] } @@ -83,8 +83,8 @@ thread_local = "1.1.3" hmac = "0.11.0" sha-1 = "0.9.8" # used for conduit's CLI and admin room command parsing -clap = { version = "3.0.10", default-features = false, features = ["std", "derive"] } -futures-util = { version = "0.3.19", default-features = false } +clap = { version = "3.2.5", default-features = false, features = ["std", "derive"] } +futures-util = { version = "0.3.17", default-features = false } # Used for reading the configuration from conduit.toml & environment variables figment = { version = 
"0.10.6", features = ["env", "toml"] } From f6183e457d3c7743d916cb51ab1441c1e9005643 Mon Sep 17 00:00:00 2001 From: Zeyphros Date: Sat, 2 Apr 2022 14:00:19 +0200 Subject: [PATCH 291/445] Implement command to deactivate user from admin channel Use `leave_room` in `leave_all_rooms` WIP: Add command to delete a list of users also implements a flag to prevent the user from being removed from their joined rooms. Report user deactivation failure reason Don't send leave events by default when mass deactivating user accounts Don't stop leaving rooms if an error was encountered WIP: Rename command, make flags consistent, don't deactivate admin accounts. Accounts should be deactivated as fast as possible and removing users from joined groups is completed afterwards. Fix admin safety logic, improve command output Continue leaving rooms if a room_id is invalid Ignore errors from leave_room Add notice to the list-local-users command Output form list-local-users can be used directly without modification with the deactivate-all command Only get mutex lock for admin room when sending message --- src/client_server/account.rs | 53 +------------ src/database/admin.rs | 142 ++++++++++++++++++++++++++++++++--- src/database/rooms.rs | 21 ++++++ 3 files changed, 156 insertions(+), 60 deletions(-) diff --git a/src/client_server/account.rs b/src/client_server/account.rs index 984b1ba2..dc0782d1 100644 --- a/src/client_server/account.rs +++ b/src/client_server/account.rs @@ -4,7 +4,7 @@ use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; use crate::{ database::{admin::make_user_admin, DatabaseGuard}, pdu::PduBuilder, - utils, Error, Result, Ruma, + utils, Database, Error, Result, Ruma, }; use ruma::{ api::client::{ @@ -398,55 +398,8 @@ pub async fn deactivate_route( return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); } - // Leave all joined rooms and reject all invitations - // TODO: work over federation invites - let all_rooms = db - .rooms - .rooms_joined(sender_user) - .chain( - db.rooms - .rooms_invited(sender_user) - .map(|t| t.map(|(r, _)| r)), - ) - .collect::>(); - - for room_id in all_rooms { - let room_id = room_id?; - let event = RoomMemberEventContent { - membership: MembershipState::Leave, - displayname: None, - avatar_url: None, - is_direct: None, - third_party_invite: None, - blurhash: None, - reason: None, - join_authorized_via_users_server: None, - }; - - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.clone()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_user.to_string()), - redacts: None, - }, - sender_user, - &room_id, - &db, - &state_lock, - )?; - } + // Make the user leave all rooms before deactivation + db.rooms.leave_all_rooms(&sender_user, &db).await?; // Remove devices and mark account as deactivated db.users.deactivate_account(sender_user)?; diff --git a/src/database/admin.rs b/src/database/admin.rs index 5a0c28a9..328c99ca 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -101,6 +101,12 @@ impl Admin { tokio::select! 
{ Some(event) = receiver.recv() => { let guard = db.read().await; + + let message_content = match event { + AdminRoomEvent::SendMessage(content) => content, + AdminRoomEvent::ProcessMessage(room_message) => process_admin_message(&*guard, room_message).await + }; + let mutex_state = Arc::clone( guard.globals .roomid_mutex_state @@ -109,18 +115,10 @@ impl Admin { .entry(conduit_room.clone()) .or_default(), ); - let state_lock = mutex_state.lock().await; - match event { - AdminRoomEvent::SendMessage(content) => { - send_message(content, guard, &state_lock); - } - AdminRoomEvent::ProcessMessage(room_message) => { - let reply_message = process_admin_message(&*guard, room_message).await; + let state_lock = mutex_state.lock().await; - send_message(reply_message, guard, &state_lock); - } - } + send_message(message_content, guard, &state_lock); drop(state_lock); } @@ -240,6 +238,39 @@ enum AdminCommand { /// List all rooms we are currently handling an incoming pdu from IncomingFederation, + /// Deactivate a user + /// + /// User will be removed from all rooms by default. + /// This behaviour can be overridden with the --no-leave-rooms flag. + DeactivateUser { + #[clap(short, long)] + leave_rooms: bool, + user_id: Box, + }, + + #[clap(verbatim_doc_comment)] + /// Deactivate a list of users + /// + /// Recommended to use in conjunction with list-local-users. + /// + /// Users will not be removed from joined rooms by default. + /// Can be overridden with --leave-rooms flag. + /// Removing a mass amount of users from a room may cause a significant amount of leave events. + /// The time to leave rooms may depend significantly on joined rooms and servers. + /// + /// [commandbody] + /// # ``` + /// # User list here + /// # ``` + DeactivateAll { + #[clap(short, long)] + /// Remove users from their joined rooms + leave_rooms: bool, + #[clap(short, long)] + /// Also deactivate admin accounts + force: bool, + }, + /// Get the auth_chain of a PDU GetAuthChain { /// An event ID (the $ character followed by the base64 reference hash) @@ -603,6 +634,97 @@ async fn process_admin_command( db.rooms.disabledroomids.remove(room_id.as_bytes())?; RoomMessageEventContent::text_plain("Room enabled.") } + AdminCommand::DeactivateUser { + leave_rooms, + user_id, + } => { + let user_id = Arc::::from(user_id); + if db.users.exists(&user_id)? 
{ + RoomMessageEventContent::text_plain(format!( + "Making {} leave all rooms before deactivation...", + user_id + )); + + db.users.deactivate_account(&user_id)?; + + if leave_rooms { + db.rooms.leave_all_rooms(&user_id, &db).await?; + } + + RoomMessageEventContent::text_plain(format!( + "User {} has been deactivated", + user_id + )) + } else { + RoomMessageEventContent::text_plain(format!( + "User {} doesn't exist on this server", + user_id + )) + } + } + AdminCommand::DeactivateAll { leave_rooms, force } => { + if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" { + let usernames = body.clone().drain(1..body.len() - 1).collect::>(); + + let mut user_ids: Vec<&UserId> = Vec::new(); + + for &username in &usernames { + match <&UserId>::try_from(username) { + Ok(user_id) => user_ids.push(user_id), + Err(_) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "{} is not a valid username", + username + ))) + } + } + } + + let mut deactivation_count = 0; + let mut admins = Vec::new(); + + if !force { + user_ids.retain(|&user_id| { + match db.users.is_admin(user_id, &db.rooms, &db.globals) { + Ok(is_admin) => match is_admin { + true => { + admins.push(user_id.localpart()); + false + } + false => true, + }, + Err(_) => false, + } + }) + } + + for &user_id in &user_ids { + match db.users.deactivate_account(user_id) { + Ok(_) => deactivation_count += 1, + Err(_) => {} + } + } + + if leave_rooms { + for &user_id in &user_ids { + let _ = db.rooms.leave_all_rooms(user_id, &db).await; + } + } + + if admins.is_empty() { + RoomMessageEventContent::text_plain(format!( + "Deactivated {} accounts.", + deactivation_count + )) + } else { + RoomMessageEventContent::text_plain(format!("Deactivated {} accounts.\nSkipped admin accounts: {:?}. Use --force to deactivate admin accounts", deactivation_count, admins.join(", "))) + } + } else { + RoomMessageEventContent::text_plain( + "Expected code block in command body. Add --help for details.", + ) + } + } }; Ok(reply_message_content) diff --git a/src/database/rooms.rs b/src/database/rooms.rs index 7b3b7506..4ad815e3 100644 --- a/src/database/rooms.rs +++ b/src/database/rooms.rs @@ -2569,6 +2569,27 @@ impl Rooms { } } + // Make a user leave all their joined rooms + #[tracing::instrument(skip(self, db))] + pub async fn leave_all_rooms(&self, user_id: &UserId, db: &Database) -> Result<()> { + let all_rooms = db + .rooms + .rooms_joined(user_id) + .chain(db.rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) + .collect::>(); + + for room_id in all_rooms { + let room_id = match room_id { + Ok(room_id) => room_id, + Err(_) => continue, + }; + + let _ = self.leave_room(user_id, &room_id, db).await; + } + + Ok(()) + } + #[tracing::instrument(skip(self, db))] pub async fn leave_room( &self, From 1c31f7905f4781be5dd99951a78508c9d3473636 Mon Sep 17 00:00:00 2001 From: Zeyphros Date: Sun, 19 Jun 2022 18:53:12 +0200 Subject: [PATCH 292/445] Update command comment to coincide with the default action --- src/database/admin.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/database/admin.rs b/src/database/admin.rs index 328c99ca..6f418ea8 100644 --- a/src/database/admin.rs +++ b/src/database/admin.rs @@ -240,8 +240,8 @@ enum AdminCommand { /// Deactivate a user /// - /// User will be removed from all rooms by default. - /// This behaviour can be overridden with the --no-leave-rooms flag. + /// User will not be removed from all rooms by default. 
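+    ///
+    /// Example (the user ID is illustrative):
+    /// `deactivate-user @alice:example.com`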
+ /// Use --leave-rooms to force the user to leave all rooms DeactivateUser { #[clap(short, long)] leave_rooms: bool, From 4dc14e15803035001a045db289fd847aaf63a0e2 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 16:22:34 +0200 Subject: [PATCH 293/445] refactor: prepare splitting src/database/rooms.rs to src/service/rooms/directory.rs --- src/{database/rooms.rs => service/rooms/directory.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{database/rooms.rs => service/rooms/directory.rs} (100%) diff --git a/src/database/rooms.rs b/src/service/rooms/directory.rs similarity index 100% rename from src/database/rooms.rs rename to src/service/rooms/directory.rs From 27e2f0d5458d886a86324af267628dae05bd288e Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 16:22:34 +0200 Subject: [PATCH 294/445] refactor: prepare splitting src/database/rooms.rs to src/service/rooms/alias.rs --- src/{database/rooms.rs => service/rooms/alias.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{database/rooms.rs => service/rooms/alias.rs} (100%) diff --git a/src/database/rooms.rs b/src/service/rooms/alias.rs similarity index 100% rename from src/database/rooms.rs rename to src/service/rooms/alias.rs From bd7b49b098caf1aaf42649f69a3f57120ea57834 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 16:22:35 +0200 Subject: [PATCH 295/445] refactor: prepare splitting src/database/rooms.rs to src/service/rooms/mod.rs --- src/{database/rooms.rs => service/rooms/mod.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{database/rooms.rs => service/rooms/mod.rs} (100%) diff --git a/src/database/rooms.rs b/src/service/rooms/mod.rs similarity index 100% rename from src/database/rooms.rs rename to src/service/rooms/mod.rs From baa8224cceab6944b1f2c44cd3f13d8dcd40e71e Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 16:22:35 +0200 Subject: [PATCH 296/445] refactor: prepare splitting src/database/rooms.rs to src/service/rooms/metadata.rs --- src/{database/rooms.rs => service/rooms/metadata.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{database/rooms.rs => service/rooms/metadata.rs} (100%) diff --git a/src/database/rooms.rs b/src/service/rooms/metadata.rs similarity index 100% rename from src/database/rooms.rs rename to src/service/rooms/metadata.rs From 249440115bfc81dd37aac4ea066f805bd451c4d6 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 16:22:35 +0200 Subject: [PATCH 297/445] refactor: prepare splitting src/database/rooms.rs to src/service/rooms/lazy_loading.rs --- src/{database/rooms.rs => service/rooms/lazy_loading.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{database/rooms.rs => service/rooms/lazy_loading.rs} (100%) diff --git a/src/database/rooms.rs b/src/service/rooms/lazy_loading.rs similarity index 100% rename from src/database/rooms.rs rename to src/service/rooms/lazy_loading.rs From 2dbfbd45a25f59ae61b114e26af6059c388d9e5e Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 16:22:36 +0200 Subject: [PATCH 298/445] refactor: prepare splitting src/database/rooms.rs to src/service/rooms/short.rs --- src/{database/rooms.rs => service/rooms/short.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{database/rooms.rs => service/rooms/short.rs} (100%) diff --git a/src/database/rooms.rs b/src/service/rooms/short.rs similarity index 100% rename from 
src/database/rooms.rs rename to src/service/rooms/short.rs From 8dffdadfd3cde3aa928a3d658a50386f7d707cc0 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 16:22:36 +0200 Subject: [PATCH 299/445] refactor: prepare splitting src/database/rooms.rs to src/service/rooms/search.rs --- src/{database/rooms.rs => service/rooms/search.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{database/rooms.rs => service/rooms/search.rs} (100%) diff --git a/src/database/rooms.rs b/src/service/rooms/search.rs similarity index 100% rename from src/database/rooms.rs rename to src/service/rooms/search.rs From 8ed79a00fd23a295794fc7fbc89501829c3a482f Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 16:22:36 +0200 Subject: [PATCH 300/445] refactor: prepare splitting src/database/rooms.rs to src/service/rooms/pdu_metadata.rs --- src/{database/rooms.rs => service/rooms/pdu_metadata.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{database/rooms.rs => service/rooms/pdu_metadata.rs} (100%) diff --git a/src/database/rooms.rs b/src/service/rooms/pdu_metadata.rs similarity index 100% rename from src/database/rooms.rs rename to src/service/rooms/pdu_metadata.rs From 54bf91b76e9095f8d9b416c39d7ad8236a5f9e6f Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 16:22:36 +0200 Subject: [PATCH 301/445] refactor: prepare splitting src/database/rooms.rs to src/service/rooms/outlier.rs --- src/{database/rooms.rs => service/rooms/outlier.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{database/rooms.rs => service/rooms/outlier.rs} (100%) diff --git a/src/database/rooms.rs b/src/service/rooms/outlier.rs similarity index 100% rename from src/database/rooms.rs rename to src/service/rooms/outlier.rs From d05b84d0f595da41330918f2ba3030dbc2f402b7 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 16:22:37 +0200 Subject: [PATCH 302/445] refactor: prepare splitting src/database/rooms.rs to src/service/rooms/state_compressor.rs --- src/{database/rooms.rs => service/rooms/state_compressor.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{database/rooms.rs => service/rooms/state_compressor.rs} (100%) diff --git a/src/database/rooms.rs b/src/service/rooms/state_compressor.rs similarity index 100% rename from src/database/rooms.rs rename to src/service/rooms/state_compressor.rs From 751be39376c1c147cb8e6f7a0facf67814397908 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 16:22:37 +0200 Subject: [PATCH 303/445] refactor: prepare splitting src/database/rooms.rs to src/service/rooms/state_cache.rs --- src/{database/rooms.rs => service/rooms/state_cache.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{database/rooms.rs => service/rooms/state_cache.rs} (100%) diff --git a/src/database/rooms.rs b/src/service/rooms/state_cache.rs similarity index 100% rename from src/database/rooms.rs rename to src/service/rooms/state_cache.rs From 64a022a4d2527170a675be3dd0f2964c6da65aec Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 16:22:37 +0200 Subject: [PATCH 304/445] refactor: prepare splitting src/database/rooms.rs to src/service/rooms/state.rs --- src/{database/rooms.rs => service/rooms/state.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{database/rooms.rs => service/rooms/state.rs} (100%) diff --git a/src/database/rooms.rs b/src/service/rooms/state.rs similarity index 100% 
rename from src/database/rooms.rs rename to src/service/rooms/state.rs From e22f5fef1f6623842ab99617c1231276b52a1633 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 16:22:38 +0200 Subject: [PATCH 305/445] refactor: prepare splitting src/database/rooms.rs to src/service/rooms/user.rs --- src/{database/rooms.rs => service/rooms/user.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{database/rooms.rs => service/rooms/user.rs} (100%) diff --git a/src/database/rooms.rs b/src/service/rooms/user.rs similarity index 100% rename from src/database/rooms.rs rename to src/service/rooms/user.rs From 7989c7cdda148bc804238522085489333e2c849e Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 16:22:38 +0200 Subject: [PATCH 306/445] refactor: prepare splitting src/database/rooms.rs to src/service/rooms/timeline.rs --- src/{database/rooms.rs => service/rooms/timeline.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{database/rooms.rs => service/rooms/timeline.rs} (100%) diff --git a/src/database/rooms.rs b/src/service/rooms/timeline.rs similarity index 100% rename from src/database/rooms.rs rename to src/service/rooms/timeline.rs From 025b64befc5872aa7ffcc0ba348005e326d347d5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 19 Jun 2022 22:56:14 +0200 Subject: [PATCH 307/445] refactor: renames and split room.rs --- src/{ => api}/appservice_server.rs | 0 src/{ => api}/client_server/account.rs | 0 src/{ => api}/client_server/alias.rs | 0 src/{ => api}/client_server/backup.rs | 0 src/{ => api}/client_server/capabilities.rs | 0 src/{ => api}/client_server/config.rs | 0 src/{ => api}/client_server/context.rs | 0 src/{ => api}/client_server/device.rs | 0 src/{ => api}/client_server/directory.rs | 0 src/{ => api}/client_server/filter.rs | 0 src/{ => api}/client_server/keys.rs | 0 src/{ => api}/client_server/media.rs | 0 src/{ => api}/client_server/membership.rs | 206 + src/{ => api}/client_server/message.rs | 0 src/{ => api}/client_server/mod.rs | 0 src/{ => api}/client_server/presence.rs | 0 src/{ => api}/client_server/profile.rs | 0 src/{ => api}/client_server/push.rs | 0 src/{ => api}/client_server/read_marker.rs | 0 src/{ => api}/client_server/redact.rs | 0 src/{ => api}/client_server/report.rs | 0 src/{ => api}/client_server/room.rs | 21 + src/{ => api}/client_server/search.rs | 0 src/{ => api}/client_server/session.rs | 0 src/{ => api}/client_server/state.rs | 0 src/{ => api}/client_server/sync.rs | 0 src/{ => api}/client_server/tag.rs | 0 src/{ => api}/client_server/thirdparty.rs | 0 src/{ => api}/client_server/to_device.rs | 0 src/{ => api}/client_server/typing.rs | 0 src/{ => api}/client_server/unversioned.rs | 0 src/{ => api}/client_server/user_directory.rs | 0 src/{ => api}/client_server/voip.rs | 0 src/{ => api}/ruma_wrapper/axum.rs | 0 .../ruma_wrapper/mod.rs} | 0 src/{ => api}/server_server.rs | 0 src/{config.rs => config/mod.rs} | 0 src/{database.rs => database/mod.rs} | 0 src/{database => service}/account_data.rs | 0 src/{database => service}/admin.rs | 0 src/{database => service}/appservice.rs | 0 src/{database => service}/globals.rs | 0 src/{database => service}/key_backups.rs | 0 src/{database => service}/media.rs | 0 src/{ => service}/pdu.rs | 0 src/{database => service}/pusher.rs | 0 src/service/rooms/alias.rs | 3437 ---------------- src/service/rooms/directory.rs | 3474 ---------------- src/{database => service}/rooms/edus.rs | 0 src/service/rooms/lazy_loading.rs | 3412 
---------------- src/service/rooms/metadata.rs | 3459 ---------------- src/service/rooms/mod.rs | 3307 +--------------- src/service/rooms/outlier.rs | 3483 ----------------- src/service/rooms/pdu_metadata.rs | 3472 ---------------- src/service/rooms/search.rs | 3453 ---------------- src/service/rooms/short.rs | 3341 +--------------- src/service/rooms/state.rs | 2996 +------------- src/service/rooms/state_cache.rs | 2786 ------------- src/service/rooms/state_compressor.rs | 3288 +--------------- src/service/rooms/timeline.rs | 2787 +------------ src/service/rooms/user.rs | 3477 +--------------- src/{database => service}/sending.rs | 0 src/{database => service}/transaction_ids.rs | 0 src/{database => service}/uiaa.rs | 0 src/{database => service}/users.rs | 0 src/{ => utils}/error.rs | 0 src/{ => utils}/utils.rs | 0 67 files changed, 438 insertions(+), 45961 deletions(-) rename src/{ => api}/appservice_server.rs (100%) rename src/{ => api}/client_server/account.rs (100%) rename src/{ => api}/client_server/alias.rs (100%) rename src/{ => api}/client_server/backup.rs (100%) rename src/{ => api}/client_server/capabilities.rs (100%) rename src/{ => api}/client_server/config.rs (100%) rename src/{ => api}/client_server/context.rs (100%) rename src/{ => api}/client_server/device.rs (100%) rename src/{ => api}/client_server/directory.rs (100%) rename src/{ => api}/client_server/filter.rs (100%) rename src/{ => api}/client_server/keys.rs (100%) rename src/{ => api}/client_server/media.rs (100%) rename src/{ => api}/client_server/membership.rs (83%) rename src/{ => api}/client_server/message.rs (100%) rename src/{ => api}/client_server/mod.rs (100%) rename src/{ => api}/client_server/presence.rs (100%) rename src/{ => api}/client_server/profile.rs (100%) rename src/{ => api}/client_server/push.rs (100%) rename src/{ => api}/client_server/read_marker.rs (100%) rename src/{ => api}/client_server/redact.rs (100%) rename src/{ => api}/client_server/report.rs (100%) rename src/{ => api}/client_server/room.rs (96%) rename src/{ => api}/client_server/search.rs (100%) rename src/{ => api}/client_server/session.rs (100%) rename src/{ => api}/client_server/state.rs (100%) rename src/{ => api}/client_server/sync.rs (100%) rename src/{ => api}/client_server/tag.rs (100%) rename src/{ => api}/client_server/thirdparty.rs (100%) rename src/{ => api}/client_server/to_device.rs (100%) rename src/{ => api}/client_server/typing.rs (100%) rename src/{ => api}/client_server/unversioned.rs (100%) rename src/{ => api}/client_server/user_directory.rs (100%) rename src/{ => api}/client_server/voip.rs (100%) rename src/{ => api}/ruma_wrapper/axum.rs (100%) rename src/{ruma_wrapper.rs => api/ruma_wrapper/mod.rs} (100%) rename src/{ => api}/server_server.rs (100%) rename src/{config.rs => config/mod.rs} (100%) rename src/{database.rs => database/mod.rs} (100%) rename src/{database => service}/account_data.rs (100%) rename src/{database => service}/admin.rs (100%) rename src/{database => service}/appservice.rs (100%) rename src/{database => service}/globals.rs (100%) rename src/{database => service}/key_backups.rs (100%) rename src/{database => service}/media.rs (100%) rename src/{ => service}/pdu.rs (100%) rename src/{database => service}/pusher.rs (100%) rename src/{database => service}/rooms/edus.rs (100%) rename src/{database => service}/sending.rs (100%) rename src/{database => service}/transaction_ids.rs (100%) rename src/{database => service}/uiaa.rs (100%) rename src/{database => service}/users.rs (100%) rename src/{ => 
utils}/error.rs (100%) rename src/{ => utils}/utils.rs (100%) diff --git a/src/appservice_server.rs b/src/api/appservice_server.rs similarity index 100% rename from src/appservice_server.rs rename to src/api/appservice_server.rs diff --git a/src/client_server/account.rs b/src/api/client_server/account.rs similarity index 100% rename from src/client_server/account.rs rename to src/api/client_server/account.rs diff --git a/src/client_server/alias.rs b/src/api/client_server/alias.rs similarity index 100% rename from src/client_server/alias.rs rename to src/api/client_server/alias.rs diff --git a/src/client_server/backup.rs b/src/api/client_server/backup.rs similarity index 100% rename from src/client_server/backup.rs rename to src/api/client_server/backup.rs diff --git a/src/client_server/capabilities.rs b/src/api/client_server/capabilities.rs similarity index 100% rename from src/client_server/capabilities.rs rename to src/api/client_server/capabilities.rs diff --git a/src/client_server/config.rs b/src/api/client_server/config.rs similarity index 100% rename from src/client_server/config.rs rename to src/api/client_server/config.rs diff --git a/src/client_server/context.rs b/src/api/client_server/context.rs similarity index 100% rename from src/client_server/context.rs rename to src/api/client_server/context.rs diff --git a/src/client_server/device.rs b/src/api/client_server/device.rs similarity index 100% rename from src/client_server/device.rs rename to src/api/client_server/device.rs diff --git a/src/client_server/directory.rs b/src/api/client_server/directory.rs similarity index 100% rename from src/client_server/directory.rs rename to src/api/client_server/directory.rs diff --git a/src/client_server/filter.rs b/src/api/client_server/filter.rs similarity index 100% rename from src/client_server/filter.rs rename to src/api/client_server/filter.rs diff --git a/src/client_server/keys.rs b/src/api/client_server/keys.rs similarity index 100% rename from src/client_server/keys.rs rename to src/api/client_server/keys.rs diff --git a/src/client_server/media.rs b/src/api/client_server/media.rs similarity index 100% rename from src/client_server/media.rs rename to src/api/client_server/media.rs diff --git a/src/client_server/membership.rs b/src/api/client_server/membership.rs similarity index 83% rename from src/client_server/membership.rs rename to src/api/client_server/membership.rs index a1b616be..4dda11ad 100644 --- a/src/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -1080,3 +1080,209 @@ pub(crate) async fn invite_helper<'a>( Ok(()) } + + // Make a user leave all their joined rooms + #[tracing::instrument(skip(self, db))] + pub async fn leave_all_rooms(&self, user_id: &UserId, db: &Database) -> Result<()> { + let all_rooms = db + .rooms + .rooms_joined(user_id) + .chain(db.rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) + .collect::>(); + + for room_id in all_rooms { + let room_id = match room_id { + Ok(room_id) => room_id, + Err(_) => continue, + }; + + let _ = self.leave_room(user_id, &room_id, db).await; + } + + Ok(()) + } + + #[tracing::instrument(skip(self, db))] + pub async fn leave_room( + &self, + user_id: &UserId, + room_id: &RoomId, + db: &Database, + ) -> Result<()> { + // Ask a remote server if we don't have this room + if !self.exists(room_id)? 
&& room_id.server_name() != db.globals.server_name() { + if let Err(e) = self.remote_leave_room(user_id, room_id, db).await { + warn!("Failed to leave room {} remotely: {}", user_id, e); + // Don't tell the client about this error + } + + let last_state = self + .invite_state(user_id, room_id)? + .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?; + + // We always drop the invite, we can't rely on other servers + self.update_membership( + room_id, + user_id, + MembershipState::Leave, + user_id, + last_state, + db, + true, + )?; + } else { + let mutex_state = Arc::clone( + db.globals + .roomid_mutex_state + .write() + .unwrap() + .entry(room_id.to_owned()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + let mut event: RoomMemberEventContent = serde_json::from_str( + self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? + .ok_or(Error::BadRequest( + ErrorKind::BadState, + "Cannot leave a room you are not a member of.", + ))? + .content + .get(), + ) + .map_err(|_| Error::bad_database("Invalid member event in database."))?; + + event.membership = MembershipState::Leave; + + self.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomMember, + content: to_raw_value(&event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + user_id, + room_id, + db, + &state_lock, + )?; + } + + Ok(()) + } + + #[tracing::instrument(skip(self, db))] + async fn remote_leave_room( + &self, + user_id: &UserId, + room_id: &RoomId, + db: &Database, + ) -> Result<()> { + let mut make_leave_response_and_server = Err(Error::BadServerResponse( + "No server available to assist in leaving.", + )); + + let invite_state = db + .rooms + .invite_state(user_id, room_id)? + .ok_or(Error::BadRequest( + ErrorKind::BadState, + "User is not invited.", + ))?; + + let servers: HashSet<_> = invite_state + .iter() + .filter_map(|event| serde_json::from_str(event.json().get()).ok()) + .filter_map(|event: serde_json::Value| event.get("sender").cloned()) + .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) + .filter_map(|sender| UserId::parse(sender).ok()) + .map(|user| user.server_name().to_owned()) + .collect(); + + for remote_server in servers { + let make_leave_response = db + .sending + .send_federation_request( + &db.globals, + &remote_server, + federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, + ) + .await; + + make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); + + if make_leave_response_and_server.is_ok() { + break; + } + } + + let (make_leave_response, remote_server) = make_leave_response_and_server?; + + let room_version_id = match make_leave_response.room_version { + Some(version) if self.is_supported_version(&db, &version) => version, + _ => return Err(Error::BadServerResponse("Room version is not supported")), + }; + + let mut leave_event_stub = + serde_json::from_str::(make_leave_response.event.get()).map_err( + |_| Error::BadServerResponse("Invalid make_leave event json received from server."), + )?; + + // TODO: Is origin needed? 
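+        // The stub returned by make_leave is completed locally below: origin and
+        // origin_server_ts are added, the event is hashed and signed with this
+        // server's keypair, a reference-hash event ID is derived, and the finished
+        // leave event is sent back to the remote server via send_leave.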
+ leave_event_stub.insert( + "origin".to_owned(), + CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), + ); + leave_event_stub.insert( + "origin_server_ts".to_owned(), + CanonicalJsonValue::Integer( + utils::millis_since_unix_epoch() + .try_into() + .expect("Timestamp is valid js_int value"), + ), + ); + // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms + leave_event_stub.remove("event_id"); + + // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present + ruma::signatures::hash_and_sign_event( + db.globals.server_name().as_str(), + db.globals.keypair(), + &mut leave_event_stub, + &room_version_id, + ) + .expect("event is valid, we just created it"); + + // Generate event id + let event_id = EventId::parse(format!( + "${}", + ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) + .expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are valid event ids"); + + // Add event_id back + leave_event_stub.insert( + "event_id".to_owned(), + CanonicalJsonValue::String(event_id.as_str().to_owned()), + ); + + // It has enough fields to be called a proper event now + let leave_event = leave_event_stub; + + db.sending + .send_federation_request( + &db.globals, + &remote_server, + federation::membership::create_leave_event::v2::Request { + room_id, + event_id: &event_id, + pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), + }, + ) + .await?; + + Ok(()) + } + diff --git a/src/client_server/message.rs b/src/api/client_server/message.rs similarity index 100% rename from src/client_server/message.rs rename to src/api/client_server/message.rs diff --git a/src/client_server/mod.rs b/src/api/client_server/mod.rs similarity index 100% rename from src/client_server/mod.rs rename to src/api/client_server/mod.rs diff --git a/src/client_server/presence.rs b/src/api/client_server/presence.rs similarity index 100% rename from src/client_server/presence.rs rename to src/api/client_server/presence.rs diff --git a/src/client_server/profile.rs b/src/api/client_server/profile.rs similarity index 100% rename from src/client_server/profile.rs rename to src/api/client_server/profile.rs diff --git a/src/client_server/push.rs b/src/api/client_server/push.rs similarity index 100% rename from src/client_server/push.rs rename to src/api/client_server/push.rs diff --git a/src/client_server/read_marker.rs b/src/api/client_server/read_marker.rs similarity index 100% rename from src/client_server/read_marker.rs rename to src/api/client_server/read_marker.rs diff --git a/src/client_server/redact.rs b/src/api/client_server/redact.rs similarity index 100% rename from src/client_server/redact.rs rename to src/api/client_server/redact.rs diff --git a/src/client_server/report.rs b/src/api/client_server/report.rs similarity index 100% rename from src/client_server/report.rs rename to src/api/client_server/report.rs diff --git a/src/client_server/room.rs b/src/api/client_server/room.rs similarity index 96% rename from src/client_server/room.rs rename to src/api/client_server/room.rs index a5b79705..5ae7224c 100644 --- a/src/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -707,3 +707,24 @@ pub async fn upgrade_room_route( // Return the replacement room id Ok(upgrade_room::v3::Response { replacement_room }) } + + /// Returns the room's version. 
+ #[tracing::instrument(skip(self))] + pub fn get_room_version(&self, room_id: &RoomId) -> Result { + let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; + + let create_event_content: Option = create_event + .as_ref() + .map(|create_event| { + serde_json::from_str(create_event.content.get()).map_err(|e| { + warn!("Invalid create event: {}", e); + Error::bad_database("Invalid create event in db.") + }) + }) + .transpose()?; + let room_version = create_event_content + .map(|create_event| create_event.room_version) + .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; + Ok(room_version) + } + diff --git a/src/client_server/search.rs b/src/api/client_server/search.rs similarity index 100% rename from src/client_server/search.rs rename to src/api/client_server/search.rs diff --git a/src/client_server/session.rs b/src/api/client_server/session.rs similarity index 100% rename from src/client_server/session.rs rename to src/api/client_server/session.rs diff --git a/src/client_server/state.rs b/src/api/client_server/state.rs similarity index 100% rename from src/client_server/state.rs rename to src/api/client_server/state.rs diff --git a/src/client_server/sync.rs b/src/api/client_server/sync.rs similarity index 100% rename from src/client_server/sync.rs rename to src/api/client_server/sync.rs diff --git a/src/client_server/tag.rs b/src/api/client_server/tag.rs similarity index 100% rename from src/client_server/tag.rs rename to src/api/client_server/tag.rs diff --git a/src/client_server/thirdparty.rs b/src/api/client_server/thirdparty.rs similarity index 100% rename from src/client_server/thirdparty.rs rename to src/api/client_server/thirdparty.rs diff --git a/src/client_server/to_device.rs b/src/api/client_server/to_device.rs similarity index 100% rename from src/client_server/to_device.rs rename to src/api/client_server/to_device.rs diff --git a/src/client_server/typing.rs b/src/api/client_server/typing.rs similarity index 100% rename from src/client_server/typing.rs rename to src/api/client_server/typing.rs diff --git a/src/client_server/unversioned.rs b/src/api/client_server/unversioned.rs similarity index 100% rename from src/client_server/unversioned.rs rename to src/api/client_server/unversioned.rs diff --git a/src/client_server/user_directory.rs b/src/api/client_server/user_directory.rs similarity index 100% rename from src/client_server/user_directory.rs rename to src/api/client_server/user_directory.rs diff --git a/src/client_server/voip.rs b/src/api/client_server/voip.rs similarity index 100% rename from src/client_server/voip.rs rename to src/api/client_server/voip.rs diff --git a/src/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs similarity index 100% rename from src/ruma_wrapper/axum.rs rename to src/api/ruma_wrapper/axum.rs diff --git a/src/ruma_wrapper.rs b/src/api/ruma_wrapper/mod.rs similarity index 100% rename from src/ruma_wrapper.rs rename to src/api/ruma_wrapper/mod.rs diff --git a/src/server_server.rs b/src/api/server_server.rs similarity index 100% rename from src/server_server.rs rename to src/api/server_server.rs diff --git a/src/config.rs b/src/config/mod.rs similarity index 100% rename from src/config.rs rename to src/config/mod.rs diff --git a/src/database.rs b/src/database/mod.rs similarity index 100% rename from src/database.rs rename to src/database/mod.rs diff --git a/src/database/account_data.rs b/src/service/account_data.rs similarity index 100% rename from src/database/account_data.rs rename to 
src/service/account_data.rs diff --git a/src/database/admin.rs b/src/service/admin.rs similarity index 100% rename from src/database/admin.rs rename to src/service/admin.rs diff --git a/src/database/appservice.rs b/src/service/appservice.rs similarity index 100% rename from src/database/appservice.rs rename to src/service/appservice.rs diff --git a/src/database/globals.rs b/src/service/globals.rs similarity index 100% rename from src/database/globals.rs rename to src/service/globals.rs diff --git a/src/database/key_backups.rs b/src/service/key_backups.rs similarity index 100% rename from src/database/key_backups.rs rename to src/service/key_backups.rs diff --git a/src/database/media.rs b/src/service/media.rs similarity index 100% rename from src/database/media.rs rename to src/service/media.rs diff --git a/src/pdu.rs b/src/service/pdu.rs similarity index 100% rename from src/pdu.rs rename to src/service/pdu.rs diff --git a/src/database/pusher.rs b/src/service/pusher.rs similarity index 100% rename from src/database/pusher.rs rename to src/service/pusher.rs diff --git a/src/service/rooms/alias.rs b/src/service/rooms/alias.rs index 4ad815e3..393ad671 100644 --- a/src/service/rooms/alias.rs +++ b/src/service/rooms/alias.rs @@ -1,2795 +1,3 @@ -mod edus; - -pub use edus::RoomEdus; - -use crate::{ - pdu::{EventHash, PduBuilder}, - utils, Database, Error, PduEvent, Result, -}; -use lru_cache::LruCache; -use regex::Regex; -use ring::digest; -use ruma::{ - api::{client::error::ErrorKind, federation}, - events::{ - direct::DirectEvent, - ignored_user_list::IgnoredUserListEvent, - push_rules::PushRulesEvent, - room::{ - create::RoomCreateEventContent, - member::{MembershipState, RoomMemberEventContent}, - power_levels::RoomPowerLevelsEventContent, - }, - tag::TagEvent, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, RoomEventType, StateEventType, - }, - push::{Action, Ruleset, Tweak}, - serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res::{self, RoomVersion, StateMap}, - uint, DeviceId, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, -}; -use serde::Deserialize; -use serde_json::value::to_raw_value; -use std::{ - borrow::Cow, - collections::{hash_map, BTreeMap, HashMap, HashSet}, - fmt::Debug, - iter, - mem::size_of, - sync::{Arc, Mutex, RwLock}, -}; -use tokio::sync::MutexGuard; -use tracing::{error, warn}; - -use super::{abstraction::Tree, pusher}; - -/// The unique identifier of each state group. -/// -/// This is created when a state group is added to the database by -/// hashing the entire state. -pub type StateHashId = Vec; -pub type CompressedStateEvent = [u8; 2 * size_of::()]; - -pub struct Rooms { - pub edus: RoomEdus, - pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count - pub(super) eventid_pduid: Arc, - pub(super) roomid_pduleaves: Arc, - pub(super) alias_roomid: Arc, - pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count - pub(super) publicroomids: Arc, - - pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount - - /// Participating servers in a room. 
- pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName - pub(super) serverroomids: Arc, // ServerRoomId = ServerName + RoomId - - pub(super) userroomid_joined: Arc, - pub(super) roomuserid_joined: Arc, - pub(super) roomid_joinedcount: Arc, - pub(super) roomid_invitedcount: Arc, - pub(super) roomuseroncejoinedids: Arc, - pub(super) userroomid_invitestate: Arc, // InviteState = Vec> - pub(super) roomuserid_invitecount: Arc, // InviteCount = Count - pub(super) userroomid_leftstate: Arc, - pub(super) roomuserid_leftcount: Arc, - - pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled - - pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId - - pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 - pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 - - /// Remember the current state hash of a room. - pub(super) roomid_shortstatehash: Arc, - pub(super) roomsynctoken_shortstatehash: Arc, - /// Remember the state hash at events in the past. - pub(super) shorteventid_shortstatehash: Arc, - /// StateKey = EventType + StateKey, ShortStateKey = Count - pub(super) statekey_shortstatekey: Arc, - pub(super) shortstatekey_statekey: Arc, - - pub(super) roomid_shortroomid: Arc, - - pub(super) shorteventid_eventid: Arc, - pub(super) eventid_shorteventid: Arc, - - pub(super) statehash_shortstatehash: Arc, - pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) - - pub(super) shorteventid_authchain: Arc, - - /// RoomId + EventId -> outlier PDU. - /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) eventid_outlierpdu: Arc, - pub(super) softfailedeventids: Arc, - - /// RoomId + EventId -> Parent PDU EventId. - pub(super) referencedevents: Arc, - - pub(super) pdu_cache: Mutex, Arc>>, - pub(super) shorteventid_cache: Mutex>>, - pub(super) auth_chain_cache: Mutex, Arc>>>, - pub(super) eventidshort_cache: Mutex, u64>>, - pub(super) statekeyshort_cache: Mutex>, - pub(super) shortstatekey_cache: Mutex>, - pub(super) our_real_users_cache: RwLock, Arc>>>>, - pub(super) appservice_in_room_cache: RwLock, HashMap>>, - pub(super) lazy_load_waiting: - Mutex, Box, Box, u64), HashSet>>>, - pub(super) stateinfo_cache: Mutex< - LruCache< - u64, - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - >, - >, - pub(super) lasttimelinecount_cache: Mutex, u64>>, -} - -impl Rooms { - /// Returns true if a given room version is supported - #[tracing::instrument(skip(self, db))] - pub fn is_supported_version(&self, db: &Database, room_version: &RoomVersionId) -> bool { - db.globals.supported_room_versions().contains(room_version) - } - - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? 
- .pop() - .expect("there is always one layer") - .1; - let mut result = BTreeMap::new(); - let mut i = 0; - for compressed in full_state.into_iter() { - let parsed = self.parse_compressed_state_event(compressed)?; - result.insert(parsed.0, parsed.1); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - Ok(result) - } - - #[tracing::instrument(skip(self))] - pub async fn state_full( - &self, - shortstatehash: u64, - ) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - let mut result = HashMap::new(); - let mut i = 0; - for compressed in full_state { - let (_, eventid) = self.parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.get_pdu(&eventid)? { - result.insert( - ( - pdu.kind.to_string().into(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - ); - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get_id( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { - Some(s) => s, - None => return Ok(None), - }; - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .into_iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - self.parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) - } - - /// Returns the state hash for this pdu. - pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.eventid_shorteventid - .get(event_id.as_bytes())? - .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) - }) - .transpose() - }) - } - - /// Returns the last state hash key added to the db for the given room. - #[tracing::instrument(skip(self))] - pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - /// This fetches auth events from the current state. - #[tracing::instrument(skip(self))] - pub fn get_auth_events( - &self, - room_id: &RoomId, - kind: &RoomEventType, - sender: &UserId, - state_key: Option<&str>, - content: &serde_json::value::RawValue, - ) -> Result>> { - let shortstatehash = - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - current_shortstatehash - } else { - return Ok(HashMap::new()); - }; - - let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) - .expect("content is a valid JSON object"); - - let mut sauthevents = auth_events - .into_iter() - .filter_map(|(event_type, state_key)| { - self.get_shortstatekey(&event_type.to_string().into(), &state_key) - .ok() - .flatten() - .map(|s| (s, (event_type, state_key))) - }) - .collect::>(); - - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - Ok(full_state - .into_iter() - .filter_map(|compressed| self.parse_compressed_state_event(compressed).ok()) - .filter_map(|(shortstatekey, event_id)| { - sauthevents.remove(&shortstatekey).map(|k| (k, event_id)) - }) - .filter_map(|(k, event_id)| self.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu))) - .collect()) - } - - /// Generate a new StateHash. - /// - /// A unique hash made from hashing all PDU ids of the state joined with 0xff. - fn calculate_hash(&self, bytes_list: &[&[u8]]) -> StateHashId { - // We only hash the pdu's event ids, not the whole pdu - let bytes = bytes_list.join(&0xff); - let hash = digest::digest(&digest::SHA256, &bytes); - hash.as_ref().into() - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. - self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() - } - - /// Force the creation of a new StateHash and insert it into the db. - /// - /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. 
- #[tracing::instrument(skip(self, new_state_ids_compressed, db))] - pub fn force_state( - &self, - room_id: &RoomId, - new_state_ids_compressed: HashSet, - db: &Database, - ) -> Result<()> { - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - for event_id in statediffnew.into_iter().filter_map(|new| { - self.parse_compressed_state_event(new) - .ok() - .map(|(_, id)| id) - }) { - let pdu = match self.get_pdu_json(&event_id)? { - Some(pdu) => pdu, - None => continue, - }; - - if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { - continue; - } - - let pdu: PduEvent = match serde_json::from_str( - &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), - ) { - Ok(pdu) => pdu, - Err(_) => continue, - }; - - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let membership = match serde_json::from_str::(pdu.content.get()) { - Ok(e) => e.membership, - Err(_) => continue, - }; - - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, - }; - - let user_id = match UserId::parse(state_key) { - Ok(id) => id, - Err(_) => continue, - }; - - self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; - } - - self.update_joined_count(room_id, db)?; - - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. - #[tracing::instrument(skip(self))] - pub fn load_shortstatehash_info( - &self, - shortstatehash: u64, - ) -> Result< - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - > { - if let Some(r) = self - .stateinfo_cache - .lock() - .unwrap() - .get_mut(&shortstatehash) - { - return Ok(r.clone()); - } - - let value = self - .shortstatehash_statediff - .get(&shortstatehash.to_be_bytes())? 
- .ok_or_else(|| Error::bad_database("State hash does not exist"))?; - let parent = - utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - - let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); - - let mut i = size_of::(); - while let Some(v) = value.get(i..i + 2 * size_of::()) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i += size_of::(); - continue; - } - if add_mode { - added.insert(v.try_into().expect("we checked the size above")); - } else { - removed.insert(v.try_into().expect("we checked the size above")); - } - i += 2 * size_of::(); - } - - if parent != 0_u64 { - let mut response = self.load_shortstatehash_info(parent)?; - let mut state = response.last().unwrap().1.clone(); - state.extend(added.iter().copied()); - for r in &removed { - state.remove(r); - } - - response.push((shortstatehash, state, added, removed)); - - Ok(response) - } else { - let response = vec![(shortstatehash, added.clone(), added, removed)]; - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); - Ok(response) - } - } - - pub fn compress_state_event( - &self, - shortstatekey: u64, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - let mut v = shortstatekey.to_be_bytes().to_vec(); - v.extend_from_slice( - &self - .get_or_create_shorteventid(event_id, globals)? - .to_be_bytes(), - ); - Ok(v.try_into().expect("we checked the size above")) - } - - /// Returns shortstatekey, event id - pub fn parse_compressed_state_event( - &self, - compressed_event: CompressedStateEvent, - ) -> Result<(u64, Arc)> { - Ok(( - utils::u64_from_bytes(&compressed_event[0..size_of::()]) - .expect("bytes have right length"), - self.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]) - .expect("bytes have right length"), - )?, - )) - } - - /// Creates a new shortstatehash that often is just a diff to an already existing - /// shortstatehash and therefore very efficient. - /// - /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer - /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 - /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's - /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. - /// - /// * `shortstatehash` - Shortstatehash of this state - /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid - /// * `statediffremoved` - Removed from base. 
Each vec is shortstatekey+shorteventid - /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer - /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer - #[tracing::instrument(skip( - self, - statediffnew, - statediffremoved, - diff_to_sibling, - parent_states - ))] - pub fn save_state_from_diff( - &self, - shortstatehash: u64, - statediffnew: HashSet, - statediffremoved: HashSet, - diff_to_sibling: usize, - mut parent_states: Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - ) -> Result<()> { - let diffsum = statediffnew.len() + statediffremoved.len(); - - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().unwrap(); - - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - - return Ok(()); - } - - if parent_states.is_empty() { - // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - - return Ok(()); - }; - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. We replace a layer above - - let parent = parent_states.pop().unwrap(); - let parent_diff = parent.2.len() + parent.3.len(); - - if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { - // Diff too big, we replace above layer(s) - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. 
We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.to_be_bytes().to_vec(); - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed[..]); - } - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - } - - Ok(()) - } - - /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash( - &self, - state_hash: &StateHashId, - globals: &super::globals::Globals, - ) -> Result<(u64, bool)> { - Ok(match self.statehash_shortstatehash.get(state_hash)? { - Some(shortstatehash) => ( - utils::u64_from_bytes(&shortstatehash) - .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, - true, - ), - None => { - let shortstatehash = globals.next_count()?; - self.statehash_shortstatehash - .insert(state_hash, &shortstatehash.to_be_bytes())?; - (shortstatehash, false) - } - }) - } - - pub fn get_or_create_shorteventid( - &self, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { - return Ok(*short); - } - - let short = match self.eventid_shorteventid.get(event_id.as_bytes())? { - Some(shorteventid) => utils::u64_from_bytes(&shorteventid) - .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, - None => { - let shorteventid = globals.next_count()?; - self.eventid_shorteventid - .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; - shorteventid - } - }; - - self.eventidshort_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), short); - - Ok(short) - } - - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { - self.roomid_shortroomid - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) - }) - .transpose() - } - - pub fn get_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - ) -> Result> { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(Some(*short)); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = self - .statekey_shortstatekey - .get(&statekey)? - .map(|shortstatekey| { - utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) - }) - .transpose()?; - - if let Some(s) = short { - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), s); - } - - Ok(short) - } - - pub fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - globals: &super::globals::Globals, - ) -> Result { - Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? 
{ - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, - None => { - let short = globals.next_count()?; - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes())?; - short - } - }) - } - - pub fn get_or_create_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(*short); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = match self.statekey_shortstatekey.get(&statekey)? { - Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, - None => { - let shortstatekey = globals.next_count()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes())?; - self.shortstatekey_statekey - .insert(&shortstatekey.to_be_bytes(), &statekey)?; - shortstatekey - } - }; - - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), short); - - Ok(short) - } - - pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - if let Some(id) = self - .shorteventid_cache - .lock() - .unwrap() - .get_mut(&shorteventid) - { - return Ok(Arc::clone(id)); - } - - let bytes = self - .shorteventid_eventid - .get(&shorteventid.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - - let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; - - self.shorteventid_cache - .lock() - .unwrap() - .insert(shorteventid, Arc::clone(&event_id)); - - Ok(event_id) - } - - pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { - if let Some(id) = self - .shortstatekey_cache - .lock() - .unwrap() - .get_mut(&shortstatekey) - { - return Ok(id.clone()); - } - - let bytes = self - .shortstatekey_statekey - .get(&shortstatekey.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; - - let mut parts = bytes.splitn(2, |&b| b == 0xff); - let eventtype_bytes = parts.next().expect("split always returns one entry"); - let statekey_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; - - let event_type = - StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { - Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; - - let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { - Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") - })?; - - let result = (event_type, state_key); - - self.shortstatekey_cache - .lock() - .unwrap() - .insert(shortstatekey, result.clone()); - - Ok(result) - } - - /// Returns the full room state. - #[tracing::instrument(skip(self))] - pub async fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns the `count` of this pdu's id. - pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) - } - - /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - - /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu's id. - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? 
- .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { - return Ok(Some(Arc::clone(p))); - } - - if let Some(pdu) = self - .eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - .map(Arc::new) - }) - .transpose()? - { - self.pdu_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), Arc::clone(&pdu)); - Ok(Some(pdu)) - } else { - Ok(None) - } - } - - /// Returns the pdu. - /// - /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Returns the pdu as a `BTreeMap`. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Removes a pdu and creates a new one with the same id. - #[tracing::instrument(skip(self))] - fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - if self.pduid_pdu.get(pdu_id)?.is_some() { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), - )?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "PDU does not exist.", - )) - } - } - - /// Returns the leaf pdus of a room. - #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - #[tracing::instrument(skip(self, room_id, event_ids))] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - /// Replace the leaves of a room. - /// - /// The provided `event_ids` become the new leaves, this allows a room to have multiple - /// `prev_events`. 
- #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves<'a>( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - /// Returns the pdu from the outlier tree. - pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - /// Append the PDU as an outlier. - /// - /// Any event given to this will be processed (state-res) on another thread. - #[tracing::instrument(skip(self, pdu))] - pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } - - #[tracing::instrument(skip(self))] - pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } - - /// Creates a new persisted data unit and adds it to a room. - /// - /// By this point the incoming event should be fully authenticated, no auth happens - /// in `append_pdu`. - /// - /// Returns pdu id - #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] - pub fn append_pdu<'a>( - &self, - pdu: &PduEvent, - mut pdu_json: CanonicalJsonObject, - leaves: impl IntoIterator + Debug, - db: &Database, - ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); - - // Make unsigned fields correct. This is not properly documented in the spec, but state - // events need to have previous content in the unsigned field, so clients can easily - // interpret things like membership changes - if let Some(state_key) = &pdu.state_key { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) - { - if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) - .unwrap() - { - unsigned.insert( - "prev_content".to_owned(), - CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()) - .expect("event is valid, we just created it"), - ), - ); - } - } - } else { - error!("Invalid unsigned type in pdu."); - } - } - - // We must keep track of all events that have been referenced. 
- self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - self.replace_pdu_leaves(&pdu.room_id, leaves)?; - - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(pdu.room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - - let count1 = db.globals.next_count()?; - // Mark as read first so the sending client doesn't get a notification even if appending - // fails - self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; - self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; - - let count2 = db.globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&count2.to_be_bytes()); - - // There's a brief moment of time here where the count is updated but the pdu does not - // exist. This could theoretically lead to dropped pdus, but it's extremely rare - // - // Update: We fixed this using insert_lock - - self.pduid_pdu.insert( - &pdu_id, - &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - self.lasttimelinecount_cache - .lock() - .unwrap() - .insert(pdu.room_id.clone(), count2); - - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; - - drop(insert_lock); - - // See if the event matches any known pushers - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - let sync_pdu = pdu.to_sync_room_event(); - - let mut notifies = Vec::new(); - let mut highlights = Vec::new(); - - for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { - // Don't notify the user of their own events - if user == &pdu.sender { - continue; - } - - let rules_for_user = db - .account_data - .get( - None, - user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? - .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); - - let mut highlight = false; - let mut notify = false; - - for action in pusher::get_actions( - user, - &rules_for_user, - &power_levels, - &sync_pdu, - &pdu.room_id, - db, - )? 
{ - match action { - Action::DontNotify => notify = false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => notify = true, - Action::SetTweak(Tweak::Highlight(true)) => { - highlight = true; - } - _ => {} - }; - } - - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(pdu.room_id.as_bytes()); - - if notify { - notifies.push(userroom_id.clone()); - } - - if highlight { - highlights.push(userroom_id); - } - - for senderkey in db.pusher.get_pusher_senderkeys(user) { - db.sending.send_push_pdu(&*pdu_id, senderkey)?; - } - } - - self.userroomid_notificationcount - .increment_batch(&mut notifies.into_iter())?; - self.userroomid_highlightcount - .increment_batch(&mut highlights.into_iter())?; - - match pdu.kind { - RoomEventType::RoomRedaction => { - if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(redact_id, pdu)?; - } - } - RoomEventType::RoomMember => { - if let Some(state_key) = &pdu.state_key { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - // if the state_key fails - let target_user_id = UserId::parse(state_key.clone()) - .expect("This state_key was previously validated"); - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - let invite_state = match content.membership { - MembershipState::Invite => { - let state = self.calculate_invite_state(pdu)?; - Some(state) - } - _ => None, - }; - - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( - &pdu.room_id, - &target_user_id, - content.membership, - &pdu.sender, - invite_state, - db, - true, - )?; - } - } - RoomEventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody<'a> { - #[serde(borrow)] - body: Option>, - } - - let content = serde_json::from_str::>(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - let mut batch = body - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= 50) - .map(str::to_lowercase) - .map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xff); - key.extend_from_slice(&pdu_id); - (key, Vec::new()) - }); - - self.tokenids.insert_batch(&mut batch)?; - - let admin_room = self.id_from_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )?; - let server_user = format!("@conduit:{}", db.globals.server_name()); - - let to_conduit = body.starts_with(&format!("{}: ", server_user)); - - // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let from_conduit = - pdu.sender == server_user && db.globals.emergency_password().is_none(); - - if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - db.admin.process_message(body.to_string()); - } - } - } - _ => {} - } - - Ok(pdu_id) - } - - #[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - match self - .lasttimelinecount_cache - .lock() - .unwrap() - .entry(room_id.to_owned()) - { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - 
.pdus_until(&sender_user, &room_id, u64::MAX)? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .map(|(pduid, _)| self.pdu_count(&pduid)) - .next() - { - Ok(*v.insert(last_count?)) - } else { - Ok(0) - } - } - hash_map::Entry::Occupied(o) => Ok(*o.get()), - } - } - - #[tracing::instrument(skip(self))] - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - self.userroomid_highlightcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid notification count in db.")) - }) - .unwrap_or(Ok(0)) - } - - #[tracing::instrument(skip(self))] - pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_highlightcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid highlight count in db.")) - }) - .unwrap_or(Ok(0)) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, state_ids_compressed, globals))] - pub fn set_event_state( - &self, - event_id: &EventId, - room_id: &RoomId, - state_ids_compressed: HashSet, - globals: &super::globals::Globals, - ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &state_ids_compressed - .iter() - .map(|s| &s[..]) - .collect::>(), - ); - - let (shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, globals)?; - - if !already_existed { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (state_ids_compressed, HashSet::new()) - }; - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 1_000_000, // high number because no state will be based on this one - states_parents, - )?; - } - - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. 
- /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, new_pdu, globals))] - pub fn append_to_state( - &self, - new_pdu: &PduEvent, - globals: &super::globals::Globals, - ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; - - if let Some(p) = previous_shortstatehash { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; - } - - if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let shortstatekey = self.get_or_create_shortstatekey( - &new_pdu.kind.to_string().into(), - state_key, - globals, - )?; - - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; - - let replaces = states_parents - .last() - .map(|info| { - info.1 - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); - - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } - - // TODO: statehash with deterministic inputs - let shortstatehash = globals.next_count()?; - - let mut statediffnew = HashSet::new(); - statediffnew.insert(new); - - let mut statediffremoved = HashSet::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) - } - } - - #[tracing::instrument(skip(self, invite_event))] - pub fn calculate_invite_state( - &self, - invite_event: &PduEvent, - ) -> Result>> { - let mut state = Vec::new(); - // Add recommended events - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomCanonicalAlias, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomMember, - invite_event.sender.as_str(), - )? 
{ - state.push(e.to_stripped_state_event()); - } - - state.push(invite_event.to_stripped_state_event()); - Ok(state) - } - - #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - pub fn associate_token_shortstatehash( - &self, - room_id: &RoomId, - token: u64, - shortstatehash: u64, - ) -> Result<()> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .insert(&key, &shortstatehash.to_be_bytes()) - } - - pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") - }) - }) - .transpose() - } - - /// Creates a new persisted data unit and adds it to a room. - #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex - ) -> Result> { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - } = pdu_builder; - - let prev_events = self - .get_pdu_leaves(room_id)? - .into_iter() - .take(20) - .collect::>(); - - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - - let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = unsigned.unwrap_or_default(); - if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - self.room_state_get(room_id, &event_type.to_string().into(), state_key)? 
- { - unsigned.insert( - "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), - ); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), - ); - } - } - - let mut pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key, - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_ref().to_owned()), - ); - - match ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) { - Ok(_) => {} - Err(e) => { - return match e { - ruma::signatures::Error::PduSize => Err(Error::BadRequest( - ErrorKind::TooLarge, - "Message is too long", - )), - _ => Err(Error::BadRequest( - ErrorKind::Unknown, - "Signing event failed", - )), - } - } - } - - // Generate event id - pdu.event_id = EventId::parse_arc(format!( - "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - pdu_json.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), - ); - - // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; - - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. 
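        // Concretely, the write order below is:
        //   1. append_to_state  - persist the state snapshot this event produces
        //   2. append_pdu       - persist the event itself into the timeline
        //   3. set_room_state   - only then advance the room's current-state pointer
        // so a reader always sees either the old state or a fully persisted new one.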
- let statehashid = self.append_to_state(&pdu, &db.globals)?; - - let pdu_id = self.append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - iter::once(&*pdu.event_id), - db, - )?; - - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; - - let mut servers: HashSet> = - self.room_servers(room_id).filter_map(|r| r.ok()).collect(); - - // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - servers.insert(Box::from(state_key_uid.server_name())); - } - } - - // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(db.globals.server_name()); - - db.sending.send_pdu(servers.into_iter(), &pdu_id)?; - - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - if let Some(appservice_uid) = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }) - { - if state_key_uid == &appservice_uid { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - } - } - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - Ok(pdu.event_id) - } - - /// Returns an iterator over all PDUs in a room. 
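    /// Equivalent to `pdus_since(user_id, room_id, 0)`, i.e. iteration starts at the
    /// very first event of the room.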
- #[tracing::instrument(skip(self))] - pub fn all_pdus<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result, PduEvent)>> + 'a> { - self.pdus_since(user_id, room_id, 0) - } - - /// Returns an iterator over all events in a room that happened after the event with id `since` - /// in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_since<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - since: u64, - ) -> Result, PduEvent)>> + 'a> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Skip the first pdu if it's exactly at since, because we sent that last time - let mut first_pdu_id = prefix.clone(); - first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(&first_pdu_id, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their tokens in a room that happened before the - /// event with id `until` in reverse-chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_until<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - until: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their token in a room that happened after the event - /// with id `from` in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_after<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - from: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Replace a PDU with the redacted form. - #[tracing::instrument(skip(self, reason))] - pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { - if let Some(pdu_id) = self.get_pdu_id(event_id)? { - let mut pdu = self - .get_pdu_from_id(&pdu_id)? 
- .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact(reason)?; - self.replace_pdu(&pdu_id, &pdu)?; - } - // If event does not exist, just noop - Ok(()) - } - - /// Update current membership data. - #[tracing::instrument(skip(self, last_state, db))] - pub fn update_membership( - &self, - room_id: &RoomId, - user_id: &UserId, - membership: MembershipState, - sender: &UserId, - last_state: Option>>, - db: &Database, - update_joined_count: bool, - ) -> Result<()> { - // Keep track what remote users exist by adding them as "deactivated" users - if user_id.server_name() != db.globals.server_name() { - db.users.create(user_id, None)?; - // TODO: displayname, avatar url - } - - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(user_id.server_name().as_bytes()); - - let mut serverroom_id = user_id.server_name().as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - match &membership { - MembershipState::Join => { - // Check if the user never joined this room - if !self.once_joined(user_id, room_id)? { - // Add the user ID to the join list then - self.roomuseroncejoinedids.insert(&userroom_id, &[])?; - - // Check if the room has a predecessor - if let Some(predecessor) = self - .room_state_get(room_id, &StateEventType::RoomCreate, "")? - .and_then(|create| serde_json::from_str(create.content.get()).ok()) - .and_then(|content: RoomCreateEventContent| content.predecessor) - { - // Copy user settings from predecessor to the current room: - // - Push rules - // - // TODO: finish this once push rules are implemented. - // - // let mut push_rules_event_content: PushRulesEvent = account_data - // .get( - // None, - // user_id, - // EventType::PushRules, - // )?; - // - // NOTE: find where `predecessor.room_id` match - // and update to `room_id`. - // - // account_data - // .update( - // None, - // user_id, - // EventType::PushRules, - // &push_rules_event_content, - // globals, - // ) - // .ok(); - - // Copy old tags to new room - if let Some(tag_event) = db.account_data.get::( - Some(&predecessor.room_id), - user_id, - RoomAccountDataEventType::Tag, - )? { - db.account_data - .update( - Some(room_id), - user_id, - RoomAccountDataEventType::Tag, - &tag_event, - &db.globals, - ) - .ok(); - }; - - // Copy direct chat flag - if let Some(mut direct_event) = db.account_data.get::( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - )? 
{ - let mut room_ids_updated = false; - - for room_ids in direct_event.content.0.values_mut() { - if room_ids.iter().any(|r| r == &predecessor.room_id) { - room_ids.push(room_id.to_owned()); - room_ids_updated = true; - } - } - - if room_ids_updated { - db.account_data.update( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - &direct_event, - &db.globals, - )?; - } - }; - } - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_joined.insert(&userroom_id, &[])?; - self.roomuserid_joined.insert(&roomuser_id, &[])?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Invite => { - // We want to know if the sender is ignored by the receiver - let is_ignored = db - .account_data - .get::( - None, // Ignored users are in global account data - user_id, // Receiver - GlobalAccountDataEventType::IgnoredUserList - .to_string() - .into(), - )? - .map_or(false, |ignored| { - ignored - .content - .ignored_users - .iter() - .any(|user| user == sender) - }); - - if is_ignored { - return Ok(()); - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_invitestate.insert( - &userroom_id, - &serde_json::to_vec(&last_state.unwrap_or_default()) - .expect("state to bytes always works"), - )?; - self.roomuserid_invitecount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Leave | MembershipState::Ban => { - if update_joined_count - && self - .room_members(room_id) - .chain(self.room_members_invited(room_id)) - .filter_map(|r| r.ok()) - .all(|u| u.server_name() != user_id.server_name()) - { - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - self.userroomid_leftstate.insert( - &userroom_id, - &serde_json::to_vec(&Vec::>::new()).unwrap(), - )?; // TODO - self.roomuserid_leftcount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - } - _ => {} - } - - if update_joined_count { - self.update_joined_count(room_id, db)?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn update_joined_count(&self, room_id: &RoomId, db: &Database) -> Result<()> { - let mut joinedcount = 0_u64; - let mut invitedcount = 0_u64; - let mut joined_servers = HashSet::new(); - let mut real_users = HashSet::new(); - - for joined in self.room_members(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(joined.server_name().to_owned()); - if joined.server_name() == db.globals.server_name() - && !db.users.is_deactivated(&joined).unwrap_or(true) - { - real_users.insert(joined); - } - joinedcount += 1; - } - - for invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(invited.server_name().to_owned()); - invitedcount += 1; - } - - self.roomid_joinedcount - 
.insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?; - - self.roomid_invitedcount - .insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?; - - self.our_real_users_cache - .write() - .unwrap() - .insert(room_id.to_owned(), Arc::new(real_users)); - - for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { - if !joined_servers.remove(&old_joined_server) { - // Server not in room anymore - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(old_joined_server.as_bytes()); - - let mut serverroom_id = old_joined_server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - } - - // Now only new servers are in joined_servers anymore - for server in joined_servers { - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(server.as_bytes()); - - let mut serverroom_id = server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - - self.appservice_in_room_cache - .write() - .unwrap() - .remove(room_id); - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn get_our_real_users( - &self, - room_id: &RoomId, - db: &Database, - ) -> Result>>> { - let maybe = self - .our_real_users_cache - .read() - .unwrap() - .get(room_id) - .cloned(); - if let Some(users) = maybe { - Ok(users) - } else { - self.update_joined_count(room_id, db)?; - Ok(Arc::clone( - self.our_real_users_cache - .read() - .unwrap() - .get(room_id) - .unwrap(), - )) - } - } - - #[tracing::instrument(skip(self, room_id, appservice, db))] - pub fn appservice_in_room( - &self, - room_id: &RoomId, - appservice: &(String, serde_yaml::Value), - db: &Database, - ) -> Result { - let maybe = self - .appservice_in_room_cache - .read() - .unwrap() - .get(room_id) - .and_then(|map| map.get(&appservice.0)) - .copied(); - - if let Some(b) = maybe { - Ok(b) - } else if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - - let bridge_user_id = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }); - - let in_room = bridge_user_id - .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) - || self.room_members(room_id).any(|userid| { - userid.map_or(false, |userid| { - users.iter().any(|r| r.is_match(userid.as_str())) - }) - }); - - self.appservice_in_room_cache - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default() - .insert(appservice.0.clone(), in_room); - - Ok(in_room) - } else { - Ok(false) - } - } - - // Make a user leave all their joined rooms - #[tracing::instrument(skip(self, db))] - pub async fn leave_all_rooms(&self, user_id: &UserId, db: &Database) -> Result<()> { - let all_rooms = db - .rooms - .rooms_joined(user_id) - .chain(db.rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) - .collect::>(); - - for room_id in all_rooms { - let room_id = match room_id { - Ok(room_id) => room_id, - 
Err(_) => continue, - }; - - let _ = self.leave_room(user_id, &room_id, db).await; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - pub async fn leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - // Ask a remote server if we don't have this room - if !self.exists(room_id)? && room_id.server_name() != db.globals.server_name() { - if let Err(e) = self.remote_leave_room(user_id, room_id, db).await { - warn!("Failed to leave room {} remotely: {}", user_id, e); - // Don't tell the client about this error - } - - let last_state = self - .invite_state(user_id, room_id)? - .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?; - - // We always drop the invite, we can't rely on other servers - self.update_membership( - room_id, - user_id, - MembershipState::Leave, - user_id, - last_state, - db, - true, - )?; - } else { - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - let mut event: RoomMemberEventContent = serde_json::from_str( - self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot leave a room you are not a member of.", - ))? - .content - .get(), - ) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = MembershipState::Leave; - - self.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - user_id, - room_id, - db, - &state_lock, - )?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - async fn remote_leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - let mut make_leave_response_and_server = Err(Error::BadServerResponse( - "No server available to assist in leaving.", - )); - - let invite_state = db - .rooms - .invite_state(user_id, room_id)? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "User is not invited.", - ))?; - - let servers: HashSet<_> = invite_state - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); - - for remote_server in servers { - let make_leave_response = db - .sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, - ) - .await; - - make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); - - if make_leave_response_and_server.is_ok() { - break; - } - } - - let (make_leave_response, remote_server) = make_leave_response_and_server?; - - let room_version_id = match make_leave_response.room_version { - Some(version) if self.is_supported_version(&db, &version) => version, - _ => return Err(Error::BadServerResponse("Room version is not supported")), - }; - - let mut leave_event_stub = - serde_json::from_str::(make_leave_response.event.get()).map_err( - |_| Error::BadServerResponse("Invalid make_leave event json received from server."), - )?; - - // TODO: Is origin needed? 
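        // The stub returned by make_leave is completed locally below: add `origin`
        // and `origin_server_ts`, drop any `event_id`, hash and sign the object,
        // then derive the event id from its reference hash before sending it back
        // via the federation send_leave (create_leave_event) endpoint.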
- leave_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - leave_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms - leave_event_stub.remove("event_id"); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut leave_event_stub, - &room_version_id, - ) - .expect("event is valid, we just created it"); - - // Generate event id - let event_id = EventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - // Add event_id back - leave_event_stub.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - - // It has enough fields to be called a proper event now - let leave_event = leave_event_stub; - - db.sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::create_leave_event::v2::Request { - room_id, - event_id: &event_id, - pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), - }, - ) - .await?; - - Ok(()) - } - - /// Makes a user forget a room. - #[tracing::instrument(skip(self))] - pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - - Ok(()) - } #[tracing::instrument(skip(self, globals))] pub fn set_alias( @@ -2856,648 +64,3 @@ impl Rooms { }) } - #[tracing::instrument(skip(self))] - pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { - if public { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } else { - self.publicroomids.remove(room_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn iter_ids(&self) -> impl Iterator>> + '_ { - self.roomid_shortroomid.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn search_pdus<'a>( - &'a self, - room_id: &RoomId, - search_string: &str, - ) -> Result> + 'a, Vec)>> { - let prefix = self - .get_shortroomid(room_id)? 
- .expect("room exists") - .to_be_bytes() - .to_vec(); - let prefix_clone = prefix.clone(); - - let words: Vec<_> = search_string - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .map(str::to_lowercase) - .collect(); - - let iterators = words.clone().into_iter().map(move |word| { - let mut prefix2 = prefix.clone(); - prefix2.extend_from_slice(word.as_bytes()); - prefix2.push(0xff); - - let mut last_possible_id = prefix2.clone(); - last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.tokenids - .iter_from(&last_possible_id, true) // Newest pdus first - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) - }); - - Ok(utils::common_elements(iterators, |a, b| { - // We compare b with a because we reversed the iterator earlier - b.cmp(a) - }) - .map(|iter| { - ( - iter.map(move |id| { - let mut pduid = prefix_clone.clone(); - pduid.extend_from_slice(&id); - pduid - }), - words, - ) - })) - } - - #[tracing::instrument(skip(self))] - pub fn get_shared_rooms<'a>( - &'a self, - users: Vec>, - ) -> Result>> + 'a> { - let iterators = users.into_iter().map(move |user_id| { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_joined - .scan_prefix(prefix) - .map(|(key, _)| { - let roomid_index = key - .iter() - .enumerate() - .find(|(_, &b)| b == 0xff) - .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? - .0 - + 1; // +1 because the room id starts AFTER the separator - - let room_id = key[roomid_index..].to_vec(); - - Ok::<_, Error>(room_id) - }) - .filter_map(|r| r.ok()) - }); - - // We use the default compare function because keys are sorted correctly (not reversed) - Ok(utils::common_elements(iterators, Ord::cmp) - .expect("users is not empty") - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { - Error::bad_database("Invalid RoomId bytes in userroomid_joined") - })?) - .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - })) - } - - /// Returns an iterator of all servers participating in this room. - #[tracing::instrument(skip(self))] - pub fn room_servers<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomserverids.scan_prefix(prefix).map(|(key, _)| { - ServerName::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Server name in roomserverids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.serverroomids.get(&key).map(|o| o.is_some()) - } - - /// Returns an iterator of all rooms a server participates in (as far as we know). 
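    // A minimal sketch of the key layout these prefix scans rely on, assuming the
    // same convention as the `roomserverids` / `serverroomids` writes above: the two
    // components are joined with a single 0xff separator byte, which cannot occur in
    // a valid (ASCII) server name.
    //
    //     fn serverroom_key(server: &ServerName, room_id: &RoomId) -> Vec<u8> {
    //         let mut key = server.as_bytes().to_vec();
    //         key.push(0xff); // separator between server name and room id
    //         key.extend_from_slice(room_id.as_bytes());
    //         key
    //     }
    //
    // Scanning with the prefix `server_name ++ 0xff` therefore yields exactly one
    // entry per room the server participates in.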
- #[tracing::instrument(skip(self))] - pub fn server_rooms<'a>( - &'a self, - server: &ServerName, - ) -> impl Iterator>> + 'a { - let mut prefix = server.as_bytes().to_vec(); - prefix.push(0xff); - - self.serverroomids.scan_prefix(prefix).map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) - }) - } - - /// Returns an iterator over all joined members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { - self.roomid_joinedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { - self.roomid_invitedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - /// Returns an iterator over all User IDs who ever joined a room. - #[tracing::instrument(skip(self))] - pub fn room_useroncejoined<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuseroncejoinedids - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in room_useroncejoined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) - }) - } - - /// Returns an iterator over all invited members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members_invited<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_invitecount - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_invitecount - .get(&key)? 
- .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid invitecount in db.") - })?)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_leftcount - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid leftcount in db.")) - }) - .transpose() - } - - pub fn is_disabled(&self, room_id: &RoomId) -> Result { - Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) - } - - /// Returns an iterator over all rooms this user joined. - #[tracing::instrument(skip(self))] - pub fn rooms_joined<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator>> + 'a { - self.userroomid_joined - .scan_prefix(user_id.as_bytes().to_vec()) - .map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) - }) - } - - /// Returns an iterator over all rooms a user was invited to. - #[tracing::instrument(skip(self))] - pub fn rooms_invited<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_invitestate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn invite_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_invitestate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok(state) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn left_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_leftstate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok(state) - }) - .transpose() - } - - /// Returns an iterator over all rooms a user left. 
- #[tracing::instrument(skip(self))] - pub fn rooms_left<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_leftstate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( - &'a self, - key: &[u64], - ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); - } - - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? 
- .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - }) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } - } - - Ok(None) - } - - #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db - if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - )?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_mark_sent( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - lazy_load: HashSet>, - count: u64, - ) { - self.lazy_load_waiting.lock().unwrap().insert( - ( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - count, - ), - lazy_load, - ); - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } - - /// Returns the room's version. 
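    /// Reads the `m.room.create` state event and returns its `room_version` field;
    /// rooms without a create event yield a database error.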
- #[tracing::instrument(skip(self))] - pub fn get_room_version(&self, room_id: &RoomId) -> Result { - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - let room_version = create_event_content - .map(|create_event| create_event.room_version) - .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; - Ok(room_version) - } -} diff --git a/src/service/rooms/directory.rs b/src/service/rooms/directory.rs index 4ad815e3..8be7bd57 100644 --- a/src/service/rooms/directory.rs +++ b/src/service/rooms/directory.rs @@ -1,2860 +1,3 @@ -mod edus; - -pub use edus::RoomEdus; - -use crate::{ - pdu::{EventHash, PduBuilder}, - utils, Database, Error, PduEvent, Result, -}; -use lru_cache::LruCache; -use regex::Regex; -use ring::digest; -use ruma::{ - api::{client::error::ErrorKind, federation}, - events::{ - direct::DirectEvent, - ignored_user_list::IgnoredUserListEvent, - push_rules::PushRulesEvent, - room::{ - create::RoomCreateEventContent, - member::{MembershipState, RoomMemberEventContent}, - power_levels::RoomPowerLevelsEventContent, - }, - tag::TagEvent, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, RoomEventType, StateEventType, - }, - push::{Action, Ruleset, Tweak}, - serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res::{self, RoomVersion, StateMap}, - uint, DeviceId, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, -}; -use serde::Deserialize; -use serde_json::value::to_raw_value; -use std::{ - borrow::Cow, - collections::{hash_map, BTreeMap, HashMap, HashSet}, - fmt::Debug, - iter, - mem::size_of, - sync::{Arc, Mutex, RwLock}, -}; -use tokio::sync::MutexGuard; -use tracing::{error, warn}; - -use super::{abstraction::Tree, pusher}; - -/// The unique identifier of each state group. -/// -/// This is created when a state group is added to the database by -/// hashing the entire state. -pub type StateHashId = Vec; -pub type CompressedStateEvent = [u8; 2 * size_of::()]; - -pub struct Rooms { - pub edus: RoomEdus, - pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count - pub(super) eventid_pduid: Arc, - pub(super) roomid_pduleaves: Arc, - pub(super) alias_roomid: Arc, - pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count - pub(super) publicroomids: Arc, - - pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount - - /// Participating servers in a room. 
- pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName - pub(super) serverroomids: Arc, // ServerRoomId = ServerName + RoomId - - pub(super) userroomid_joined: Arc, - pub(super) roomuserid_joined: Arc, - pub(super) roomid_joinedcount: Arc, - pub(super) roomid_invitedcount: Arc, - pub(super) roomuseroncejoinedids: Arc, - pub(super) userroomid_invitestate: Arc, // InviteState = Vec> - pub(super) roomuserid_invitecount: Arc, // InviteCount = Count - pub(super) userroomid_leftstate: Arc, - pub(super) roomuserid_leftcount: Arc, - - pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled - - pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId - - pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 - pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 - - /// Remember the current state hash of a room. - pub(super) roomid_shortstatehash: Arc, - pub(super) roomsynctoken_shortstatehash: Arc, - /// Remember the state hash at events in the past. - pub(super) shorteventid_shortstatehash: Arc, - /// StateKey = EventType + StateKey, ShortStateKey = Count - pub(super) statekey_shortstatekey: Arc, - pub(super) shortstatekey_statekey: Arc, - - pub(super) roomid_shortroomid: Arc, - - pub(super) shorteventid_eventid: Arc, - pub(super) eventid_shorteventid: Arc, - - pub(super) statehash_shortstatehash: Arc, - pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) - - pub(super) shorteventid_authchain: Arc, - - /// RoomId + EventId -> outlier PDU. - /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) eventid_outlierpdu: Arc, - pub(super) softfailedeventids: Arc, - - /// RoomId + EventId -> Parent PDU EventId. - pub(super) referencedevents: Arc, - - pub(super) pdu_cache: Mutex, Arc>>, - pub(super) shorteventid_cache: Mutex>>, - pub(super) auth_chain_cache: Mutex, Arc>>>, - pub(super) eventidshort_cache: Mutex, u64>>, - pub(super) statekeyshort_cache: Mutex>, - pub(super) shortstatekey_cache: Mutex>, - pub(super) our_real_users_cache: RwLock, Arc>>>>, - pub(super) appservice_in_room_cache: RwLock, HashMap>>, - pub(super) lazy_load_waiting: - Mutex, Box, Box, u64), HashSet>>>, - pub(super) stateinfo_cache: Mutex< - LruCache< - u64, - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - >, - >, - pub(super) lasttimelinecount_cache: Mutex, u64>>, -} - -impl Rooms { - /// Returns true if a given room version is supported - #[tracing::instrument(skip(self, db))] - pub fn is_supported_version(&self, db: &Database, room_version: &RoomVersionId) -> bool { - db.globals.supported_room_versions().contains(room_version) - } - - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? 
- .pop() - .expect("there is always one layer") - .1; - let mut result = BTreeMap::new(); - let mut i = 0; - for compressed in full_state.into_iter() { - let parsed = self.parse_compressed_state_event(compressed)?; - result.insert(parsed.0, parsed.1); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - Ok(result) - } - - #[tracing::instrument(skip(self))] - pub async fn state_full( - &self, - shortstatehash: u64, - ) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - let mut result = HashMap::new(); - let mut i = 0; - for compressed in full_state { - let (_, eventid) = self.parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.get_pdu(&eventid)? { - result.insert( - ( - pdu.kind.to_string().into(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - ); - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get_id( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { - Some(s) => s, - None => return Ok(None), - }; - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .into_iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - self.parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) - } - - /// Returns the state hash for this pdu. - pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.eventid_shorteventid - .get(event_id.as_bytes())? - .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) - }) - .transpose() - }) - } - - /// Returns the last state hash key added to the db for the given room. - #[tracing::instrument(skip(self))] - pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - /// This fetches auth events from the current state. - #[tracing::instrument(skip(self))] - pub fn get_auth_events( - &self, - room_id: &RoomId, - kind: &RoomEventType, - sender: &UserId, - state_key: Option<&str>, - content: &serde_json::value::RawValue, - ) -> Result>> { - let shortstatehash = - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - current_shortstatehash - } else { - return Ok(HashMap::new()); - }; - - let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) - .expect("content is a valid JSON object"); - - let mut sauthevents = auth_events - .into_iter() - .filter_map(|(event_type, state_key)| { - self.get_shortstatekey(&event_type.to_string().into(), &state_key) - .ok() - .flatten() - .map(|s| (s, (event_type, state_key))) - }) - .collect::>(); - - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - Ok(full_state - .into_iter() - .filter_map(|compressed| self.parse_compressed_state_event(compressed).ok()) - .filter_map(|(shortstatekey, event_id)| { - sauthevents.remove(&shortstatekey).map(|k| (k, event_id)) - }) - .filter_map(|(k, event_id)| self.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu))) - .collect()) - } - - /// Generate a new StateHash. - /// - /// A unique hash made from hashing all PDU ids of the state joined with 0xff. - fn calculate_hash(&self, bytes_list: &[&[u8]]) -> StateHashId { - // We only hash the pdu's event ids, not the whole pdu - let bytes = bytes_list.join(&0xff); - let hash = digest::digest(&digest::SHA256, &bytes); - hash.as_ref().into() - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. - self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() - } - - /// Force the creation of a new StateHash and insert it into the db. - /// - /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. 
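    /// Unlike `append_to_state`, the snapshot is not derived from a single local
    /// event: any newly added `m.room.member` events in the supplied compressed state
    /// update the membership tables, and the joined/invited counts are recomputed
    /// afterwards.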
- #[tracing::instrument(skip(self, new_state_ids_compressed, db))] - pub fn force_state( - &self, - room_id: &RoomId, - new_state_ids_compressed: HashSet, - db: &Database, - ) -> Result<()> { - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - for event_id in statediffnew.into_iter().filter_map(|new| { - self.parse_compressed_state_event(new) - .ok() - .map(|(_, id)| id) - }) { - let pdu = match self.get_pdu_json(&event_id)? { - Some(pdu) => pdu, - None => continue, - }; - - if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { - continue; - } - - let pdu: PduEvent = match serde_json::from_str( - &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), - ) { - Ok(pdu) => pdu, - Err(_) => continue, - }; - - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let membership = match serde_json::from_str::(pdu.content.get()) { - Ok(e) => e.membership, - Err(_) => continue, - }; - - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, - }; - - let user_id = match UserId::parse(state_key) { - Ok(id) => id, - Err(_) => continue, - }; - - self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; - } - - self.update_joined_count(room_id, db)?; - - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. - #[tracing::instrument(skip(self))] - pub fn load_shortstatehash_info( - &self, - shortstatehash: u64, - ) -> Result< - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - > { - if let Some(r) = self - .stateinfo_cache - .lock() - .unwrap() - .get_mut(&shortstatehash) - { - return Ok(r.clone()); - } - - let value = self - .shortstatehash_statediff - .get(&shortstatehash.to_be_bytes())? 
- .ok_or_else(|| Error::bad_database("State hash does not exist"))?; - let parent = - utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - - let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); - - let mut i = size_of::(); - while let Some(v) = value.get(i..i + 2 * size_of::()) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i += size_of::(); - continue; - } - if add_mode { - added.insert(v.try_into().expect("we checked the size above")); - } else { - removed.insert(v.try_into().expect("we checked the size above")); - } - i += 2 * size_of::(); - } - - if parent != 0_u64 { - let mut response = self.load_shortstatehash_info(parent)?; - let mut state = response.last().unwrap().1.clone(); - state.extend(added.iter().copied()); - for r in &removed { - state.remove(r); - } - - response.push((shortstatehash, state, added, removed)); - - Ok(response) - } else { - let response = vec![(shortstatehash, added.clone(), added, removed)]; - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); - Ok(response) - } - } - - pub fn compress_state_event( - &self, - shortstatekey: u64, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - let mut v = shortstatekey.to_be_bytes().to_vec(); - v.extend_from_slice( - &self - .get_or_create_shorteventid(event_id, globals)? - .to_be_bytes(), - ); - Ok(v.try_into().expect("we checked the size above")) - } - - /// Returns shortstatekey, event id - pub fn parse_compressed_state_event( - &self, - compressed_event: CompressedStateEvent, - ) -> Result<(u64, Arc)> { - Ok(( - utils::u64_from_bytes(&compressed_event[0..size_of::()]) - .expect("bytes have right length"), - self.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]) - .expect("bytes have right length"), - )?, - )) - } - - /// Creates a new shortstatehash that often is just a diff to an already existing - /// shortstatehash and therefore very efficient. - /// - /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer - /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 - /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's - /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. - /// - /// * `shortstatehash` - Shortstatehash of this state - /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid - /// * `statediffremoved` - Removed from base. 
Each vec is shortstatekey+shorteventid - /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer - /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer - #[tracing::instrument(skip( - self, - statediffnew, - statediffremoved, - diff_to_sibling, - parent_states - ))] - pub fn save_state_from_diff( - &self, - shortstatehash: u64, - statediffnew: HashSet, - statediffremoved: HashSet, - diff_to_sibling: usize, - mut parent_states: Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - ) -> Result<()> { - let diffsum = statediffnew.len() + statediffremoved.len(); - - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().unwrap(); - - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - - return Ok(()); - } - - if parent_states.is_empty() { - // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - - return Ok(()); - }; - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. We replace a layer above - - let parent = parent_states.pop().unwrap(); - let parent_diff = parent.2.len() + parent.3.len(); - - if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { - // Diff too big, we replace above layer(s) - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. 
We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.to_be_bytes().to_vec(); - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed[..]); - } - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - } - - Ok(()) - } - - /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash( - &self, - state_hash: &StateHashId, - globals: &super::globals::Globals, - ) -> Result<(u64, bool)> { - Ok(match self.statehash_shortstatehash.get(state_hash)? { - Some(shortstatehash) => ( - utils::u64_from_bytes(&shortstatehash) - .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, - true, - ), - None => { - let shortstatehash = globals.next_count()?; - self.statehash_shortstatehash - .insert(state_hash, &shortstatehash.to_be_bytes())?; - (shortstatehash, false) - } - }) - } - - pub fn get_or_create_shorteventid( - &self, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { - return Ok(*short); - } - - let short = match self.eventid_shorteventid.get(event_id.as_bytes())? { - Some(shorteventid) => utils::u64_from_bytes(&shorteventid) - .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, - None => { - let shorteventid = globals.next_count()?; - self.eventid_shorteventid - .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; - shorteventid - } - }; - - self.eventidshort_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), short); - - Ok(short) - } - - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { - self.roomid_shortroomid - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) - }) - .transpose() - } - - pub fn get_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - ) -> Result> { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(Some(*short)); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = self - .statekey_shortstatekey - .get(&statekey)? - .map(|shortstatekey| { - utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) - }) - .transpose()?; - - if let Some(s) = short { - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), s); - } - - Ok(short) - } - - pub fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - globals: &super::globals::Globals, - ) -> Result { - Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? 
{ - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, - None => { - let short = globals.next_count()?; - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes())?; - short - } - }) - } - - pub fn get_or_create_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(*short); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = match self.statekey_shortstatekey.get(&statekey)? { - Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, - None => { - let shortstatekey = globals.next_count()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes())?; - self.shortstatekey_statekey - .insert(&shortstatekey.to_be_bytes(), &statekey)?; - shortstatekey - } - }; - - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), short); - - Ok(short) - } - - pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - if let Some(id) = self - .shorteventid_cache - .lock() - .unwrap() - .get_mut(&shorteventid) - { - return Ok(Arc::clone(id)); - } - - let bytes = self - .shorteventid_eventid - .get(&shorteventid.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - - let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; - - self.shorteventid_cache - .lock() - .unwrap() - .insert(shorteventid, Arc::clone(&event_id)); - - Ok(event_id) - } - - pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { - if let Some(id) = self - .shortstatekey_cache - .lock() - .unwrap() - .get_mut(&shortstatekey) - { - return Ok(id.clone()); - } - - let bytes = self - .shortstatekey_statekey - .get(&shortstatekey.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; - - let mut parts = bytes.splitn(2, |&b| b == 0xff); - let eventtype_bytes = parts.next().expect("split always returns one entry"); - let statekey_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; - - let event_type = - StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { - Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; - - let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { - Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") - })?; - - let result = (event_type, state_key); - - self.shortstatekey_cache - .lock() - .unwrap() - .insert(shortstatekey, result.clone()); - - Ok(result) - } - - /// Returns the full room state. - #[tracing::instrument(skip(self))] - pub async fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns the `count` of this pdu's id. - pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) - } - - /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - - /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu's id. - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? 
- .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { - return Ok(Some(Arc::clone(p))); - } - - if let Some(pdu) = self - .eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - .map(Arc::new) - }) - .transpose()? - { - self.pdu_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), Arc::clone(&pdu)); - Ok(Some(pdu)) - } else { - Ok(None) - } - } - - /// Returns the pdu. - /// - /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Returns the pdu as a `BTreeMap`. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Removes a pdu and creates a new one with the same id. - #[tracing::instrument(skip(self))] - fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - if self.pduid_pdu.get(pdu_id)?.is_some() { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), - )?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "PDU does not exist.", - )) - } - } - - /// Returns the leaf pdus of a room. - #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - #[tracing::instrument(skip(self, room_id, event_ids))] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - /// Replace the leaves of a room. - /// - /// The provided `event_ids` become the new leaves, this allows a room to have multiple - /// `prev_events`. 
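The doc comment above sums up the forward-extremity handling: the event ids passed in fully replace the room's leaf set, and the next event built for the room takes every current leaf as a prev_event. A minimal standalone sketch of that behaviour, using a plain HashMap in place of the roomid_pduleaves tree and strings in place of ruma's RoomId and EventId types:

use std::collections::{HashMap, HashSet};

// Hypothetical simplified identifiers; the real code uses ruma's RoomId/EventId.
type RoomId = String;
type EventId = String;

#[derive(Default)]
struct LeafStore {
    // Stand-in for the roomid_pduleaves tree: one set of leaf event ids per room.
    roomid_pduleaves: HashMap<RoomId, HashSet<EventId>>,
}

impl LeafStore {
    // The provided event ids completely replace the previous leaf set,
    // mirroring replace_pdu_leaves: clear the per-room entries, then insert.
    fn replace_pdu_leaves(&mut self, room_id: &str, event_ids: impl IntoIterator<Item = EventId>) {
        let leaves = self.roomid_pduleaves.entry(room_id.to_owned()).or_default();
        leaves.clear();
        leaves.extend(event_ids);
    }
}

fn main() {
    let mut store = LeafStore::default();
    store.replace_pdu_leaves("!room:example.org", ["$a".to_owned(), "$b".to_owned()]);
    // A PDU built at this point would reference both "$a" and "$b" as prev_events;
    // once appended it becomes the single new leaf itself.
    store.replace_pdu_leaves("!room:example.org", ["$c".to_owned()]);
    assert_eq!(store.roomid_pduleaves["!room:example.org"].len(), 1);
}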
- #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves<'a>( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - /// Returns the pdu from the outlier tree. - pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - /// Append the PDU as an outlier. - /// - /// Any event given to this will be processed (state-res) on another thread. - #[tracing::instrument(skip(self, pdu))] - pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } - - #[tracing::instrument(skip(self))] - pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } - - /// Creates a new persisted data unit and adds it to a room. - /// - /// By this point the incoming event should be fully authenticated, no auth happens - /// in `append_pdu`. - /// - /// Returns pdu id - #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] - pub fn append_pdu<'a>( - &self, - pdu: &PduEvent, - mut pdu_json: CanonicalJsonObject, - leaves: impl IntoIterator + Debug, - db: &Database, - ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); - - // Make unsigned fields correct. This is not properly documented in the spec, but state - // events need to have previous content in the unsigned field, so clients can easily - // interpret things like membership changes - if let Some(state_key) = &pdu.state_key { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) - { - if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) - .unwrap() - { - unsigned.insert( - "prev_content".to_owned(), - CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()) - .expect("event is valid, we just created it"), - ), - ); - } - } - } else { - error!("Invalid unsigned type in pdu."); - } - } - - // We must keep track of all events that have been referenced. 
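The comment above introduces the referenced-events bookkeeping that later lets the server tell whether an event is already somebody's prev_event. Reduced to its essentials it is a presence check on a composite key, room id bytes followed directly by event id bytes. A rough in-memory sketch with a HashSet standing in for the referencedevents tree:

use std::collections::HashSet;

#[derive(Default)]
struct ReferencedEvents {
    // Stand-in for the referencedevents tree: keys are room id bytes followed
    // by event id bytes; the stored value does not matter.
    keys: HashSet<Vec<u8>>,
}

impl ReferencedEvents {
    fn mark_as_referenced(&mut self, room_id: &str, event_ids: &[&str]) {
        for prev in event_ids {
            let mut key = room_id.as_bytes().to_vec();
            key.extend_from_slice(prev.as_bytes());
            self.keys.insert(key);
        }
    }

    fn is_event_referenced(&self, room_id: &str, event_id: &str) -> bool {
        let mut key = room_id.as_bytes().to_vec();
        key.extend_from_slice(event_id.as_bytes());
        self.keys.contains(&key)
    }
}

fn main() {
    let mut refs = ReferencedEvents::default();
    refs.mark_as_referenced("!room:example.org", &["$prev1", "$prev2"]);
    assert!(refs.is_event_referenced("!room:example.org", "$prev1"));
    assert!(!refs.is_event_referenced("!room:example.org", "$other"));
}

The real tree stores an empty value under each key, so only key presence matters.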
- self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - self.replace_pdu_leaves(&pdu.room_id, leaves)?; - - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(pdu.room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - - let count1 = db.globals.next_count()?; - // Mark as read first so the sending client doesn't get a notification even if appending - // fails - self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; - self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; - - let count2 = db.globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&count2.to_be_bytes()); - - // There's a brief moment of time here where the count is updated but the pdu does not - // exist. This could theoretically lead to dropped pdus, but it's extremely rare - // - // Update: We fixed this using insert_lock - - self.pduid_pdu.insert( - &pdu_id, - &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - self.lasttimelinecount_cache - .lock() - .unwrap() - .insert(pdu.room_id.clone(), count2); - - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; - - drop(insert_lock); - - // See if the event matches any known pushers - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - let sync_pdu = pdu.to_sync_room_event(); - - let mut notifies = Vec::new(); - let mut highlights = Vec::new(); - - for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { - // Don't notify the user of their own events - if user == &pdu.sender { - continue; - } - - let rules_for_user = db - .account_data - .get( - None, - user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? - .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); - - let mut highlight = false; - let mut notify = false; - - for action in pusher::get_actions( - user, - &rules_for_user, - &power_levels, - &sync_pdu, - &pdu.room_id, - db, - )? 
{ - match action { - Action::DontNotify => notify = false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => notify = true, - Action::SetTweak(Tweak::Highlight(true)) => { - highlight = true; - } - _ => {} - }; - } - - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(pdu.room_id.as_bytes()); - - if notify { - notifies.push(userroom_id.clone()); - } - - if highlight { - highlights.push(userroom_id); - } - - for senderkey in db.pusher.get_pusher_senderkeys(user) { - db.sending.send_push_pdu(&*pdu_id, senderkey)?; - } - } - - self.userroomid_notificationcount - .increment_batch(&mut notifies.into_iter())?; - self.userroomid_highlightcount - .increment_batch(&mut highlights.into_iter())?; - - match pdu.kind { - RoomEventType::RoomRedaction => { - if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(redact_id, pdu)?; - } - } - RoomEventType::RoomMember => { - if let Some(state_key) = &pdu.state_key { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - // if the state_key fails - let target_user_id = UserId::parse(state_key.clone()) - .expect("This state_key was previously validated"); - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - let invite_state = match content.membership { - MembershipState::Invite => { - let state = self.calculate_invite_state(pdu)?; - Some(state) - } - _ => None, - }; - - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( - &pdu.room_id, - &target_user_id, - content.membership, - &pdu.sender, - invite_state, - db, - true, - )?; - } - } - RoomEventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody<'a> { - #[serde(borrow)] - body: Option>, - } - - let content = serde_json::from_str::>(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - let mut batch = body - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= 50) - .map(str::to_lowercase) - .map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xff); - key.extend_from_slice(&pdu_id); - (key, Vec::new()) - }); - - self.tokenids.insert_batch(&mut batch)?; - - let admin_room = self.id_from_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )?; - let server_user = format!("@conduit:{}", db.globals.server_name()); - - let to_conduit = body.starts_with(&format!("{}: ", server_user)); - - // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let from_conduit = - pdu.sender == server_user && db.globals.emergency_password().is_none(); - - if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - db.admin.process_message(body.to_string()); - } - } - } - _ => {} - } - - Ok(pdu_id) - } - - #[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - match self - .lasttimelinecount_cache - .lock() - .unwrap() - .entry(room_id.to_owned()) - { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - 
.pdus_until(&sender_user, &room_id, u64::MAX)? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .map(|(pduid, _)| self.pdu_count(&pduid)) - .next() - { - Ok(*v.insert(last_count?)) - } else { - Ok(0) - } - } - hash_map::Entry::Occupied(o) => Ok(*o.get()), - } - } - - #[tracing::instrument(skip(self))] - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - self.userroomid_highlightcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid notification count in db.")) - }) - .unwrap_or(Ok(0)) - } - - #[tracing::instrument(skip(self))] - pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_highlightcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid highlight count in db.")) - }) - .unwrap_or(Ok(0)) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, state_ids_compressed, globals))] - pub fn set_event_state( - &self, - event_id: &EventId, - room_id: &RoomId, - state_ids_compressed: HashSet, - globals: &super::globals::Globals, - ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &state_ids_compressed - .iter() - .map(|s| &s[..]) - .collect::>(), - ); - - let (shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, globals)?; - - if !already_existed { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (state_ids_compressed, HashSet::new()) - }; - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 1_000_000, // high number because no state will be based on this one - states_parents, - )?; - } - - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. 
- /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, new_pdu, globals))] - pub fn append_to_state( - &self, - new_pdu: &PduEvent, - globals: &super::globals::Globals, - ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; - - if let Some(p) = previous_shortstatehash { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; - } - - if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let shortstatekey = self.get_or_create_shortstatekey( - &new_pdu.kind.to_string().into(), - state_key, - globals, - )?; - - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; - - let replaces = states_parents - .last() - .map(|info| { - info.1 - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); - - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } - - // TODO: statehash with deterministic inputs - let shortstatehash = globals.next_count()?; - - let mut statediffnew = HashSet::new(); - statediffnew.insert(new); - - let mut statediffremoved = HashSet::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) - } - } - - #[tracing::instrument(skip(self, invite_event))] - pub fn calculate_invite_state( - &self, - invite_event: &PduEvent, - ) -> Result>> { - let mut state = Vec::new(); - // Add recommended events - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomCanonicalAlias, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomMember, - invite_event.sender.as_str(), - )? 
{ - state.push(e.to_stripped_state_event()); - } - - state.push(invite_event.to_stripped_state_event()); - Ok(state) - } - - #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - pub fn associate_token_shortstatehash( - &self, - room_id: &RoomId, - token: u64, - shortstatehash: u64, - ) -> Result<()> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .insert(&key, &shortstatehash.to_be_bytes()) - } - - pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") - }) - }) - .transpose() - } - - /// Creates a new persisted data unit and adds it to a room. - #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex - ) -> Result> { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - } = pdu_builder; - - let prev_events = self - .get_pdu_leaves(room_id)? - .into_iter() - .take(20) - .collect::>(); - - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - - let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = unsigned.unwrap_or_default(); - if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - self.room_state_get(room_id, &event_type.to_string().into(), state_key)? 
- { - unsigned.insert( - "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), - ); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), - ); - } - } - - let mut pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key, - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_ref().to_owned()), - ); - - match ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) { - Ok(_) => {} - Err(e) => { - return match e { - ruma::signatures::Error::PduSize => Err(Error::BadRequest( - ErrorKind::TooLarge, - "Message is too long", - )), - _ => Err(Error::BadRequest( - ErrorKind::Unknown, - "Signing event failed", - )), - } - } - } - - // Generate event id - pdu.event_id = EventId::parse_arc(format!( - "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - pdu_json.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), - ); - - // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; - - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. 
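The comment above states half of an ordering invariant, and the code that follows completes it by only moving the room's current state pointer after the pdu has been inserted. A compressed, purely illustrative sketch of the full sequence; the struct and its fields are stand-ins, not the real trees or calls:

#[derive(Default)]
struct RoomDb {
    event_state: Option<u64>,           // state snapshot the new event belongs to
    timeline_pdu: Option<&'static str>, // the appended pdu itself
    current_state: Option<u64>,         // the room's advertised current state
}

impl RoomDb {
    fn build_and_append(&mut self, pdu: &'static str) {
        // 1. Store the state for the event first, so the pdu is never visible
        //    without its state being resolvable (append_to_state).
        let statehashid = 42;
        self.event_state = Some(statehashid);

        // 2. Append the pdu to the timeline (append_pdu).
        self.timeline_pdu = Some(pdu);

        // 3. Only afterwards advance the room's current state, so the current
        //    state never references an event missing from the database
        //    (set_room_state).
        self.current_state = Some(statehashid);
    }
}

fn main() {
    let mut db = RoomDb::default();
    db.build_and_append("$event:example.org");
    assert_eq!(db.current_state, db.event_state);
}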
- let statehashid = self.append_to_state(&pdu, &db.globals)?; - - let pdu_id = self.append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - iter::once(&*pdu.event_id), - db, - )?; - - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; - - let mut servers: HashSet> = - self.room_servers(room_id).filter_map(|r| r.ok()).collect(); - - // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - servers.insert(Box::from(state_key_uid.server_name())); - } - } - - // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(db.globals.server_name()); - - db.sending.send_pdu(servers.into_iter(), &pdu_id)?; - - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - if let Some(appservice_uid) = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }) - { - if state_key_uid == &appservice_uid { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - } - } - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - Ok(pdu.event_id) - } - - /// Returns an iterator over all PDUs in a room. 
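The timeline iterators defined further down rely on the pdu id layout used when appending: the 8-byte big-endian shortroomid followed by the 8-byte big-endian count, so a lexicographic range scan walks one room's events in chronological order. A self-contained sketch of that key scheme over a BTreeMap standing in for the pduid_pdu tree; names and values are illustrative:

use std::collections::BTreeMap;

fn pdu_id(shortroomid: u64, count: u64) -> Vec<u8> {
    // Big-endian encoding so lexicographic key order equals numeric order.
    let mut id = shortroomid.to_be_bytes().to_vec();
    id.extend_from_slice(&count.to_be_bytes());
    id
}

// Events of one room that happened after `since`, in chronological order.
fn pdus_since<'a>(
    tree: &'a BTreeMap<Vec<u8>, String>,
    shortroomid: u64,
    since: u64,
) -> impl Iterator<Item = (&'a Vec<u8>, &'a String)> + 'a {
    let prefix = shortroomid.to_be_bytes().to_vec();
    tree.range(pdu_id(shortroomid, since + 1)..)
        .take_while(move |(k, _)| k.starts_with(&prefix))
}

fn main() {
    let mut tree = BTreeMap::new();
    tree.insert(pdu_id(7, 1), "first".to_owned());
    tree.insert(pdu_id(7, 2), "second".to_owned());
    tree.insert(pdu_id(8, 1), "other room".to_owned());
    let events: Vec<_> = pdus_since(&tree, 7, 1).map(|(_, v)| v.as_str()).collect();
    assert_eq!(events, ["second"]);
}

With the real trees the same effect comes from iter_from(first_pdu_id, false) plus the take_while prefix check seen in pdus_since below.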
- #[tracing::instrument(skip(self))] - pub fn all_pdus<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result, PduEvent)>> + 'a> { - self.pdus_since(user_id, room_id, 0) - } - - /// Returns an iterator over all events in a room that happened after the event with id `since` - /// in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_since<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - since: u64, - ) -> Result, PduEvent)>> + 'a> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Skip the first pdu if it's exactly at since, because we sent that last time - let mut first_pdu_id = prefix.clone(); - first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(&first_pdu_id, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their tokens in a room that happened before the - /// event with id `until` in reverse-chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_until<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - until: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their token in a room that happened after the event - /// with id `from` in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_after<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - from: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Replace a PDU with the redacted form. - #[tracing::instrument(skip(self, reason))] - pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { - if let Some(pdu_id) = self.get_pdu_id(event_id)? { - let mut pdu = self - .get_pdu_from_id(&pdu_id)? 
- .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact(reason)?; - self.replace_pdu(&pdu_id, &pdu)?; - } - // If event does not exist, just noop - Ok(()) - } - - /// Update current membership data. - #[tracing::instrument(skip(self, last_state, db))] - pub fn update_membership( - &self, - room_id: &RoomId, - user_id: &UserId, - membership: MembershipState, - sender: &UserId, - last_state: Option>>, - db: &Database, - update_joined_count: bool, - ) -> Result<()> { - // Keep track what remote users exist by adding them as "deactivated" users - if user_id.server_name() != db.globals.server_name() { - db.users.create(user_id, None)?; - // TODO: displayname, avatar url - } - - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(user_id.server_name().as_bytes()); - - let mut serverroom_id = user_id.server_name().as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - match &membership { - MembershipState::Join => { - // Check if the user never joined this room - if !self.once_joined(user_id, room_id)? { - // Add the user ID to the join list then - self.roomuseroncejoinedids.insert(&userroom_id, &[])?; - - // Check if the room has a predecessor - if let Some(predecessor) = self - .room_state_get(room_id, &StateEventType::RoomCreate, "")? - .and_then(|create| serde_json::from_str(create.content.get()).ok()) - .and_then(|content: RoomCreateEventContent| content.predecessor) - { - // Copy user settings from predecessor to the current room: - // - Push rules - // - // TODO: finish this once push rules are implemented. - // - // let mut push_rules_event_content: PushRulesEvent = account_data - // .get( - // None, - // user_id, - // EventType::PushRules, - // )?; - // - // NOTE: find where `predecessor.room_id` match - // and update to `room_id`. - // - // account_data - // .update( - // None, - // user_id, - // EventType::PushRules, - // &push_rules_event_content, - // globals, - // ) - // .ok(); - - // Copy old tags to new room - if let Some(tag_event) = db.account_data.get::( - Some(&predecessor.room_id), - user_id, - RoomAccountDataEventType::Tag, - )? { - db.account_data - .update( - Some(room_id), - user_id, - RoomAccountDataEventType::Tag, - &tag_event, - &db.globals, - ) - .ok(); - }; - - // Copy direct chat flag - if let Some(mut direct_event) = db.account_data.get::( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - )? 
{ - let mut room_ids_updated = false; - - for room_ids in direct_event.content.0.values_mut() { - if room_ids.iter().any(|r| r == &predecessor.room_id) { - room_ids.push(room_id.to_owned()); - room_ids_updated = true; - } - } - - if room_ids_updated { - db.account_data.update( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - &direct_event, - &db.globals, - )?; - } - }; - } - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_joined.insert(&userroom_id, &[])?; - self.roomuserid_joined.insert(&roomuser_id, &[])?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Invite => { - // We want to know if the sender is ignored by the receiver - let is_ignored = db - .account_data - .get::( - None, // Ignored users are in global account data - user_id, // Receiver - GlobalAccountDataEventType::IgnoredUserList - .to_string() - .into(), - )? - .map_or(false, |ignored| { - ignored - .content - .ignored_users - .iter() - .any(|user| user == sender) - }); - - if is_ignored { - return Ok(()); - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_invitestate.insert( - &userroom_id, - &serde_json::to_vec(&last_state.unwrap_or_default()) - .expect("state to bytes always works"), - )?; - self.roomuserid_invitecount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Leave | MembershipState::Ban => { - if update_joined_count - && self - .room_members(room_id) - .chain(self.room_members_invited(room_id)) - .filter_map(|r| r.ok()) - .all(|u| u.server_name() != user_id.server_name()) - { - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - self.userroomid_leftstate.insert( - &userroom_id, - &serde_json::to_vec(&Vec::>::new()).unwrap(), - )?; // TODO - self.roomuserid_leftcount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - } - _ => {} - } - - if update_joined_count { - self.update_joined_count(room_id, db)?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn update_joined_count(&self, room_id: &RoomId, db: &Database) -> Result<()> { - let mut joinedcount = 0_u64; - let mut invitedcount = 0_u64; - let mut joined_servers = HashSet::new(); - let mut real_users = HashSet::new(); - - for joined in self.room_members(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(joined.server_name().to_owned()); - if joined.server_name() == db.globals.server_name() - && !db.users.is_deactivated(&joined).unwrap_or(true) - { - real_users.insert(joined); - } - joinedcount += 1; - } - - for invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(invited.server_name().to_owned()); - invitedcount += 1; - } - - self.roomid_joinedcount - 
.insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?; - - self.roomid_invitedcount - .insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?; - - self.our_real_users_cache - .write() - .unwrap() - .insert(room_id.to_owned(), Arc::new(real_users)); - - for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { - if !joined_servers.remove(&old_joined_server) { - // Server not in room anymore - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(old_joined_server.as_bytes()); - - let mut serverroom_id = old_joined_server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - } - - // Now only new servers are in joined_servers anymore - for server in joined_servers { - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(server.as_bytes()); - - let mut serverroom_id = server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - - self.appservice_in_room_cache - .write() - .unwrap() - .remove(room_id); - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn get_our_real_users( - &self, - room_id: &RoomId, - db: &Database, - ) -> Result>>> { - let maybe = self - .our_real_users_cache - .read() - .unwrap() - .get(room_id) - .cloned(); - if let Some(users) = maybe { - Ok(users) - } else { - self.update_joined_count(room_id, db)?; - Ok(Arc::clone( - self.our_real_users_cache - .read() - .unwrap() - .get(room_id) - .unwrap(), - )) - } - } - - #[tracing::instrument(skip(self, room_id, appservice, db))] - pub fn appservice_in_room( - &self, - room_id: &RoomId, - appservice: &(String, serde_yaml::Value), - db: &Database, - ) -> Result { - let maybe = self - .appservice_in_room_cache - .read() - .unwrap() - .get(room_id) - .and_then(|map| map.get(&appservice.0)) - .copied(); - - if let Some(b) = maybe { - Ok(b) - } else if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - - let bridge_user_id = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }); - - let in_room = bridge_user_id - .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) - || self.room_members(room_id).any(|userid| { - userid.map_or(false, |userid| { - users.iter().any(|r| r.is_match(userid.as_str())) - }) - }); - - self.appservice_in_room_cache - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default() - .insert(appservice.0.clone(), in_room); - - Ok(in_room) - } else { - Ok(false) - } - } - - // Make a user leave all their joined rooms - #[tracing::instrument(skip(self, db))] - pub async fn leave_all_rooms(&self, user_id: &UserId, db: &Database) -> Result<()> { - let all_rooms = db - .rooms - .rooms_joined(user_id) - .chain(db.rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) - .collect::>(); - - for room_id in all_rooms { - let room_id = match room_id { - Ok(room_id) => room_id, - 
Err(_) => continue, - }; - - let _ = self.leave_room(user_id, &room_id, db).await; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - pub async fn leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - // Ask a remote server if we don't have this room - if !self.exists(room_id)? && room_id.server_name() != db.globals.server_name() { - if let Err(e) = self.remote_leave_room(user_id, room_id, db).await { - warn!("Failed to leave room {} remotely: {}", user_id, e); - // Don't tell the client about this error - } - - let last_state = self - .invite_state(user_id, room_id)? - .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?; - - // We always drop the invite, we can't rely on other servers - self.update_membership( - room_id, - user_id, - MembershipState::Leave, - user_id, - last_state, - db, - true, - )?; - } else { - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - let mut event: RoomMemberEventContent = serde_json::from_str( - self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot leave a room you are not a member of.", - ))? - .content - .get(), - ) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = MembershipState::Leave; - - self.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - user_id, - room_id, - db, - &state_lock, - )?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - async fn remote_leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - let mut make_leave_response_and_server = Err(Error::BadServerResponse( - "No server available to assist in leaving.", - )); - - let invite_state = db - .rooms - .invite_state(user_id, room_id)? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "User is not invited.", - ))?; - - let servers: HashSet<_> = invite_state - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); - - for remote_server in servers { - let make_leave_response = db - .sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, - ) - .await; - - make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); - - if make_leave_response_and_server.is_ok() { - break; - } - } - - let (make_leave_response, remote_server) = make_leave_response_and_server?; - - let room_version_id = match make_leave_response.room_version { - Some(version) if self.is_supported_version(&db, &version) => version, - _ => return Err(Error::BadServerResponse("Room version is not supported")), - }; - - let mut leave_event_stub = - serde_json::from_str::(make_leave_response.event.get()).map_err( - |_| Error::BadServerResponse("Invalid make_leave event json received from server."), - )?; - - // TODO: Is origin needed? 
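The remote leave path above gathers candidate homeservers from the senders found in the stored invite state and tries them one after another until one answers. Stripped of the federation machinery, the selection loop can be sketched as follows; ask_server and the sender list are placeholders rather than the real request:

use std::collections::HashSet;

// Placeholder for the federation round-trip; here it only "succeeds" for one host.
fn ask_server(server: &str) -> Result<String, String> {
    if server == "matrix.example.org" {
        Ok(format!("make_leave response from {server}"))
    } else {
        Err(format!("{server} unreachable"))
    }
}

fn main() {
    // Server names derived from the senders of the stripped invite-state events.
    let senders = ["@alice:matrix.example.org", "@bob:other.example.net"];
    let servers: HashSet<&str> = senders
        .iter()
        .filter_map(|s| s.split(':').nth(1))
        .collect();

    // Keep the last error around, but stop at the first server that helps,
    // mirroring the loop over `servers` above.
    let mut response = Err("No server available to assist in leaving.".to_owned());
    for server in servers {
        response = ask_server(server);
        if response.is_ok() {
            break;
        }
    }
    assert!(response.is_ok());
}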
- leave_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - leave_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms - leave_event_stub.remove("event_id"); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut leave_event_stub, - &room_version_id, - ) - .expect("event is valid, we just created it"); - - // Generate event id - let event_id = EventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - // Add event_id back - leave_event_stub.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - - // It has enough fields to be called a proper event now - let leave_event = leave_event_stub; - - db.sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::create_leave_event::v2::Request { - room_id, - event_id: &event_id, - pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), - }, - ) - .await?; - - Ok(()) - } - - /// Makes a user forget a room. - #[tracing::instrument(skip(self))] - pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - - Ok(()) - } - - #[tracing::instrument(skip(self, globals))] - pub fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId>, - globals: &super::globals::Globals, - ) -> Result<()> { - if let Some(room_id) = room_id { - // New alias - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - } else { - // room_id=None means remove alias - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result>> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_aliases<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? - .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } #[tracing::instrument(skip(self))] pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { @@ -2872,18 +15,6 @@ impl Rooms { Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) } - #[tracing::instrument(skip(self))] - pub fn iter_ids(&self) -> impl Iterator>> + '_ { - self.roomid_shortroomid.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) - }) - } - #[tracing::instrument(skip(self))] pub fn public_rooms(&self) -> impl Iterator>> + '_ { self.publicroomids.iter().map(|(bytes, _)| { @@ -2896,608 +27,3 @@ impl Rooms { }) } - #[tracing::instrument(skip(self))] - pub fn search_pdus<'a>( - &'a self, - room_id: &RoomId, - search_string: &str, - ) -> Result> + 'a, Vec)>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - let prefix_clone = prefix.clone(); - - let words: Vec<_> = search_string - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .map(str::to_lowercase) - .collect(); - - let iterators = words.clone().into_iter().map(move |word| { - let mut prefix2 = prefix.clone(); - prefix2.extend_from_slice(word.as_bytes()); - prefix2.push(0xff); - - let mut last_possible_id = prefix2.clone(); - last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.tokenids - .iter_from(&last_possible_id, true) // Newest pdus first - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) - }); - - Ok(utils::common_elements(iterators, |a, b| { - // We compare b with a because we reversed the iterator earlier - b.cmp(a) - }) - .map(|iter| { - ( - iter.map(move |id| { - let mut pduid = prefix_clone.clone(); - pduid.extend_from_slice(&id); - pduid - }), - words, - ) - })) - } - - #[tracing::instrument(skip(self))] - pub fn get_shared_rooms<'a>( - &'a self, - users: Vec>, - ) -> Result>> + 'a> { - let iterators = users.into_iter().map(move |user_id| { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_joined - .scan_prefix(prefix) - .map(|(key, _)| { - let roomid_index = key - .iter() - .enumerate() - .find(|(_, &b)| b == 0xff) - .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? - .0 - + 1; // +1 because the room id starts AFTER the separator - - let room_id = key[roomid_index..].to_vec(); - - Ok::<_, Error>(room_id) - }) - .filter_map(|r| r.ok()) - }); - - // We use the default compare function because keys are sorted correctly (not reversed) - Ok(utils::common_elements(iterators, Ord::cmp) - .expect("users is not empty") - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { - Error::bad_database("Invalid RoomId bytes in userroomid_joined") - })?) 
- .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - })) - } - - /// Returns an iterator of all servers participating in this room. - #[tracing::instrument(skip(self))] - pub fn room_servers<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomserverids.scan_prefix(prefix).map(|(key, _)| { - ServerName::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Server name in roomserverids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.serverroomids.get(&key).map(|o| o.is_some()) - } - - /// Returns an iterator of all rooms a server participates in (as far as we know). - #[tracing::instrument(skip(self))] - pub fn server_rooms<'a>( - &'a self, - server: &ServerName, - ) -> impl Iterator>> + 'a { - let mut prefix = server.as_bytes().to_vec(); - prefix.push(0xff); - - self.serverroomids.scan_prefix(prefix).map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) - }) - } - - /// Returns an iterator over all joined members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { - self.roomid_joinedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { - self.roomid_invitedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - /// Returns an iterator over all User IDs who ever joined a room. 
- #[tracing::instrument(skip(self))] - pub fn room_useroncejoined<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuseroncejoinedids - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in room_useroncejoined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) - }) - } - - /// Returns an iterator over all invited members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members_invited<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_invitecount - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_invitecount - .get(&key)? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid invitecount in db.") - })?)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_leftcount - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid leftcount in db.")) - }) - .transpose() - } - - pub fn is_disabled(&self, room_id: &RoomId) -> Result { - Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) - } - - /// Returns an iterator over all rooms this user joined. - #[tracing::instrument(skip(self))] - pub fn rooms_joined<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator>> + 'a { - self.userroomid_joined - .scan_prefix(user_id.as_bytes().to_vec()) - .map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) - }) - } - - /// Returns an iterator over all rooms a user was invited to. 
- #[tracing::instrument(skip(self))] - pub fn rooms_invited<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_invitestate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn invite_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_invitestate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok(state) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn left_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_leftstate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok(state) - }) - .transpose() - } - - /// Returns an iterator over all rooms a user left. - #[tracing::instrument(skip(self))] - pub fn rooms_left<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_leftstate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_left(&self, user_id: &UserId, 
room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( - &'a self, - key: &[u64], - ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); - } - - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? - .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - }) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } - } - - Ok(None) - } - - #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db - if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - )?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_mark_sent( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - lazy_load: HashSet>, - count: u64, - ) { - self.lazy_load_waiting.lock().unwrap().insert( - ( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - count, - ), - lazy_load, - ); - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } - - /// Returns the room's version. 
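// The persisted auth chain cache above stores a chain simply as its shorteventids
// written back to back as big-endian u64s. A small sketch of that round trip, using
// only the standard library (the module itself goes through its own byte helpers,
// but the layout is the same):

fn encode_chain(chain: &[u64]) -> Vec<u8> {
    chain.iter().flat_map(|s| s.to_be_bytes()).collect()
}

fn decode_chain(bytes: &[u8]) -> Vec<u64> {
    bytes
        .chunks_exact(std::mem::size_of::<u64>())
        .map(|chunk| u64::from_be_bytes(chunk.try_into().expect("chunk is 8 bytes")))
        .collect()
}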
- #[tracing::instrument(skip(self))] - pub fn get_room_version(&self, room_id: &RoomId) -> Result { - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - let room_version = create_event_content - .map(|create_event| create_event.room_version) - .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; - Ok(room_version) - } -} diff --git a/src/database/rooms/edus.rs b/src/service/rooms/edus.rs similarity index 100% rename from src/database/rooms/edus.rs rename to src/service/rooms/edus.rs diff --git a/src/service/rooms/lazy_loading.rs b/src/service/rooms/lazy_loading.rs index 4ad815e3..a402702a 100644 --- a/src/service/rooms/lazy_loading.rs +++ b/src/service/rooms/lazy_loading.rs @@ -1,3395 +1,3 @@ -mod edus; - -pub use edus::RoomEdus; - -use crate::{ - pdu::{EventHash, PduBuilder}, - utils, Database, Error, PduEvent, Result, -}; -use lru_cache::LruCache; -use regex::Regex; -use ring::digest; -use ruma::{ - api::{client::error::ErrorKind, federation}, - events::{ - direct::DirectEvent, - ignored_user_list::IgnoredUserListEvent, - push_rules::PushRulesEvent, - room::{ - create::RoomCreateEventContent, - member::{MembershipState, RoomMemberEventContent}, - power_levels::RoomPowerLevelsEventContent, - }, - tag::TagEvent, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, RoomEventType, StateEventType, - }, - push::{Action, Ruleset, Tweak}, - serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res::{self, RoomVersion, StateMap}, - uint, DeviceId, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, -}; -use serde::Deserialize; -use serde_json::value::to_raw_value; -use std::{ - borrow::Cow, - collections::{hash_map, BTreeMap, HashMap, HashSet}, - fmt::Debug, - iter, - mem::size_of, - sync::{Arc, Mutex, RwLock}, -}; -use tokio::sync::MutexGuard; -use tracing::{error, warn}; - -use super::{abstraction::Tree, pusher}; - -/// The unique identifier of each state group. -/// -/// This is created when a state group is added to the database by -/// hashing the entire state. -pub type StateHashId = Vec; -pub type CompressedStateEvent = [u8; 2 * size_of::()]; - -pub struct Rooms { - pub edus: RoomEdus, - pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count - pub(super) eventid_pduid: Arc, - pub(super) roomid_pduleaves: Arc, - pub(super) alias_roomid: Arc, - pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count - pub(super) publicroomids: Arc, - - pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount - - /// Participating servers in a room. 
- pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName - pub(super) serverroomids: Arc, // ServerRoomId = ServerName + RoomId - - pub(super) userroomid_joined: Arc, - pub(super) roomuserid_joined: Arc, - pub(super) roomid_joinedcount: Arc, - pub(super) roomid_invitedcount: Arc, - pub(super) roomuseroncejoinedids: Arc, - pub(super) userroomid_invitestate: Arc, // InviteState = Vec> - pub(super) roomuserid_invitecount: Arc, // InviteCount = Count - pub(super) userroomid_leftstate: Arc, - pub(super) roomuserid_leftcount: Arc, - - pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled - - pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId - - pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 - pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 - - /// Remember the current state hash of a room. - pub(super) roomid_shortstatehash: Arc, - pub(super) roomsynctoken_shortstatehash: Arc, - /// Remember the state hash at events in the past. - pub(super) shorteventid_shortstatehash: Arc, - /// StateKey = EventType + StateKey, ShortStateKey = Count - pub(super) statekey_shortstatekey: Arc, - pub(super) shortstatekey_statekey: Arc, - - pub(super) roomid_shortroomid: Arc, - - pub(super) shorteventid_eventid: Arc, - pub(super) eventid_shorteventid: Arc, - - pub(super) statehash_shortstatehash: Arc, - pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) - - pub(super) shorteventid_authchain: Arc, - - /// RoomId + EventId -> outlier PDU. - /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) eventid_outlierpdu: Arc, - pub(super) softfailedeventids: Arc, - - /// RoomId + EventId -> Parent PDU EventId. - pub(super) referencedevents: Arc, - - pub(super) pdu_cache: Mutex, Arc>>, - pub(super) shorteventid_cache: Mutex>>, - pub(super) auth_chain_cache: Mutex, Arc>>>, - pub(super) eventidshort_cache: Mutex, u64>>, - pub(super) statekeyshort_cache: Mutex>, - pub(super) shortstatekey_cache: Mutex>, - pub(super) our_real_users_cache: RwLock, Arc>>>>, - pub(super) appservice_in_room_cache: RwLock, HashMap>>, - pub(super) lazy_load_waiting: - Mutex, Box, Box, u64), HashSet>>>, - pub(super) stateinfo_cache: Mutex< - LruCache< - u64, - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - >, - >, - pub(super) lasttimelinecount_cache: Mutex, u64>>, -} - -impl Rooms { - /// Returns true if a given room version is supported - #[tracing::instrument(skip(self, db))] - pub fn is_supported_version(&self, db: &Database, room_version: &RoomVersionId) -> bool { - db.globals.supported_room_versions().contains(room_version) - } - - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? 
- .pop() - .expect("there is always one layer") - .1; - let mut result = BTreeMap::new(); - let mut i = 0; - for compressed in full_state.into_iter() { - let parsed = self.parse_compressed_state_event(compressed)?; - result.insert(parsed.0, parsed.1); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - Ok(result) - } - - #[tracing::instrument(skip(self))] - pub async fn state_full( - &self, - shortstatehash: u64, - ) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - let mut result = HashMap::new(); - let mut i = 0; - for compressed in full_state { - let (_, eventid) = self.parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.get_pdu(&eventid)? { - result.insert( - ( - pdu.kind.to_string().into(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - ); - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get_id( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { - Some(s) => s, - None => return Ok(None), - }; - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .into_iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - self.parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) - } - - /// Returns the state hash for this pdu. - pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.eventid_shorteventid - .get(event_id.as_bytes())? - .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) - }) - .transpose() - }) - } - - /// Returns the last state hash key added to the db for the given room. - #[tracing::instrument(skip(self))] - pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - /// This fetches auth events from the current state. - #[tracing::instrument(skip(self))] - pub fn get_auth_events( - &self, - room_id: &RoomId, - kind: &RoomEventType, - sender: &UserId, - state_key: Option<&str>, - content: &serde_json::value::RawValue, - ) -> Result>> { - let shortstatehash = - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - current_shortstatehash - } else { - return Ok(HashMap::new()); - }; - - let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) - .expect("content is a valid JSON object"); - - let mut sauthevents = auth_events - .into_iter() - .filter_map(|(event_type, state_key)| { - self.get_shortstatekey(&event_type.to_string().into(), &state_key) - .ok() - .flatten() - .map(|s| (s, (event_type, state_key))) - }) - .collect::>(); - - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - Ok(full_state - .into_iter() - .filter_map(|compressed| self.parse_compressed_state_event(compressed).ok()) - .filter_map(|(shortstatekey, event_id)| { - sauthevents.remove(&shortstatekey).map(|k| (k, event_id)) - }) - .filter_map(|(k, event_id)| self.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu))) - .collect()) - } - - /// Generate a new StateHash. - /// - /// A unique hash made from hashing all PDU ids of the state joined with 0xff. - fn calculate_hash(&self, bytes_list: &[&[u8]]) -> StateHashId { - // We only hash the pdu's event ids, not the whole pdu - let bytes = bytes_list.join(&0xff); - let hash = digest::digest(&digest::SHA256, &bytes); - hash.as_ref().into() - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. - self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() - } - - /// Force the creation of a new StateHash and insert it into the db. - /// - /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. 
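// Each state snapshot is persisted as a diff against its parent snapshot: events
// present in the new set but not in the parent are recorded as added, and events
// present in the parent but not in the new set as removed. The entries themselves
// are the 16-byte compressed form used throughout this module, i.e. the big-endian
// shortstatekey followed by the big-endian shorteventid. A minimal sketch of the
// split, over plain HashSets of those 16-byte arrays:

use std::collections::HashSet;

type Compressed = [u8; 16]; // shortstatekey (8 bytes BE) + shorteventid (8 bytes BE)

fn split_into_diff(
    new_state: &HashSet<Compressed>,
    parent_state: &HashSet<Compressed>,
) -> (HashSet<Compressed>, HashSet<Compressed>) {
    let added = new_state.difference(parent_state).copied().collect();
    let removed = parent_state.difference(new_state).copied().collect();
    (added, removed)
}

// Storing diffs keeps successive snapshots cheap, since adjacent room states
// usually differ by only a few events.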
- #[tracing::instrument(skip(self, new_state_ids_compressed, db))] - pub fn force_state( - &self, - room_id: &RoomId, - new_state_ids_compressed: HashSet, - db: &Database, - ) -> Result<()> { - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - for event_id in statediffnew.into_iter().filter_map(|new| { - self.parse_compressed_state_event(new) - .ok() - .map(|(_, id)| id) - }) { - let pdu = match self.get_pdu_json(&event_id)? { - Some(pdu) => pdu, - None => continue, - }; - - if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { - continue; - } - - let pdu: PduEvent = match serde_json::from_str( - &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), - ) { - Ok(pdu) => pdu, - Err(_) => continue, - }; - - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let membership = match serde_json::from_str::(pdu.content.get()) { - Ok(e) => e.membership, - Err(_) => continue, - }; - - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, - }; - - let user_id = match UserId::parse(state_key) { - Ok(id) => id, - Err(_) => continue, - }; - - self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; - } - - self.update_joined_count(room_id, db)?; - - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. - #[tracing::instrument(skip(self))] - pub fn load_shortstatehash_info( - &self, - shortstatehash: u64, - ) -> Result< - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - > { - if let Some(r) = self - .stateinfo_cache - .lock() - .unwrap() - .get_mut(&shortstatehash) - { - return Ok(r.clone()); - } - - let value = self - .shortstatehash_statediff - .get(&shortstatehash.to_be_bytes())? 
- .ok_or_else(|| Error::bad_database("State hash does not exist"))?; - let parent = - utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - - let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); - - let mut i = size_of::(); - while let Some(v) = value.get(i..i + 2 * size_of::()) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i += size_of::(); - continue; - } - if add_mode { - added.insert(v.try_into().expect("we checked the size above")); - } else { - removed.insert(v.try_into().expect("we checked the size above")); - } - i += 2 * size_of::(); - } - - if parent != 0_u64 { - let mut response = self.load_shortstatehash_info(parent)?; - let mut state = response.last().unwrap().1.clone(); - state.extend(added.iter().copied()); - for r in &removed { - state.remove(r); - } - - response.push((shortstatehash, state, added, removed)); - - Ok(response) - } else { - let response = vec![(shortstatehash, added.clone(), added, removed)]; - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); - Ok(response) - } - } - - pub fn compress_state_event( - &self, - shortstatekey: u64, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - let mut v = shortstatekey.to_be_bytes().to_vec(); - v.extend_from_slice( - &self - .get_or_create_shorteventid(event_id, globals)? - .to_be_bytes(), - ); - Ok(v.try_into().expect("we checked the size above")) - } - - /// Returns shortstatekey, event id - pub fn parse_compressed_state_event( - &self, - compressed_event: CompressedStateEvent, - ) -> Result<(u64, Arc)> { - Ok(( - utils::u64_from_bytes(&compressed_event[0..size_of::()]) - .expect("bytes have right length"), - self.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]) - .expect("bytes have right length"), - )?, - )) - } - - /// Creates a new shortstatehash that often is just a diff to an already existing - /// shortstatehash and therefore very efficient. - /// - /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer - /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 - /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's - /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. - /// - /// * `shortstatehash` - Shortstatehash of this state - /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid - /// * `statediffremoved` - Removed from base. 
Each vec is shortstatekey+shorteventid - /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer - /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer - #[tracing::instrument(skip( - self, - statediffnew, - statediffremoved, - diff_to_sibling, - parent_states - ))] - pub fn save_state_from_diff( - &self, - shortstatehash: u64, - statediffnew: HashSet, - statediffremoved: HashSet, - diff_to_sibling: usize, - mut parent_states: Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - ) -> Result<()> { - let diffsum = statediffnew.len() + statediffremoved.len(); - - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().unwrap(); - - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - - return Ok(()); - } - - if parent_states.is_empty() { - // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - - return Ok(()); - }; - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. We replace a layer above - - let parent = parent_states.pop().unwrap(); - let parent_diff = parent.2.len() + parent.3.len(); - - if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { - // Diff too big, we replace above layer(s) - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. 
We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.to_be_bytes().to_vec(); - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed[..]); - } - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - } - - Ok(()) - } - - /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash( - &self, - state_hash: &StateHashId, - globals: &super::globals::Globals, - ) -> Result<(u64, bool)> { - Ok(match self.statehash_shortstatehash.get(state_hash)? { - Some(shortstatehash) => ( - utils::u64_from_bytes(&shortstatehash) - .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, - true, - ), - None => { - let shortstatehash = globals.next_count()?; - self.statehash_shortstatehash - .insert(state_hash, &shortstatehash.to_be_bytes())?; - (shortstatehash, false) - } - }) - } - - pub fn get_or_create_shorteventid( - &self, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { - return Ok(*short); - } - - let short = match self.eventid_shorteventid.get(event_id.as_bytes())? { - Some(shorteventid) => utils::u64_from_bytes(&shorteventid) - .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, - None => { - let shorteventid = globals.next_count()?; - self.eventid_shorteventid - .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; - shorteventid - } - }; - - self.eventidshort_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), short); - - Ok(short) - } - - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { - self.roomid_shortroomid - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) - }) - .transpose() - } - - pub fn get_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - ) -> Result> { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(Some(*short)); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = self - .statekey_shortstatekey - .get(&statekey)? - .map(|shortstatekey| { - utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) - }) - .transpose()?; - - if let Some(s) = short { - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), s); - } - - Ok(short) - } - - pub fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - globals: &super::globals::Globals, - ) -> Result { - Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? 
{ - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, - None => { - let short = globals.next_count()?; - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes())?; - short - } - }) - } - - pub fn get_or_create_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(*short); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = match self.statekey_shortstatekey.get(&statekey)? { - Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, - None => { - let shortstatekey = globals.next_count()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes())?; - self.shortstatekey_statekey - .insert(&shortstatekey.to_be_bytes(), &statekey)?; - shortstatekey - } - }; - - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), short); - - Ok(short) - } - - pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - if let Some(id) = self - .shorteventid_cache - .lock() - .unwrap() - .get_mut(&shorteventid) - { - return Ok(Arc::clone(id)); - } - - let bytes = self - .shorteventid_eventid - .get(&shorteventid.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - - let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; - - self.shorteventid_cache - .lock() - .unwrap() - .insert(shorteventid, Arc::clone(&event_id)); - - Ok(event_id) - } - - pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { - if let Some(id) = self - .shortstatekey_cache - .lock() - .unwrap() - .get_mut(&shortstatekey) - { - return Ok(id.clone()); - } - - let bytes = self - .shortstatekey_statekey - .get(&shortstatekey.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; - - let mut parts = bytes.splitn(2, |&b| b == 0xff); - let eventtype_bytes = parts.next().expect("split always returns one entry"); - let statekey_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; - - let event_type = - StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { - Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; - - let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { - Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") - })?; - - let result = (event_type, state_key); - - self.shortstatekey_cache - .lock() - .unwrap() - .insert(shortstatekey, result.clone()); - - Ok(result) - } - - /// Returns the full room state. - #[tracing::instrument(skip(self))] - pub async fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns the `count` of this pdu's id. - pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) - } - - /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - - /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu's id. - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? 
- .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { - return Ok(Some(Arc::clone(p))); - } - - if let Some(pdu) = self - .eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - .map(Arc::new) - }) - .transpose()? - { - self.pdu_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), Arc::clone(&pdu)); - Ok(Some(pdu)) - } else { - Ok(None) - } - } - - /// Returns the pdu. - /// - /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Returns the pdu as a `BTreeMap`. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Removes a pdu and creates a new one with the same id. - #[tracing::instrument(skip(self))] - fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - if self.pduid_pdu.get(pdu_id)?.is_some() { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), - )?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "PDU does not exist.", - )) - } - } - - /// Returns the leaf pdus of a room. - #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - #[tracing::instrument(skip(self, room_id, event_ids))] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - /// Replace the leaves of a room. - /// - /// The provided `event_ids` become the new leaves, this allows a room to have multiple - /// `prev_events`. 
- #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves<'a>( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - /// Returns the pdu from the outlier tree. - pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - /// Append the PDU as an outlier. - /// - /// Any event given to this will be processed (state-res) on another thread. - #[tracing::instrument(skip(self, pdu))] - pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } - - #[tracing::instrument(skip(self))] - pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } - - /// Creates a new persisted data unit and adds it to a room. - /// - /// By this point the incoming event should be fully authenticated, no auth happens - /// in `append_pdu`. - /// - /// Returns pdu id - #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] - pub fn append_pdu<'a>( - &self, - pdu: &PduEvent, - mut pdu_json: CanonicalJsonObject, - leaves: impl IntoIterator + Debug, - db: &Database, - ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); - - // Make unsigned fields correct. This is not properly documented in the spec, but state - // events need to have previous content in the unsigned field, so clients can easily - // interpret things like membership changes - if let Some(state_key) = &pdu.state_key { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) - { - if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) - .unwrap() - { - unsigned.insert( - "prev_content".to_owned(), - CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()) - .expect("event is valid, we just created it"), - ), - ); - } - } - } else { - error!("Invalid unsigned type in pdu."); - } - } - - // We must keep track of all events that have been referenced. 
- self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - self.replace_pdu_leaves(&pdu.room_id, leaves)?; - - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(pdu.room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - - let count1 = db.globals.next_count()?; - // Mark as read first so the sending client doesn't get a notification even if appending - // fails - self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; - self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; - - let count2 = db.globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&count2.to_be_bytes()); - - // There's a brief moment of time here where the count is updated but the pdu does not - // exist. This could theoretically lead to dropped pdus, but it's extremely rare - // - // Update: We fixed this using insert_lock - - self.pduid_pdu.insert( - &pdu_id, - &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - self.lasttimelinecount_cache - .lock() - .unwrap() - .insert(pdu.room_id.clone(), count2); - - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; - - drop(insert_lock); - - // See if the event matches any known pushers - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - let sync_pdu = pdu.to_sync_room_event(); - - let mut notifies = Vec::new(); - let mut highlights = Vec::new(); - - for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { - // Don't notify the user of their own events - if user == &pdu.sender { - continue; - } - - let rules_for_user = db - .account_data - .get( - None, - user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? - .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); - - let mut highlight = false; - let mut notify = false; - - for action in pusher::get_actions( - user, - &rules_for_user, - &power_levels, - &sync_pdu, - &pdu.room_id, - db, - )? 
{ - match action { - Action::DontNotify => notify = false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => notify = true, - Action::SetTweak(Tweak::Highlight(true)) => { - highlight = true; - } - _ => {} - }; - } - - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(pdu.room_id.as_bytes()); - - if notify { - notifies.push(userroom_id.clone()); - } - - if highlight { - highlights.push(userroom_id); - } - - for senderkey in db.pusher.get_pusher_senderkeys(user) { - db.sending.send_push_pdu(&*pdu_id, senderkey)?; - } - } - - self.userroomid_notificationcount - .increment_batch(&mut notifies.into_iter())?; - self.userroomid_highlightcount - .increment_batch(&mut highlights.into_iter())?; - - match pdu.kind { - RoomEventType::RoomRedaction => { - if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(redact_id, pdu)?; - } - } - RoomEventType::RoomMember => { - if let Some(state_key) = &pdu.state_key { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - // if the state_key fails - let target_user_id = UserId::parse(state_key.clone()) - .expect("This state_key was previously validated"); - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - let invite_state = match content.membership { - MembershipState::Invite => { - let state = self.calculate_invite_state(pdu)?; - Some(state) - } - _ => None, - }; - - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( - &pdu.room_id, - &target_user_id, - content.membership, - &pdu.sender, - invite_state, - db, - true, - )?; - } - } - RoomEventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody<'a> { - #[serde(borrow)] - body: Option>, - } - - let content = serde_json::from_str::>(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - let mut batch = body - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= 50) - .map(str::to_lowercase) - .map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xff); - key.extend_from_slice(&pdu_id); - (key, Vec::new()) - }); - - self.tokenids.insert_batch(&mut batch)?; - - let admin_room = self.id_from_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )?; - let server_user = format!("@conduit:{}", db.globals.server_name()); - - let to_conduit = body.starts_with(&format!("{}: ", server_user)); - - // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let from_conduit = - pdu.sender == server_user && db.globals.emergency_password().is_none(); - - if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - db.admin.process_message(body.to_string()); - } - } - } - _ => {} - } - - Ok(pdu_id) - } - - #[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - match self - .lasttimelinecount_cache - .lock() - .unwrap() - .entry(room_id.to_owned()) - { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - 
.pdus_until(&sender_user, &room_id, u64::MAX)? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .map(|(pduid, _)| self.pdu_count(&pduid)) - .next() - { - Ok(*v.insert(last_count?)) - } else { - Ok(0) - } - } - hash_map::Entry::Occupied(o) => Ok(*o.get()), - } - } - - #[tracing::instrument(skip(self))] - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - self.userroomid_highlightcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid notification count in db.")) - }) - .unwrap_or(Ok(0)) - } - - #[tracing::instrument(skip(self))] - pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_highlightcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid highlight count in db.")) - }) - .unwrap_or(Ok(0)) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, state_ids_compressed, globals))] - pub fn set_event_state( - &self, - event_id: &EventId, - room_id: &RoomId, - state_ids_compressed: HashSet, - globals: &super::globals::Globals, - ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &state_ids_compressed - .iter() - .map(|s| &s[..]) - .collect::>(), - ); - - let (shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, globals)?; - - if !already_existed { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (state_ids_compressed, HashSet::new()) - }; - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 1_000_000, // high number because no state will be based on this one - states_parents, - )?; - } - - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. 
- /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, new_pdu, globals))] - pub fn append_to_state( - &self, - new_pdu: &PduEvent, - globals: &super::globals::Globals, - ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; - - if let Some(p) = previous_shortstatehash { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; - } - - if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let shortstatekey = self.get_or_create_shortstatekey( - &new_pdu.kind.to_string().into(), - state_key, - globals, - )?; - - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; - - let replaces = states_parents - .last() - .map(|info| { - info.1 - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); - - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } - - // TODO: statehash with deterministic inputs - let shortstatehash = globals.next_count()?; - - let mut statediffnew = HashSet::new(); - statediffnew.insert(new); - - let mut statediffremoved = HashSet::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) - } - } - - #[tracing::instrument(skip(self, invite_event))] - pub fn calculate_invite_state( - &self, - invite_event: &PduEvent, - ) -> Result>> { - let mut state = Vec::new(); - // Add recommended events - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomCanonicalAlias, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomMember, - invite_event.sender.as_str(), - )? 
{ - state.push(e.to_stripped_state_event()); - } - - state.push(invite_event.to_stripped_state_event()); - Ok(state) - } - - #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - pub fn associate_token_shortstatehash( - &self, - room_id: &RoomId, - token: u64, - shortstatehash: u64, - ) -> Result<()> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .insert(&key, &shortstatehash.to_be_bytes()) - } - - pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") - }) - }) - .transpose() - } - - /// Creates a new persisted data unit and adds it to a room. - #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex - ) -> Result> { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - } = pdu_builder; - - let prev_events = self - .get_pdu_leaves(room_id)? - .into_iter() - .take(20) - .collect::>(); - - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - - let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = unsigned.unwrap_or_default(); - if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - self.room_state_get(room_id, &event_type.to_string().into(), state_key)? 
- { - unsigned.insert( - "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), - ); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), - ); - } - } - - let mut pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key, - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_ref().to_owned()), - ); - - match ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) { - Ok(_) => {} - Err(e) => { - return match e { - ruma::signatures::Error::PduSize => Err(Error::BadRequest( - ErrorKind::TooLarge, - "Message is too long", - )), - _ => Err(Error::BadRequest( - ErrorKind::Unknown, - "Signing event failed", - )), - } - } - } - - // Generate event id - pdu.event_id = EventId::parse_arc(format!( - "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - pdu_json.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), - ); - - // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; - - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. 
- let statehashid = self.append_to_state(&pdu, &db.globals)?; - - let pdu_id = self.append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - iter::once(&*pdu.event_id), - db, - )?; - - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; - - let mut servers: HashSet> = - self.room_servers(room_id).filter_map(|r| r.ok()).collect(); - - // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - servers.insert(Box::from(state_key_uid.server_name())); - } - } - - // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(db.globals.server_name()); - - db.sending.send_pdu(servers.into_iter(), &pdu_id)?; - - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - if let Some(appservice_uid) = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }) - { - if state_key_uid == &appservice_uid { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - } - } - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - Ok(pdu.event_id) - } - - /// Returns an iterator over all PDUs in a room. 
- #[tracing::instrument(skip(self))] - pub fn all_pdus<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result, PduEvent)>> + 'a> { - self.pdus_since(user_id, room_id, 0) - } - - /// Returns an iterator over all events in a room that happened after the event with id `since` - /// in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_since<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - since: u64, - ) -> Result, PduEvent)>> + 'a> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Skip the first pdu if it's exactly at since, because we sent that last time - let mut first_pdu_id = prefix.clone(); - first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(&first_pdu_id, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their tokens in a room that happened before the - /// event with id `until` in reverse-chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_until<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - until: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their token in a room that happened after the event - /// with id `from` in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_after<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - from: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Replace a PDU with the redacted form. - #[tracing::instrument(skip(self, reason))] - pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { - if let Some(pdu_id) = self.get_pdu_id(event_id)? { - let mut pdu = self - .get_pdu_from_id(&pdu_id)? 
- .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact(reason)?; - self.replace_pdu(&pdu_id, &pdu)?; - } - // If event does not exist, just noop - Ok(()) - } - - /// Update current membership data. - #[tracing::instrument(skip(self, last_state, db))] - pub fn update_membership( - &self, - room_id: &RoomId, - user_id: &UserId, - membership: MembershipState, - sender: &UserId, - last_state: Option>>, - db: &Database, - update_joined_count: bool, - ) -> Result<()> { - // Keep track what remote users exist by adding them as "deactivated" users - if user_id.server_name() != db.globals.server_name() { - db.users.create(user_id, None)?; - // TODO: displayname, avatar url - } - - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(user_id.server_name().as_bytes()); - - let mut serverroom_id = user_id.server_name().as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - match &membership { - MembershipState::Join => { - // Check if the user never joined this room - if !self.once_joined(user_id, room_id)? { - // Add the user ID to the join list then - self.roomuseroncejoinedids.insert(&userroom_id, &[])?; - - // Check if the room has a predecessor - if let Some(predecessor) = self - .room_state_get(room_id, &StateEventType::RoomCreate, "")? - .and_then(|create| serde_json::from_str(create.content.get()).ok()) - .and_then(|content: RoomCreateEventContent| content.predecessor) - { - // Copy user settings from predecessor to the current room: - // - Push rules - // - // TODO: finish this once push rules are implemented. - // - // let mut push_rules_event_content: PushRulesEvent = account_data - // .get( - // None, - // user_id, - // EventType::PushRules, - // )?; - // - // NOTE: find where `predecessor.room_id` match - // and update to `room_id`. - // - // account_data - // .update( - // None, - // user_id, - // EventType::PushRules, - // &push_rules_event_content, - // globals, - // ) - // .ok(); - - // Copy old tags to new room - if let Some(tag_event) = db.account_data.get::( - Some(&predecessor.room_id), - user_id, - RoomAccountDataEventType::Tag, - )? { - db.account_data - .update( - Some(room_id), - user_id, - RoomAccountDataEventType::Tag, - &tag_event, - &db.globals, - ) - .ok(); - }; - - // Copy direct chat flag - if let Some(mut direct_event) = db.account_data.get::( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - )? 
{ - let mut room_ids_updated = false; - - for room_ids in direct_event.content.0.values_mut() { - if room_ids.iter().any(|r| r == &predecessor.room_id) { - room_ids.push(room_id.to_owned()); - room_ids_updated = true; - } - } - - if room_ids_updated { - db.account_data.update( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - &direct_event, - &db.globals, - )?; - } - }; - } - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_joined.insert(&userroom_id, &[])?; - self.roomuserid_joined.insert(&roomuser_id, &[])?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Invite => { - // We want to know if the sender is ignored by the receiver - let is_ignored = db - .account_data - .get::( - None, // Ignored users are in global account data - user_id, // Receiver - GlobalAccountDataEventType::IgnoredUserList - .to_string() - .into(), - )? - .map_or(false, |ignored| { - ignored - .content - .ignored_users - .iter() - .any(|user| user == sender) - }); - - if is_ignored { - return Ok(()); - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_invitestate.insert( - &userroom_id, - &serde_json::to_vec(&last_state.unwrap_or_default()) - .expect("state to bytes always works"), - )?; - self.roomuserid_invitecount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Leave | MembershipState::Ban => { - if update_joined_count - && self - .room_members(room_id) - .chain(self.room_members_invited(room_id)) - .filter_map(|r| r.ok()) - .all(|u| u.server_name() != user_id.server_name()) - { - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - self.userroomid_leftstate.insert( - &userroom_id, - &serde_json::to_vec(&Vec::>::new()).unwrap(), - )?; // TODO - self.roomuserid_leftcount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - } - _ => {} - } - - if update_joined_count { - self.update_joined_count(room_id, db)?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn update_joined_count(&self, room_id: &RoomId, db: &Database) -> Result<()> { - let mut joinedcount = 0_u64; - let mut invitedcount = 0_u64; - let mut joined_servers = HashSet::new(); - let mut real_users = HashSet::new(); - - for joined in self.room_members(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(joined.server_name().to_owned()); - if joined.server_name() == db.globals.server_name() - && !db.users.is_deactivated(&joined).unwrap_or(true) - { - real_users.insert(joined); - } - joinedcount += 1; - } - - for invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(invited.server_name().to_owned()); - invitedcount += 1; - } - - self.roomid_joinedcount - 
.insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?; - - self.roomid_invitedcount - .insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?; - - self.our_real_users_cache - .write() - .unwrap() - .insert(room_id.to_owned(), Arc::new(real_users)); - - for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { - if !joined_servers.remove(&old_joined_server) { - // Server not in room anymore - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(old_joined_server.as_bytes()); - - let mut serverroom_id = old_joined_server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - } - - // Now only new servers are in joined_servers anymore - for server in joined_servers { - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(server.as_bytes()); - - let mut serverroom_id = server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - - self.appservice_in_room_cache - .write() - .unwrap() - .remove(room_id); - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn get_our_real_users( - &self, - room_id: &RoomId, - db: &Database, - ) -> Result>>> { - let maybe = self - .our_real_users_cache - .read() - .unwrap() - .get(room_id) - .cloned(); - if let Some(users) = maybe { - Ok(users) - } else { - self.update_joined_count(room_id, db)?; - Ok(Arc::clone( - self.our_real_users_cache - .read() - .unwrap() - .get(room_id) - .unwrap(), - )) - } - } - - #[tracing::instrument(skip(self, room_id, appservice, db))] - pub fn appservice_in_room( - &self, - room_id: &RoomId, - appservice: &(String, serde_yaml::Value), - db: &Database, - ) -> Result { - let maybe = self - .appservice_in_room_cache - .read() - .unwrap() - .get(room_id) - .and_then(|map| map.get(&appservice.0)) - .copied(); - - if let Some(b) = maybe { - Ok(b) - } else if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - - let bridge_user_id = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }); - - let in_room = bridge_user_id - .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) - || self.room_members(room_id).any(|userid| { - userid.map_or(false, |userid| { - users.iter().any(|r| r.is_match(userid.as_str())) - }) - }); - - self.appservice_in_room_cache - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default() - .insert(appservice.0.clone(), in_room); - - Ok(in_room) - } else { - Ok(false) - } - } - - // Make a user leave all their joined rooms - #[tracing::instrument(skip(self, db))] - pub async fn leave_all_rooms(&self, user_id: &UserId, db: &Database) -> Result<()> { - let all_rooms = db - .rooms - .rooms_joined(user_id) - .chain(db.rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) - .collect::>(); - - for room_id in all_rooms { - let room_id = match room_id { - Ok(room_id) => room_id, - 
Err(_) => continue, - }; - - let _ = self.leave_room(user_id, &room_id, db).await; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - pub async fn leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - // Ask a remote server if we don't have this room - if !self.exists(room_id)? && room_id.server_name() != db.globals.server_name() { - if let Err(e) = self.remote_leave_room(user_id, room_id, db).await { - warn!("Failed to leave room {} remotely: {}", user_id, e); - // Don't tell the client about this error - } - - let last_state = self - .invite_state(user_id, room_id)? - .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?; - - // We always drop the invite, we can't rely on other servers - self.update_membership( - room_id, - user_id, - MembershipState::Leave, - user_id, - last_state, - db, - true, - )?; - } else { - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - let mut event: RoomMemberEventContent = serde_json::from_str( - self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot leave a room you are not a member of.", - ))? - .content - .get(), - ) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = MembershipState::Leave; - - self.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - user_id, - room_id, - db, - &state_lock, - )?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - async fn remote_leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - let mut make_leave_response_and_server = Err(Error::BadServerResponse( - "No server available to assist in leaving.", - )); - - let invite_state = db - .rooms - .invite_state(user_id, room_id)? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "User is not invited.", - ))?; - - let servers: HashSet<_> = invite_state - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); - - for remote_server in servers { - let make_leave_response = db - .sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, - ) - .await; - - make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); - - if make_leave_response_and_server.is_ok() { - break; - } - } - - let (make_leave_response, remote_server) = make_leave_response_and_server?; - - let room_version_id = match make_leave_response.room_version { - Some(version) if self.is_supported_version(&db, &version) => version, - _ => return Err(Error::BadServerResponse("Room version is not supported")), - }; - - let mut leave_event_stub = - serde_json::from_str::(make_leave_response.event.get()).map_err( - |_| Error::BadServerResponse("Invalid make_leave event json received from server."), - )?; - - // TODO: Is origin needed? 
- leave_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - leave_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms - leave_event_stub.remove("event_id"); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut leave_event_stub, - &room_version_id, - ) - .expect("event is valid, we just created it"); - - // Generate event id - let event_id = EventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - // Add event_id back - leave_event_stub.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - - // It has enough fields to be called a proper event now - let leave_event = leave_event_stub; - - db.sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::create_leave_event::v2::Request { - room_id, - event_id: &event_id, - pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), - }, - ) - .await?; - - Ok(()) - } - - /// Makes a user forget a room. - #[tracing::instrument(skip(self))] - pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - - Ok(()) - } - - #[tracing::instrument(skip(self, globals))] - pub fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId>, - globals: &super::globals::Globals, - ) -> Result<()> { - if let Some(room_id) = room_id { - // New alias - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - } else { - // room_id=None means remove alias - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result>> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_aliases<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? - .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { - if public { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } else { - self.publicroomids.remove(room_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn iter_ids(&self) -> impl Iterator>> + '_ { - self.roomid_shortroomid.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn search_pdus<'a>( - &'a self, - room_id: &RoomId, - search_string: &str, - ) -> Result> + 'a, Vec)>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - let prefix_clone = prefix.clone(); - - let words: Vec<_> = search_string - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .map(str::to_lowercase) - .collect(); - - let iterators = words.clone().into_iter().map(move |word| { - let mut prefix2 = prefix.clone(); - prefix2.extend_from_slice(word.as_bytes()); - prefix2.push(0xff); - - let mut last_possible_id = prefix2.clone(); - last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.tokenids - .iter_from(&last_possible_id, true) // Newest pdus first - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) - }); - - Ok(utils::common_elements(iterators, |a, b| { - // We compare b with a because we reversed the iterator earlier - b.cmp(a) - }) - .map(|iter| { - ( - iter.map(move |id| { - let mut pduid = prefix_clone.clone(); - pduid.extend_from_slice(&id); - pduid - }), - words, - ) - })) - } - - #[tracing::instrument(skip(self))] - pub fn get_shared_rooms<'a>( - &'a self, - users: Vec>, - ) -> Result>> + 'a> { - let iterators = users.into_iter().map(move |user_id| { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_joined - .scan_prefix(prefix) - .map(|(key, _)| { - let roomid_index = key - .iter() - .enumerate() - .find(|(_, &b)| b == 0xff) - .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? 
- .0 - + 1; // +1 because the room id starts AFTER the separator - - let room_id = key[roomid_index..].to_vec(); - - Ok::<_, Error>(room_id) - }) - .filter_map(|r| r.ok()) - }); - - // We use the default compare function because keys are sorted correctly (not reversed) - Ok(utils::common_elements(iterators, Ord::cmp) - .expect("users is not empty") - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { - Error::bad_database("Invalid RoomId bytes in userroomid_joined") - })?) - .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - })) - } - - /// Returns an iterator of all servers participating in this room. - #[tracing::instrument(skip(self))] - pub fn room_servers<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomserverids.scan_prefix(prefix).map(|(key, _)| { - ServerName::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Server name in roomserverids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.serverroomids.get(&key).map(|o| o.is_some()) - } - - /// Returns an iterator of all rooms a server participates in (as far as we know). - #[tracing::instrument(skip(self))] - pub fn server_rooms<'a>( - &'a self, - server: &ServerName, - ) -> impl Iterator>> + 'a { - let mut prefix = server.as_bytes().to_vec(); - prefix.push(0xff); - - self.serverroomids.scan_prefix(prefix).map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) - }) - } - - /// Returns an iterator over all joined members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { - self.roomid_joinedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { - self.roomid_invitedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - /// Returns an iterator over all User IDs who ever joined a room. 
- #[tracing::instrument(skip(self))] - pub fn room_useroncejoined<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuseroncejoinedids - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in room_useroncejoined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) - }) - } - - /// Returns an iterator over all invited members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members_invited<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_invitecount - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_invitecount - .get(&key)? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid invitecount in db.") - })?)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_leftcount - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid leftcount in db.")) - }) - .transpose() - } - - pub fn is_disabled(&self, room_id: &RoomId) -> Result { - Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) - } - - /// Returns an iterator over all rooms this user joined. - #[tracing::instrument(skip(self))] - pub fn rooms_joined<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator>> + 'a { - self.userroomid_joined - .scan_prefix(user_id.as_bytes().to_vec()) - .map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) - }) - } - - /// Returns an iterator over all rooms a user was invited to. 
- #[tracing::instrument(skip(self))] - pub fn rooms_invited<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_invitestate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn invite_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_invitestate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok(state) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn left_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_leftstate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok(state) - }) - .transpose() - } - - /// Returns an iterator over all rooms a user left. - #[tracing::instrument(skip(self))] - pub fn rooms_left<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_leftstate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_left(&self, user_id: &UserId, 
room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( - &'a self, - key: &[u64], - ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); - } - - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? - .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - }) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } - } - - Ok(None) - } - - #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db - if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - )?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); - - Ok(()) - } #[tracing::instrument(skip(self))] pub fn lazy_load_was_sent_before( @@ -3481,23 +89,3 @@ impl Rooms { Ok(()) } - /// Returns the room's version. - #[tracing::instrument(skip(self))] - pub fn get_room_version(&self, room_id: &RoomId) -> Result { - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - let room_version = create_event_content - .map(|create_event| create_event.room_version) - .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; - Ok(room_version) - } -} diff --git a/src/service/rooms/metadata.rs b/src/service/rooms/metadata.rs index 4ad815e3..5d703451 100644 --- a/src/service/rooms/metadata.rs +++ b/src/service/rooms/metadata.rs @@ -1,331 +1,3 @@ -mod edus; - -pub use edus::RoomEdus; - -use crate::{ - pdu::{EventHash, PduBuilder}, - utils, Database, Error, PduEvent, Result, -}; -use lru_cache::LruCache; -use regex::Regex; -use ring::digest; -use ruma::{ - api::{client::error::ErrorKind, federation}, - events::{ - direct::DirectEvent, - ignored_user_list::IgnoredUserListEvent, - push_rules::PushRulesEvent, - room::{ - create::RoomCreateEventContent, - member::{MembershipState, RoomMemberEventContent}, - power_levels::RoomPowerLevelsEventContent, - }, - tag::TagEvent, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, RoomEventType, StateEventType, - }, - push::{Action, Ruleset, Tweak}, - serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res::{self, RoomVersion, StateMap}, - uint, DeviceId, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, -}; -use serde::Deserialize; -use serde_json::value::to_raw_value; -use std::{ - borrow::Cow, - collections::{hash_map, BTreeMap, HashMap, HashSet}, - fmt::Debug, - iter, - mem::size_of, - sync::{Arc, Mutex, RwLock}, -}; -use tokio::sync::MutexGuard; -use tracing::{error, warn}; - -use 
super::{abstraction::Tree, pusher}; - -/// The unique identifier of each state group. -/// -/// This is created when a state group is added to the database by -/// hashing the entire state. -pub type StateHashId = Vec; -pub type CompressedStateEvent = [u8; 2 * size_of::()]; - -pub struct Rooms { - pub edus: RoomEdus, - pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count - pub(super) eventid_pduid: Arc, - pub(super) roomid_pduleaves: Arc, - pub(super) alias_roomid: Arc, - pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count - pub(super) publicroomids: Arc, - - pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount - - /// Participating servers in a room. - pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName - pub(super) serverroomids: Arc, // ServerRoomId = ServerName + RoomId - - pub(super) userroomid_joined: Arc, - pub(super) roomuserid_joined: Arc, - pub(super) roomid_joinedcount: Arc, - pub(super) roomid_invitedcount: Arc, - pub(super) roomuseroncejoinedids: Arc, - pub(super) userroomid_invitestate: Arc, // InviteState = Vec> - pub(super) roomuserid_invitecount: Arc, // InviteCount = Count - pub(super) userroomid_leftstate: Arc, - pub(super) roomuserid_leftcount: Arc, - - pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled - - pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId - - pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 - pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 - - /// Remember the current state hash of a room. - pub(super) roomid_shortstatehash: Arc, - pub(super) roomsynctoken_shortstatehash: Arc, - /// Remember the state hash at events in the past. - pub(super) shorteventid_shortstatehash: Arc, - /// StateKey = EventType + StateKey, ShortStateKey = Count - pub(super) statekey_shortstatekey: Arc, - pub(super) shortstatekey_statekey: Arc, - - pub(super) roomid_shortroomid: Arc, - - pub(super) shorteventid_eventid: Arc, - pub(super) eventid_shorteventid: Arc, - - pub(super) statehash_shortstatehash: Arc, - pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) - - pub(super) shorteventid_authchain: Arc, - - /// RoomId + EventId -> outlier PDU. - /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) eventid_outlierpdu: Arc, - pub(super) softfailedeventids: Arc, - - /// RoomId + EventId -> Parent PDU EventId. 
- pub(super) referencedevents: Arc, - - pub(super) pdu_cache: Mutex, Arc>>, - pub(super) shorteventid_cache: Mutex>>, - pub(super) auth_chain_cache: Mutex, Arc>>>, - pub(super) eventidshort_cache: Mutex, u64>>, - pub(super) statekeyshort_cache: Mutex>, - pub(super) shortstatekey_cache: Mutex>, - pub(super) our_real_users_cache: RwLock, Arc>>>>, - pub(super) appservice_in_room_cache: RwLock, HashMap>>, - pub(super) lazy_load_waiting: - Mutex, Box, Box, u64), HashSet>>>, - pub(super) stateinfo_cache: Mutex< - LruCache< - u64, - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - >, - >, - pub(super) lasttimelinecount_cache: Mutex, u64>>, -} - -impl Rooms { - /// Returns true if a given room version is supported - #[tracing::instrument(skip(self, db))] - pub fn is_supported_version(&self, db: &Database, room_version: &RoomVersionId) -> bool { - db.globals.supported_room_versions().contains(room_version) - } - - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - let mut result = BTreeMap::new(); - let mut i = 0; - for compressed in full_state.into_iter() { - let parsed = self.parse_compressed_state_event(compressed)?; - result.insert(parsed.0, parsed.1); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - Ok(result) - } - - #[tracing::instrument(skip(self))] - pub async fn state_full( - &self, - shortstatehash: u64, - ) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - let mut result = HashMap::new(); - let mut i = 0; - for compressed in full_state { - let (_, eventid) = self.parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.get_pdu(&eventid)? { - result.insert( - ( - pdu.kind.to_string().into(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - ); - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get_id( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { - Some(s) => s, - None => return Ok(None), - }; - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .into_iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - self.parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) - } - - /// Returns the state hash for this pdu. 
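// A minimal, stand-alone sketch of the compressed-state-event layout used by the
// state lookups above: each entry is 16 bytes, the big-endian shortstatekey
// followed by the big-endian shorteventid, so a lookup only needs to scan for the
// 8-byte key prefix. Function names here are illustrative, not Conduit API.

const U64_LEN: usize = std::mem::size_of::<u64>();

fn compress(shortstatekey: u64, shorteventid: u64) -> [u8; 2 * U64_LEN] {
    let mut out = [0u8; 2 * U64_LEN];
    out[..U64_LEN].copy_from_slice(&shortstatekey.to_be_bytes());
    out[U64_LEN..].copy_from_slice(&shorteventid.to_be_bytes());
    out
}

fn parse(compressed: [u8; 2 * U64_LEN]) -> (u64, u64) {
    let shortstatekey = u64::from_be_bytes(compressed[..U64_LEN].try_into().unwrap());
    let shorteventid = u64::from_be_bytes(compressed[U64_LEN..].try_into().unwrap());
    (shortstatekey, shorteventid)
}

fn main() {
    let entry = compress(7, 42);
    // state_get_id-style lookup: match on the shortstatekey prefix only
    assert!(entry.starts_with(&7u64.to_be_bytes()));
    assert_eq!(parse(entry), (7, 42));
}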
- pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.eventid_shorteventid - .get(event_id.as_bytes())? - .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) - }) - .transpose() - }) - } - - /// Returns the last state hash key added to the db for the given room. - #[tracing::instrument(skip(self))] - pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - /// This fetches auth events from the current state. - #[tracing::instrument(skip(self))] - pub fn get_auth_events( - &self, - room_id: &RoomId, - kind: &RoomEventType, - sender: &UserId, - state_key: Option<&str>, - content: &serde_json::value::RawValue, - ) -> Result>> { - let shortstatehash = - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - current_shortstatehash - } else { - return Ok(HashMap::new()); - }; - - let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) - .expect("content is a valid JSON object"); - - let mut sauthevents = auth_events - .into_iter() - .filter_map(|(event_type, state_key)| { - self.get_shortstatekey(&event_type.to_string().into(), &state_key) - .ok() - .flatten() - .map(|s| (s, (event_type, state_key))) - }) - .collect::>(); - - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - Ok(full_state - .into_iter() - .filter_map(|compressed| self.parse_compressed_state_event(compressed).ok()) - .filter_map(|(shortstatekey, event_id)| { - sauthevents.remove(&shortstatekey).map(|k| (k, event_id)) - }) - .filter_map(|(k, event_id)| self.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu))) - .collect()) - } - - /// Generate a new StateHash. - /// - /// A unique hash made from hashing all PDU ids of the state joined with 0xff. - fn calculate_hash(&self, bytes_list: &[&[u8]]) -> StateHashId { - // We only hash the pdu's event ids, not the whole pdu - let bytes = bytes_list.join(&0xff); - let hash = digest::digest(&digest::SHA256, &bytes); - hash.as_ref().into() - } - /// Checks if a room exists. #[tracing::instrument(skip(self))] pub fn exists(&self, room_id: &RoomId) -> Result { @@ -343,430 +15,6 @@ impl Rooms { .is_some()) } - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. - self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() - } - - /// Force the creation of a new StateHash and insert it into the db. - /// - /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. 
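// An illustrative, stand-alone version of the state-hash calculation above: the
// entries are joined with a 0xff separator and hashed with SHA-256 via `ring`,
// which this module already depends on. Only the entry bytes are hashed, not
// whole PDUs. The helper name is illustrative, not Conduit API.

use ring::digest;

fn state_hash(bytes_list: &[&[u8]]) -> Vec<u8> {
    let joined = bytes_list.join(&0xff);
    digest::digest(&digest::SHA256, &joined).as_ref().to_vec()
}

fn main() {
    let ids: [&[u8]; 2] = [b"$event_a", b"$event_b"];
    let hash = state_hash(&ids);
    assert_eq!(hash.len(), 32); // SHA-256 output length
}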
- #[tracing::instrument(skip(self, new_state_ids_compressed, db))] - pub fn force_state( - &self, - room_id: &RoomId, - new_state_ids_compressed: HashSet, - db: &Database, - ) -> Result<()> { - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - for event_id in statediffnew.into_iter().filter_map(|new| { - self.parse_compressed_state_event(new) - .ok() - .map(|(_, id)| id) - }) { - let pdu = match self.get_pdu_json(&event_id)? { - Some(pdu) => pdu, - None => continue, - }; - - if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { - continue; - } - - let pdu: PduEvent = match serde_json::from_str( - &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), - ) { - Ok(pdu) => pdu, - Err(_) => continue, - }; - - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let membership = match serde_json::from_str::(pdu.content.get()) { - Ok(e) => e.membership, - Err(_) => continue, - }; - - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, - }; - - let user_id = match UserId::parse(state_key) { - Ok(id) => id, - Err(_) => continue, - }; - - self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; - } - - self.update_joined_count(room_id, db)?; - - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. - #[tracing::instrument(skip(self))] - pub fn load_shortstatehash_info( - &self, - shortstatehash: u64, - ) -> Result< - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - > { - if let Some(r) = self - .stateinfo_cache - .lock() - .unwrap() - .get_mut(&shortstatehash) - { - return Ok(r.clone()); - } - - let value = self - .shortstatehash_statediff - .get(&shortstatehash.to_be_bytes())? 
- .ok_or_else(|| Error::bad_database("State hash does not exist"))?; - let parent = - utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - - let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); - - let mut i = size_of::(); - while let Some(v) = value.get(i..i + 2 * size_of::()) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i += size_of::(); - continue; - } - if add_mode { - added.insert(v.try_into().expect("we checked the size above")); - } else { - removed.insert(v.try_into().expect("we checked the size above")); - } - i += 2 * size_of::(); - } - - if parent != 0_u64 { - let mut response = self.load_shortstatehash_info(parent)?; - let mut state = response.last().unwrap().1.clone(); - state.extend(added.iter().copied()); - for r in &removed { - state.remove(r); - } - - response.push((shortstatehash, state, added, removed)); - - Ok(response) - } else { - let response = vec![(shortstatehash, added.clone(), added, removed)]; - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); - Ok(response) - } - } - - pub fn compress_state_event( - &self, - shortstatekey: u64, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - let mut v = shortstatekey.to_be_bytes().to_vec(); - v.extend_from_slice( - &self - .get_or_create_shorteventid(event_id, globals)? - .to_be_bytes(), - ); - Ok(v.try_into().expect("we checked the size above")) - } - - /// Returns shortstatekey, event id - pub fn parse_compressed_state_event( - &self, - compressed_event: CompressedStateEvent, - ) -> Result<(u64, Arc)> { - Ok(( - utils::u64_from_bytes(&compressed_event[0..size_of::()]) - .expect("bytes have right length"), - self.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]) - .expect("bytes have right length"), - )?, - )) - } - - /// Creates a new shortstatehash that often is just a diff to an already existing - /// shortstatehash and therefore very efficient. - /// - /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer - /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 - /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's - /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. - /// - /// * `shortstatehash` - Shortstatehash of this state - /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid - /// * `statediffremoved` - Removed from base. 
Each vec is shortstatekey+shorteventid - /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer - /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer - #[tracing::instrument(skip( - self, - statediffnew, - statediffremoved, - diff_to_sibling, - parent_states - ))] - pub fn save_state_from_diff( - &self, - shortstatehash: u64, - statediffnew: HashSet, - statediffremoved: HashSet, - diff_to_sibling: usize, - mut parent_states: Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - ) -> Result<()> { - let diffsum = statediffnew.len() + statediffremoved.len(); - - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().unwrap(); - - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - - return Ok(()); - } - - if parent_states.is_empty() { - // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - - return Ok(()); - }; - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. We replace a layer above - - let parent = parent_states.pop().unwrap(); - let parent_diff = parent.2.len() + parent.3.len(); - - if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { - // Diff too big, we replace above layer(s) - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. 
We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.to_be_bytes().to_vec(); - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed[..]); - } - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - } - - Ok(()) - } - - /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash( - &self, - state_hash: &StateHashId, - globals: &super::globals::Globals, - ) -> Result<(u64, bool)> { - Ok(match self.statehash_shortstatehash.get(state_hash)? { - Some(shortstatehash) => ( - utils::u64_from_bytes(&shortstatehash) - .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, - true, - ), - None => { - let shortstatehash = globals.next_count()?; - self.statehash_shortstatehash - .insert(state_hash, &shortstatehash.to_be_bytes())?; - (shortstatehash, false) - } - }) - } - - pub fn get_or_create_shorteventid( - &self, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { - return Ok(*short); - } - - let short = match self.eventid_shorteventid.get(event_id.as_bytes())? { - Some(shorteventid) => utils::u64_from_bytes(&shorteventid) - .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, - None => { - let shorteventid = globals.next_count()?; - self.eventid_shorteventid - .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; - shorteventid - } - }; - - self.eventidshort_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), short); - - Ok(short) - } - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { self.roomid_shortroomid .get(room_id.as_bytes())? @@ -777,43 +25,6 @@ impl Rooms { .transpose() } - pub fn get_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - ) -> Result> { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(Some(*short)); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = self - .statekey_shortstatekey - .get(&statekey)? 
- .map(|shortstatekey| { - utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) - }) - .transpose()?; - - if let Some(s) = short { - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), s); - } - - Ok(short) - } - pub fn get_or_create_shortroomid( &self, room_id: &RoomId, @@ -831,2673 +42,3 @@ impl Rooms { }) } - pub fn get_or_create_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(*short); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = match self.statekey_shortstatekey.get(&statekey)? { - Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, - None => { - let shortstatekey = globals.next_count()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes())?; - self.shortstatekey_statekey - .insert(&shortstatekey.to_be_bytes(), &statekey)?; - shortstatekey - } - }; - - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), short); - - Ok(short) - } - - pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - if let Some(id) = self - .shorteventid_cache - .lock() - .unwrap() - .get_mut(&shorteventid) - { - return Ok(Arc::clone(id)); - } - - let bytes = self - .shorteventid_eventid - .get(&shorteventid.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - - let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; - - self.shorteventid_cache - .lock() - .unwrap() - .insert(shorteventid, Arc::clone(&event_id)); - - Ok(event_id) - } - - pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { - if let Some(id) = self - .shortstatekey_cache - .lock() - .unwrap() - .get_mut(&shortstatekey) - { - return Ok(id.clone()); - } - - let bytes = self - .shortstatekey_statekey - .get(&shortstatekey.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; - - let mut parts = bytes.splitn(2, |&b| b == 0xff); - let eventtype_bytes = parts.next().expect("split always returns one entry"); - let statekey_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; - - let event_type = - StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { - Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; - - let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { - Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") - })?; - - let result = (event_type, state_key); - - self.shortstatekey_cache - .lock() - .unwrap() - .insert(shortstatekey, result.clone()); - - Ok(result) - } - - /// Returns the full room state. 
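// A small sketch of the `statekey` encoding used by the short-state-key tables
// above: the event type and the state key are concatenated with a 0xff separator
// and recovered by splitting on the first 0xff. Function names are illustrative,
// not Conduit API.

fn make_statekey(event_type: &str, state_key: &str) -> Vec<u8> {
    let mut key = event_type.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(state_key.as_bytes());
    key
}

fn split_statekey(key: &[u8]) -> Option<(&[u8], &[u8])> {
    let mut parts = key.splitn(2, |&b| b == 0xff);
    Some((parts.next()?, parts.next()?))
}

fn main() {
    let key = make_statekey("m.room.member", "@alice:example.org");
    let (ty, sk) = split_statekey(&key).unwrap();
    assert_eq!(ty, b"m.room.member");
    assert_eq!(sk, b"@alice:example.org");
}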
- #[tracing::instrument(skip(self))] - pub async fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns the `count` of this pdu's id. - pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) - } - - /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - - /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu's id. - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. 
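// An illustrative sketch of the PduId layout relied on by the lookups above: the
// 8-byte big-endian shortroomid prefix followed by the 8-byte big-endian count,
// so the count can always be recovered from the trailing 8 bytes and a room's
// PDUs share a common prefix for range scans. Names are illustrative, not
// Conduit API.

use std::mem::size_of;

fn make_pdu_id(shortroomid: u64, count: u64) -> Vec<u8> {
    let mut pdu_id = shortroomid.to_be_bytes().to_vec();
    pdu_id.extend_from_slice(&count.to_be_bytes());
    pdu_id
}

fn pdu_count(pdu_id: &[u8]) -> Option<u64> {
    let start = pdu_id.len().checked_sub(size_of::<u64>())?;
    let tail: [u8; 8] = pdu_id[start..].try_into().ok()?;
    Some(u64::from_be_bytes(tail))
}

fn main() {
    let pdu_id = make_pdu_id(5, 1234);
    assert!(pdu_id.starts_with(&5u64.to_be_bytes())); // room prefix for range scans
    assert_eq!(pdu_count(&pdu_id), Some(1234));
}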
- pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { - return Ok(Some(Arc::clone(p))); - } - - if let Some(pdu) = self - .eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - .map(Arc::new) - }) - .transpose()? - { - self.pdu_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), Arc::clone(&pdu)); - Ok(Some(pdu)) - } else { - Ok(None) - } - } - - /// Returns the pdu. - /// - /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Returns the pdu as a `BTreeMap`. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Removes a pdu and creates a new one with the same id. - #[tracing::instrument(skip(self))] - fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - if self.pduid_pdu.get(pdu_id)?.is_some() { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), - )?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "PDU does not exist.", - )) - } - } - - /// Returns the leaf pdus of a room. - #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - #[tracing::instrument(skip(self, room_id, event_ids))] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - /// Replace the leaves of a room. - /// - /// The provided `event_ids` become the new leaves, this allows a room to have multiple - /// `prev_events`. 
- #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves<'a>( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - /// Returns the pdu from the outlier tree. - pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - /// Append the PDU as an outlier. - /// - /// Any event given to this will be processed (state-res) on another thread. - #[tracing::instrument(skip(self, pdu))] - pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } - - #[tracing::instrument(skip(self))] - pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } - - /// Creates a new persisted data unit and adds it to a room. - /// - /// By this point the incoming event should be fully authenticated, no auth happens - /// in `append_pdu`. - /// - /// Returns pdu id - #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] - pub fn append_pdu<'a>( - &self, - pdu: &PduEvent, - mut pdu_json: CanonicalJsonObject, - leaves: impl IntoIterator + Debug, - db: &Database, - ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); - - // Make unsigned fields correct. This is not properly documented in the spec, but state - // events need to have previous content in the unsigned field, so clients can easily - // interpret things like membership changes - if let Some(state_key) = &pdu.state_key { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) - { - if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) - .unwrap() - { - unsigned.insert( - "prev_content".to_owned(), - CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()) - .expect("event is valid, we just created it"), - ), - ); - } - } - } else { - error!("Invalid unsigned type in pdu."); - } - } - - // We must keep track of all events that have been referenced. 
- self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - self.replace_pdu_leaves(&pdu.room_id, leaves)?; - - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(pdu.room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - - let count1 = db.globals.next_count()?; - // Mark as read first so the sending client doesn't get a notification even if appending - // fails - self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; - self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; - - let count2 = db.globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&count2.to_be_bytes()); - - // There's a brief moment of time here where the count is updated but the pdu does not - // exist. This could theoretically lead to dropped pdus, but it's extremely rare - // - // Update: We fixed this using insert_lock - - self.pduid_pdu.insert( - &pdu_id, - &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - self.lasttimelinecount_cache - .lock() - .unwrap() - .insert(pdu.room_id.clone(), count2); - - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; - - drop(insert_lock); - - // See if the event matches any known pushers - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - let sync_pdu = pdu.to_sync_room_event(); - - let mut notifies = Vec::new(); - let mut highlights = Vec::new(); - - for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { - // Don't notify the user of their own events - if user == &pdu.sender { - continue; - } - - let rules_for_user = db - .account_data - .get( - None, - user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? - .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); - - let mut highlight = false; - let mut notify = false; - - for action in pusher::get_actions( - user, - &rules_for_user, - &power_levels, - &sync_pdu, - &pdu.room_id, - db, - )? 
{ - match action { - Action::DontNotify => notify = false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => notify = true, - Action::SetTweak(Tweak::Highlight(true)) => { - highlight = true; - } - _ => {} - }; - } - - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(pdu.room_id.as_bytes()); - - if notify { - notifies.push(userroom_id.clone()); - } - - if highlight { - highlights.push(userroom_id); - } - - for senderkey in db.pusher.get_pusher_senderkeys(user) { - db.sending.send_push_pdu(&*pdu_id, senderkey)?; - } - } - - self.userroomid_notificationcount - .increment_batch(&mut notifies.into_iter())?; - self.userroomid_highlightcount - .increment_batch(&mut highlights.into_iter())?; - - match pdu.kind { - RoomEventType::RoomRedaction => { - if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(redact_id, pdu)?; - } - } - RoomEventType::RoomMember => { - if let Some(state_key) = &pdu.state_key { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - // if the state_key fails - let target_user_id = UserId::parse(state_key.clone()) - .expect("This state_key was previously validated"); - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - let invite_state = match content.membership { - MembershipState::Invite => { - let state = self.calculate_invite_state(pdu)?; - Some(state) - } - _ => None, - }; - - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( - &pdu.room_id, - &target_user_id, - content.membership, - &pdu.sender, - invite_state, - db, - true, - )?; - } - } - RoomEventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody<'a> { - #[serde(borrow)] - body: Option>, - } - - let content = serde_json::from_str::>(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - let mut batch = body - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= 50) - .map(str::to_lowercase) - .map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xff); - key.extend_from_slice(&pdu_id); - (key, Vec::new()) - }); - - self.tokenids.insert_batch(&mut batch)?; - - let admin_room = self.id_from_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )?; - let server_user = format!("@conduit:{}", db.globals.server_name()); - - let to_conduit = body.starts_with(&format!("{}: ", server_user)); - - // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let from_conduit = - pdu.sender == server_user && db.globals.emergency_password().is_none(); - - if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - db.admin.process_message(body.to_string()); - } - } - } - _ => {} - } - - Ok(pdu_id) - } - - #[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - match self - .lasttimelinecount_cache - .lock() - .unwrap() - .entry(room_id.to_owned()) - { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - 
.pdus_until(&sender_user, &room_id, u64::MAX)? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .map(|(pduid, _)| self.pdu_count(&pduid)) - .next() - { - Ok(*v.insert(last_count?)) - } else { - Ok(0) - } - } - hash_map::Entry::Occupied(o) => Ok(*o.get()), - } - } - - #[tracing::instrument(skip(self))] - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - self.userroomid_highlightcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid notification count in db.")) - }) - .unwrap_or(Ok(0)) - } - - #[tracing::instrument(skip(self))] - pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_highlightcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid highlight count in db.")) - }) - .unwrap_or(Ok(0)) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, state_ids_compressed, globals))] - pub fn set_event_state( - &self, - event_id: &EventId, - room_id: &RoomId, - state_ids_compressed: HashSet, - globals: &super::globals::Globals, - ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &state_ids_compressed - .iter() - .map(|s| &s[..]) - .collect::>(), - ); - - let (shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, globals)?; - - if !already_existed { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (state_ids_compressed, HashSet::new()) - }; - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 1_000_000, // high number because no state will be based on this one - states_parents, - )?; - } - - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. 
- /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, new_pdu, globals))] - pub fn append_to_state( - &self, - new_pdu: &PduEvent, - globals: &super::globals::Globals, - ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; - - if let Some(p) = previous_shortstatehash { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; - } - - if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let shortstatekey = self.get_or_create_shortstatekey( - &new_pdu.kind.to_string().into(), - state_key, - globals, - )?; - - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; - - let replaces = states_parents - .last() - .map(|info| { - info.1 - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); - - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } - - // TODO: statehash with deterministic inputs - let shortstatehash = globals.next_count()?; - - let mut statediffnew = HashSet::new(); - statediffnew.insert(new); - - let mut statediffremoved = HashSet::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) - } - } - - #[tracing::instrument(skip(self, invite_event))] - pub fn calculate_invite_state( - &self, - invite_event: &PduEvent, - ) -> Result>> { - let mut state = Vec::new(); - // Add recommended events - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomCanonicalAlias, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomMember, - invite_event.sender.as_str(), - )? 
{ - state.push(e.to_stripped_state_event()); - } - - state.push(invite_event.to_stripped_state_event()); - Ok(state) - } - - #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - pub fn associate_token_shortstatehash( - &self, - room_id: &RoomId, - token: u64, - shortstatehash: u64, - ) -> Result<()> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .insert(&key, &shortstatehash.to_be_bytes()) - } - - pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") - }) - }) - .transpose() - } - - /// Creates a new persisted data unit and adds it to a room. - #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex - ) -> Result> { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - } = pdu_builder; - - let prev_events = self - .get_pdu_leaves(room_id)? - .into_iter() - .take(20) - .collect::>(); - - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - - let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = unsigned.unwrap_or_default(); - if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - self.room_state_get(room_id, &event_type.to_string().into(), state_key)? 
- { - unsigned.insert( - "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), - ); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), - ); - } - } - - let mut pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key, - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_ref().to_owned()), - ); - - match ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) { - Ok(_) => {} - Err(e) => { - return match e { - ruma::signatures::Error::PduSize => Err(Error::BadRequest( - ErrorKind::TooLarge, - "Message is too long", - )), - _ => Err(Error::BadRequest( - ErrorKind::Unknown, - "Signing event failed", - )), - } - } - } - - // Generate event id - pdu.event_id = EventId::parse_arc(format!( - "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - pdu_json.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), - ); - - // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; - - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. 
- let statehashid = self.append_to_state(&pdu, &db.globals)?; - - let pdu_id = self.append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - iter::once(&*pdu.event_id), - db, - )?; - - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; - - let mut servers: HashSet> = - self.room_servers(room_id).filter_map(|r| r.ok()).collect(); - - // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - servers.insert(Box::from(state_key_uid.server_name())); - } - } - - // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(db.globals.server_name()); - - db.sending.send_pdu(servers.into_iter(), &pdu_id)?; - - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - if let Some(appservice_uid) = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }) - { - if state_key_uid == &appservice_uid { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - } - } - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - Ok(pdu.event_id) - } - - /// Returns an iterator over all PDUs in a room. 
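// An illustrative reduction of the appservice namespace check above: an event is
// forwarded to an appservice when its sender (or, for membership events, its
// state_key) matches one of the `users` regexes from the registration file. The
// `regex` crate is already a dependency of this module; the function name and
// sample patterns below are illustrative only.

use regex::Regex;

fn appservice_interested(
    user_regexes: &[Regex],
    sender: &str,
    member_state_key: Option<&str>,
) -> bool {
    user_regexes.iter().any(|re| {
        re.is_match(sender) || member_state_key.map_or(false, |sk| re.is_match(sk))
    })
}

fn main() {
    let regexes = vec![Regex::new(r"@_irc_.*:example\.org").unwrap()];
    assert!(appservice_interested(&regexes, "@_irc_alice:example.org", None));
    assert!(!appservice_interested(&regexes, "@bob:example.org", None));
}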
- #[tracing::instrument(skip(self))] - pub fn all_pdus<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result, PduEvent)>> + 'a> { - self.pdus_since(user_id, room_id, 0) - } - - /// Returns an iterator over all events in a room that happened after the event with id `since` - /// in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_since<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - since: u64, - ) -> Result, PduEvent)>> + 'a> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Skip the first pdu if it's exactly at since, because we sent that last time - let mut first_pdu_id = prefix.clone(); - first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(&first_pdu_id, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their tokens in a room that happened before the - /// event with id `until` in reverse-chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_until<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - until: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their token in a room that happened after the event - /// with id `from` in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_after<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - from: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Replace a PDU with the redacted form. - #[tracing::instrument(skip(self, reason))] - pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { - if let Some(pdu_id) = self.get_pdu_id(event_id)? { - let mut pdu = self - .get_pdu_from_id(&pdu_id)? 
- .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact(reason)?; - self.replace_pdu(&pdu_id, &pdu)?; - } - // If event does not exist, just noop - Ok(()) - } - - /// Update current membership data. - #[tracing::instrument(skip(self, last_state, db))] - pub fn update_membership( - &self, - room_id: &RoomId, - user_id: &UserId, - membership: MembershipState, - sender: &UserId, - last_state: Option>>, - db: &Database, - update_joined_count: bool, - ) -> Result<()> { - // Keep track what remote users exist by adding them as "deactivated" users - if user_id.server_name() != db.globals.server_name() { - db.users.create(user_id, None)?; - // TODO: displayname, avatar url - } - - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(user_id.server_name().as_bytes()); - - let mut serverroom_id = user_id.server_name().as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - match &membership { - MembershipState::Join => { - // Check if the user never joined this room - if !self.once_joined(user_id, room_id)? { - // Add the user ID to the join list then - self.roomuseroncejoinedids.insert(&userroom_id, &[])?; - - // Check if the room has a predecessor - if let Some(predecessor) = self - .room_state_get(room_id, &StateEventType::RoomCreate, "")? - .and_then(|create| serde_json::from_str(create.content.get()).ok()) - .and_then(|content: RoomCreateEventContent| content.predecessor) - { - // Copy user settings from predecessor to the current room: - // - Push rules - // - // TODO: finish this once push rules are implemented. - // - // let mut push_rules_event_content: PushRulesEvent = account_data - // .get( - // None, - // user_id, - // EventType::PushRules, - // )?; - // - // NOTE: find where `predecessor.room_id` match - // and update to `room_id`. - // - // account_data - // .update( - // None, - // user_id, - // EventType::PushRules, - // &push_rules_event_content, - // globals, - // ) - // .ok(); - - // Copy old tags to new room - if let Some(tag_event) = db.account_data.get::( - Some(&predecessor.room_id), - user_id, - RoomAccountDataEventType::Tag, - )? { - db.account_data - .update( - Some(room_id), - user_id, - RoomAccountDataEventType::Tag, - &tag_event, - &db.globals, - ) - .ok(); - }; - - // Copy direct chat flag - if let Some(mut direct_event) = db.account_data.get::( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - )? 
{ - let mut room_ids_updated = false; - - for room_ids in direct_event.content.0.values_mut() { - if room_ids.iter().any(|r| r == &predecessor.room_id) { - room_ids.push(room_id.to_owned()); - room_ids_updated = true; - } - } - - if room_ids_updated { - db.account_data.update( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - &direct_event, - &db.globals, - )?; - } - }; - } - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_joined.insert(&userroom_id, &[])?; - self.roomuserid_joined.insert(&roomuser_id, &[])?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Invite => { - // We want to know if the sender is ignored by the receiver - let is_ignored = db - .account_data - .get::( - None, // Ignored users are in global account data - user_id, // Receiver - GlobalAccountDataEventType::IgnoredUserList - .to_string() - .into(), - )? - .map_or(false, |ignored| { - ignored - .content - .ignored_users - .iter() - .any(|user| user == sender) - }); - - if is_ignored { - return Ok(()); - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_invitestate.insert( - &userroom_id, - &serde_json::to_vec(&last_state.unwrap_or_default()) - .expect("state to bytes always works"), - )?; - self.roomuserid_invitecount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Leave | MembershipState::Ban => { - if update_joined_count - && self - .room_members(room_id) - .chain(self.room_members_invited(room_id)) - .filter_map(|r| r.ok()) - .all(|u| u.server_name() != user_id.server_name()) - { - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - self.userroomid_leftstate.insert( - &userroom_id, - &serde_json::to_vec(&Vec::>::new()).unwrap(), - )?; // TODO - self.roomuserid_leftcount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - } - _ => {} - } - - if update_joined_count { - self.update_joined_count(room_id, db)?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn update_joined_count(&self, room_id: &RoomId, db: &Database) -> Result<()> { - let mut joinedcount = 0_u64; - let mut invitedcount = 0_u64; - let mut joined_servers = HashSet::new(); - let mut real_users = HashSet::new(); - - for joined in self.room_members(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(joined.server_name().to_owned()); - if joined.server_name() == db.globals.server_name() - && !db.users.is_deactivated(&joined).unwrap_or(true) - { - real_users.insert(joined); - } - joinedcount += 1; - } - - for invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(invited.server_name().to_owned()); - invitedcount += 1; - } - - self.roomid_joinedcount - 
.insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?; - - self.roomid_invitedcount - .insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?; - - self.our_real_users_cache - .write() - .unwrap() - .insert(room_id.to_owned(), Arc::new(real_users)); - - for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { - if !joined_servers.remove(&old_joined_server) { - // Server not in room anymore - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(old_joined_server.as_bytes()); - - let mut serverroom_id = old_joined_server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - } - - // Now only new servers are in joined_servers anymore - for server in joined_servers { - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(server.as_bytes()); - - let mut serverroom_id = server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - - self.appservice_in_room_cache - .write() - .unwrap() - .remove(room_id); - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn get_our_real_users( - &self, - room_id: &RoomId, - db: &Database, - ) -> Result>>> { - let maybe = self - .our_real_users_cache - .read() - .unwrap() - .get(room_id) - .cloned(); - if let Some(users) = maybe { - Ok(users) - } else { - self.update_joined_count(room_id, db)?; - Ok(Arc::clone( - self.our_real_users_cache - .read() - .unwrap() - .get(room_id) - .unwrap(), - )) - } - } - - #[tracing::instrument(skip(self, room_id, appservice, db))] - pub fn appservice_in_room( - &self, - room_id: &RoomId, - appservice: &(String, serde_yaml::Value), - db: &Database, - ) -> Result { - let maybe = self - .appservice_in_room_cache - .read() - .unwrap() - .get(room_id) - .and_then(|map| map.get(&appservice.0)) - .copied(); - - if let Some(b) = maybe { - Ok(b) - } else if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - - let bridge_user_id = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }); - - let in_room = bridge_user_id - .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) - || self.room_members(room_id).any(|userid| { - userid.map_or(false, |userid| { - users.iter().any(|r| r.is_match(userid.as_str())) - }) - }); - - self.appservice_in_room_cache - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default() - .insert(appservice.0.clone(), in_room); - - Ok(in_room) - } else { - Ok(false) - } - } - - // Make a user leave all their joined rooms - #[tracing::instrument(skip(self, db))] - pub async fn leave_all_rooms(&self, user_id: &UserId, db: &Database) -> Result<()> { - let all_rooms = db - .rooms - .rooms_joined(user_id) - .chain(db.rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) - .collect::>(); - - for room_id in all_rooms { - let room_id = match room_id { - Ok(room_id) => room_id, - 
Err(_) => continue, - }; - - let _ = self.leave_room(user_id, &room_id, db).await; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - pub async fn leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - // Ask a remote server if we don't have this room - if !self.exists(room_id)? && room_id.server_name() != db.globals.server_name() { - if let Err(e) = self.remote_leave_room(user_id, room_id, db).await { - warn!("Failed to leave room {} remotely: {}", user_id, e); - // Don't tell the client about this error - } - - let last_state = self - .invite_state(user_id, room_id)? - .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?; - - // We always drop the invite, we can't rely on other servers - self.update_membership( - room_id, - user_id, - MembershipState::Leave, - user_id, - last_state, - db, - true, - )?; - } else { - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - let mut event: RoomMemberEventContent = serde_json::from_str( - self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot leave a room you are not a member of.", - ))? - .content - .get(), - ) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = MembershipState::Leave; - - self.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - user_id, - room_id, - db, - &state_lock, - )?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - async fn remote_leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - let mut make_leave_response_and_server = Err(Error::BadServerResponse( - "No server available to assist in leaving.", - )); - - let invite_state = db - .rooms - .invite_state(user_id, room_id)? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "User is not invited.", - ))?; - - let servers: HashSet<_> = invite_state - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); - - for remote_server in servers { - let make_leave_response = db - .sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, - ) - .await; - - make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); - - if make_leave_response_and_server.is_ok() { - break; - } - } - - let (make_leave_response, remote_server) = make_leave_response_and_server?; - - let room_version_id = match make_leave_response.room_version { - Some(version) if self.is_supported_version(&db, &version) => version, - _ => return Err(Error::BadServerResponse("Room version is not supported")), - }; - - let mut leave_event_stub = - serde_json::from_str::(make_leave_response.event.get()).map_err( - |_| Error::BadServerResponse("Invalid make_leave event json received from server."), - )?; - - // TODO: Is origin needed? 
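The candidate servers for the remote leave above are taken from the `sender` fields of the stripped invite-state events. A minimal sketch of that filtering chain, assuming plain `serde_json` values and a `split_once(':')` shortcut in place of ruma's typed `UserId`/`ServerName`:

use std::collections::HashSet;

/// Collect the server names of every `sender` found in stripped invite-state
/// events. The events are plain JSON values here; the real code goes through
/// ruma's typed wrappers, so this only approximates the same chain.
fn candidate_servers(invite_state: &[serde_json::Value]) -> HashSet<String> {
    invite_state
        .iter()
        .filter_map(|event| event.get("sender")?.as_str())
        // A Matrix user ID looks like `@localpart:server.name`; everything
        // after the first colon is treated as the server name (simplified).
        .filter_map(|sender| sender.split_once(':').map(|(_, s)| s.to_owned()))
        .collect()
}

fn main() {
    let events = vec![
        serde_json::json!({ "type": "m.room.member", "sender": "@alice:example.org" }),
        serde_json::json!({ "type": "m.room.name", "sender": "@bob:matrix.org" }),
    ];
    // Prints the two server names (set order may vary).
    println!("{:?}", candidate_servers(&events));
}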
- leave_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - leave_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms - leave_event_stub.remove("event_id"); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut leave_event_stub, - &room_version_id, - ) - .expect("event is valid, we just created it"); - - // Generate event id - let event_id = EventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - // Add event_id back - leave_event_stub.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - - // It has enough fields to be called a proper event now - let leave_event = leave_event_stub; - - db.sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::create_leave_event::v2::Request { - room_id, - event_id: &event_id, - pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), - }, - ) - .await?; - - Ok(()) - } - - /// Makes a user forget a room. - #[tracing::instrument(skip(self))] - pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - - Ok(()) - } - - #[tracing::instrument(skip(self, globals))] - pub fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId>, - globals: &super::globals::Globals, - ) -> Result<()> { - if let Some(room_id) = room_id { - // New alias - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - } else { - // room_id=None means remove alias - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result>> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_aliases<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? - .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { - if public { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } else { - self.publicroomids.remove(room_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn iter_ids(&self) -> impl Iterator>> + '_ { - self.roomid_shortroomid.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn search_pdus<'a>( - &'a self, - room_id: &RoomId, - search_string: &str, - ) -> Result> + 'a, Vec)>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - let prefix_clone = prefix.clone(); - - let words: Vec<_> = search_string - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .map(str::to_lowercase) - .collect(); - - let iterators = words.clone().into_iter().map(move |word| { - let mut prefix2 = prefix.clone(); - prefix2.extend_from_slice(word.as_bytes()); - prefix2.push(0xff); - - let mut last_possible_id = prefix2.clone(); - last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.tokenids - .iter_from(&last_possible_id, true) // Newest pdus first - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) - }); - - Ok(utils::common_elements(iterators, |a, b| { - // We compare b with a because we reversed the iterator earlier - b.cmp(a) - }) - .map(|iter| { - ( - iter.map(move |id| { - let mut pduid = prefix_clone.clone(); - pduid.extend_from_slice(&id); - pduid - }), - words, - ) - })) - } - - #[tracing::instrument(skip(self))] - pub fn get_shared_rooms<'a>( - &'a self, - users: Vec>, - ) -> Result>> + 'a> { - let iterators = users.into_iter().map(move |user_id| { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_joined - .scan_prefix(prefix) - .map(|(key, _)| { - let roomid_index = key - .iter() - .enumerate() - .find(|(_, &b)| b == 0xff) - .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? 
- .0 - + 1; // +1 because the room id starts AFTER the separator - - let room_id = key[roomid_index..].to_vec(); - - Ok::<_, Error>(room_id) - }) - .filter_map(|r| r.ok()) - }); - - // We use the default compare function because keys are sorted correctly (not reversed) - Ok(utils::common_elements(iterators, Ord::cmp) - .expect("users is not empty") - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { - Error::bad_database("Invalid RoomId bytes in userroomid_joined") - })?) - .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - })) - } - - /// Returns an iterator of all servers participating in this room. - #[tracing::instrument(skip(self))] - pub fn room_servers<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomserverids.scan_prefix(prefix).map(|(key, _)| { - ServerName::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Server name in roomserverids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.serverroomids.get(&key).map(|o| o.is_some()) - } - - /// Returns an iterator of all rooms a server participates in (as far as we know). - #[tracing::instrument(skip(self))] - pub fn server_rooms<'a>( - &'a self, - server: &ServerName, - ) -> impl Iterator>> + 'a { - let mut prefix = server.as_bytes().to_vec(); - prefix.push(0xff); - - self.serverroomids.scan_prefix(prefix).map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) - }) - } - - /// Returns an iterator over all joined members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { - self.roomid_joinedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { - self.roomid_invitedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - /// Returns an iterator over all User IDs who ever joined a room. 
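Nearly every tree above keys its entries as two identifiers joined by a 0xff separator (room ++ 0xff ++ server, room ++ 0xff ++ user, and so on), and the read side recovers the trailing identifier with `rsplit` on that byte. A minimal sketch of the convention, using plain byte vectors rather than Conduit's Tree handles:

/// Build a `roomuser` key: room ID bytes, a 0xff separator, then user ID bytes.
/// 0xff can never occur in valid UTF-8 text, so it is safe as a delimiter and
/// keeps every entry of one room contiguous for prefix scans.
fn roomuser_key(room_id: &str, user_id: &str) -> Vec<u8> {
    let mut key = room_id.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(user_id.as_bytes());
    key
}

/// Recover the user ID from such a key, mirroring the
/// `key.rsplit(|&b| b == 0xff).next()` pattern used by the iterators above.
fn user_from_key(key: &[u8]) -> Option<String> {
    let tail = key.rsplit(|&b| b == 0xff).next()?;
    String::from_utf8(tail.to_vec()).ok()
}

fn main() {
    let key = roomuser_key("!room:example.org", "@alice:example.org");
    assert_eq!(user_from_key(&key).as_deref(), Some("@alice:example.org"));
}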
- #[tracing::instrument(skip(self))] - pub fn room_useroncejoined<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuseroncejoinedids - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in room_useroncejoined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) - }) - } - - /// Returns an iterator over all invited members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members_invited<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_invitecount - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_invitecount - .get(&key)? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid invitecount in db.") - })?)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_leftcount - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid leftcount in db.")) - }) - .transpose() - } - - pub fn is_disabled(&self, room_id: &RoomId) -> Result { - Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) - } - - /// Returns an iterator over all rooms this user joined. - #[tracing::instrument(skip(self))] - pub fn rooms_joined<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator>> + 'a { - self.userroomid_joined - .scan_prefix(user_id.as_bytes().to_vec()) - .map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) - }) - } - - /// Returns an iterator over all rooms a user was invited to. 
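The counts read back above (and the per-event counts baked into pdu ids and alias ids) are always written with `to_be_bytes`. Big-endian matters because the backing store orders keys lexicographically by bytes, so byte order has to agree with numeric order for the prefix and `iter_from` scans to walk events in sequence. A small, self-contained demonstration of that property:

/// For fixed-width unsigned integers, big-endian byte strings sort in the same
/// order as the numbers themselves, which is why counters that end up in keys
/// use `to_be_bytes` throughout this module.
fn main() {
    let counts: Vec<u64> = vec![1, 255, 256, 65_536, u64::MAX];
    for pair in counts.windows(2) {
        let (a, b) = (pair[0], pair[1]);
        // Lexicographic comparison of the byte arrays...
        assert!(a.to_be_bytes() < b.to_be_bytes());
        // ...matches numeric comparison. Little-endian would not: the bytes of
        // 256u64 start with 0x00 and would sort before those of 1u64.
        assert!(a < b);
    }
    println!("big-endian byte order matches numeric order");
}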
- #[tracing::instrument(skip(self))] - pub fn rooms_invited<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_invitestate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn invite_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_invitestate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok(state) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn left_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_leftstate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok(state) - }) - .transpose() - } - - /// Returns an iterator over all rooms a user left. - #[tracing::instrument(skip(self))] - pub fn rooms_left<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_leftstate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_left(&self, user_id: &UserId, 
room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( - &'a self, - key: &[u64], - ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); - } - - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? - .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - }) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } - } - - Ok(None) - } - - #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db - if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - )?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_mark_sent( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - lazy_load: HashSet>, - count: u64, - ) { - self.lazy_load_waiting.lock().unwrap().insert( - ( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - count, - ), - lazy_load, - ); - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } - - /// Returns the room's version. 
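For single-event keys the auth chain is also persisted in `shorteventid_authchain`, simply as the concatenated big-endian bytes of every shorteventid in the chain. A minimal round-trip sketch of that encoding, using plain std collections in place of the LRU cache and Tree:

use std::collections::HashSet;
use std::mem::size_of;

/// Encode a set of shorteventids the way `cache_auth_chain` does:
/// one 8-byte big-endian integer per entry, back to back.
fn encode_chain(chain: &HashSet<u64>) -> Vec<u8> {
    chain.iter().flat_map(|s| s.to_be_bytes()).collect()
}

/// Decode it the way `get_auth_chain_from_cache` does, with `chunks_exact`.
fn decode_chain(bytes: &[u8]) -> HashSet<u64> {
    bytes
        .chunks_exact(size_of::<u64>())
        .map(|chunk| u64::from_be_bytes(chunk.try_into().expect("chunk is 8 bytes")))
        .collect()
}

fn main() {
    let chain: HashSet<u64> = [3, 17, 42].into_iter().collect();
    let bytes = encode_chain(&chain);
    assert_eq!(bytes.len(), chain.len() * size_of::<u64>());
    assert_eq!(decode_chain(&bytes), chain);
}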
- #[tracing::instrument(skip(self))] - pub fn get_room_version(&self, room_id: &RoomId) -> Result { - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - let room_version = create_event_content - .map(|create_event| create_event.room_version) - .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; - Ok(room_version) - } -} diff --git a/src/service/rooms/mod.rs b/src/service/rooms/mod.rs index 4ad815e3..89598afe 100644 --- a/src/service/rooms/mod.rs +++ b/src/service/rooms/mod.rs @@ -141,135 +141,6 @@ impl Rooms { db.globals.supported_room_versions().contains(room_version) } - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - let mut result = BTreeMap::new(); - let mut i = 0; - for compressed in full_state.into_iter() { - let parsed = self.parse_compressed_state_event(compressed)?; - result.insert(parsed.0, parsed.1); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - Ok(result) - } - - #[tracing::instrument(skip(self))] - pub async fn state_full( - &self, - shortstatehash: u64, - ) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - let mut result = HashMap::new(); - let mut i = 0; - for compressed in full_state { - let (_, eventid) = self.parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.get_pdu(&eventid)? { - result.insert( - ( - pdu.kind.to_string().into(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - ); - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get_id( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { - Some(s) => s, - None => return Ok(None), - }; - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .into_iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - self.parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) - } - - /// Returns the state hash for this pdu. - pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.eventid_shorteventid - .get(event_id.as_bytes())? 
- .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) - }) - .transpose() - }) - } - - /// Returns the last state hash key added to the db for the given room. - #[tracing::instrument(skip(self))] - pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - /// This fetches auth events from the current state. #[tracing::instrument(skip(self))] pub fn get_auth_events( @@ -326,3178 +197,20 @@ impl Rooms { hash.as_ref().into() } - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. - self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() - } - - /// Force the creation of a new StateHash and insert it into the db. - /// - /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. - #[tracing::instrument(skip(self, new_state_ids_compressed, db))] - pub fn force_state( - &self, - room_id: &RoomId, - new_state_ids_compressed: HashSet, - db: &Database, - ) -> Result<()> { - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - for event_id in statediffnew.into_iter().filter_map(|new| { - self.parse_compressed_state_event(new) - .ok() - .map(|(_, id)| id) - }) { - let pdu = match self.get_pdu_json(&event_id)? 
{ - Some(pdu) => pdu, - None => continue, - }; - - if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { - continue; - } - - let pdu: PduEvent = match serde_json::from_str( - &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), - ) { - Ok(pdu) => pdu, - Err(_) => continue, - }; - - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let membership = match serde_json::from_str::(pdu.content.get()) { - Ok(e) => e.membership, - Err(_) => continue, - }; - - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, - }; - - let user_id = match UserId::parse(state_key) { - Ok(id) => id, - Err(_) => continue, - }; - - self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; - } - - self.update_joined_count(room_id, db)?; - - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. - #[tracing::instrument(skip(self))] - pub fn load_shortstatehash_info( - &self, - shortstatehash: u64, - ) -> Result< - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - > { - if let Some(r) = self - .stateinfo_cache - .lock() - .unwrap() - .get_mut(&shortstatehash) - { - return Ok(r.clone()); - } - - let value = self - .shortstatehash_statediff - .get(&shortstatehash.to_be_bytes())? - .ok_or_else(|| Error::bad_database("State hash does not exist"))?; - let parent = - utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - - let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); - - let mut i = size_of::(); - while let Some(v) = value.get(i..i + 2 * size_of::()) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i += size_of::(); - continue; - } - if add_mode { - added.insert(v.try_into().expect("we checked the size above")); - } else { - removed.insert(v.try_into().expect("we checked the size above")); - } - i += 2 * size_of::(); - } - - if parent != 0_u64 { - let mut response = self.load_shortstatehash_info(parent)?; - let mut state = response.last().unwrap().1.clone(); - state.extend(added.iter().copied()); - for r in &removed { - state.remove(r); - } - - response.push((shortstatehash, state, added, removed)); - - Ok(response) - } else { - let response = vec![(shortstatehash, added.clone(), added, removed)]; - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); - Ok(response) - } - } - - pub fn compress_state_event( - &self, - shortstatekey: u64, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - let mut v = shortstatekey.to_be_bytes().to_vec(); - v.extend_from_slice( - &self - .get_or_create_shorteventid(event_id, globals)? 
- .to_be_bytes(), - ); - Ok(v.try_into().expect("we checked the size above")) - } - - /// Returns shortstatekey, event id - pub fn parse_compressed_state_event( - &self, - compressed_event: CompressedStateEvent, - ) -> Result<(u64, Arc)> { - Ok(( - utils::u64_from_bytes(&compressed_event[0..size_of::()]) - .expect("bytes have right length"), - self.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]) - .expect("bytes have right length"), - )?, - )) - } - - /// Creates a new shortstatehash that often is just a diff to an already existing - /// shortstatehash and therefore very efficient. - /// - /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer - /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 - /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's - /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. - /// - /// * `shortstatehash` - Shortstatehash of this state - /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid - /// * `statediffremoved` - Removed from base. Each vec is shortstatekey+shorteventid - /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer - /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer - #[tracing::instrument(skip( - self, - statediffnew, - statediffremoved, - diff_to_sibling, - parent_states - ))] - pub fn save_state_from_diff( - &self, - shortstatehash: u64, - statediffnew: HashSet, - statediffremoved: HashSet, - diff_to_sibling: usize, - mut parent_states: Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - ) -> Result<()> { - let diffsum = statediffnew.len() + statediffremoved.len(); - - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().unwrap(); - - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - - return Ok(()); - } - - if parent_states.is_empty() { - // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - - return Ok(()); - }; - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. 
We replace a layer above - - let parent = parent_states.pop().unwrap(); - let parent_diff = parent.2.len() + parent.3.len(); - - if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { - // Diff too big, we replace above layer(s) - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.to_be_bytes().to_vec(); - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed[..]); - } - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - } - - Ok(()) - } - - /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash( - &self, - state_hash: &StateHashId, - globals: &super::globals::Globals, - ) -> Result<(u64, bool)> { - Ok(match self.statehash_shortstatehash.get(state_hash)? { - Some(shortstatehash) => ( - utils::u64_from_bytes(&shortstatehash) - .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, - true, - ), - None => { - let shortstatehash = globals.next_count()?; - self.statehash_shortstatehash - .insert(state_hash, &shortstatehash.to_be_bytes())?; - (shortstatehash, false) - } - }) - } - - pub fn get_or_create_shorteventid( - &self, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { - return Ok(*short); - } - - let short = match self.eventid_shorteventid.get(event_id.as_bytes())? { - Some(shorteventid) => utils::u64_from_bytes(&shorteventid) - .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, - None => { - let shorteventid = globals.next_count()?; - self.eventid_shorteventid - .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; - shorteventid - } - }; - - self.eventidshort_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), short); - - Ok(short) - } - - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { - self.roomid_shortroomid - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) - }) - .transpose() - } - - pub fn get_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - ) -> Result> { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(Some(*short)); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = self - .statekey_shortstatekey - .get(&statekey)? 
- .map(|shortstatekey| { - utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) - }) - .transpose()?; - - if let Some(s) = short { - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), s); - } - - Ok(short) - } - - pub fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - globals: &super::globals::Globals, - ) -> Result { - Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? { - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, - None => { - let short = globals.next_count()?; - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes())?; - short - } - }) - } - - pub fn get_or_create_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(*short); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = match self.statekey_shortstatekey.get(&statekey)? { - Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, - None => { - let shortstatekey = globals.next_count()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes())?; - self.shortstatekey_statekey - .insert(&shortstatekey.to_be_bytes(), &statekey)?; - shortstatekey - } - }; - - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), short); - - Ok(short) - } - - pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - if let Some(id) = self - .shorteventid_cache - .lock() - .unwrap() - .get_mut(&shorteventid) - { - return Ok(Arc::clone(id)); - } - - let bytes = self - .shorteventid_eventid - .get(&shorteventid.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - - let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; - - self.shorteventid_cache - .lock() - .unwrap() - .insert(shorteventid, Arc::clone(&event_id)); - - Ok(event_id) - } - - pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { - if let Some(id) = self - .shortstatekey_cache - .lock() - .unwrap() - .get_mut(&shortstatekey) - { - return Ok(id.clone()); - } - - let bytes = self - .shortstatekey_statekey - .get(&shortstatekey.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; - - let mut parts = bytes.splitn(2, |&b| b == 0xff); - let eventtype_bytes = parts.next().expect("split always returns one entry"); - let statekey_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; - - let event_type = - StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { - Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; - - let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { - Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") - })?; - - let result = (event_type, state_key); - - self.shortstatekey_cache - .lock() - .unwrap() - .insert(shortstatekey, result.clone()); - - Ok(result) - } - - /// Returns the full room state. - #[tracing::instrument(skip(self))] - pub async fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). #[tracing::instrument(skip(self))] - pub fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns the `count` of this pdu's id. - pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) - } - - /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - - /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? 
- .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu's id. - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { - return Ok(Some(Arc::clone(p))); - } - - if let Some(pdu) = self - .eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - .map(Arc::new) - }) - .transpose()? - { - self.pdu_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), Arc::clone(&pdu)); - Ok(Some(pdu)) - } else { - Ok(None) - } - } - - /// Returns the pdu. - /// - /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Returns the pdu as a `BTreeMap`. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) + pub fn iter_ids(&self) -> impl Iterator>> + '_ { + self.roomid_shortroomid.iter().map(|(bytes, _)| { + RoomId::parse( + utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Room ID in publicroomids is invalid unicode.") + })?, + ) + .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) }) } - /// Removes a pdu and creates a new one with the same id. - #[tracing::instrument(skip(self))] - fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - if self.pduid_pdu.get(pdu_id)?.is_some() { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), - )?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "PDU does not exist.", - )) - } - } - - /// Returns the leaf pdus of a room. - #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - #[tracing::instrument(skip(self, room_id, event_ids))] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - /// Replace the leaves of a room. - /// - /// The provided `event_ids` become the new leaves, this allows a room to have multiple - /// `prev_events`. - #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves<'a>( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - /// Returns the pdu from the outlier tree. - pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) + pub fn is_disabled(&self, room_id: &RoomId) -> Result { + Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) } - /// Append the PDU as an outlier. - /// - /// Any event given to this will be processed (state-res) on another thread. - #[tracing::instrument(skip(self, pdu))] - pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } - - #[tracing::instrument(skip(self))] - pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } - - /// Creates a new persisted data unit and adds it to a room. - /// - /// By this point the incoming event should be fully authenticated, no auth happens - /// in `append_pdu`. - /// - /// Returns pdu id - #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] - pub fn append_pdu<'a>( - &self, - pdu: &PduEvent, - mut pdu_json: CanonicalJsonObject, - leaves: impl IntoIterator + Debug, - db: &Database, - ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); - - // Make unsigned fields correct. 
This is not properly documented in the spec, but state - // events need to have previous content in the unsigned field, so clients can easily - // interpret things like membership changes - if let Some(state_key) = &pdu.state_key { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) - { - if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) - .unwrap() - { - unsigned.insert( - "prev_content".to_owned(), - CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()) - .expect("event is valid, we just created it"), - ), - ); - } - } - } else { - error!("Invalid unsigned type in pdu."); - } - } - - // We must keep track of all events that have been referenced. - self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - self.replace_pdu_leaves(&pdu.room_id, leaves)?; - - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(pdu.room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - - let count1 = db.globals.next_count()?; - // Mark as read first so the sending client doesn't get a notification even if appending - // fails - self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; - self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; - - let count2 = db.globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&count2.to_be_bytes()); - - // There's a brief moment of time here where the count is updated but the pdu does not - // exist. This could theoretically lead to dropped pdus, but it's extremely rare - // - // Update: We fixed this using insert_lock - - self.pduid_pdu.insert( - &pdu_id, - &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - self.lasttimelinecount_cache - .lock() - .unwrap() - .insert(pdu.room_id.clone(), count2); - - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; - - drop(insert_lock); - - // See if the event matches any known pushers - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - let sync_pdu = pdu.to_sync_room_event(); - - let mut notifies = Vec::new(); - let mut highlights = Vec::new(); - - for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { - // Don't notify the user of their own events - if user == &pdu.sender { - continue; - } - - let rules_for_user = db - .account_data - .get( - None, - user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? - .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); - - let mut highlight = false; - let mut notify = false; - - for action in pusher::get_actions( - user, - &rules_for_user, - &power_levels, - &sync_pdu, - &pdu.room_id, - db, - )? 
{ - match action { - Action::DontNotify => notify = false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => notify = true, - Action::SetTweak(Tweak::Highlight(true)) => { - highlight = true; - } - _ => {} - }; - } - - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(pdu.room_id.as_bytes()); - - if notify { - notifies.push(userroom_id.clone()); - } - - if highlight { - highlights.push(userroom_id); - } - - for senderkey in db.pusher.get_pusher_senderkeys(user) { - db.sending.send_push_pdu(&*pdu_id, senderkey)?; - } - } - - self.userroomid_notificationcount - .increment_batch(&mut notifies.into_iter())?; - self.userroomid_highlightcount - .increment_batch(&mut highlights.into_iter())?; - - match pdu.kind { - RoomEventType::RoomRedaction => { - if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(redact_id, pdu)?; - } - } - RoomEventType::RoomMember => { - if let Some(state_key) = &pdu.state_key { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - // if the state_key fails - let target_user_id = UserId::parse(state_key.clone()) - .expect("This state_key was previously validated"); - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - let invite_state = match content.membership { - MembershipState::Invite => { - let state = self.calculate_invite_state(pdu)?; - Some(state) - } - _ => None, - }; - - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( - &pdu.room_id, - &target_user_id, - content.membership, - &pdu.sender, - invite_state, - db, - true, - )?; - } - } - RoomEventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody<'a> { - #[serde(borrow)] - body: Option>, - } - - let content = serde_json::from_str::>(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - let mut batch = body - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= 50) - .map(str::to_lowercase) - .map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xff); - key.extend_from_slice(&pdu_id); - (key, Vec::new()) - }); - - self.tokenids.insert_batch(&mut batch)?; - - let admin_room = self.id_from_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )?; - let server_user = format!("@conduit:{}", db.globals.server_name()); - - let to_conduit = body.starts_with(&format!("{}: ", server_user)); - - // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let from_conduit = - pdu.sender == server_user && db.globals.emergency_password().is_none(); - - if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - db.admin.process_message(body.to_string()); - } - } - } - _ => {} - } - - Ok(pdu_id) - } - - #[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - match self - .lasttimelinecount_cache - .lock() - .unwrap() - .entry(room_id.to_owned()) - { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - 
.pdus_until(&sender_user, &room_id, u64::MAX)? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .map(|(pduid, _)| self.pdu_count(&pduid)) - .next() - { - Ok(*v.insert(last_count?)) - } else { - Ok(0) - } - } - hash_map::Entry::Occupied(o) => Ok(*o.get()), - } - } - - #[tracing::instrument(skip(self))] - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - self.userroomid_highlightcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid notification count in db.")) - }) - .unwrap_or(Ok(0)) - } - - #[tracing::instrument(skip(self))] - pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_highlightcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid highlight count in db.")) - }) - .unwrap_or(Ok(0)) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, state_ids_compressed, globals))] - pub fn set_event_state( - &self, - event_id: &EventId, - room_id: &RoomId, - state_ids_compressed: HashSet, - globals: &super::globals::Globals, - ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &state_ids_compressed - .iter() - .map(|s| &s[..]) - .collect::>(), - ); - - let (shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, globals)?; - - if !already_existed { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (state_ids_compressed, HashSet::new()) - }; - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 1_000_000, // high number because no state will be based on this one - states_parents, - )?; - } - - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. 
- /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, new_pdu, globals))] - pub fn append_to_state( - &self, - new_pdu: &PduEvent, - globals: &super::globals::Globals, - ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; - - if let Some(p) = previous_shortstatehash { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; - } - - if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let shortstatekey = self.get_or_create_shortstatekey( - &new_pdu.kind.to_string().into(), - state_key, - globals, - )?; - - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; - - let replaces = states_parents - .last() - .map(|info| { - info.1 - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); - - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } - - // TODO: statehash with deterministic inputs - let shortstatehash = globals.next_count()?; - - let mut statediffnew = HashSet::new(); - statediffnew.insert(new); - - let mut statediffremoved = HashSet::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) - } - } - - #[tracing::instrument(skip(self, invite_event))] - pub fn calculate_invite_state( - &self, - invite_event: &PduEvent, - ) -> Result>> { - let mut state = Vec::new(); - // Add recommended events - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomCanonicalAlias, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomMember, - invite_event.sender.as_str(), - )? 
{ - state.push(e.to_stripped_state_event()); - } - - state.push(invite_event.to_stripped_state_event()); - Ok(state) - } - - #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - pub fn associate_token_shortstatehash( - &self, - room_id: &RoomId, - token: u64, - shortstatehash: u64, - ) -> Result<()> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .insert(&key, &shortstatehash.to_be_bytes()) - } - - pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") - }) - }) - .transpose() - } - - /// Creates a new persisted data unit and adds it to a room. - #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex - ) -> Result> { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - } = pdu_builder; - - let prev_events = self - .get_pdu_leaves(room_id)? - .into_iter() - .take(20) - .collect::>(); - - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - - let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = unsigned.unwrap_or_default(); - if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - self.room_state_get(room_id, &event_type.to_string().into(), state_key)? 
- { - unsigned.insert( - "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), - ); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), - ); - } - } - - let mut pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key, - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_ref().to_owned()), - ); - - match ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) { - Ok(_) => {} - Err(e) => { - return match e { - ruma::signatures::Error::PduSize => Err(Error::BadRequest( - ErrorKind::TooLarge, - "Message is too long", - )), - _ => Err(Error::BadRequest( - ErrorKind::Unknown, - "Signing event failed", - )), - } - } - } - - // Generate event id - pdu.event_id = EventId::parse_arc(format!( - "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - pdu_json.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), - ); - - // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; - - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. 
- let statehashid = self.append_to_state(&pdu, &db.globals)?; - - let pdu_id = self.append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - iter::once(&*pdu.event_id), - db, - )?; - - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; - - let mut servers: HashSet> = - self.room_servers(room_id).filter_map(|r| r.ok()).collect(); - - // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - servers.insert(Box::from(state_key_uid.server_name())); - } - } - - // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(db.globals.server_name()); - - db.sending.send_pdu(servers.into_iter(), &pdu_id)?; - - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - if let Some(appservice_uid) = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }) - { - if state_key_uid == &appservice_uid { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - } - } - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - Ok(pdu.event_id) - } - - /// Returns an iterator over all PDUs in a room. 
- #[tracing::instrument(skip(self))] - pub fn all_pdus<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result, PduEvent)>> + 'a> { - self.pdus_since(user_id, room_id, 0) - } - - /// Returns an iterator over all events in a room that happened after the event with id `since` - /// in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_since<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - since: u64, - ) -> Result, PduEvent)>> + 'a> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Skip the first pdu if it's exactly at since, because we sent that last time - let mut first_pdu_id = prefix.clone(); - first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(&first_pdu_id, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their tokens in a room that happened before the - /// event with id `until` in reverse-chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_until<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - until: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their token in a room that happened after the event - /// with id `from` in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_after<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - from: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Replace a PDU with the redacted form. - #[tracing::instrument(skip(self, reason))] - pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { - if let Some(pdu_id) = self.get_pdu_id(event_id)? { - let mut pdu = self - .get_pdu_from_id(&pdu_id)? 
- .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact(reason)?; - self.replace_pdu(&pdu_id, &pdu)?; - } - // If event does not exist, just noop - Ok(()) - } - - /// Update current membership data. - #[tracing::instrument(skip(self, last_state, db))] - pub fn update_membership( - &self, - room_id: &RoomId, - user_id: &UserId, - membership: MembershipState, - sender: &UserId, - last_state: Option>>, - db: &Database, - update_joined_count: bool, - ) -> Result<()> { - // Keep track what remote users exist by adding them as "deactivated" users - if user_id.server_name() != db.globals.server_name() { - db.users.create(user_id, None)?; - // TODO: displayname, avatar url - } - - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(user_id.server_name().as_bytes()); - - let mut serverroom_id = user_id.server_name().as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - match &membership { - MembershipState::Join => { - // Check if the user never joined this room - if !self.once_joined(user_id, room_id)? { - // Add the user ID to the join list then - self.roomuseroncejoinedids.insert(&userroom_id, &[])?; - - // Check if the room has a predecessor - if let Some(predecessor) = self - .room_state_get(room_id, &StateEventType::RoomCreate, "")? - .and_then(|create| serde_json::from_str(create.content.get()).ok()) - .and_then(|content: RoomCreateEventContent| content.predecessor) - { - // Copy user settings from predecessor to the current room: - // - Push rules - // - // TODO: finish this once push rules are implemented. - // - // let mut push_rules_event_content: PushRulesEvent = account_data - // .get( - // None, - // user_id, - // EventType::PushRules, - // )?; - // - // NOTE: find where `predecessor.room_id` match - // and update to `room_id`. - // - // account_data - // .update( - // None, - // user_id, - // EventType::PushRules, - // &push_rules_event_content, - // globals, - // ) - // .ok(); - - // Copy old tags to new room - if let Some(tag_event) = db.account_data.get::( - Some(&predecessor.room_id), - user_id, - RoomAccountDataEventType::Tag, - )? { - db.account_data - .update( - Some(room_id), - user_id, - RoomAccountDataEventType::Tag, - &tag_event, - &db.globals, - ) - .ok(); - }; - - // Copy direct chat flag - if let Some(mut direct_event) = db.account_data.get::( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - )? 
{ - let mut room_ids_updated = false; - - for room_ids in direct_event.content.0.values_mut() { - if room_ids.iter().any(|r| r == &predecessor.room_id) { - room_ids.push(room_id.to_owned()); - room_ids_updated = true; - } - } - - if room_ids_updated { - db.account_data.update( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - &direct_event, - &db.globals, - )?; - } - }; - } - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_joined.insert(&userroom_id, &[])?; - self.roomuserid_joined.insert(&roomuser_id, &[])?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Invite => { - // We want to know if the sender is ignored by the receiver - let is_ignored = db - .account_data - .get::( - None, // Ignored users are in global account data - user_id, // Receiver - GlobalAccountDataEventType::IgnoredUserList - .to_string() - .into(), - )? - .map_or(false, |ignored| { - ignored - .content - .ignored_users - .iter() - .any(|user| user == sender) - }); - - if is_ignored { - return Ok(()); - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_invitestate.insert( - &userroom_id, - &serde_json::to_vec(&last_state.unwrap_or_default()) - .expect("state to bytes always works"), - )?; - self.roomuserid_invitecount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Leave | MembershipState::Ban => { - if update_joined_count - && self - .room_members(room_id) - .chain(self.room_members_invited(room_id)) - .filter_map(|r| r.ok()) - .all(|u| u.server_name() != user_id.server_name()) - { - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - self.userroomid_leftstate.insert( - &userroom_id, - &serde_json::to_vec(&Vec::>::new()).unwrap(), - )?; // TODO - self.roomuserid_leftcount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - } - _ => {} - } - - if update_joined_count { - self.update_joined_count(room_id, db)?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn update_joined_count(&self, room_id: &RoomId, db: &Database) -> Result<()> { - let mut joinedcount = 0_u64; - let mut invitedcount = 0_u64; - let mut joined_servers = HashSet::new(); - let mut real_users = HashSet::new(); - - for joined in self.room_members(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(joined.server_name().to_owned()); - if joined.server_name() == db.globals.server_name() - && !db.users.is_deactivated(&joined).unwrap_or(true) - { - real_users.insert(joined); - } - joinedcount += 1; - } - - for invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(invited.server_name().to_owned()); - invitedcount += 1; - } - - self.roomid_joinedcount - 
.insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?; - - self.roomid_invitedcount - .insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?; - - self.our_real_users_cache - .write() - .unwrap() - .insert(room_id.to_owned(), Arc::new(real_users)); - - for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { - if !joined_servers.remove(&old_joined_server) { - // Server not in room anymore - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(old_joined_server.as_bytes()); - - let mut serverroom_id = old_joined_server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - } - - // Now only new servers are in joined_servers anymore - for server in joined_servers { - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(server.as_bytes()); - - let mut serverroom_id = server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - - self.appservice_in_room_cache - .write() - .unwrap() - .remove(room_id); - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn get_our_real_users( - &self, - room_id: &RoomId, - db: &Database, - ) -> Result>>> { - let maybe = self - .our_real_users_cache - .read() - .unwrap() - .get(room_id) - .cloned(); - if let Some(users) = maybe { - Ok(users) - } else { - self.update_joined_count(room_id, db)?; - Ok(Arc::clone( - self.our_real_users_cache - .read() - .unwrap() - .get(room_id) - .unwrap(), - )) - } - } - - #[tracing::instrument(skip(self, room_id, appservice, db))] - pub fn appservice_in_room( - &self, - room_id: &RoomId, - appservice: &(String, serde_yaml::Value), - db: &Database, - ) -> Result { - let maybe = self - .appservice_in_room_cache - .read() - .unwrap() - .get(room_id) - .and_then(|map| map.get(&appservice.0)) - .copied(); - - if let Some(b) = maybe { - Ok(b) - } else if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - - let bridge_user_id = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }); - - let in_room = bridge_user_id - .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) - || self.room_members(room_id).any(|userid| { - userid.map_or(false, |userid| { - users.iter().any(|r| r.is_match(userid.as_str())) - }) - }); - - self.appservice_in_room_cache - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default() - .insert(appservice.0.clone(), in_room); - - Ok(in_room) - } else { - Ok(false) - } - } - - // Make a user leave all their joined rooms - #[tracing::instrument(skip(self, db))] - pub async fn leave_all_rooms(&self, user_id: &UserId, db: &Database) -> Result<()> { - let all_rooms = db - .rooms - .rooms_joined(user_id) - .chain(db.rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) - .collect::>(); - - for room_id in all_rooms { - let room_id = match room_id { - Ok(room_id) => room_id, - 
Err(_) => continue, - }; - - let _ = self.leave_room(user_id, &room_id, db).await; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - pub async fn leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - // Ask a remote server if we don't have this room - if !self.exists(room_id)? && room_id.server_name() != db.globals.server_name() { - if let Err(e) = self.remote_leave_room(user_id, room_id, db).await { - warn!("Failed to leave room {} remotely: {}", user_id, e); - // Don't tell the client about this error - } - - let last_state = self - .invite_state(user_id, room_id)? - .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?; - - // We always drop the invite, we can't rely on other servers - self.update_membership( - room_id, - user_id, - MembershipState::Leave, - user_id, - last_state, - db, - true, - )?; - } else { - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - let mut event: RoomMemberEventContent = serde_json::from_str( - self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot leave a room you are not a member of.", - ))? - .content - .get(), - ) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = MembershipState::Leave; - - self.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - user_id, - room_id, - db, - &state_lock, - )?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - async fn remote_leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - let mut make_leave_response_and_server = Err(Error::BadServerResponse( - "No server available to assist in leaving.", - )); - - let invite_state = db - .rooms - .invite_state(user_id, room_id)? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "User is not invited.", - ))?; - - let servers: HashSet<_> = invite_state - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); - - for remote_server in servers { - let make_leave_response = db - .sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, - ) - .await; - - make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); - - if make_leave_response_and_server.is_ok() { - break; - } - } - - let (make_leave_response, remote_server) = make_leave_response_and_server?; - - let room_version_id = match make_leave_response.room_version { - Some(version) if self.is_supported_version(&db, &version) => version, - _ => return Err(Error::BadServerResponse("Room version is not supported")), - }; - - let mut leave_event_stub = - serde_json::from_str::(make_leave_response.event.get()).map_err( - |_| Error::BadServerResponse("Invalid make_leave event json received from server."), - )?; - - // TODO: Is origin needed? 
- leave_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - leave_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms - leave_event_stub.remove("event_id"); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut leave_event_stub, - &room_version_id, - ) - .expect("event is valid, we just created it"); - - // Generate event id - let event_id = EventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - // Add event_id back - leave_event_stub.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - - // It has enough fields to be called a proper event now - let leave_event = leave_event_stub; - - db.sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::create_leave_event::v2::Request { - room_id, - event_id: &event_id, - pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), - }, - ) - .await?; - - Ok(()) - } - - /// Makes a user forget a room. - #[tracing::instrument(skip(self))] - pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - - Ok(()) - } - - #[tracing::instrument(skip(self, globals))] - pub fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId>, - globals: &super::globals::Globals, - ) -> Result<()> { - if let Some(room_id) = room_id { - // New alias - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - } else { - // room_id=None means remove alias - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result>> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_aliases<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? - .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { - if public { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } else { - self.publicroomids.remove(room_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn iter_ids(&self) -> impl Iterator>> + '_ { - self.roomid_shortroomid.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn search_pdus<'a>( - &'a self, - room_id: &RoomId, - search_string: &str, - ) -> Result> + 'a, Vec)>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - let prefix_clone = prefix.clone(); - - let words: Vec<_> = search_string - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .map(str::to_lowercase) - .collect(); - - let iterators = words.clone().into_iter().map(move |word| { - let mut prefix2 = prefix.clone(); - prefix2.extend_from_slice(word.as_bytes()); - prefix2.push(0xff); - - let mut last_possible_id = prefix2.clone(); - last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.tokenids - .iter_from(&last_possible_id, true) // Newest pdus first - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) - }); - - Ok(utils::common_elements(iterators, |a, b| { - // We compare b with a because we reversed the iterator earlier - b.cmp(a) - }) - .map(|iter| { - ( - iter.map(move |id| { - let mut pduid = prefix_clone.clone(); - pduid.extend_from_slice(&id); - pduid - }), - words, - ) - })) - } - - #[tracing::instrument(skip(self))] - pub fn get_shared_rooms<'a>( - &'a self, - users: Vec>, - ) -> Result>> + 'a> { - let iterators = users.into_iter().map(move |user_id| { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_joined - .scan_prefix(prefix) - .map(|(key, _)| { - let roomid_index = key - .iter() - .enumerate() - .find(|(_, &b)| b == 0xff) - .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? 
- .0 - + 1; // +1 because the room id starts AFTER the separator - - let room_id = key[roomid_index..].to_vec(); - - Ok::<_, Error>(room_id) - }) - .filter_map(|r| r.ok()) - }); - - // We use the default compare function because keys are sorted correctly (not reversed) - Ok(utils::common_elements(iterators, Ord::cmp) - .expect("users is not empty") - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { - Error::bad_database("Invalid RoomId bytes in userroomid_joined") - })?) - .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - })) - } - - /// Returns an iterator of all servers participating in this room. - #[tracing::instrument(skip(self))] - pub fn room_servers<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomserverids.scan_prefix(prefix).map(|(key, _)| { - ServerName::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Server name in roomserverids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.serverroomids.get(&key).map(|o| o.is_some()) - } - - /// Returns an iterator of all rooms a server participates in (as far as we know). - #[tracing::instrument(skip(self))] - pub fn server_rooms<'a>( - &'a self, - server: &ServerName, - ) -> impl Iterator>> + 'a { - let mut prefix = server.as_bytes().to_vec(); - prefix.push(0xff); - - self.serverroomids.scan_prefix(prefix).map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) - }) - } - - /// Returns an iterator over all joined members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { - self.roomid_joinedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { - self.roomid_invitedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - /// Returns an iterator over all User IDs who ever joined a room. 
- #[tracing::instrument(skip(self))] - pub fn room_useroncejoined<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuseroncejoinedids - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in room_useroncejoined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) - }) - } - - /// Returns an iterator over all invited members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members_invited<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_invitecount - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_invitecount - .get(&key)? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid invitecount in db.") - })?)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_leftcount - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid leftcount in db.")) - }) - .transpose() - } - - pub fn is_disabled(&self, room_id: &RoomId) -> Result { - Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) - } - - /// Returns an iterator over all rooms this user joined. - #[tracing::instrument(skip(self))] - pub fn rooms_joined<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator>> + 'a { - self.userroomid_joined - .scan_prefix(user_id.as_bytes().to_vec()) - .map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) - }) - } - - /// Returns an iterator over all rooms a user was invited to. 
- #[tracing::instrument(skip(self))] - pub fn rooms_invited<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_invitestate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn invite_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_invitestate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok(state) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn left_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_leftstate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok(state) - }) - .transpose() - } - - /// Returns an iterator over all rooms a user left. - #[tracing::instrument(skip(self))] - pub fn rooms_left<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_leftstate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_left(&self, user_id: &UserId, 
room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( - &'a self, - key: &[u64], - ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); - } - - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? - .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - }) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } - } - - Ok(None) - } - - #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db - if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - )?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_mark_sent( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - lazy_load: HashSet>, - count: u64, - ) { - self.lazy_load_waiting.lock().unwrap().insert( - ( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - count, - ), - lazy_load, - ); - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } - - /// Returns the room's version. 
- #[tracing::instrument(skip(self))] - pub fn get_room_version(&self, room_id: &RoomId) -> Result { - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - let room_version = create_event_content - .map(|create_event| create_event.room_version) - .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; - Ok(room_version) - } } diff --git a/src/service/rooms/outlier.rs b/src/service/rooms/outlier.rs index 4ad815e3..afb0a147 100644 --- a/src/service/rooms/outlier.rs +++ b/src/service/rooms/outlier.rs @@ -1,1228 +1,3 @@ -mod edus; - -pub use edus::RoomEdus; - -use crate::{ - pdu::{EventHash, PduBuilder}, - utils, Database, Error, PduEvent, Result, -}; -use lru_cache::LruCache; -use regex::Regex; -use ring::digest; -use ruma::{ - api::{client::error::ErrorKind, federation}, - events::{ - direct::DirectEvent, - ignored_user_list::IgnoredUserListEvent, - push_rules::PushRulesEvent, - room::{ - create::RoomCreateEventContent, - member::{MembershipState, RoomMemberEventContent}, - power_levels::RoomPowerLevelsEventContent, - }, - tag::TagEvent, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, RoomEventType, StateEventType, - }, - push::{Action, Ruleset, Tweak}, - serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res::{self, RoomVersion, StateMap}, - uint, DeviceId, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, -}; -use serde::Deserialize; -use serde_json::value::to_raw_value; -use std::{ - borrow::Cow, - collections::{hash_map, BTreeMap, HashMap, HashSet}, - fmt::Debug, - iter, - mem::size_of, - sync::{Arc, Mutex, RwLock}, -}; -use tokio::sync::MutexGuard; -use tracing::{error, warn}; - -use super::{abstraction::Tree, pusher}; - -/// The unique identifier of each state group. -/// -/// This is created when a state group is added to the database by -/// hashing the entire state. -pub type StateHashId = Vec; -pub type CompressedStateEvent = [u8; 2 * size_of::()]; - -pub struct Rooms { - pub edus: RoomEdus, - pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count - pub(super) eventid_pduid: Arc, - pub(super) roomid_pduleaves: Arc, - pub(super) alias_roomid: Arc, - pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count - pub(super) publicroomids: Arc, - - pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount - - /// Participating servers in a room. 
- pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName - pub(super) serverroomids: Arc, // ServerRoomId = ServerName + RoomId - - pub(super) userroomid_joined: Arc, - pub(super) roomuserid_joined: Arc, - pub(super) roomid_joinedcount: Arc, - pub(super) roomid_invitedcount: Arc, - pub(super) roomuseroncejoinedids: Arc, - pub(super) userroomid_invitestate: Arc, // InviteState = Vec> - pub(super) roomuserid_invitecount: Arc, // InviteCount = Count - pub(super) userroomid_leftstate: Arc, - pub(super) roomuserid_leftcount: Arc, - - pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled - - pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId - - pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 - pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 - - /// Remember the current state hash of a room. - pub(super) roomid_shortstatehash: Arc, - pub(super) roomsynctoken_shortstatehash: Arc, - /// Remember the state hash at events in the past. - pub(super) shorteventid_shortstatehash: Arc, - /// StateKey = EventType + StateKey, ShortStateKey = Count - pub(super) statekey_shortstatekey: Arc, - pub(super) shortstatekey_statekey: Arc, - - pub(super) roomid_shortroomid: Arc, - - pub(super) shorteventid_eventid: Arc, - pub(super) eventid_shorteventid: Arc, - - pub(super) statehash_shortstatehash: Arc, - pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) - - pub(super) shorteventid_authchain: Arc, - - /// RoomId + EventId -> outlier PDU. - /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) eventid_outlierpdu: Arc, - pub(super) softfailedeventids: Arc, - - /// RoomId + EventId -> Parent PDU EventId. - pub(super) referencedevents: Arc, - - pub(super) pdu_cache: Mutex, Arc>>, - pub(super) shorteventid_cache: Mutex>>, - pub(super) auth_chain_cache: Mutex, Arc>>>, - pub(super) eventidshort_cache: Mutex, u64>>, - pub(super) statekeyshort_cache: Mutex>, - pub(super) shortstatekey_cache: Mutex>, - pub(super) our_real_users_cache: RwLock, Arc>>>>, - pub(super) appservice_in_room_cache: RwLock, HashMap>>, - pub(super) lazy_load_waiting: - Mutex, Box, Box, u64), HashSet>>>, - pub(super) stateinfo_cache: Mutex< - LruCache< - u64, - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - >, - >, - pub(super) lasttimelinecount_cache: Mutex, u64>>, -} - -impl Rooms { - /// Returns true if a given room version is supported - #[tracing::instrument(skip(self, db))] - pub fn is_supported_version(&self, db: &Database, room_version: &RoomVersionId) -> bool { - db.globals.supported_room_versions().contains(room_version) - } - - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? 
- .pop() - .expect("there is always one layer") - .1; - let mut result = BTreeMap::new(); - let mut i = 0; - for compressed in full_state.into_iter() { - let parsed = self.parse_compressed_state_event(compressed)?; - result.insert(parsed.0, parsed.1); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - Ok(result) - } - - #[tracing::instrument(skip(self))] - pub async fn state_full( - &self, - shortstatehash: u64, - ) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - let mut result = HashMap::new(); - let mut i = 0; - for compressed in full_state { - let (_, eventid) = self.parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.get_pdu(&eventid)? { - result.insert( - ( - pdu.kind.to_string().into(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - ); - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get_id( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { - Some(s) => s, - None => return Ok(None), - }; - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .into_iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - self.parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) - } - - /// Returns the state hash for this pdu. - pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.eventid_shorteventid - .get(event_id.as_bytes())? - .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) - }) - .transpose() - }) - } - - /// Returns the last state hash key added to the db for the given room. - #[tracing::instrument(skip(self))] - pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - /// This fetches auth events from the current state. - #[tracing::instrument(skip(self))] - pub fn get_auth_events( - &self, - room_id: &RoomId, - kind: &RoomEventType, - sender: &UserId, - state_key: Option<&str>, - content: &serde_json::value::RawValue, - ) -> Result>> { - let shortstatehash = - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - current_shortstatehash - } else { - return Ok(HashMap::new()); - }; - - let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) - .expect("content is a valid JSON object"); - - let mut sauthevents = auth_events - .into_iter() - .filter_map(|(event_type, state_key)| { - self.get_shortstatekey(&event_type.to_string().into(), &state_key) - .ok() - .flatten() - .map(|s| (s, (event_type, state_key))) - }) - .collect::>(); - - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - Ok(full_state - .into_iter() - .filter_map(|compressed| self.parse_compressed_state_event(compressed).ok()) - .filter_map(|(shortstatekey, event_id)| { - sauthevents.remove(&shortstatekey).map(|k| (k, event_id)) - }) - .filter_map(|(k, event_id)| self.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu))) - .collect()) - } - - /// Generate a new StateHash. - /// - /// A unique hash made from hashing all PDU ids of the state joined with 0xff. - fn calculate_hash(&self, bytes_list: &[&[u8]]) -> StateHashId { - // We only hash the pdu's event ids, not the whole pdu - let bytes = bytes_list.join(&0xff); - let hash = digest::digest(&digest::SHA256, &bytes); - hash.as_ref().into() - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. - self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() - } - - /// Force the creation of a new StateHash and insert it into the db. - /// - /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. 
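/// Each entry in `new_state_ids_compressed` is assumed to be a compressed state
/// event: the 16-byte concatenation of two big-endian u64s,
/// `shortstatekey ++ shorteventid` (see `compress_state_event` further down).
/// A minimal sketch of that packing, with hypothetical names, purely for illustration:
///
///     fn pack(shortstatekey: u64, shorteventid: u64) -> [u8; 16] {
///         let mut out = [0u8; 16];
///         out[..8].copy_from_slice(&shortstatekey.to_be_bytes());
///         out[8..].copy_from_slice(&shorteventid.to_be_bytes());
///         out
///     }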
- #[tracing::instrument(skip(self, new_state_ids_compressed, db))] - pub fn force_state( - &self, - room_id: &RoomId, - new_state_ids_compressed: HashSet, - db: &Database, - ) -> Result<()> { - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - for event_id in statediffnew.into_iter().filter_map(|new| { - self.parse_compressed_state_event(new) - .ok() - .map(|(_, id)| id) - }) { - let pdu = match self.get_pdu_json(&event_id)? { - Some(pdu) => pdu, - None => continue, - }; - - if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { - continue; - } - - let pdu: PduEvent = match serde_json::from_str( - &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), - ) { - Ok(pdu) => pdu, - Err(_) => continue, - }; - - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let membership = match serde_json::from_str::(pdu.content.get()) { - Ok(e) => e.membership, - Err(_) => continue, - }; - - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, - }; - - let user_id = match UserId::parse(state_key) { - Ok(id) => id, - Err(_) => continue, - }; - - self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; - } - - self.update_joined_count(room_id, db)?; - - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. - #[tracing::instrument(skip(self))] - pub fn load_shortstatehash_info( - &self, - shortstatehash: u64, - ) -> Result< - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - > { - if let Some(r) = self - .stateinfo_cache - .lock() - .unwrap() - .get_mut(&shortstatehash) - { - return Ok(r.clone()); - } - - let value = self - .shortstatehash_statediff - .get(&shortstatehash.to_be_bytes())? 
- .ok_or_else(|| Error::bad_database("State hash does not exist"))?; - let parent = - utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - - let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); - - let mut i = size_of::(); - while let Some(v) = value.get(i..i + 2 * size_of::()) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i += size_of::(); - continue; - } - if add_mode { - added.insert(v.try_into().expect("we checked the size above")); - } else { - removed.insert(v.try_into().expect("we checked the size above")); - } - i += 2 * size_of::(); - } - - if parent != 0_u64 { - let mut response = self.load_shortstatehash_info(parent)?; - let mut state = response.last().unwrap().1.clone(); - state.extend(added.iter().copied()); - for r in &removed { - state.remove(r); - } - - response.push((shortstatehash, state, added, removed)); - - Ok(response) - } else { - let response = vec![(shortstatehash, added.clone(), added, removed)]; - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); - Ok(response) - } - } - - pub fn compress_state_event( - &self, - shortstatekey: u64, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - let mut v = shortstatekey.to_be_bytes().to_vec(); - v.extend_from_slice( - &self - .get_or_create_shorteventid(event_id, globals)? - .to_be_bytes(), - ); - Ok(v.try_into().expect("we checked the size above")) - } - - /// Returns shortstatekey, event id - pub fn parse_compressed_state_event( - &self, - compressed_event: CompressedStateEvent, - ) -> Result<(u64, Arc)> { - Ok(( - utils::u64_from_bytes(&compressed_event[0..size_of::()]) - .expect("bytes have right length"), - self.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]) - .expect("bytes have right length"), - )?, - )) - } - - /// Creates a new shortstatehash that often is just a diff to an already existing - /// shortstatehash and therefore very efficient. - /// - /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer - /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 - /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's - /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. - /// - /// * `shortstatehash` - Shortstatehash of this state - /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid - /// * `statediffremoved` - Removed from base. 
Each vec is shortstatekey+shorteventid - /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer - /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer - #[tracing::instrument(skip( - self, - statediffnew, - statediffremoved, - diff_to_sibling, - parent_states - ))] - pub fn save_state_from_diff( - &self, - shortstatehash: u64, - statediffnew: HashSet, - statediffremoved: HashSet, - diff_to_sibling: usize, - mut parent_states: Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - ) -> Result<()> { - let diffsum = statediffnew.len() + statediffremoved.len(); - - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().unwrap(); - - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - - return Ok(()); - } - - if parent_states.is_empty() { - // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - - return Ok(()); - }; - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. We replace a layer above - - let parent = parent_states.pop().unwrap(); - let parent_diff = parent.2.len() + parent.3.len(); - - if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { - // Diff too big, we replace above layer(s) - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. 
We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.to_be_bytes().to_vec(); - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed[..]); - } - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - } - - Ok(()) - } - - /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash( - &self, - state_hash: &StateHashId, - globals: &super::globals::Globals, - ) -> Result<(u64, bool)> { - Ok(match self.statehash_shortstatehash.get(state_hash)? { - Some(shortstatehash) => ( - utils::u64_from_bytes(&shortstatehash) - .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, - true, - ), - None => { - let shortstatehash = globals.next_count()?; - self.statehash_shortstatehash - .insert(state_hash, &shortstatehash.to_be_bytes())?; - (shortstatehash, false) - } - }) - } - - pub fn get_or_create_shorteventid( - &self, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { - return Ok(*short); - } - - let short = match self.eventid_shorteventid.get(event_id.as_bytes())? { - Some(shorteventid) => utils::u64_from_bytes(&shorteventid) - .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, - None => { - let shorteventid = globals.next_count()?; - self.eventid_shorteventid - .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; - shorteventid - } - }; - - self.eventidshort_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), short); - - Ok(short) - } - - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { - self.roomid_shortroomid - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) - }) - .transpose() - } - - pub fn get_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - ) -> Result> { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(Some(*short)); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = self - .statekey_shortstatekey - .get(&statekey)? - .map(|shortstatekey| { - utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) - }) - .transpose()?; - - if let Some(s) = short { - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), s); - } - - Ok(short) - } - - pub fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - globals: &super::globals::Globals, - ) -> Result { - Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? 
{ - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, - None => { - let short = globals.next_count()?; - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes())?; - short - } - }) - } - - pub fn get_or_create_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(*short); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = match self.statekey_shortstatekey.get(&statekey)? { - Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, - None => { - let shortstatekey = globals.next_count()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes())?; - self.shortstatekey_statekey - .insert(&shortstatekey.to_be_bytes(), &statekey)?; - shortstatekey - } - }; - - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), short); - - Ok(short) - } - - pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - if let Some(id) = self - .shorteventid_cache - .lock() - .unwrap() - .get_mut(&shorteventid) - { - return Ok(Arc::clone(id)); - } - - let bytes = self - .shorteventid_eventid - .get(&shorteventid.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - - let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; - - self.shorteventid_cache - .lock() - .unwrap() - .insert(shorteventid, Arc::clone(&event_id)); - - Ok(event_id) - } - - pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { - if let Some(id) = self - .shortstatekey_cache - .lock() - .unwrap() - .get_mut(&shortstatekey) - { - return Ok(id.clone()); - } - - let bytes = self - .shortstatekey_statekey - .get(&shortstatekey.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; - - let mut parts = bytes.splitn(2, |&b| b == 0xff); - let eventtype_bytes = parts.next().expect("split always returns one entry"); - let statekey_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; - - let event_type = - StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { - Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; - - let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { - Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") - })?; - - let result = (event_type, state_key); - - self.shortstatekey_cache - .lock() - .unwrap() - .insert(shortstatekey, result.clone()); - - Ok(result) - } - - /// Returns the full room state. - #[tracing::instrument(skip(self))] - pub async fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns the `count` of this pdu's id. - pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) - } - - /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - - /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu's id. - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? 
- .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { - return Ok(Some(Arc::clone(p))); - } - - if let Some(pdu) = self - .eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - .map(Arc::new) - }) - .transpose()? - { - self.pdu_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), Arc::clone(&pdu)); - Ok(Some(pdu)) - } else { - Ok(None) - } - } - - /// Returns the pdu. - /// - /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Returns the pdu as a `BTreeMap`. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Removes a pdu and creates a new one with the same id. - #[tracing::instrument(skip(self))] - fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - if self.pduid_pdu.get(pdu_id)?.is_some() { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), - )?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "PDU does not exist.", - )) - } - } - - /// Returns the leaf pdus of a room. - #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - #[tracing::instrument(skip(self, room_id, event_ids))] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - /// Replace the leaves of a room. - /// - /// The provided `event_ids` become the new leaves, this allows a room to have multiple - /// `prev_events`. 
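/// The leaf entries written below are keyed as `room_id ++ 0xff ++ event_id`,
/// with the raw event id bytes as the value (matching `get_pdu_leaves` above).
/// A small illustrative helper, not part of the original code:
///
///     fn pduleaves_key(room_id: &RoomId, event_id: &EventId) -> Vec<u8> {
///         let mut key = room_id.as_bytes().to_vec();
///         key.push(0xff); // 0xff separates the room id from the event id
///         key.extend_from_slice(event_id.as_bytes());
///         key
///     }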
- #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves<'a>( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - /// Returns the pdu from the outlier tree. pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { self.eventid_outlierpdu @@ -1243,2261 +18,3 @@ impl Rooms { ) } - #[tracing::instrument(skip(self))] - pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } - - /// Creates a new persisted data unit and adds it to a room. - /// - /// By this point the incoming event should be fully authenticated, no auth happens - /// in `append_pdu`. - /// - /// Returns pdu id - #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] - pub fn append_pdu<'a>( - &self, - pdu: &PduEvent, - mut pdu_json: CanonicalJsonObject, - leaves: impl IntoIterator + Debug, - db: &Database, - ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); - - // Make unsigned fields correct. This is not properly documented in the spec, but state - // events need to have previous content in the unsigned field, so clients can easily - // interpret things like membership changes - if let Some(state_key) = &pdu.state_key { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) - { - if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) - .unwrap() - { - unsigned.insert( - "prev_content".to_owned(), - CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()) - .expect("event is valid, we just created it"), - ), - ); - } - } - } else { - error!("Invalid unsigned type in pdu."); - } - } - - // We must keep track of all events that have been referenced. 
- self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - self.replace_pdu_leaves(&pdu.room_id, leaves)?; - - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(pdu.room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - - let count1 = db.globals.next_count()?; - // Mark as read first so the sending client doesn't get a notification even if appending - // fails - self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; - self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; - - let count2 = db.globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&count2.to_be_bytes()); - - // There's a brief moment of time here where the count is updated but the pdu does not - // exist. This could theoretically lead to dropped pdus, but it's extremely rare - // - // Update: We fixed this using insert_lock - - self.pduid_pdu.insert( - &pdu_id, - &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - self.lasttimelinecount_cache - .lock() - .unwrap() - .insert(pdu.room_id.clone(), count2); - - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; - - drop(insert_lock); - - // See if the event matches any known pushers - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - let sync_pdu = pdu.to_sync_room_event(); - - let mut notifies = Vec::new(); - let mut highlights = Vec::new(); - - for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { - // Don't notify the user of their own events - if user == &pdu.sender { - continue; - } - - let rules_for_user = db - .account_data - .get( - None, - user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? - .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); - - let mut highlight = false; - let mut notify = false; - - for action in pusher::get_actions( - user, - &rules_for_user, - &power_levels, - &sync_pdu, - &pdu.room_id, - db, - )? 
{ - match action { - Action::DontNotify => notify = false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => notify = true, - Action::SetTweak(Tweak::Highlight(true)) => { - highlight = true; - } - _ => {} - }; - } - - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(pdu.room_id.as_bytes()); - - if notify { - notifies.push(userroom_id.clone()); - } - - if highlight { - highlights.push(userroom_id); - } - - for senderkey in db.pusher.get_pusher_senderkeys(user) { - db.sending.send_push_pdu(&*pdu_id, senderkey)?; - } - } - - self.userroomid_notificationcount - .increment_batch(&mut notifies.into_iter())?; - self.userroomid_highlightcount - .increment_batch(&mut highlights.into_iter())?; - - match pdu.kind { - RoomEventType::RoomRedaction => { - if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(redact_id, pdu)?; - } - } - RoomEventType::RoomMember => { - if let Some(state_key) = &pdu.state_key { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - // if the state_key fails - let target_user_id = UserId::parse(state_key.clone()) - .expect("This state_key was previously validated"); - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - let invite_state = match content.membership { - MembershipState::Invite => { - let state = self.calculate_invite_state(pdu)?; - Some(state) - } - _ => None, - }; - - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( - &pdu.room_id, - &target_user_id, - content.membership, - &pdu.sender, - invite_state, - db, - true, - )?; - } - } - RoomEventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody<'a> { - #[serde(borrow)] - body: Option>, - } - - let content = serde_json::from_str::>(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - let mut batch = body - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= 50) - .map(str::to_lowercase) - .map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xff); - key.extend_from_slice(&pdu_id); - (key, Vec::new()) - }); - - self.tokenids.insert_batch(&mut batch)?; - - let admin_room = self.id_from_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )?; - let server_user = format!("@conduit:{}", db.globals.server_name()); - - let to_conduit = body.starts_with(&format!("{}: ", server_user)); - - // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let from_conduit = - pdu.sender == server_user && db.globals.emergency_password().is_none(); - - if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - db.admin.process_message(body.to_string()); - } - } - } - _ => {} - } - - Ok(pdu_id) - } - - #[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - match self - .lasttimelinecount_cache - .lock() - .unwrap() - .entry(room_id.to_owned()) - { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - 
.pdus_until(&sender_user, &room_id, u64::MAX)? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .map(|(pduid, _)| self.pdu_count(&pduid)) - .next() - { - Ok(*v.insert(last_count?)) - } else { - Ok(0) - } - } - hash_map::Entry::Occupied(o) => Ok(*o.get()), - } - } - - #[tracing::instrument(skip(self))] - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - self.userroomid_highlightcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid notification count in db.")) - }) - .unwrap_or(Ok(0)) - } - - #[tracing::instrument(skip(self))] - pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_highlightcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid highlight count in db.")) - }) - .unwrap_or(Ok(0)) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, state_ids_compressed, globals))] - pub fn set_event_state( - &self, - event_id: &EventId, - room_id: &RoomId, - state_ids_compressed: HashSet, - globals: &super::globals::Globals, - ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &state_ids_compressed - .iter() - .map(|s| &s[..]) - .collect::>(), - ); - - let (shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, globals)?; - - if !already_existed { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (state_ids_compressed, HashSet::new()) - }; - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 1_000_000, // high number because no state will be based on this one - states_parents, - )?; - } - - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. 
- /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, new_pdu, globals))] - pub fn append_to_state( - &self, - new_pdu: &PduEvent, - globals: &super::globals::Globals, - ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; - - if let Some(p) = previous_shortstatehash { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; - } - - if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let shortstatekey = self.get_or_create_shortstatekey( - &new_pdu.kind.to_string().into(), - state_key, - globals, - )?; - - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; - - let replaces = states_parents - .last() - .map(|info| { - info.1 - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); - - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } - - // TODO: statehash with deterministic inputs - let shortstatehash = globals.next_count()?; - - let mut statediffnew = HashSet::new(); - statediffnew.insert(new); - - let mut statediffremoved = HashSet::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) - } - } - - #[tracing::instrument(skip(self, invite_event))] - pub fn calculate_invite_state( - &self, - invite_event: &PduEvent, - ) -> Result>> { - let mut state = Vec::new(); - // Add recommended events - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomCanonicalAlias, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomMember, - invite_event.sender.as_str(), - )? 
{ - state.push(e.to_stripped_state_event()); - } - - state.push(invite_event.to_stripped_state_event()); - Ok(state) - } - - #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - pub fn associate_token_shortstatehash( - &self, - room_id: &RoomId, - token: u64, - shortstatehash: u64, - ) -> Result<()> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .insert(&key, &shortstatehash.to_be_bytes()) - } - - pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") - }) - }) - .transpose() - } - - /// Creates a new persisted data unit and adds it to a room. - #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex - ) -> Result> { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - } = pdu_builder; - - let prev_events = self - .get_pdu_leaves(room_id)? - .into_iter() - .take(20) - .collect::>(); - - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - - let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = unsigned.unwrap_or_default(); - if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - self.room_state_get(room_id, &event_type.to_string().into(), state_key)? 
- { - unsigned.insert( - "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), - ); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), - ); - } - } - - let mut pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key, - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_ref().to_owned()), - ); - - match ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) { - Ok(_) => {} - Err(e) => { - return match e { - ruma::signatures::Error::PduSize => Err(Error::BadRequest( - ErrorKind::TooLarge, - "Message is too long", - )), - _ => Err(Error::BadRequest( - ErrorKind::Unknown, - "Signing event failed", - )), - } - } - } - - // Generate event id - pdu.event_id = EventId::parse_arc(format!( - "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - pdu_json.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), - ); - - // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; - - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. 
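// A sketch of the resulting order of operations below (descriptive only):
//   1. append_to_state(&pdu)        -> returns the new shortstatehash
//   2. append_pdu(&pdu, ...)        -> the event becomes readable from the timeline
//   3. set_room_state(room_id, ...) -> the current-state pointer is moved last,
//      so the visible room state never references an event that is not stored yet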
- let statehashid = self.append_to_state(&pdu, &db.globals)?; - - let pdu_id = self.append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - iter::once(&*pdu.event_id), - db, - )?; - - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; - - let mut servers: HashSet> = - self.room_servers(room_id).filter_map(|r| r.ok()).collect(); - - // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - servers.insert(Box::from(state_key_uid.server_name())); - } - } - - // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(db.globals.server_name()); - - db.sending.send_pdu(servers.into_iter(), &pdu_id)?; - - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - if let Some(appservice_uid) = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }) - { - if state_key_uid == &appservice_uid { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - } - } - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - Ok(pdu.event_id) - } - - /// Returns an iterator over all PDUs in a room. 
- #[tracing::instrument(skip(self))] - pub fn all_pdus<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result, PduEvent)>> + 'a> { - self.pdus_since(user_id, room_id, 0) - } - - /// Returns an iterator over all events in a room that happened after the event with id `since` - /// in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_since<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - since: u64, - ) -> Result, PduEvent)>> + 'a> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Skip the first pdu if it's exactly at since, because we sent that last time - let mut first_pdu_id = prefix.clone(); - first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(&first_pdu_id, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their tokens in a room that happened before the - /// event with id `until` in reverse-chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_until<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - until: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their token in a room that happened after the event - /// with id `from` in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_after<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - from: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Replace a PDU with the redacted form. - #[tracing::instrument(skip(self, reason))] - pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { - if let Some(pdu_id) = self.get_pdu_id(event_id)? { - let mut pdu = self - .get_pdu_from_id(&pdu_id)? 
- .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact(reason)?; - self.replace_pdu(&pdu_id, &pdu)?; - } - // If event does not exist, just noop - Ok(()) - } - - /// Update current membership data. - #[tracing::instrument(skip(self, last_state, db))] - pub fn update_membership( - &self, - room_id: &RoomId, - user_id: &UserId, - membership: MembershipState, - sender: &UserId, - last_state: Option>>, - db: &Database, - update_joined_count: bool, - ) -> Result<()> { - // Keep track what remote users exist by adding them as "deactivated" users - if user_id.server_name() != db.globals.server_name() { - db.users.create(user_id, None)?; - // TODO: displayname, avatar url - } - - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(user_id.server_name().as_bytes()); - - let mut serverroom_id = user_id.server_name().as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - match &membership { - MembershipState::Join => { - // Check if the user never joined this room - if !self.once_joined(user_id, room_id)? { - // Add the user ID to the join list then - self.roomuseroncejoinedids.insert(&userroom_id, &[])?; - - // Check if the room has a predecessor - if let Some(predecessor) = self - .room_state_get(room_id, &StateEventType::RoomCreate, "")? - .and_then(|create| serde_json::from_str(create.content.get()).ok()) - .and_then(|content: RoomCreateEventContent| content.predecessor) - { - // Copy user settings from predecessor to the current room: - // - Push rules - // - // TODO: finish this once push rules are implemented. - // - // let mut push_rules_event_content: PushRulesEvent = account_data - // .get( - // None, - // user_id, - // EventType::PushRules, - // )?; - // - // NOTE: find where `predecessor.room_id` match - // and update to `room_id`. - // - // account_data - // .update( - // None, - // user_id, - // EventType::PushRules, - // &push_rules_event_content, - // globals, - // ) - // .ok(); - - // Copy old tags to new room - if let Some(tag_event) = db.account_data.get::( - Some(&predecessor.room_id), - user_id, - RoomAccountDataEventType::Tag, - )? { - db.account_data - .update( - Some(room_id), - user_id, - RoomAccountDataEventType::Tag, - &tag_event, - &db.globals, - ) - .ok(); - }; - - // Copy direct chat flag - if let Some(mut direct_event) = db.account_data.get::( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - )? 
{ - let mut room_ids_updated = false; - - for room_ids in direct_event.content.0.values_mut() { - if room_ids.iter().any(|r| r == &predecessor.room_id) { - room_ids.push(room_id.to_owned()); - room_ids_updated = true; - } - } - - if room_ids_updated { - db.account_data.update( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - &direct_event, - &db.globals, - )?; - } - }; - } - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_joined.insert(&userroom_id, &[])?; - self.roomuserid_joined.insert(&roomuser_id, &[])?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Invite => { - // We want to know if the sender is ignored by the receiver - let is_ignored = db - .account_data - .get::( - None, // Ignored users are in global account data - user_id, // Receiver - GlobalAccountDataEventType::IgnoredUserList - .to_string() - .into(), - )? - .map_or(false, |ignored| { - ignored - .content - .ignored_users - .iter() - .any(|user| user == sender) - }); - - if is_ignored { - return Ok(()); - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_invitestate.insert( - &userroom_id, - &serde_json::to_vec(&last_state.unwrap_or_default()) - .expect("state to bytes always works"), - )?; - self.roomuserid_invitecount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Leave | MembershipState::Ban => { - if update_joined_count - && self - .room_members(room_id) - .chain(self.room_members_invited(room_id)) - .filter_map(|r| r.ok()) - .all(|u| u.server_name() != user_id.server_name()) - { - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - self.userroomid_leftstate.insert( - &userroom_id, - &serde_json::to_vec(&Vec::>::new()).unwrap(), - )?; // TODO - self.roomuserid_leftcount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - } - _ => {} - } - - if update_joined_count { - self.update_joined_count(room_id, db)?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn update_joined_count(&self, room_id: &RoomId, db: &Database) -> Result<()> { - let mut joinedcount = 0_u64; - let mut invitedcount = 0_u64; - let mut joined_servers = HashSet::new(); - let mut real_users = HashSet::new(); - - for joined in self.room_members(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(joined.server_name().to_owned()); - if joined.server_name() == db.globals.server_name() - && !db.users.is_deactivated(&joined).unwrap_or(true) - { - real_users.insert(joined); - } - joinedcount += 1; - } - - for invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(invited.server_name().to_owned()); - invitedcount += 1; - } - - self.roomid_joinedcount - 
.insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?; - - self.roomid_invitedcount - .insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?; - - self.our_real_users_cache - .write() - .unwrap() - .insert(room_id.to_owned(), Arc::new(real_users)); - - for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { - if !joined_servers.remove(&old_joined_server) { - // Server not in room anymore - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(old_joined_server.as_bytes()); - - let mut serverroom_id = old_joined_server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - } - - // Now only new servers are in joined_servers anymore - for server in joined_servers { - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(server.as_bytes()); - - let mut serverroom_id = server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - - self.appservice_in_room_cache - .write() - .unwrap() - .remove(room_id); - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn get_our_real_users( - &self, - room_id: &RoomId, - db: &Database, - ) -> Result>>> { - let maybe = self - .our_real_users_cache - .read() - .unwrap() - .get(room_id) - .cloned(); - if let Some(users) = maybe { - Ok(users) - } else { - self.update_joined_count(room_id, db)?; - Ok(Arc::clone( - self.our_real_users_cache - .read() - .unwrap() - .get(room_id) - .unwrap(), - )) - } - } - - #[tracing::instrument(skip(self, room_id, appservice, db))] - pub fn appservice_in_room( - &self, - room_id: &RoomId, - appservice: &(String, serde_yaml::Value), - db: &Database, - ) -> Result { - let maybe = self - .appservice_in_room_cache - .read() - .unwrap() - .get(room_id) - .and_then(|map| map.get(&appservice.0)) - .copied(); - - if let Some(b) = maybe { - Ok(b) - } else if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - - let bridge_user_id = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }); - - let in_room = bridge_user_id - .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) - || self.room_members(room_id).any(|userid| { - userid.map_or(false, |userid| { - users.iter().any(|r| r.is_match(userid.as_str())) - }) - }); - - self.appservice_in_room_cache - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default() - .insert(appservice.0.clone(), in_room); - - Ok(in_room) - } else { - Ok(false) - } - } - - // Make a user leave all their joined rooms - #[tracing::instrument(skip(self, db))] - pub async fn leave_all_rooms(&self, user_id: &UserId, db: &Database) -> Result<()> { - let all_rooms = db - .rooms - .rooms_joined(user_id) - .chain(db.rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) - .collect::>(); - - for room_id in all_rooms { - let room_id = match room_id { - Ok(room_id) => room_id, - 
Err(_) => continue, - }; - - let _ = self.leave_room(user_id, &room_id, db).await; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - pub async fn leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - // Ask a remote server if we don't have this room - if !self.exists(room_id)? && room_id.server_name() != db.globals.server_name() { - if let Err(e) = self.remote_leave_room(user_id, room_id, db).await { - warn!("Failed to leave room {} remotely: {}", user_id, e); - // Don't tell the client about this error - } - - let last_state = self - .invite_state(user_id, room_id)? - .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?; - - // We always drop the invite, we can't rely on other servers - self.update_membership( - room_id, - user_id, - MembershipState::Leave, - user_id, - last_state, - db, - true, - )?; - } else { - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - let mut event: RoomMemberEventContent = serde_json::from_str( - self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot leave a room you are not a member of.", - ))? - .content - .get(), - ) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = MembershipState::Leave; - - self.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - user_id, - room_id, - db, - &state_lock, - )?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - async fn remote_leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - let mut make_leave_response_and_server = Err(Error::BadServerResponse( - "No server available to assist in leaving.", - )); - - let invite_state = db - .rooms - .invite_state(user_id, room_id)? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "User is not invited.", - ))?; - - let servers: HashSet<_> = invite_state - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); - - for remote_server in servers { - let make_leave_response = db - .sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, - ) - .await; - - make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); - - if make_leave_response_and_server.is_ok() { - break; - } - } - - let (make_leave_response, remote_server) = make_leave_response_and_server?; - - let room_version_id = match make_leave_response.room_version { - Some(version) if self.is_supported_version(&db, &version) => version, - _ => return Err(Error::BadServerResponse("Room version is not supported")), - }; - - let mut leave_event_stub = - serde_json::from_str::(make_leave_response.event.get()).map_err( - |_| Error::BadServerResponse("Invalid make_leave event json received from server."), - )?; - - // TODO: Is origin needed? 
- leave_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - leave_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms - leave_event_stub.remove("event_id"); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut leave_event_stub, - &room_version_id, - ) - .expect("event is valid, we just created it"); - - // Generate event id - let event_id = EventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - // Add event_id back - leave_event_stub.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - - // It has enough fields to be called a proper event now - let leave_event = leave_event_stub; - - db.sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::create_leave_event::v2::Request { - room_id, - event_id: &event_id, - pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), - }, - ) - .await?; - - Ok(()) - } - - /// Makes a user forget a room. - #[tracing::instrument(skip(self))] - pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - - Ok(()) - } - - #[tracing::instrument(skip(self, globals))] - pub fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId>, - globals: &super::globals::Globals, - ) -> Result<()> { - if let Some(room_id) = room_id { - // New alias - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - } else { - // room_id=None means remove alias - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result>> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_aliases<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? - .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { - if public { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } else { - self.publicroomids.remove(room_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn iter_ids(&self) -> impl Iterator>> + '_ { - self.roomid_shortroomid.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn search_pdus<'a>( - &'a self, - room_id: &RoomId, - search_string: &str, - ) -> Result> + 'a, Vec)>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - let prefix_clone = prefix.clone(); - - let words: Vec<_> = search_string - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .map(str::to_lowercase) - .collect(); - - let iterators = words.clone().into_iter().map(move |word| { - let mut prefix2 = prefix.clone(); - prefix2.extend_from_slice(word.as_bytes()); - prefix2.push(0xff); - - let mut last_possible_id = prefix2.clone(); - last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.tokenids - .iter_from(&last_possible_id, true) // Newest pdus first - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) - }); - - Ok(utils::common_elements(iterators, |a, b| { - // We compare b with a because we reversed the iterator earlier - b.cmp(a) - }) - .map(|iter| { - ( - iter.map(move |id| { - let mut pduid = prefix_clone.clone(); - pduid.extend_from_slice(&id); - pduid - }), - words, - ) - })) - } - - #[tracing::instrument(skip(self))] - pub fn get_shared_rooms<'a>( - &'a self, - users: Vec>, - ) -> Result>> + 'a> { - let iterators = users.into_iter().map(move |user_id| { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_joined - .scan_prefix(prefix) - .map(|(key, _)| { - let roomid_index = key - .iter() - .enumerate() - .find(|(_, &b)| b == 0xff) - .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? 
- .0 - + 1; // +1 because the room id starts AFTER the separator - - let room_id = key[roomid_index..].to_vec(); - - Ok::<_, Error>(room_id) - }) - .filter_map(|r| r.ok()) - }); - - // We use the default compare function because keys are sorted correctly (not reversed) - Ok(utils::common_elements(iterators, Ord::cmp) - .expect("users is not empty") - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { - Error::bad_database("Invalid RoomId bytes in userroomid_joined") - })?) - .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - })) - } - - /// Returns an iterator of all servers participating in this room. - #[tracing::instrument(skip(self))] - pub fn room_servers<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomserverids.scan_prefix(prefix).map(|(key, _)| { - ServerName::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Server name in roomserverids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.serverroomids.get(&key).map(|o| o.is_some()) - } - - /// Returns an iterator of all rooms a server participates in (as far as we know). - #[tracing::instrument(skip(self))] - pub fn server_rooms<'a>( - &'a self, - server: &ServerName, - ) -> impl Iterator>> + 'a { - let mut prefix = server.as_bytes().to_vec(); - prefix.push(0xff); - - self.serverroomids.scan_prefix(prefix).map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) - }) - } - - /// Returns an iterator over all joined members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { - self.roomid_joinedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { - self.roomid_invitedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - /// Returns an iterator over all User IDs who ever joined a room. 
- #[tracing::instrument(skip(self))] - pub fn room_useroncejoined<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuseroncejoinedids - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in room_useroncejoined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) - }) - } - - /// Returns an iterator over all invited members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members_invited<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_invitecount - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_invitecount - .get(&key)? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid invitecount in db.") - })?)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_leftcount - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid leftcount in db.")) - }) - .transpose() - } - - pub fn is_disabled(&self, room_id: &RoomId) -> Result { - Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) - } - - /// Returns an iterator over all rooms this user joined. - #[tracing::instrument(skip(self))] - pub fn rooms_joined<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator>> + 'a { - self.userroomid_joined - .scan_prefix(user_id.as_bytes().to_vec()) - .map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) - }) - } - - /// Returns an iterator over all rooms a user was invited to. 
- #[tracing::instrument(skip(self))] - pub fn rooms_invited<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_invitestate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn invite_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_invitestate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok(state) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn left_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_leftstate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok(state) - }) - .transpose() - } - - /// Returns an iterator over all rooms a user left. - #[tracing::instrument(skip(self))] - pub fn rooms_left<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_leftstate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_left(&self, user_id: &UserId, 
room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( - &'a self, - key: &[u64], - ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); - } - - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? - .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - }) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } - } - - Ok(None) - } - - #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db - if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - )?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_mark_sent( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - lazy_load: HashSet>, - count: u64, - ) { - self.lazy_load_waiting.lock().unwrap().insert( - ( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - count, - ), - lazy_load, - ); - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } - - /// Returns the room's version. 
- #[tracing::instrument(skip(self))] - pub fn get_room_version(&self, room_id: &RoomId) -> Result { - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - let room_version = create_event_content - .map(|create_event| create_event.room_version) - .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; - Ok(room_version) - } -} diff --git a/src/service/rooms/pdu_metadata.rs b/src/service/rooms/pdu_metadata.rs index 4ad815e3..f8ffcee1 100644 --- a/src/service/rooms/pdu_metadata.rs +++ b/src/service/rooms/pdu_metadata.rs @@ -1,1183 +1,3 @@ -mod edus; - -pub use edus::RoomEdus; - -use crate::{ - pdu::{EventHash, PduBuilder}, - utils, Database, Error, PduEvent, Result, -}; -use lru_cache::LruCache; -use regex::Regex; -use ring::digest; -use ruma::{ - api::{client::error::ErrorKind, federation}, - events::{ - direct::DirectEvent, - ignored_user_list::IgnoredUserListEvent, - push_rules::PushRulesEvent, - room::{ - create::RoomCreateEventContent, - member::{MembershipState, RoomMemberEventContent}, - power_levels::RoomPowerLevelsEventContent, - }, - tag::TagEvent, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, RoomEventType, StateEventType, - }, - push::{Action, Ruleset, Tweak}, - serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res::{self, RoomVersion, StateMap}, - uint, DeviceId, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, -}; -use serde::Deserialize; -use serde_json::value::to_raw_value; -use std::{ - borrow::Cow, - collections::{hash_map, BTreeMap, HashMap, HashSet}, - fmt::Debug, - iter, - mem::size_of, - sync::{Arc, Mutex, RwLock}, -}; -use tokio::sync::MutexGuard; -use tracing::{error, warn}; - -use super::{abstraction::Tree, pusher}; - -/// The unique identifier of each state group. -/// -/// This is created when a state group is added to the database by -/// hashing the entire state. -pub type StateHashId = Vec; -pub type CompressedStateEvent = [u8; 2 * size_of::()]; - -pub struct Rooms { - pub edus: RoomEdus, - pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count - pub(super) eventid_pduid: Arc, - pub(super) roomid_pduleaves: Arc, - pub(super) alias_roomid: Arc, - pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count - pub(super) publicroomids: Arc, - - pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount - - /// Participating servers in a room. 
- pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName - pub(super) serverroomids: Arc, // ServerRoomId = ServerName + RoomId - - pub(super) userroomid_joined: Arc, - pub(super) roomuserid_joined: Arc, - pub(super) roomid_joinedcount: Arc, - pub(super) roomid_invitedcount: Arc, - pub(super) roomuseroncejoinedids: Arc, - pub(super) userroomid_invitestate: Arc, // InviteState = Vec> - pub(super) roomuserid_invitecount: Arc, // InviteCount = Count - pub(super) userroomid_leftstate: Arc, - pub(super) roomuserid_leftcount: Arc, - - pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled - - pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId - - pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 - pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 - - /// Remember the current state hash of a room. - pub(super) roomid_shortstatehash: Arc, - pub(super) roomsynctoken_shortstatehash: Arc, - /// Remember the state hash at events in the past. - pub(super) shorteventid_shortstatehash: Arc, - /// StateKey = EventType + StateKey, ShortStateKey = Count - pub(super) statekey_shortstatekey: Arc, - pub(super) shortstatekey_statekey: Arc, - - pub(super) roomid_shortroomid: Arc, - - pub(super) shorteventid_eventid: Arc, - pub(super) eventid_shorteventid: Arc, - - pub(super) statehash_shortstatehash: Arc, - pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) - - pub(super) shorteventid_authchain: Arc, - - /// RoomId + EventId -> outlier PDU. - /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) eventid_outlierpdu: Arc, - pub(super) softfailedeventids: Arc, - - /// RoomId + EventId -> Parent PDU EventId. - pub(super) referencedevents: Arc, - - pub(super) pdu_cache: Mutex, Arc>>, - pub(super) shorteventid_cache: Mutex>>, - pub(super) auth_chain_cache: Mutex, Arc>>>, - pub(super) eventidshort_cache: Mutex, u64>>, - pub(super) statekeyshort_cache: Mutex>, - pub(super) shortstatekey_cache: Mutex>, - pub(super) our_real_users_cache: RwLock, Arc>>>>, - pub(super) appservice_in_room_cache: RwLock, HashMap>>, - pub(super) lazy_load_waiting: - Mutex, Box, Box, u64), HashSet>>>, - pub(super) stateinfo_cache: Mutex< - LruCache< - u64, - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - >, - >, - pub(super) lasttimelinecount_cache: Mutex, u64>>, -} - -impl Rooms { - /// Returns true if a given room version is supported - #[tracing::instrument(skip(self, db))] - pub fn is_supported_version(&self, db: &Database, room_version: &RoomVersionId) -> bool { - db.globals.supported_room_versions().contains(room_version) - } - - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? 
- .pop() - .expect("there is always one layer") - .1; - let mut result = BTreeMap::new(); - let mut i = 0; - for compressed in full_state.into_iter() { - let parsed = self.parse_compressed_state_event(compressed)?; - result.insert(parsed.0, parsed.1); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - Ok(result) - } - - #[tracing::instrument(skip(self))] - pub async fn state_full( - &self, - shortstatehash: u64, - ) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - let mut result = HashMap::new(); - let mut i = 0; - for compressed in full_state { - let (_, eventid) = self.parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.get_pdu(&eventid)? { - result.insert( - ( - pdu.kind.to_string().into(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - ); - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get_id( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { - Some(s) => s, - None => return Ok(None), - }; - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .into_iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - self.parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) - } - - /// Returns the state hash for this pdu. - pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.eventid_shorteventid - .get(event_id.as_bytes())? - .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) - }) - .transpose() - }) - } - - /// Returns the last state hash key added to the db for the given room. - #[tracing::instrument(skip(self))] - pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - /// This fetches auth events from the current state. - #[tracing::instrument(skip(self))] - pub fn get_auth_events( - &self, - room_id: &RoomId, - kind: &RoomEventType, - sender: &UserId, - state_key: Option<&str>, - content: &serde_json::value::RawValue, - ) -> Result>> { - let shortstatehash = - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - current_shortstatehash - } else { - return Ok(HashMap::new()); - }; - - let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) - .expect("content is a valid JSON object"); - - let mut sauthevents = auth_events - .into_iter() - .filter_map(|(event_type, state_key)| { - self.get_shortstatekey(&event_type.to_string().into(), &state_key) - .ok() - .flatten() - .map(|s| (s, (event_type, state_key))) - }) - .collect::>(); - - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - Ok(full_state - .into_iter() - .filter_map(|compressed| self.parse_compressed_state_event(compressed).ok()) - .filter_map(|(shortstatekey, event_id)| { - sauthevents.remove(&shortstatekey).map(|k| (k, event_id)) - }) - .filter_map(|(k, event_id)| self.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu))) - .collect()) - } - - /// Generate a new StateHash. - /// - /// A unique hash made from hashing all PDU ids of the state joined with 0xff. - fn calculate_hash(&self, bytes_list: &[&[u8]]) -> StateHashId { - // We only hash the pdu's event ids, not the whole pdu - let bytes = bytes_list.join(&0xff); - let hash = digest::digest(&digest::SHA256, &bytes); - hash.as_ref().into() - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. - self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() - } - - /// Force the creation of a new StateHash and insert it into the db. - /// - /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. 
- #[tracing::instrument(skip(self, new_state_ids_compressed, db))] - pub fn force_state( - &self, - room_id: &RoomId, - new_state_ids_compressed: HashSet, - db: &Database, - ) -> Result<()> { - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - for event_id in statediffnew.into_iter().filter_map(|new| { - self.parse_compressed_state_event(new) - .ok() - .map(|(_, id)| id) - }) { - let pdu = match self.get_pdu_json(&event_id)? { - Some(pdu) => pdu, - None => continue, - }; - - if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { - continue; - } - - let pdu: PduEvent = match serde_json::from_str( - &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), - ) { - Ok(pdu) => pdu, - Err(_) => continue, - }; - - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let membership = match serde_json::from_str::(pdu.content.get()) { - Ok(e) => e.membership, - Err(_) => continue, - }; - - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, - }; - - let user_id = match UserId::parse(state_key) { - Ok(id) => id, - Err(_) => continue, - }; - - self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; - } - - self.update_joined_count(room_id, db)?; - - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. - #[tracing::instrument(skip(self))] - pub fn load_shortstatehash_info( - &self, - shortstatehash: u64, - ) -> Result< - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - > { - if let Some(r) = self - .stateinfo_cache - .lock() - .unwrap() - .get_mut(&shortstatehash) - { - return Ok(r.clone()); - } - - let value = self - .shortstatehash_statediff - .get(&shortstatehash.to_be_bytes())? 
- .ok_or_else(|| Error::bad_database("State hash does not exist"))?; - let parent = - utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - - let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); - - let mut i = size_of::(); - while let Some(v) = value.get(i..i + 2 * size_of::()) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i += size_of::(); - continue; - } - if add_mode { - added.insert(v.try_into().expect("we checked the size above")); - } else { - removed.insert(v.try_into().expect("we checked the size above")); - } - i += 2 * size_of::(); - } - - if parent != 0_u64 { - let mut response = self.load_shortstatehash_info(parent)?; - let mut state = response.last().unwrap().1.clone(); - state.extend(added.iter().copied()); - for r in &removed { - state.remove(r); - } - - response.push((shortstatehash, state, added, removed)); - - Ok(response) - } else { - let response = vec![(shortstatehash, added.clone(), added, removed)]; - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); - Ok(response) - } - } - - pub fn compress_state_event( - &self, - shortstatekey: u64, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - let mut v = shortstatekey.to_be_bytes().to_vec(); - v.extend_from_slice( - &self - .get_or_create_shorteventid(event_id, globals)? - .to_be_bytes(), - ); - Ok(v.try_into().expect("we checked the size above")) - } - - /// Returns shortstatekey, event id - pub fn parse_compressed_state_event( - &self, - compressed_event: CompressedStateEvent, - ) -> Result<(u64, Arc)> { - Ok(( - utils::u64_from_bytes(&compressed_event[0..size_of::()]) - .expect("bytes have right length"), - self.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]) - .expect("bytes have right length"), - )?, - )) - } - - /// Creates a new shortstatehash that often is just a diff to an already existing - /// shortstatehash and therefore very efficient. - /// - /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer - /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 - /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's - /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. - /// - /// * `shortstatehash` - Shortstatehash of this state - /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid - /// * `statediffremoved` - Removed from base. 
Each vec is shortstatekey+shorteventid - /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer - /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer - #[tracing::instrument(skip( - self, - statediffnew, - statediffremoved, - diff_to_sibling, - parent_states - ))] - pub fn save_state_from_diff( - &self, - shortstatehash: u64, - statediffnew: HashSet, - statediffremoved: HashSet, - diff_to_sibling: usize, - mut parent_states: Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - ) -> Result<()> { - let diffsum = statediffnew.len() + statediffremoved.len(); - - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().unwrap(); - - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - - return Ok(()); - } - - if parent_states.is_empty() { - // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - - return Ok(()); - }; - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. We replace a layer above - - let parent = parent_states.pop().unwrap(); - let parent_diff = parent.2.len() + parent.3.len(); - - if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { - // Diff too big, we replace above layer(s) - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. 
We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.to_be_bytes().to_vec(); - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed[..]); - } - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - } - - Ok(()) - } - - /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash( - &self, - state_hash: &StateHashId, - globals: &super::globals::Globals, - ) -> Result<(u64, bool)> { - Ok(match self.statehash_shortstatehash.get(state_hash)? { - Some(shortstatehash) => ( - utils::u64_from_bytes(&shortstatehash) - .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, - true, - ), - None => { - let shortstatehash = globals.next_count()?; - self.statehash_shortstatehash - .insert(state_hash, &shortstatehash.to_be_bytes())?; - (shortstatehash, false) - } - }) - } - - pub fn get_or_create_shorteventid( - &self, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { - return Ok(*short); - } - - let short = match self.eventid_shorteventid.get(event_id.as_bytes())? { - Some(shorteventid) => utils::u64_from_bytes(&shorteventid) - .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, - None => { - let shorteventid = globals.next_count()?; - self.eventid_shorteventid - .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; - shorteventid - } - }; - - self.eventidshort_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), short); - - Ok(short) - } - - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { - self.roomid_shortroomid - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) - }) - .transpose() - } - - pub fn get_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - ) -> Result> { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(Some(*short)); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = self - .statekey_shortstatekey - .get(&statekey)? - .map(|shortstatekey| { - utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) - }) - .transpose()?; - - if let Some(s) = short { - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), s); - } - - Ok(short) - } - - pub fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - globals: &super::globals::Globals, - ) -> Result { - Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? 
{ - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, - None => { - let short = globals.next_count()?; - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes())?; - short - } - }) - } - - pub fn get_or_create_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(*short); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = match self.statekey_shortstatekey.get(&statekey)? { - Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, - None => { - let shortstatekey = globals.next_count()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes())?; - self.shortstatekey_statekey - .insert(&shortstatekey.to_be_bytes(), &statekey)?; - shortstatekey - } - }; - - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), short); - - Ok(short) - } - - pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - if let Some(id) = self - .shorteventid_cache - .lock() - .unwrap() - .get_mut(&shorteventid) - { - return Ok(Arc::clone(id)); - } - - let bytes = self - .shorteventid_eventid - .get(&shorteventid.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - - let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; - - self.shorteventid_cache - .lock() - .unwrap() - .insert(shorteventid, Arc::clone(&event_id)); - - Ok(event_id) - } - - pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { - if let Some(id) = self - .shortstatekey_cache - .lock() - .unwrap() - .get_mut(&shortstatekey) - { - return Ok(id.clone()); - } - - let bytes = self - .shortstatekey_statekey - .get(&shortstatekey.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; - - let mut parts = bytes.splitn(2, |&b| b == 0xff); - let eventtype_bytes = parts.next().expect("split always returns one entry"); - let statekey_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; - - let event_type = - StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { - Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; - - let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { - Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") - })?; - - let result = (event_type, state_key); - - self.shortstatekey_cache - .lock() - .unwrap() - .insert(shortstatekey, result.clone()); - - Ok(result) - } - - /// Returns the full room state. - #[tracing::instrument(skip(self))] - pub async fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns the `count` of this pdu's id. - pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) - } - - /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - - /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu's id. - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? 
- .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { - return Ok(Some(Arc::clone(p))); - } - - if let Some(pdu) = self - .eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - .map(Arc::new) - }) - .transpose()? - { - self.pdu_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), Arc::clone(&pdu)); - Ok(Some(pdu)) - } else { - Ok(None) - } - } - - /// Returns the pdu. - /// - /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Returns the pdu as a `BTreeMap`. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Removes a pdu and creates a new one with the same id. - #[tracing::instrument(skip(self))] - fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - if self.pduid_pdu.get(pdu_id)?.is_some() { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), - )?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "PDU does not exist.", - )) - } - } - - /// Returns the leaf pdus of a room. - #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } #[tracing::instrument(skip(self, room_id, event_ids))] pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { @@ -1190,32 +10,6 @@ impl Rooms { Ok(()) } - /// Replace the leaves of a room. - /// - /// The provided `event_ids` become the new leaves, this allows a room to have multiple - /// `prev_events`. 
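// A rough standalone model of the leaf replacement described in the doc
// comment above, assuming the usual forward-extremity bookkeeping: the ids
// handed in simply become the new leaf set for the room. The HashSet stands in
// for the roomid_pduleaves tree, and every name here is illustrative only, not
// part of the patch.
fn replace_leaves_model(leaves: &mut std::collections::HashSet<String>, new_leaves: &[String]) {
    // Drop every previous extremity, then install the provided ids wholesale.
    leaves.clear();
    leaves.extend(new_leaves.iter().cloned());
}
// build_and_append_pdu, removed further down in this hunk, passes exactly one
// id, the freshly created event, so after a local send the room is back to a
// single leaf that the next event will reference via prev_events.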
- #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves<'a>( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - #[tracing::instrument(skip(self))] pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { let mut key = room_id.as_bytes().to_vec(); @@ -1223,26 +17,6 @@ impl Rooms { Ok(self.referencedevents.get(&key)?.is_some()) } - /// Returns the pdu from the outlier tree. - pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - /// Append the PDU as an outlier. - /// - /// Any event given to this will be processed (state-res) on another thread. - #[tracing::instrument(skip(self, pdu))] - pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } - #[tracing::instrument(skip(self))] pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { self.softfailedeventids.insert(event_id.as_bytes(), &[]) @@ -1255,2249 +29,3 @@ impl Rooms { .map(|o| o.is_some()) } - /// Creates a new persisted data unit and adds it to a room. - /// - /// By this point the incoming event should be fully authenticated, no auth happens - /// in `append_pdu`. - /// - /// Returns pdu id - #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] - pub fn append_pdu<'a>( - &self, - pdu: &PduEvent, - mut pdu_json: CanonicalJsonObject, - leaves: impl IntoIterator + Debug, - db: &Database, - ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); - - // Make unsigned fields correct. This is not properly documented in the spec, but state - // events need to have previous content in the unsigned field, so clients can easily - // interpret things like membership changes - if let Some(state_key) = &pdu.state_key { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) - { - if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) - .unwrap() - { - unsigned.insert( - "prev_content".to_owned(), - CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()) - .expect("event is valid, we just created it"), - ), - ); - } - } - } else { - error!("Invalid unsigned type in pdu."); - } - } - - // We must keep track of all events that have been referenced. 
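// For orientation while reading append_pdu: the pdu_id assembled a little
// further down is, as far as the removed code shows, just the room's
// shortroomid followed by a monotonically increasing global count, both as
// big-endian u64s, which is what lets pdu_count() above read the count back
// out of the trailing eight bytes. A minimal sketch of that packing, with an
// illustrative name that is not part of the patch:
fn make_pdu_id_model(shortroomid: u64, count: u64) -> Vec<u8> {
    let mut pdu_id = shortroomid.to_be_bytes().to_vec(); // 8-byte room prefix
    pdu_id.extend_from_slice(&count.to_be_bytes()); // 8-byte event counter
    pdu_id
}
// Because the count only ever grows, lexicographic order of these keys within
// one room is also chronological order, which the pagination helpers below rely on.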
- self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - self.replace_pdu_leaves(&pdu.room_id, leaves)?; - - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(pdu.room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - - let count1 = db.globals.next_count()?; - // Mark as read first so the sending client doesn't get a notification even if appending - // fails - self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; - self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; - - let count2 = db.globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&count2.to_be_bytes()); - - // There's a brief moment of time here where the count is updated but the pdu does not - // exist. This could theoretically lead to dropped pdus, but it's extremely rare - // - // Update: We fixed this using insert_lock - - self.pduid_pdu.insert( - &pdu_id, - &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - self.lasttimelinecount_cache - .lock() - .unwrap() - .insert(pdu.room_id.clone(), count2); - - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; - - drop(insert_lock); - - // See if the event matches any known pushers - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - let sync_pdu = pdu.to_sync_room_event(); - - let mut notifies = Vec::new(); - let mut highlights = Vec::new(); - - for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { - // Don't notify the user of their own events - if user == &pdu.sender { - continue; - } - - let rules_for_user = db - .account_data - .get( - None, - user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? - .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); - - let mut highlight = false; - let mut notify = false; - - for action in pusher::get_actions( - user, - &rules_for_user, - &power_levels, - &sync_pdu, - &pdu.room_id, - db, - )? 
{ - match action { - Action::DontNotify => notify = false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => notify = true, - Action::SetTweak(Tweak::Highlight(true)) => { - highlight = true; - } - _ => {} - }; - } - - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(pdu.room_id.as_bytes()); - - if notify { - notifies.push(userroom_id.clone()); - } - - if highlight { - highlights.push(userroom_id); - } - - for senderkey in db.pusher.get_pusher_senderkeys(user) { - db.sending.send_push_pdu(&*pdu_id, senderkey)?; - } - } - - self.userroomid_notificationcount - .increment_batch(&mut notifies.into_iter())?; - self.userroomid_highlightcount - .increment_batch(&mut highlights.into_iter())?; - - match pdu.kind { - RoomEventType::RoomRedaction => { - if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(redact_id, pdu)?; - } - } - RoomEventType::RoomMember => { - if let Some(state_key) = &pdu.state_key { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - // if the state_key fails - let target_user_id = UserId::parse(state_key.clone()) - .expect("This state_key was previously validated"); - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - let invite_state = match content.membership { - MembershipState::Invite => { - let state = self.calculate_invite_state(pdu)?; - Some(state) - } - _ => None, - }; - - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( - &pdu.room_id, - &target_user_id, - content.membership, - &pdu.sender, - invite_state, - db, - true, - )?; - } - } - RoomEventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody<'a> { - #[serde(borrow)] - body: Option>, - } - - let content = serde_json::from_str::>(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - let mut batch = body - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= 50) - .map(str::to_lowercase) - .map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xff); - key.extend_from_slice(&pdu_id); - (key, Vec::new()) - }); - - self.tokenids.insert_batch(&mut batch)?; - - let admin_room = self.id_from_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )?; - let server_user = format!("@conduit:{}", db.globals.server_name()); - - let to_conduit = body.starts_with(&format!("{}: ", server_user)); - - // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let from_conduit = - pdu.sender == server_user && db.globals.emergency_password().is_none(); - - if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - db.admin.process_message(body.to_string()); - } - } - } - _ => {} - } - - Ok(pdu_id) - } - - #[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - match self - .lasttimelinecount_cache - .lock() - .unwrap() - .entry(room_id.to_owned()) - { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - 
.pdus_until(&sender_user, &room_id, u64::MAX)? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .map(|(pduid, _)| self.pdu_count(&pduid)) - .next() - { - Ok(*v.insert(last_count?)) - } else { - Ok(0) - } - } - hash_map::Entry::Occupied(o) => Ok(*o.get()), - } - } - - #[tracing::instrument(skip(self))] - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - self.userroomid_highlightcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid notification count in db.")) - }) - .unwrap_or(Ok(0)) - } - - #[tracing::instrument(skip(self))] - pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_highlightcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid highlight count in db.")) - }) - .unwrap_or(Ok(0)) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, state_ids_compressed, globals))] - pub fn set_event_state( - &self, - event_id: &EventId, - room_id: &RoomId, - state_ids_compressed: HashSet, - globals: &super::globals::Globals, - ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &state_ids_compressed - .iter() - .map(|s| &s[..]) - .collect::>(), - ); - - let (shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, globals)?; - - if !already_existed { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (state_ids_compressed, HashSet::new()) - }; - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 1_000_000, // high number because no state will be based on this one - states_parents, - )?; - } - - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. 
- /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, new_pdu, globals))] - pub fn append_to_state( - &self, - new_pdu: &PduEvent, - globals: &super::globals::Globals, - ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; - - if let Some(p) = previous_shortstatehash { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; - } - - if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let shortstatekey = self.get_or_create_shortstatekey( - &new_pdu.kind.to_string().into(), - state_key, - globals, - )?; - - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; - - let replaces = states_parents - .last() - .map(|info| { - info.1 - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); - - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } - - // TODO: statehash with deterministic inputs - let shortstatehash = globals.next_count()?; - - let mut statediffnew = HashSet::new(); - statediffnew.insert(new); - - let mut statediffremoved = HashSet::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) - } - } - - #[tracing::instrument(skip(self, invite_event))] - pub fn calculate_invite_state( - &self, - invite_event: &PduEvent, - ) -> Result>> { - let mut state = Vec::new(); - // Add recommended events - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomCanonicalAlias, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomMember, - invite_event.sender.as_str(), - )? 
{ - state.push(e.to_stripped_state_event()); - } - - state.push(invite_event.to_stripped_state_event()); - Ok(state) - } - - #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - pub fn associate_token_shortstatehash( - &self, - room_id: &RoomId, - token: u64, - shortstatehash: u64, - ) -> Result<()> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .insert(&key, &shortstatehash.to_be_bytes()) - } - - pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") - }) - }) - .transpose() - } - - /// Creates a new persisted data unit and adds it to a room. - #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex - ) -> Result> { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - } = pdu_builder; - - let prev_events = self - .get_pdu_leaves(room_id)? - .into_iter() - .take(20) - .collect::>(); - - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - - let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = unsigned.unwrap_or_default(); - if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - self.room_state_get(room_id, &event_type.to_string().into(), state_key)? 
- { - unsigned.insert( - "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), - ); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), - ); - } - } - - let mut pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key, - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_ref().to_owned()), - ); - - match ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) { - Ok(_) => {} - Err(e) => { - return match e { - ruma::signatures::Error::PduSize => Err(Error::BadRequest( - ErrorKind::TooLarge, - "Message is too long", - )), - _ => Err(Error::BadRequest( - ErrorKind::Unknown, - "Signing event failed", - )), - } - } - } - - // Generate event id - pdu.event_id = EventId::parse_arc(format!( - "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - pdu_json.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), - ); - - // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; - - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. 
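// append_to_state, called on the next line, snapshots the room state as a set
// of compressed (shortstatekey, shorteventid) pairs and stores it as a diff
// against the previous shortstatehash; the value written by save_state_from_diff
// above is the parent shortstatehash, then the added pairs, then, if anything
// was removed, a zero u64 separator followed by the removed pairs. A small
// standalone model of the 16-byte pair packing used by compress_state_event and
// parse_compressed_state_event earlier in this hunk; the names are illustrative,
// not part of the patch:
fn compress_state_event_model(shortstatekey: u64, shorteventid: u64) -> [u8; 16] {
    let mut out = [0u8; 16];
    out[..8].copy_from_slice(&shortstatekey.to_be_bytes()); // which (event type, state_key) slot
    out[8..].copy_from_slice(&shorteventid.to_be_bytes()); // which event currently fills it
    out
}

fn parse_compressed_model(compressed: [u8; 16]) -> (u64, u64) {
    // Inverse of the packing above; the real code then resolves the second half
    // back into an EventId through the shorteventid_eventid tree.
    (
        u64::from_be_bytes(compressed[..8].try_into().expect("8 bytes")),
        u64::from_be_bytes(compressed[8..].try_into().expect("8 bytes")),
    )
}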
- let statehashid = self.append_to_state(&pdu, &db.globals)?; - - let pdu_id = self.append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - iter::once(&*pdu.event_id), - db, - )?; - - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; - - let mut servers: HashSet> = - self.room_servers(room_id).filter_map(|r| r.ok()).collect(); - - // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - servers.insert(Box::from(state_key_uid.server_name())); - } - } - - // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(db.globals.server_name()); - - db.sending.send_pdu(servers.into_iter(), &pdu_id)?; - - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - if let Some(appservice_uid) = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }) - { - if state_key_uid == &appservice_uid { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - } - } - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - Ok(pdu.event_id) - } - - /// Returns an iterator over all PDUs in a room. 
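// The helpers below (all_pdus, pdus_since, pdus_until, pdus_after) are thin
// wrappers around ordered key iteration: they build a start key from the
// room's shortroomid prefix plus a count, then walk pduid_pdu forwards or
// backwards while the key still begins with that prefix. A standalone model of
// the forward case, using a BTreeMap in place of the database tree; the names
// and the String payload are illustrative only, not part of the patch:
use std::collections::BTreeMap;
use std::ops::Bound;

fn pdus_after_model(tree: &BTreeMap<Vec<u8>, String>, prefix: &[u8], from: u64) -> Vec<String> {
    let mut start = prefix.to_vec();
    start.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so the token event itself is skipped
    tree.range((Bound::Included(start), Bound::Unbounded))
        .take_while(|(k, _)| k.starts_with(prefix)) // stay inside this room's key space
        .map(|(_, v)| v.clone())
        .collect()
}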
- #[tracing::instrument(skip(self))] - pub fn all_pdus<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result, PduEvent)>> + 'a> { - self.pdus_since(user_id, room_id, 0) - } - - /// Returns an iterator over all events in a room that happened after the event with id `since` - /// in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_since<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - since: u64, - ) -> Result, PduEvent)>> + 'a> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Skip the first pdu if it's exactly at since, because we sent that last time - let mut first_pdu_id = prefix.clone(); - first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(&first_pdu_id, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their tokens in a room that happened before the - /// event with id `until` in reverse-chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_until<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - until: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their token in a room that happened after the event - /// with id `from` in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_after<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - from: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Replace a PDU with the redacted form. - #[tracing::instrument(skip(self, reason))] - pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { - if let Some(pdu_id) = self.get_pdu_id(event_id)? { - let mut pdu = self - .get_pdu_from_id(&pdu_id)? 
- .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact(reason)?; - self.replace_pdu(&pdu_id, &pdu)?; - } - // If event does not exist, just noop - Ok(()) - } - - /// Update current membership data. - #[tracing::instrument(skip(self, last_state, db))] - pub fn update_membership( - &self, - room_id: &RoomId, - user_id: &UserId, - membership: MembershipState, - sender: &UserId, - last_state: Option>>, - db: &Database, - update_joined_count: bool, - ) -> Result<()> { - // Keep track what remote users exist by adding them as "deactivated" users - if user_id.server_name() != db.globals.server_name() { - db.users.create(user_id, None)?; - // TODO: displayname, avatar url - } - - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(user_id.server_name().as_bytes()); - - let mut serverroom_id = user_id.server_name().as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - match &membership { - MembershipState::Join => { - // Check if the user never joined this room - if !self.once_joined(user_id, room_id)? { - // Add the user ID to the join list then - self.roomuseroncejoinedids.insert(&userroom_id, &[])?; - - // Check if the room has a predecessor - if let Some(predecessor) = self - .room_state_get(room_id, &StateEventType::RoomCreate, "")? - .and_then(|create| serde_json::from_str(create.content.get()).ok()) - .and_then(|content: RoomCreateEventContent| content.predecessor) - { - // Copy user settings from predecessor to the current room: - // - Push rules - // - // TODO: finish this once push rules are implemented. - // - // let mut push_rules_event_content: PushRulesEvent = account_data - // .get( - // None, - // user_id, - // EventType::PushRules, - // )?; - // - // NOTE: find where `predecessor.room_id` match - // and update to `room_id`. - // - // account_data - // .update( - // None, - // user_id, - // EventType::PushRules, - // &push_rules_event_content, - // globals, - // ) - // .ok(); - - // Copy old tags to new room - if let Some(tag_event) = db.account_data.get::( - Some(&predecessor.room_id), - user_id, - RoomAccountDataEventType::Tag, - )? { - db.account_data - .update( - Some(room_id), - user_id, - RoomAccountDataEventType::Tag, - &tag_event, - &db.globals, - ) - .ok(); - }; - - // Copy direct chat flag - if let Some(mut direct_event) = db.account_data.get::( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - )? 
{ - let mut room_ids_updated = false; - - for room_ids in direct_event.content.0.values_mut() { - if room_ids.iter().any(|r| r == &predecessor.room_id) { - room_ids.push(room_id.to_owned()); - room_ids_updated = true; - } - } - - if room_ids_updated { - db.account_data.update( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - &direct_event, - &db.globals, - )?; - } - }; - } - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_joined.insert(&userroom_id, &[])?; - self.roomuserid_joined.insert(&roomuser_id, &[])?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Invite => { - // We want to know if the sender is ignored by the receiver - let is_ignored = db - .account_data - .get::( - None, // Ignored users are in global account data - user_id, // Receiver - GlobalAccountDataEventType::IgnoredUserList - .to_string() - .into(), - )? - .map_or(false, |ignored| { - ignored - .content - .ignored_users - .iter() - .any(|user| user == sender) - }); - - if is_ignored { - return Ok(()); - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_invitestate.insert( - &userroom_id, - &serde_json::to_vec(&last_state.unwrap_or_default()) - .expect("state to bytes always works"), - )?; - self.roomuserid_invitecount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Leave | MembershipState::Ban => { - if update_joined_count - && self - .room_members(room_id) - .chain(self.room_members_invited(room_id)) - .filter_map(|r| r.ok()) - .all(|u| u.server_name() != user_id.server_name()) - { - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - self.userroomid_leftstate.insert( - &userroom_id, - &serde_json::to_vec(&Vec::>::new()).unwrap(), - )?; // TODO - self.roomuserid_leftcount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - } - _ => {} - } - - if update_joined_count { - self.update_joined_count(room_id, db)?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn update_joined_count(&self, room_id: &RoomId, db: &Database) -> Result<()> { - let mut joinedcount = 0_u64; - let mut invitedcount = 0_u64; - let mut joined_servers = HashSet::new(); - let mut real_users = HashSet::new(); - - for joined in self.room_members(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(joined.server_name().to_owned()); - if joined.server_name() == db.globals.server_name() - && !db.users.is_deactivated(&joined).unwrap_or(true) - { - real_users.insert(joined); - } - joinedcount += 1; - } - - for invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(invited.server_name().to_owned()); - invitedcount += 1; - } - - self.roomid_joinedcount - 
.insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?; - - self.roomid_invitedcount - .insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?; - - self.our_real_users_cache - .write() - .unwrap() - .insert(room_id.to_owned(), Arc::new(real_users)); - - for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { - if !joined_servers.remove(&old_joined_server) { - // Server not in room anymore - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(old_joined_server.as_bytes()); - - let mut serverroom_id = old_joined_server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - } - - // Now only new servers are in joined_servers anymore - for server in joined_servers { - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(server.as_bytes()); - - let mut serverroom_id = server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - - self.appservice_in_room_cache - .write() - .unwrap() - .remove(room_id); - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn get_our_real_users( - &self, - room_id: &RoomId, - db: &Database, - ) -> Result>>> { - let maybe = self - .our_real_users_cache - .read() - .unwrap() - .get(room_id) - .cloned(); - if let Some(users) = maybe { - Ok(users) - } else { - self.update_joined_count(room_id, db)?; - Ok(Arc::clone( - self.our_real_users_cache - .read() - .unwrap() - .get(room_id) - .unwrap(), - )) - } - } - - #[tracing::instrument(skip(self, room_id, appservice, db))] - pub fn appservice_in_room( - &self, - room_id: &RoomId, - appservice: &(String, serde_yaml::Value), - db: &Database, - ) -> Result { - let maybe = self - .appservice_in_room_cache - .read() - .unwrap() - .get(room_id) - .and_then(|map| map.get(&appservice.0)) - .copied(); - - if let Some(b) = maybe { - Ok(b) - } else if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - - let bridge_user_id = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }); - - let in_room = bridge_user_id - .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) - || self.room_members(room_id).any(|userid| { - userid.map_or(false, |userid| { - users.iter().any(|r| r.is_match(userid.as_str())) - }) - }); - - self.appservice_in_room_cache - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default() - .insert(appservice.0.clone(), in_room); - - Ok(in_room) - } else { - Ok(false) - } - } - - // Make a user leave all their joined rooms - #[tracing::instrument(skip(self, db))] - pub async fn leave_all_rooms(&self, user_id: &UserId, db: &Database) -> Result<()> { - let all_rooms = db - .rooms - .rooms_joined(user_id) - .chain(db.rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) - .collect::>(); - - for room_id in all_rooms { - let room_id = match room_id { - Ok(room_id) => room_id, - 
Err(_) => continue, - }; - - let _ = self.leave_room(user_id, &room_id, db).await; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - pub async fn leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - // Ask a remote server if we don't have this room - if !self.exists(room_id)? && room_id.server_name() != db.globals.server_name() { - if let Err(e) = self.remote_leave_room(user_id, room_id, db).await { - warn!("Failed to leave room {} remotely: {}", user_id, e); - // Don't tell the client about this error - } - - let last_state = self - .invite_state(user_id, room_id)? - .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?; - - // We always drop the invite, we can't rely on other servers - self.update_membership( - room_id, - user_id, - MembershipState::Leave, - user_id, - last_state, - db, - true, - )?; - } else { - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - let mut event: RoomMemberEventContent = serde_json::from_str( - self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot leave a room you are not a member of.", - ))? - .content - .get(), - ) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = MembershipState::Leave; - - self.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - user_id, - room_id, - db, - &state_lock, - )?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - async fn remote_leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - let mut make_leave_response_and_server = Err(Error::BadServerResponse( - "No server available to assist in leaving.", - )); - - let invite_state = db - .rooms - .invite_state(user_id, room_id)? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "User is not invited.", - ))?; - - let servers: HashSet<_> = invite_state - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); - - for remote_server in servers { - let make_leave_response = db - .sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, - ) - .await; - - make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); - - if make_leave_response_and_server.is_ok() { - break; - } - } - - let (make_leave_response, remote_server) = make_leave_response_and_server?; - - let room_version_id = match make_leave_response.room_version { - Some(version) if self.is_supported_version(&db, &version) => version, - _ => return Err(Error::BadServerResponse("Room version is not supported")), - }; - - let mut leave_event_stub = - serde_json::from_str::(make_leave_response.event.get()).map_err( - |_| Error::BadServerResponse("Invalid make_leave event json received from server."), - )?; - - // TODO: Is origin needed? 
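// For context on the next few steps: in room versions 3 and newer the event id
// is not a field of the event at all but is derived from the event's reference
// hash, so the stub below gets a fresh origin_server_ts, has any "event_id"
// stripped, is hashed and signed, and only then has its id computed as
// "$" + reference_hash(...). build_and_append_pdu, removed earlier in this
// hunk, performs the same sequence for locally created events.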
- leave_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - leave_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms - leave_event_stub.remove("event_id"); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut leave_event_stub, - &room_version_id, - ) - .expect("event is valid, we just created it"); - - // Generate event id - let event_id = EventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - // Add event_id back - leave_event_stub.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - - // It has enough fields to be called a proper event now - let leave_event = leave_event_stub; - - db.sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::create_leave_event::v2::Request { - room_id, - event_id: &event_id, - pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), - }, - ) - .await?; - - Ok(()) - } - - /// Makes a user forget a room. - #[tracing::instrument(skip(self))] - pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - - Ok(()) - } - - #[tracing::instrument(skip(self, globals))] - pub fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId>, - globals: &super::globals::Globals, - ) -> Result<()> { - if let Some(room_id) = room_id { - // New alias - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - } else { - // room_id=None means remove alias - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result>> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_aliases<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? - .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { - if public { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } else { - self.publicroomids.remove(room_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn iter_ids(&self) -> impl Iterator>> + '_ { - self.roomid_shortroomid.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn search_pdus<'a>( - &'a self, - room_id: &RoomId, - search_string: &str, - ) -> Result> + 'a, Vec)>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - let prefix_clone = prefix.clone(); - - let words: Vec<_> = search_string - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .map(str::to_lowercase) - .collect(); - - let iterators = words.clone().into_iter().map(move |word| { - let mut prefix2 = prefix.clone(); - prefix2.extend_from_slice(word.as_bytes()); - prefix2.push(0xff); - - let mut last_possible_id = prefix2.clone(); - last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.tokenids - .iter_from(&last_possible_id, true) // Newest pdus first - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) - }); - - Ok(utils::common_elements(iterators, |a, b| { - // We compare b with a because we reversed the iterator earlier - b.cmp(a) - }) - .map(|iter| { - ( - iter.map(move |id| { - let mut pduid = prefix_clone.clone(); - pduid.extend_from_slice(&id); - pduid - }), - words, - ) - })) - } - - #[tracing::instrument(skip(self))] - pub fn get_shared_rooms<'a>( - &'a self, - users: Vec>, - ) -> Result>> + 'a> { - let iterators = users.into_iter().map(move |user_id| { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_joined - .scan_prefix(prefix) - .map(|(key, _)| { - let roomid_index = key - .iter() - .enumerate() - .find(|(_, &b)| b == 0xff) - .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? 
- .0 - + 1; // +1 because the room id starts AFTER the separator - - let room_id = key[roomid_index..].to_vec(); - - Ok::<_, Error>(room_id) - }) - .filter_map(|r| r.ok()) - }); - - // We use the default compare function because keys are sorted correctly (not reversed) - Ok(utils::common_elements(iterators, Ord::cmp) - .expect("users is not empty") - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { - Error::bad_database("Invalid RoomId bytes in userroomid_joined") - })?) - .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - })) - } - - /// Returns an iterator of all servers participating in this room. - #[tracing::instrument(skip(self))] - pub fn room_servers<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomserverids.scan_prefix(prefix).map(|(key, _)| { - ServerName::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Server name in roomserverids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.serverroomids.get(&key).map(|o| o.is_some()) - } - - /// Returns an iterator of all rooms a server participates in (as far as we know). - #[tracing::instrument(skip(self))] - pub fn server_rooms<'a>( - &'a self, - server: &ServerName, - ) -> impl Iterator>> + 'a { - let mut prefix = server.as_bytes().to_vec(); - prefix.push(0xff); - - self.serverroomids.scan_prefix(prefix).map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) - }) - } - - /// Returns an iterator over all joined members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { - self.roomid_joinedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { - self.roomid_invitedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - /// Returns an iterator over all User IDs who ever joined a room. 
- #[tracing::instrument(skip(self))] - pub fn room_useroncejoined<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuseroncejoinedids - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in room_useroncejoined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) - }) - } - - /// Returns an iterator over all invited members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members_invited<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_invitecount - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_invitecount - .get(&key)? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid invitecount in db.") - })?)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_leftcount - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid leftcount in db.")) - }) - .transpose() - } - - pub fn is_disabled(&self, room_id: &RoomId) -> Result { - Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) - } - - /// Returns an iterator over all rooms this user joined. - #[tracing::instrument(skip(self))] - pub fn rooms_joined<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator>> + 'a { - self.userroomid_joined - .scan_prefix(user_id.as_bytes().to_vec()) - .map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) - }) - } - - /// Returns an iterator over all rooms a user was invited to. 
- #[tracing::instrument(skip(self))] - pub fn rooms_invited<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_invitestate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn invite_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_invitestate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok(state) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn left_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_leftstate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok(state) - }) - .transpose() - } - - /// Returns an iterator over all rooms a user left. - #[tracing::instrument(skip(self))] - pub fn rooms_left<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_leftstate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_left(&self, user_id: &UserId, 
room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( - &'a self, - key: &[u64], - ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); - } - - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? - .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - }) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } - } - - Ok(None) - } - - #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db - if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - )?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_mark_sent( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - lazy_load: HashSet>, - count: u64, - ) { - self.lazy_load_waiting.lock().unwrap().insert( - ( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - count, - ), - lazy_load, - ); - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } - - /// Returns the room's version. 
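-    /// This is read from the `room_version` field of the room's `m.room.create` state event.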
- #[tracing::instrument(skip(self))] - pub fn get_room_version(&self, room_id: &RoomId) -> Result { - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - let room_version = create_event_content - .map(|create_event| create_event.room_version) - .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; - Ok(room_version) - } -} diff --git a/src/service/rooms/search.rs b/src/service/rooms/search.rs index 4ad815e3..ce055058 100644 --- a/src/service/rooms/search.rs +++ b/src/service/rooms/search.rs @@ -1,2900 +1,3 @@ -mod edus; - -pub use edus::RoomEdus; - -use crate::{ - pdu::{EventHash, PduBuilder}, - utils, Database, Error, PduEvent, Result, -}; -use lru_cache::LruCache; -use regex::Regex; -use ring::digest; -use ruma::{ - api::{client::error::ErrorKind, federation}, - events::{ - direct::DirectEvent, - ignored_user_list::IgnoredUserListEvent, - push_rules::PushRulesEvent, - room::{ - create::RoomCreateEventContent, - member::{MembershipState, RoomMemberEventContent}, - power_levels::RoomPowerLevelsEventContent, - }, - tag::TagEvent, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, RoomEventType, StateEventType, - }, - push::{Action, Ruleset, Tweak}, - serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res::{self, RoomVersion, StateMap}, - uint, DeviceId, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, -}; -use serde::Deserialize; -use serde_json::value::to_raw_value; -use std::{ - borrow::Cow, - collections::{hash_map, BTreeMap, HashMap, HashSet}, - fmt::Debug, - iter, - mem::size_of, - sync::{Arc, Mutex, RwLock}, -}; -use tokio::sync::MutexGuard; -use tracing::{error, warn}; - -use super::{abstraction::Tree, pusher}; - -/// The unique identifier of each state group. -/// -/// This is created when a state group is added to the database by -/// hashing the entire state. -pub type StateHashId = Vec; -pub type CompressedStateEvent = [u8; 2 * size_of::()]; - -pub struct Rooms { - pub edus: RoomEdus, - pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count - pub(super) eventid_pduid: Arc, - pub(super) roomid_pduleaves: Arc, - pub(super) alias_roomid: Arc, - pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count - pub(super) publicroomids: Arc, - - pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount - - /// Participating servers in a room. 
- pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName - pub(super) serverroomids: Arc, // ServerRoomId = ServerName + RoomId - - pub(super) userroomid_joined: Arc, - pub(super) roomuserid_joined: Arc, - pub(super) roomid_joinedcount: Arc, - pub(super) roomid_invitedcount: Arc, - pub(super) roomuseroncejoinedids: Arc, - pub(super) userroomid_invitestate: Arc, // InviteState = Vec> - pub(super) roomuserid_invitecount: Arc, // InviteCount = Count - pub(super) userroomid_leftstate: Arc, - pub(super) roomuserid_leftcount: Arc, - - pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled - - pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId - - pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 - pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 - - /// Remember the current state hash of a room. - pub(super) roomid_shortstatehash: Arc, - pub(super) roomsynctoken_shortstatehash: Arc, - /// Remember the state hash at events in the past. - pub(super) shorteventid_shortstatehash: Arc, - /// StateKey = EventType + StateKey, ShortStateKey = Count - pub(super) statekey_shortstatekey: Arc, - pub(super) shortstatekey_statekey: Arc, - - pub(super) roomid_shortroomid: Arc, - - pub(super) shorteventid_eventid: Arc, - pub(super) eventid_shorteventid: Arc, - - pub(super) statehash_shortstatehash: Arc, - pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) - - pub(super) shorteventid_authchain: Arc, - - /// RoomId + EventId -> outlier PDU. - /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) eventid_outlierpdu: Arc, - pub(super) softfailedeventids: Arc, - - /// RoomId + EventId -> Parent PDU EventId. - pub(super) referencedevents: Arc, - - pub(super) pdu_cache: Mutex, Arc>>, - pub(super) shorteventid_cache: Mutex>>, - pub(super) auth_chain_cache: Mutex, Arc>>>, - pub(super) eventidshort_cache: Mutex, u64>>, - pub(super) statekeyshort_cache: Mutex>, - pub(super) shortstatekey_cache: Mutex>, - pub(super) our_real_users_cache: RwLock, Arc>>>>, - pub(super) appservice_in_room_cache: RwLock, HashMap>>, - pub(super) lazy_load_waiting: - Mutex, Box, Box, u64), HashSet>>>, - pub(super) stateinfo_cache: Mutex< - LruCache< - u64, - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - >, - >, - pub(super) lasttimelinecount_cache: Mutex, u64>>, -} - -impl Rooms { - /// Returns true if a given room version is supported - #[tracing::instrument(skip(self, db))] - pub fn is_supported_version(&self, db: &Database, room_version: &RoomVersionId) -> bool { - db.globals.supported_room_versions().contains(room_version) - } - - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? 
- .pop() - .expect("there is always one layer") - .1; - let mut result = BTreeMap::new(); - let mut i = 0; - for compressed in full_state.into_iter() { - let parsed = self.parse_compressed_state_event(compressed)?; - result.insert(parsed.0, parsed.1); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - Ok(result) - } - - #[tracing::instrument(skip(self))] - pub async fn state_full( - &self, - shortstatehash: u64, - ) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - let mut result = HashMap::new(); - let mut i = 0; - for compressed in full_state { - let (_, eventid) = self.parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.get_pdu(&eventid)? { - result.insert( - ( - pdu.kind.to_string().into(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - ); - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get_id( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { - Some(s) => s, - None => return Ok(None), - }; - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .into_iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - self.parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) - } - - /// Returns the state hash for this pdu. - pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.eventid_shorteventid - .get(event_id.as_bytes())? - .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) - }) - .transpose() - }) - } - - /// Returns the last state hash key added to the db for the given room. - #[tracing::instrument(skip(self))] - pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - /// This fetches auth events from the current state. - #[tracing::instrument(skip(self))] - pub fn get_auth_events( - &self, - room_id: &RoomId, - kind: &RoomEventType, - sender: &UserId, - state_key: Option<&str>, - content: &serde_json::value::RawValue, - ) -> Result>> { - let shortstatehash = - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - current_shortstatehash - } else { - return Ok(HashMap::new()); - }; - - let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) - .expect("content is a valid JSON object"); - - let mut sauthevents = auth_events - .into_iter() - .filter_map(|(event_type, state_key)| { - self.get_shortstatekey(&event_type.to_string().into(), &state_key) - .ok() - .flatten() - .map(|s| (s, (event_type, state_key))) - }) - .collect::>(); - - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - Ok(full_state - .into_iter() - .filter_map(|compressed| self.parse_compressed_state_event(compressed).ok()) - .filter_map(|(shortstatekey, event_id)| { - sauthevents.remove(&shortstatekey).map(|k| (k, event_id)) - }) - .filter_map(|(k, event_id)| self.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu))) - .collect()) - } - - /// Generate a new StateHash. - /// - /// A unique hash made from hashing all PDU ids of the state joined with 0xff. - fn calculate_hash(&self, bytes_list: &[&[u8]]) -> StateHashId { - // We only hash the pdu's event ids, not the whole pdu - let bytes = bytes_list.join(&0xff); - let hash = digest::digest(&digest::SHA256, &bytes); - hash.as_ref().into() - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. - self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() - } - - /// Force the creation of a new StateHash and insert it into the db. - /// - /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. 
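-    /// Each entry of `new_state_ids_compressed` is a `CompressedStateEvent`: the big-endian
-    /// `shortstatekey` immediately followed by the big-endian `shorteventid` (see
-    /// `compress_state_event`), i.e. roughly
-    /// `[shortstatekey.to_be_bytes(), shorteventid.to_be_bytes()].concat()`.
-    /// Only the difference to the previous snapshot is persisted, via `save_state_from_diff`.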
- #[tracing::instrument(skip(self, new_state_ids_compressed, db))] - pub fn force_state( - &self, - room_id: &RoomId, - new_state_ids_compressed: HashSet, - db: &Database, - ) -> Result<()> { - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - for event_id in statediffnew.into_iter().filter_map(|new| { - self.parse_compressed_state_event(new) - .ok() - .map(|(_, id)| id) - }) { - let pdu = match self.get_pdu_json(&event_id)? { - Some(pdu) => pdu, - None => continue, - }; - - if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { - continue; - } - - let pdu: PduEvent = match serde_json::from_str( - &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), - ) { - Ok(pdu) => pdu, - Err(_) => continue, - }; - - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let membership = match serde_json::from_str::(pdu.content.get()) { - Ok(e) => e.membership, - Err(_) => continue, - }; - - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, - }; - - let user_id = match UserId::parse(state_key) { - Ok(id) => id, - Err(_) => continue, - }; - - self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; - } - - self.update_joined_count(room_id, db)?; - - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. - #[tracing::instrument(skip(self))] - pub fn load_shortstatehash_info( - &self, - shortstatehash: u64, - ) -> Result< - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - > { - if let Some(r) = self - .stateinfo_cache - .lock() - .unwrap() - .get_mut(&shortstatehash) - { - return Ok(r.clone()); - } - - let value = self - .shortstatehash_statediff - .get(&shortstatehash.to_be_bytes())? 
- .ok_or_else(|| Error::bad_database("State hash does not exist"))?; - let parent = - utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - - let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); - - let mut i = size_of::(); - while let Some(v) = value.get(i..i + 2 * size_of::()) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i += size_of::(); - continue; - } - if add_mode { - added.insert(v.try_into().expect("we checked the size above")); - } else { - removed.insert(v.try_into().expect("we checked the size above")); - } - i += 2 * size_of::(); - } - - if parent != 0_u64 { - let mut response = self.load_shortstatehash_info(parent)?; - let mut state = response.last().unwrap().1.clone(); - state.extend(added.iter().copied()); - for r in &removed { - state.remove(r); - } - - response.push((shortstatehash, state, added, removed)); - - Ok(response) - } else { - let response = vec![(shortstatehash, added.clone(), added, removed)]; - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); - Ok(response) - } - } - - pub fn compress_state_event( - &self, - shortstatekey: u64, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - let mut v = shortstatekey.to_be_bytes().to_vec(); - v.extend_from_slice( - &self - .get_or_create_shorteventid(event_id, globals)? - .to_be_bytes(), - ); - Ok(v.try_into().expect("we checked the size above")) - } - - /// Returns shortstatekey, event id - pub fn parse_compressed_state_event( - &self, - compressed_event: CompressedStateEvent, - ) -> Result<(u64, Arc)> { - Ok(( - utils::u64_from_bytes(&compressed_event[0..size_of::()]) - .expect("bytes have right length"), - self.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]) - .expect("bytes have right length"), - )?, - )) - } - - /// Creates a new shortstatehash that often is just a diff to an already existing - /// shortstatehash and therefore very efficient. - /// - /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer - /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 - /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's - /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. - /// - /// * `shortstatehash` - Shortstatehash of this state - /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid - /// * `statediffremoved` - Removed from base. 
Each vec is shortstatekey+shorteventid - /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer - /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer - #[tracing::instrument(skip( - self, - statediffnew, - statediffremoved, - diff_to_sibling, - parent_states - ))] - pub fn save_state_from_diff( - &self, - shortstatehash: u64, - statediffnew: HashSet, - statediffremoved: HashSet, - diff_to_sibling: usize, - mut parent_states: Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - ) -> Result<()> { - let diffsum = statediffnew.len() + statediffremoved.len(); - - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().unwrap(); - - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - - return Ok(()); - } - - if parent_states.is_empty() { - // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - - return Ok(()); - }; - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. We replace a layer above - - let parent = parent_states.pop().unwrap(); - let parent_diff = parent.2.len() + parent.3.len(); - - if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { - // Diff too big, we replace above layer(s) - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. 
We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.to_be_bytes().to_vec(); - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed[..]); - } - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - } - - Ok(()) - } - - /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash( - &self, - state_hash: &StateHashId, - globals: &super::globals::Globals, - ) -> Result<(u64, bool)> { - Ok(match self.statehash_shortstatehash.get(state_hash)? { - Some(shortstatehash) => ( - utils::u64_from_bytes(&shortstatehash) - .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, - true, - ), - None => { - let shortstatehash = globals.next_count()?; - self.statehash_shortstatehash - .insert(state_hash, &shortstatehash.to_be_bytes())?; - (shortstatehash, false) - } - }) - } - - pub fn get_or_create_shorteventid( - &self, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { - return Ok(*short); - } - - let short = match self.eventid_shorteventid.get(event_id.as_bytes())? { - Some(shorteventid) => utils::u64_from_bytes(&shorteventid) - .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, - None => { - let shorteventid = globals.next_count()?; - self.eventid_shorteventid - .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; - shorteventid - } - }; - - self.eventidshort_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), short); - - Ok(short) - } - - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { - self.roomid_shortroomid - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) - }) - .transpose() - } - - pub fn get_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - ) -> Result> { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(Some(*short)); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = self - .statekey_shortstatekey - .get(&statekey)? - .map(|shortstatekey| { - utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) - }) - .transpose()?; - - if let Some(s) = short { - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), s); - } - - Ok(short) - } - - pub fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - globals: &super::globals::Globals, - ) -> Result { - Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? 
{ - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, - None => { - let short = globals.next_count()?; - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes())?; - short - } - }) - } - - pub fn get_or_create_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(*short); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = match self.statekey_shortstatekey.get(&statekey)? { - Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, - None => { - let shortstatekey = globals.next_count()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes())?; - self.shortstatekey_statekey - .insert(&shortstatekey.to_be_bytes(), &statekey)?; - shortstatekey - } - }; - - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), short); - - Ok(short) - } - - pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - if let Some(id) = self - .shorteventid_cache - .lock() - .unwrap() - .get_mut(&shorteventid) - { - return Ok(Arc::clone(id)); - } - - let bytes = self - .shorteventid_eventid - .get(&shorteventid.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - - let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; - - self.shorteventid_cache - .lock() - .unwrap() - .insert(shorteventid, Arc::clone(&event_id)); - - Ok(event_id) - } - - pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { - if let Some(id) = self - .shortstatekey_cache - .lock() - .unwrap() - .get_mut(&shortstatekey) - { - return Ok(id.clone()); - } - - let bytes = self - .shortstatekey_statekey - .get(&shortstatekey.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; - - let mut parts = bytes.splitn(2, |&b| b == 0xff); - let eventtype_bytes = parts.next().expect("split always returns one entry"); - let statekey_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; - - let event_type = - StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { - Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; - - let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { - Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") - })?; - - let result = (event_type, state_key); - - self.shortstatekey_cache - .lock() - .unwrap() - .insert(shortstatekey, result.clone()); - - Ok(result) - } - - /// Returns the full room state. - #[tracing::instrument(skip(self))] - pub async fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns the `count` of this pdu's id. - pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) - } - - /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - - /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu's id. - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? 
- .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { - return Ok(Some(Arc::clone(p))); - } - - if let Some(pdu) = self - .eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - .map(Arc::new) - }) - .transpose()? - { - self.pdu_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), Arc::clone(&pdu)); - Ok(Some(pdu)) - } else { - Ok(None) - } - } - - /// Returns the pdu. - /// - /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Returns the pdu as a `BTreeMap`. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Removes a pdu and creates a new one with the same id. - #[tracing::instrument(skip(self))] - fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - if self.pduid_pdu.get(pdu_id)?.is_some() { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), - )?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "PDU does not exist.", - )) - } - } - - /// Returns the leaf pdus of a room. - #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - #[tracing::instrument(skip(self, room_id, event_ids))] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - /// Replace the leaves of a room. - /// - /// The provided `event_ids` become the new leaves, this allows a room to have multiple - /// `prev_events`. 
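-    /// Leaves are the room's current forward extremities. They are stored under keys of the
-    /// form `room_id ++ 0xff ++ event_id`; the old set is cleared before the new one is written.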
- #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves<'a>( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - /// Returns the pdu from the outlier tree. - pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - /// Append the PDU as an outlier. - /// - /// Any event given to this will be processed (state-res) on another thread. - #[tracing::instrument(skip(self, pdu))] - pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } - - #[tracing::instrument(skip(self))] - pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } - - /// Creates a new persisted data unit and adds it to a room. - /// - /// By this point the incoming event should be fully authenticated, no auth happens - /// in `append_pdu`. - /// - /// Returns pdu id - #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] - pub fn append_pdu<'a>( - &self, - pdu: &PduEvent, - mut pdu_json: CanonicalJsonObject, - leaves: impl IntoIterator + Debug, - db: &Database, - ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); - - // Make unsigned fields correct. This is not properly documented in the spec, but state - // events need to have previous content in the unsigned field, so clients can easily - // interpret things like membership changes - if let Some(state_key) = &pdu.state_key { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) - { - if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) - .unwrap() - { - unsigned.insert( - "prev_content".to_owned(), - CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()) - .expect("event is valid, we just created it"), - ), - ); - } - } - } else { - error!("Invalid unsigned type in pdu."); - } - } - - // We must keep track of all events that have been referenced. 
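-        // In order: mark all `prev_events` as referenced, replace the room's leaves, then (under
-        // the per-room insert lock) draw two fresh counts and store the event. The resulting pdu
-        // id is `shortroomid.to_be_bytes() ++ count.to_be_bytes()`, matching the `pduid_pdu` key
-        // layout ("PduId = ShortRoomId + Count").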
- self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - self.replace_pdu_leaves(&pdu.room_id, leaves)?; - - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(pdu.room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - - let count1 = db.globals.next_count()?; - // Mark as read first so the sending client doesn't get a notification even if appending - // fails - self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; - self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; - - let count2 = db.globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&count2.to_be_bytes()); - - // There's a brief moment of time here where the count is updated but the pdu does not - // exist. This could theoretically lead to dropped pdus, but it's extremely rare - // - // Update: We fixed this using insert_lock - - self.pduid_pdu.insert( - &pdu_id, - &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - self.lasttimelinecount_cache - .lock() - .unwrap() - .insert(pdu.room_id.clone(), count2); - - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; - - drop(insert_lock); - - // See if the event matches any known pushers - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - let sync_pdu = pdu.to_sync_room_event(); - - let mut notifies = Vec::new(); - let mut highlights = Vec::new(); - - for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { - // Don't notify the user of their own events - if user == &pdu.sender { - continue; - } - - let rules_for_user = db - .account_data - .get( - None, - user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? - .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); - - let mut highlight = false; - let mut notify = false; - - for action in pusher::get_actions( - user, - &rules_for_user, - &power_levels, - &sync_pdu, - &pdu.room_id, - db, - )? 
{ - match action { - Action::DontNotify => notify = false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => notify = true, - Action::SetTweak(Tweak::Highlight(true)) => { - highlight = true; - } - _ => {} - }; - } - - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(pdu.room_id.as_bytes()); - - if notify { - notifies.push(userroom_id.clone()); - } - - if highlight { - highlights.push(userroom_id); - } - - for senderkey in db.pusher.get_pusher_senderkeys(user) { - db.sending.send_push_pdu(&*pdu_id, senderkey)?; - } - } - - self.userroomid_notificationcount - .increment_batch(&mut notifies.into_iter())?; - self.userroomid_highlightcount - .increment_batch(&mut highlights.into_iter())?; - - match pdu.kind { - RoomEventType::RoomRedaction => { - if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(redact_id, pdu)?; - } - } - RoomEventType::RoomMember => { - if let Some(state_key) = &pdu.state_key { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - // if the state_key fails - let target_user_id = UserId::parse(state_key.clone()) - .expect("This state_key was previously validated"); - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - let invite_state = match content.membership { - MembershipState::Invite => { - let state = self.calculate_invite_state(pdu)?; - Some(state) - } - _ => None, - }; - - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( - &pdu.room_id, - &target_user_id, - content.membership, - &pdu.sender, - invite_state, - db, - true, - )?; - } - } - RoomEventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody<'a> { - #[serde(borrow)] - body: Option>, - } - - let content = serde_json::from_str::>(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - let mut batch = body - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= 50) - .map(str::to_lowercase) - .map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xff); - key.extend_from_slice(&pdu_id); - (key, Vec::new()) - }); - - self.tokenids.insert_batch(&mut batch)?; - - let admin_room = self.id_from_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )?; - let server_user = format!("@conduit:{}", db.globals.server_name()); - - let to_conduit = body.starts_with(&format!("{}: ", server_user)); - - // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let from_conduit = - pdu.sender == server_user && db.globals.emergency_password().is_none(); - - if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - db.admin.process_message(body.to_string()); - } - } - } - _ => {} - } - - Ok(pdu_id) - } - - #[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - match self - .lasttimelinecount_cache - .lock() - .unwrap() - .entry(room_id.to_owned()) - { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - 
.pdus_until(&sender_user, &room_id, u64::MAX)? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .map(|(pduid, _)| self.pdu_count(&pduid)) - .next() - { - Ok(*v.insert(last_count?)) - } else { - Ok(0) - } - } - hash_map::Entry::Occupied(o) => Ok(*o.get()), - } - } - - #[tracing::instrument(skip(self))] - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - self.userroomid_highlightcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid notification count in db.")) - }) - .unwrap_or(Ok(0)) - } - - #[tracing::instrument(skip(self))] - pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_highlightcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid highlight count in db.")) - }) - .unwrap_or(Ok(0)) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, state_ids_compressed, globals))] - pub fn set_event_state( - &self, - event_id: &EventId, - room_id: &RoomId, - state_ids_compressed: HashSet, - globals: &super::globals::Globals, - ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &state_ids_compressed - .iter() - .map(|s| &s[..]) - .collect::>(), - ); - - let (shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, globals)?; - - if !already_existed { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (state_ids_compressed, HashSet::new()) - }; - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 1_000_000, // high number because no state will be based on this one - states_parents, - )?; - } - - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. 
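The removed `set_event_state` just above never stores a full state snapshot per event; it records a diff against the parent snapshot: entries present now but not in the parent are "added", entries only in the parent are "removed". A std-only sketch of that diffing over compressed entries, assuming the `shortstatekey ++ shorteventid` packing (two big-endian u64s) that the `CompressedStateEvent` alias later in this diff suggests:

    use std::collections::HashSet;

    /// shortstatekey (u64 BE) ++ shorteventid (u64 BE), a plausible CompressedStateEvent layout.
    type Compressed = [u8; 16];

    fn compress(shortstatekey: u64, shorteventid: u64) -> Compressed {
        let mut v = [0u8; 16];
        v[..8].copy_from_slice(&shortstatekey.to_be_bytes());
        v[8..].copy_from_slice(&shorteventid.to_be_bytes());
        v
    }

    /// Diff the new full state against its parent snapshot.
    fn diff(
        new_state: &HashSet<Compressed>,
        parent: &HashSet<Compressed>,
    ) -> (HashSet<Compressed>, HashSet<Compressed>) {
        let added = new_state.difference(parent).copied().collect();
        let removed = parent.difference(new_state).copied().collect();
        (added, removed)
    }

    fn main() {
        let parent = HashSet::from([compress(1, 10), compress(2, 11)]);
        let new_state = HashSet::from([compress(1, 10), compress(2, 12)]);

        let (added, removed) = diff(&new_state, &parent);
        assert!(added.contains(&compress(2, 12))); // state key 2 now points to event 12
        assert!(removed.contains(&compress(2, 11))); // ...and no longer to event 11
    }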
- /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, new_pdu, globals))] - pub fn append_to_state( - &self, - new_pdu: &PduEvent, - globals: &super::globals::Globals, - ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; - - if let Some(p) = previous_shortstatehash { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; - } - - if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let shortstatekey = self.get_or_create_shortstatekey( - &new_pdu.kind.to_string().into(), - state_key, - globals, - )?; - - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; - - let replaces = states_parents - .last() - .map(|info| { - info.1 - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); - - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } - - // TODO: statehash with deterministic inputs - let shortstatehash = globals.next_count()?; - - let mut statediffnew = HashSet::new(); - statediffnew.insert(new); - - let mut statediffremoved = HashSet::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) - } - } - - #[tracing::instrument(skip(self, invite_event))] - pub fn calculate_invite_state( - &self, - invite_event: &PduEvent, - ) -> Result>> { - let mut state = Vec::new(); - // Add recommended events - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomCanonicalAlias, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomMember, - invite_event.sender.as_str(), - )? 
{ - state.push(e.to_stripped_state_event()); - } - - state.push(invite_event.to_stripped_state_event()); - Ok(state) - } - - #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - pub fn associate_token_shortstatehash( - &self, - room_id: &RoomId, - token: u64, - shortstatehash: u64, - ) -> Result<()> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .insert(&key, &shortstatehash.to_be_bytes()) - } - - pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") - }) - }) - .transpose() - } - - /// Creates a new persisted data unit and adds it to a room. - #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex - ) -> Result> { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - } = pdu_builder; - - let prev_events = self - .get_pdu_leaves(room_id)? - .into_iter() - .take(20) - .collect::>(); - - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - - let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = unsigned.unwrap_or_default(); - if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - self.room_state_get(room_id, &event_type.to_string().into(), state_key)? 
- { - unsigned.insert( - "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), - ); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), - ); - } - } - - let mut pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key, - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_ref().to_owned()), - ); - - match ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) { - Ok(_) => {} - Err(e) => { - return match e { - ruma::signatures::Error::PduSize => Err(Error::BadRequest( - ErrorKind::TooLarge, - "Message is too long", - )), - _ => Err(Error::BadRequest( - ErrorKind::Unknown, - "Signing event failed", - )), - } - } - } - - // Generate event id - pdu.event_id = EventId::parse_arc(format!( - "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - pdu_json.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), - ); - - // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; - - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. 
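For reference, the removed `build_and_append_pdu` above caps `prev_events` at 20 current leaves and derives the new event's depth as the maximum depth among them plus one. A tiny sketch of that calculation with plain `u64` in place of ruma's `UInt`:

    /// depth = max(prev_event depths) + 1; the first event in a room gets depth 1.
    fn next_depth(prev_event_depths: &[u64]) -> u64 {
        prev_event_depths.iter().copied().max().unwrap_or(0) + 1
    }

    /// prev_events are the current forward extremities, capped at 20 entries.
    fn select_prev_events(leaves: Vec<String>) -> Vec<String> {
        leaves.into_iter().take(20).collect()
    }

    fn main() {
        assert_eq!(next_depth(&[]), 1); // room creation event
        assert_eq!(next_depth(&[4, 7, 6]), 8); // new event sits above the deepest leaf
        assert_eq!(select_prev_events(vec!["$a".into(); 30]).len(), 20);
    }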
- let statehashid = self.append_to_state(&pdu, &db.globals)?; - - let pdu_id = self.append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - iter::once(&*pdu.event_id), - db, - )?; - - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; - - let mut servers: HashSet> = - self.room_servers(room_id).filter_map(|r| r.ok()).collect(); - - // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - servers.insert(Box::from(state_key_uid.server_name())); - } - } - - // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(db.globals.server_name()); - - db.sending.send_pdu(servers.into_iter(), &pdu_id)?; - - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - if let Some(appservice_uid) = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }) - { - if state_key_uid == &appservice_uid { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - } - } - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - Ok(pdu.event_id) - } - - /// Returns an iterator over all PDUs in a room. 
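The appservice fan-out removed above decides per event whether a registered appservice should receive the PDU: the sender (and, for membership events, the state_key) is matched against the `users` namespace regexes, the room's aliases against the `aliases` regexes, and the room ID against the literal `rooms` list. A trimmed-down sketch of that decision using the regex crate (the same crate the removed code uses); the config shape is simplified and membership-state_key matching is omitted:

    use regex::Regex;

    struct Namespaces {
        users: Vec<Regex>,
        aliases: Vec<Regex>,
        rooms: Vec<String>,
    }

    /// Should this appservice receive an event with the given sender/room/aliases?
    fn appservice_interested(
        ns: &Namespaces,
        sender: &str,
        room_id: &str,
        room_aliases: &[String],
    ) -> bool {
        ns.users.iter().any(|r| r.is_match(sender))
            || ns.rooms.iter().any(|r| r == room_id)
            || ns
                .aliases
                .iter()
                .any(|r| room_aliases.iter().any(|alias| r.is_match(alias)))
    }

    fn main() {
        let ns = Namespaces {
            users: vec![Regex::new(r"^@_bridge_.*:example\.org$").unwrap()],
            aliases: vec![Regex::new(r"^#bridge_.*:example\.org$").unwrap()],
            rooms: vec![],
        };

        assert!(appservice_interested(
            &ns,
            "@_bridge_alice:example.org",
            "!abc:example.org",
            &[],
        ));
        assert!(!appservice_interested(
            &ns,
            "@bob:example.org",
            "!abc:example.org",
            &["#general:example.org".to_owned()],
        ));
    }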
-    #[tracing::instrument(skip(self))]
-    pub fn all_pdus<'a>(
-        &'a self,
-        user_id: &UserId,
-        room_id: &RoomId,
-    ) -> Result<impl Iterator<Item = Result<(Vec<u8>, PduEvent)>> + 'a> {
-        self.pdus_since(user_id, room_id, 0)
-    }
-
-    /// Returns an iterator over all events in a room that happened after the event with id `since`
-    /// in chronological order.
-    #[tracing::instrument(skip(self))]
-    pub fn pdus_since<'a>(
-        &'a self,
-        user_id: &UserId,
-        room_id: &RoomId,
-        since: u64,
-    ) -> Result<impl Iterator<Item = Result<(Vec<u8>, PduEvent)>> + 'a> {
-        let prefix = self
-            .get_shortroomid(room_id)?
-            .expect("room exists")
-            .to_be_bytes()
-            .to_vec();
-
-        // Skip the first pdu if it's exactly at since, because we sent that last time
-        let mut first_pdu_id = prefix.clone();
-        first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes());
-
-        let user_id = user_id.to_owned();
-
-        Ok(self
-            .pduid_pdu
-            .iter_from(&first_pdu_id, false)
-            .take_while(move |(k, _)| k.starts_with(&prefix))
-            .map(move |(pdu_id, v)| {
-                let mut pdu = serde_json::from_slice::<PduEvent>(&v)
-                    .map_err(|_| Error::bad_database("PDU in db is invalid."))?;
-                if pdu.sender != user_id {
-                    pdu.remove_transaction_id()?;
-                }
-                Ok((pdu_id, pdu))
-            }))
-    }
-
-    /// Returns an iterator over all events and their tokens in a room that happened before the
-    /// event with id `until` in reverse-chronological order.
-    #[tracing::instrument(skip(self))]
-    pub fn pdus_until<'a>(
-        &'a self,
-        user_id: &UserId,
-        room_id: &RoomId,
-        until: u64,
-    ) -> Result<impl Iterator<Item = Result<(Vec<u8>, PduEvent)>> + 'a> {
-        // Create the first part of the full pdu id
-        let prefix = self
-            .get_shortroomid(room_id)?
-            .expect("room exists")
-            .to_be_bytes()
-            .to_vec();
-
-        let mut current = prefix.clone();
-        current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until`
-
-        let current: &[u8] = &current;
-
-        let user_id = user_id.to_owned();
-
-        Ok(self
-            .pduid_pdu
-            .iter_from(current, true)
-            .take_while(move |(k, _)| k.starts_with(&prefix))
-            .map(move |(pdu_id, v)| {
-                let mut pdu = serde_json::from_slice::<PduEvent>(&v)
-                    .map_err(|_| Error::bad_database("PDU in db is invalid."))?;
-                if pdu.sender != user_id {
-                    pdu.remove_transaction_id()?;
-                }
-                Ok((pdu_id, pdu))
-            }))
-    }
-
-    /// Returns an iterator over all events and their token in a room that happened after the event
-    /// with id `from` in chronological order.
-    #[tracing::instrument(skip(self))]
-    pub fn pdus_after<'a>(
-        &'a self,
-        user_id: &UserId,
-        room_id: &RoomId,
-        from: u64,
-    ) -> Result<impl Iterator<Item = Result<(Vec<u8>, PduEvent)>> + 'a> {
-        // Create the first part of the full pdu id
-        let prefix = self
-            .get_shortroomid(room_id)?
-            .expect("room exists")
-            .to_be_bytes()
-            .to_vec();
-
-        let mut current = prefix.clone();
-        current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event
-
-        let current: &[u8] = &current;
-
-        let user_id = user_id.to_owned();
-
-        Ok(self
-            .pduid_pdu
-            .iter_from(current, false)
-            .take_while(move |(k, _)| k.starts_with(&prefix))
-            .map(move |(pdu_id, v)| {
-                let mut pdu = serde_json::from_slice::<PduEvent>(&v)
-                    .map_err(|_| Error::bad_database("PDU in db is invalid."))?;
-                if pdu.sender != user_id {
-                    pdu.remove_transaction_id()?;
-                }
-                Ok((pdu_id, pdu))
-            }))
-    }
-
-    /// Replace a PDU with the redacted form.
-    #[tracing::instrument(skip(self, reason))]
-    pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> {
-        if let Some(pdu_id) = self.get_pdu_id(event_id)? {
-            let mut pdu = self
-                .get_pdu_from_id(&pdu_id)?
- .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact(reason)?; - self.replace_pdu(&pdu_id, &pdu)?; - } - // If event does not exist, just noop - Ok(()) - } - - /// Update current membership data. - #[tracing::instrument(skip(self, last_state, db))] - pub fn update_membership( - &self, - room_id: &RoomId, - user_id: &UserId, - membership: MembershipState, - sender: &UserId, - last_state: Option>>, - db: &Database, - update_joined_count: bool, - ) -> Result<()> { - // Keep track what remote users exist by adding them as "deactivated" users - if user_id.server_name() != db.globals.server_name() { - db.users.create(user_id, None)?; - // TODO: displayname, avatar url - } - - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(user_id.server_name().as_bytes()); - - let mut serverroom_id = user_id.server_name().as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - match &membership { - MembershipState::Join => { - // Check if the user never joined this room - if !self.once_joined(user_id, room_id)? { - // Add the user ID to the join list then - self.roomuseroncejoinedids.insert(&userroom_id, &[])?; - - // Check if the room has a predecessor - if let Some(predecessor) = self - .room_state_get(room_id, &StateEventType::RoomCreate, "")? - .and_then(|create| serde_json::from_str(create.content.get()).ok()) - .and_then(|content: RoomCreateEventContent| content.predecessor) - { - // Copy user settings from predecessor to the current room: - // - Push rules - // - // TODO: finish this once push rules are implemented. - // - // let mut push_rules_event_content: PushRulesEvent = account_data - // .get( - // None, - // user_id, - // EventType::PushRules, - // )?; - // - // NOTE: find where `predecessor.room_id` match - // and update to `room_id`. - // - // account_data - // .update( - // None, - // user_id, - // EventType::PushRules, - // &push_rules_event_content, - // globals, - // ) - // .ok(); - - // Copy old tags to new room - if let Some(tag_event) = db.account_data.get::( - Some(&predecessor.room_id), - user_id, - RoomAccountDataEventType::Tag, - )? { - db.account_data - .update( - Some(room_id), - user_id, - RoomAccountDataEventType::Tag, - &tag_event, - &db.globals, - ) - .ok(); - }; - - // Copy direct chat flag - if let Some(mut direct_event) = db.account_data.get::( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - )? 
{ - let mut room_ids_updated = false; - - for room_ids in direct_event.content.0.values_mut() { - if room_ids.iter().any(|r| r == &predecessor.room_id) { - room_ids.push(room_id.to_owned()); - room_ids_updated = true; - } - } - - if room_ids_updated { - db.account_data.update( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - &direct_event, - &db.globals, - )?; - } - }; - } - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_joined.insert(&userroom_id, &[])?; - self.roomuserid_joined.insert(&roomuser_id, &[])?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Invite => { - // We want to know if the sender is ignored by the receiver - let is_ignored = db - .account_data - .get::( - None, // Ignored users are in global account data - user_id, // Receiver - GlobalAccountDataEventType::IgnoredUserList - .to_string() - .into(), - )? - .map_or(false, |ignored| { - ignored - .content - .ignored_users - .iter() - .any(|user| user == sender) - }); - - if is_ignored { - return Ok(()); - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_invitestate.insert( - &userroom_id, - &serde_json::to_vec(&last_state.unwrap_or_default()) - .expect("state to bytes always works"), - )?; - self.roomuserid_invitecount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Leave | MembershipState::Ban => { - if update_joined_count - && self - .room_members(room_id) - .chain(self.room_members_invited(room_id)) - .filter_map(|r| r.ok()) - .all(|u| u.server_name() != user_id.server_name()) - { - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - self.userroomid_leftstate.insert( - &userroom_id, - &serde_json::to_vec(&Vec::>::new()).unwrap(), - )?; // TODO - self.roomuserid_leftcount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - } - _ => {} - } - - if update_joined_count { - self.update_joined_count(room_id, db)?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn update_joined_count(&self, room_id: &RoomId, db: &Database) -> Result<()> { - let mut joinedcount = 0_u64; - let mut invitedcount = 0_u64; - let mut joined_servers = HashSet::new(); - let mut real_users = HashSet::new(); - - for joined in self.room_members(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(joined.server_name().to_owned()); - if joined.server_name() == db.globals.server_name() - && !db.users.is_deactivated(&joined).unwrap_or(true) - { - real_users.insert(joined); - } - joinedcount += 1; - } - - for invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(invited.server_name().to_owned()); - invitedcount += 1; - } - - self.roomid_joinedcount - 
.insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?; - - self.roomid_invitedcount - .insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?; - - self.our_real_users_cache - .write() - .unwrap() - .insert(room_id.to_owned(), Arc::new(real_users)); - - for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { - if !joined_servers.remove(&old_joined_server) { - // Server not in room anymore - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(old_joined_server.as_bytes()); - - let mut serverroom_id = old_joined_server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - } - - // Now only new servers are in joined_servers anymore - for server in joined_servers { - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(server.as_bytes()); - - let mut serverroom_id = server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - - self.appservice_in_room_cache - .write() - .unwrap() - .remove(room_id); - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn get_our_real_users( - &self, - room_id: &RoomId, - db: &Database, - ) -> Result>>> { - let maybe = self - .our_real_users_cache - .read() - .unwrap() - .get(room_id) - .cloned(); - if let Some(users) = maybe { - Ok(users) - } else { - self.update_joined_count(room_id, db)?; - Ok(Arc::clone( - self.our_real_users_cache - .read() - .unwrap() - .get(room_id) - .unwrap(), - )) - } - } - - #[tracing::instrument(skip(self, room_id, appservice, db))] - pub fn appservice_in_room( - &self, - room_id: &RoomId, - appservice: &(String, serde_yaml::Value), - db: &Database, - ) -> Result { - let maybe = self - .appservice_in_room_cache - .read() - .unwrap() - .get(room_id) - .and_then(|map| map.get(&appservice.0)) - .copied(); - - if let Some(b) = maybe { - Ok(b) - } else if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - - let bridge_user_id = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }); - - let in_room = bridge_user_id - .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) - || self.room_members(room_id).any(|userid| { - userid.map_or(false, |userid| { - users.iter().any(|r| r.is_match(userid.as_str())) - }) - }); - - self.appservice_in_room_cache - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default() - .insert(appservice.0.clone(), in_room); - - Ok(in_room) - } else { - Ok(false) - } - } - - // Make a user leave all their joined rooms - #[tracing::instrument(skip(self, db))] - pub async fn leave_all_rooms(&self, user_id: &UserId, db: &Database) -> Result<()> { - let all_rooms = db - .rooms - .rooms_joined(user_id) - .chain(db.rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) - .collect::>(); - - for room_id in all_rooms { - let room_id = match room_id { - Ok(room_id) => room_id, - 
Err(_) => continue, - }; - - let _ = self.leave_room(user_id, &room_id, db).await; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - pub async fn leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - // Ask a remote server if we don't have this room - if !self.exists(room_id)? && room_id.server_name() != db.globals.server_name() { - if let Err(e) = self.remote_leave_room(user_id, room_id, db).await { - warn!("Failed to leave room {} remotely: {}", user_id, e); - // Don't tell the client about this error - } - - let last_state = self - .invite_state(user_id, room_id)? - .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?; - - // We always drop the invite, we can't rely on other servers - self.update_membership( - room_id, - user_id, - MembershipState::Leave, - user_id, - last_state, - db, - true, - )?; - } else { - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - let mut event: RoomMemberEventContent = serde_json::from_str( - self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot leave a room you are not a member of.", - ))? - .content - .get(), - ) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = MembershipState::Leave; - - self.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - user_id, - room_id, - db, - &state_lock, - )?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - async fn remote_leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - let mut make_leave_response_and_server = Err(Error::BadServerResponse( - "No server available to assist in leaving.", - )); - - let invite_state = db - .rooms - .invite_state(user_id, room_id)? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "User is not invited.", - ))?; - - let servers: HashSet<_> = invite_state - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); - - for remote_server in servers { - let make_leave_response = db - .sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, - ) - .await; - - make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); - - if make_leave_response_and_server.is_ok() { - break; - } - } - - let (make_leave_response, remote_server) = make_leave_response_and_server?; - - let room_version_id = match make_leave_response.room_version { - Some(version) if self.is_supported_version(&db, &version) => version, - _ => return Err(Error::BadServerResponse("Room version is not supported")), - }; - - let mut leave_event_stub = - serde_json::from_str::(make_leave_response.event.get()).map_err( - |_| Error::BadServerResponse("Invalid make_leave event json received from server."), - )?; - - // TODO: Is origin needed? 
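Worth noting about `remote_leave_room` above: when the local server no longer participates in a room, the candidate homeservers for the `make_leave` handshake are derived from the senders of the stripped invite-state events, since each sender's server should still be in the room. A std-only sketch of that extraction (user-ID parsing is simplified here; the removed code goes through ruma's `UserId`):

    use std::collections::HashSet;

    /// Pull the server name out of a Matrix user ID such as "@alice:example.org".
    fn server_of(user_id: &str) -> Option<&str> {
        user_id.strip_prefix('@')?.split_once(':').map(|(_, s)| s)
    }

    /// Collect candidate homeservers from the senders of stripped invite-state events.
    fn candidate_servers<'a>(invite_state_senders: &'a [&'a str]) -> HashSet<&'a str> {
        invite_state_senders
            .iter()
            .filter_map(|sender| server_of(sender))
            .collect()
    }

    fn main() {
        let senders = [
            "@alice:remote.example",
            "@bob:remote.example",
            "@carol:other.example",
        ];
        let servers = candidate_servers(&senders);
        assert!(servers.contains("remote.example"));
        assert!(servers.contains("other.example"));
        assert_eq!(servers.len(), 2);
    }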
- leave_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - leave_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms - leave_event_stub.remove("event_id"); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut leave_event_stub, - &room_version_id, - ) - .expect("event is valid, we just created it"); - - // Generate event id - let event_id = EventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - // Add event_id back - leave_event_stub.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - - // It has enough fields to be called a proper event now - let leave_event = leave_event_stub; - - db.sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::create_leave_event::v2::Request { - room_id, - event_id: &event_id, - pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), - }, - ) - .await?; - - Ok(()) - } - - /// Makes a user forget a room. - #[tracing::instrument(skip(self))] - pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - - Ok(()) - } - - #[tracing::instrument(skip(self, globals))] - pub fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId>, - globals: &super::globals::Globals, - ) -> Result<()> { - if let Some(room_id) = room_id { - // New alias - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - } else { - // room_id=None means remove alias - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result>> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_aliases<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? - .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { - if public { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } else { - self.publicroomids.remove(room_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn iter_ids(&self) -> impl Iterator>> + '_ { - self.roomid_shortroomid.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } #[tracing::instrument(skip(self))] pub fn search_pdus<'a>( @@ -2945,559 +48,3 @@ impl Rooms { })) } - #[tracing::instrument(skip(self))] - pub fn get_shared_rooms<'a>( - &'a self, - users: Vec>, - ) -> Result>> + 'a> { - let iterators = users.into_iter().map(move |user_id| { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_joined - .scan_prefix(prefix) - .map(|(key, _)| { - let roomid_index = key - .iter() - .enumerate() - .find(|(_, &b)| b == 0xff) - .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? - .0 - + 1; // +1 because the room id starts AFTER the separator - - let room_id = key[roomid_index..].to_vec(); - - Ok::<_, Error>(room_id) - }) - .filter_map(|r| r.ok()) - }); - - // We use the default compare function because keys are sorted correctly (not reversed) - Ok(utils::common_elements(iterators, Ord::cmp) - .expect("users is not empty") - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { - Error::bad_database("Invalid RoomId bytes in userroomid_joined") - })?) - .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - })) - } - - /// Returns an iterator of all servers participating in this room. 
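The removed `get_shared_rooms` above intersects one sorted stream of room IDs per user (each produced by a prefix scan of `userroomid_joined`, so keys arrive in ascending order) through `utils::common_elements`. The real helper merges the iterators lazily; a simpler eager sketch of the same idea:

    /// Intersect several ascending, duplicate-free sorted lists,
    /// mirroring the idea behind intersecting per-user room lists.
    fn common_elements(mut lists: Vec<Vec<String>>) -> Vec<String> {
        let first = match lists.pop() {
            Some(f) => f,
            None => return Vec::new(),
        };
        first
            .into_iter()
            .filter(|item| lists.iter().all(|other| other.binary_search(item).is_ok()))
            .collect()
    }

    fn main() {
        let alice = vec!["!a:x".to_owned(), "!b:x".to_owned(), "!c:x".to_owned()];
        let bob = vec!["!b:x".to_owned(), "!c:x".to_owned(), "!d:x".to_owned()];
        assert_eq!(common_elements(vec![alice, bob]), vec!["!b:x", "!c:x"]);
    }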
- #[tracing::instrument(skip(self))] - pub fn room_servers<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomserverids.scan_prefix(prefix).map(|(key, _)| { - ServerName::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Server name in roomserverids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.serverroomids.get(&key).map(|o| o.is_some()) - } - - /// Returns an iterator of all rooms a server participates in (as far as we know). - #[tracing::instrument(skip(self))] - pub fn server_rooms<'a>( - &'a self, - server: &ServerName, - ) -> impl Iterator>> + 'a { - let mut prefix = server.as_bytes().to_vec(); - prefix.push(0xff); - - self.serverroomids.scan_prefix(prefix).map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) - }) - } - - /// Returns an iterator over all joined members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { - self.roomid_joinedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { - self.roomid_invitedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - /// Returns an iterator over all User IDs who ever joined a room. - #[tracing::instrument(skip(self))] - pub fn room_useroncejoined<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuseroncejoinedids - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in room_useroncejoined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) - }) - } - - /// Returns an iterator over all invited members of a room. 
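A storage detail shared by the counters above (`roomid_joinedcount`, `roomid_invitedcount`, the per-user invite/left counts) and by the timeline keys themselves: u64 values are always written as 8 big-endian bytes, so lexicographic byte order equals numeric order, and `pdu_id` is simply `shortroomid ++ count` in that encoding, which is what lets `pdus_since` seek to a count within a room. A std-only sketch of the round trip (helper names are illustrative):

    use std::convert::TryInto;

    /// pdu_id = shortroomid (u64, big-endian) ++ count (u64, big-endian).
    fn make_pdu_id(shortroomid: u64, count: u64) -> Vec<u8> {
        let mut pdu_id = shortroomid.to_be_bytes().to_vec();
        pdu_id.extend_from_slice(&count.to_be_bytes());
        pdu_id
    }

    /// Decode a stored 8-byte counter, analogous to utils::u64_from_bytes.
    fn decode_u64(bytes: &[u8]) -> Result<u64, &'static str> {
        let arr: [u8; 8] = bytes.try_into().map_err(|_| "expected exactly 8 bytes")?;
        Ok(u64::from_be_bytes(arr))
    }

    fn main() {
        // Later counts sort after earlier counts in the same room.
        assert!(make_pdu_id(42, 7) < make_pdu_id(42, 8));
        // Counters round-trip through their big-endian byte form.
        assert_eq!(decode_u64(&3u64.to_be_bytes()), Ok(3));
        assert!(decode_u64(&[1, 2, 3]).is_err()); // corrupt value -> database error
    }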
- #[tracing::instrument(skip(self))] - pub fn room_members_invited<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_invitecount - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_invitecount - .get(&key)? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid invitecount in db.") - })?)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_leftcount - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid leftcount in db.")) - }) - .transpose() - } - - pub fn is_disabled(&self, room_id: &RoomId) -> Result { - Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) - } - - /// Returns an iterator over all rooms this user joined. - #[tracing::instrument(skip(self))] - pub fn rooms_joined<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator>> + 'a { - self.userroomid_joined - .scan_prefix(user_id.as_bytes().to_vec()) - .map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) - }) - } - - /// Returns an iterator over all rooms a user was invited to. - #[tracing::instrument(skip(self))] - pub fn rooms_invited<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_invitestate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn invite_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_invitestate - .get(&key)? 
- .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok(state) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn left_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_leftstate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok(state) - }) - .transpose() - } - - /// Returns an iterator over all rooms a user left. - #[tracing::instrument(skip(self))] - pub fn rooms_left<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_leftstate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( - &'a self, - key: &[u64], - ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); - } - - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? 
- .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - }) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } - } - - Ok(None) - } - - #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db - if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - )?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_mark_sent( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - lazy_load: HashSet>, - count: u64, - ) { - self.lazy_load_waiting.lock().unwrap().insert( - ( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - count, - ), - lazy_load, - ); - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } - - /// Returns the room's version. 
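The auth-chain cache removed above persists an entire set of short event IDs as a single value: the u64s are flattened into one big-endian byte blob on write and recovered with `chunks_exact` on read. A std-only sketch of that round trip:

    use std::collections::HashSet;
    use std::convert::TryInto;
    use std::mem::size_of;

    /// Flatten a set of short event IDs into one contiguous value, as cache_auth_chain does.
    fn serialize_chain(chain: &HashSet<u64>) -> Vec<u8> {
        chain.iter().flat_map(|id| id.to_be_bytes()).collect()
    }

    /// Rebuild the set from the stored blob, as the read path does with chunks_exact.
    fn deserialize_chain(bytes: &[u8]) -> HashSet<u64> {
        bytes
            .chunks_exact(size_of::<u64>())
            .map(|chunk| u64::from_be_bytes(chunk.try_into().expect("chunk length is 8")))
            .collect()
    }

    fn main() {
        let chain: HashSet<u64> = HashSet::from([3, 5, 8]);
        let blob = serialize_chain(&chain);
        assert_eq!(blob.len(), 3 * size_of::<u64>());
        assert_eq!(deserialize_chain(&blob), chain);
    }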
- #[tracing::instrument(skip(self))] - pub fn get_room_version(&self, room_id: &RoomId) -> Result { - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - let room_version = create_event_content - .map(|create_event| create_event.room_version) - .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; - Ok(room_version) - } -} diff --git a/src/service/rooms/short.rs b/src/service/rooms/short.rs index 4ad815e3..63e8b713 100644 --- a/src/service/rooms/short.rs +++ b/src/service/rooms/short.rs @@ -1,741 +1,3 @@ -mod edus; - -pub use edus::RoomEdus; - -use crate::{ - pdu::{EventHash, PduBuilder}, - utils, Database, Error, PduEvent, Result, -}; -use lru_cache::LruCache; -use regex::Regex; -use ring::digest; -use ruma::{ - api::{client::error::ErrorKind, federation}, - events::{ - direct::DirectEvent, - ignored_user_list::IgnoredUserListEvent, - push_rules::PushRulesEvent, - room::{ - create::RoomCreateEventContent, - member::{MembershipState, RoomMemberEventContent}, - power_levels::RoomPowerLevelsEventContent, - }, - tag::TagEvent, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, RoomEventType, StateEventType, - }, - push::{Action, Ruleset, Tweak}, - serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res::{self, RoomVersion, StateMap}, - uint, DeviceId, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, -}; -use serde::Deserialize; -use serde_json::value::to_raw_value; -use std::{ - borrow::Cow, - collections::{hash_map, BTreeMap, HashMap, HashSet}, - fmt::Debug, - iter, - mem::size_of, - sync::{Arc, Mutex, RwLock}, -}; -use tokio::sync::MutexGuard; -use tracing::{error, warn}; - -use super::{abstraction::Tree, pusher}; - -/// The unique identifier of each state group. -/// -/// This is created when a state group is added to the database by -/// hashing the entire state. -pub type StateHashId = Vec; -pub type CompressedStateEvent = [u8; 2 * size_of::()]; - -pub struct Rooms { - pub edus: RoomEdus, - pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count - pub(super) eventid_pduid: Arc, - pub(super) roomid_pduleaves: Arc, - pub(super) alias_roomid: Arc, - pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count - pub(super) publicroomids: Arc, - - pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount - - /// Participating servers in a room. 
- pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName - pub(super) serverroomids: Arc, // ServerRoomId = ServerName + RoomId - - pub(super) userroomid_joined: Arc, - pub(super) roomuserid_joined: Arc, - pub(super) roomid_joinedcount: Arc, - pub(super) roomid_invitedcount: Arc, - pub(super) roomuseroncejoinedids: Arc, - pub(super) userroomid_invitestate: Arc, // InviteState = Vec> - pub(super) roomuserid_invitecount: Arc, // InviteCount = Count - pub(super) userroomid_leftstate: Arc, - pub(super) roomuserid_leftcount: Arc, - - pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled - - pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId - - pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 - pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 - - /// Remember the current state hash of a room. - pub(super) roomid_shortstatehash: Arc, - pub(super) roomsynctoken_shortstatehash: Arc, - /// Remember the state hash at events in the past. - pub(super) shorteventid_shortstatehash: Arc, - /// StateKey = EventType + StateKey, ShortStateKey = Count - pub(super) statekey_shortstatekey: Arc, - pub(super) shortstatekey_statekey: Arc, - - pub(super) roomid_shortroomid: Arc, - - pub(super) shorteventid_eventid: Arc, - pub(super) eventid_shorteventid: Arc, - - pub(super) statehash_shortstatehash: Arc, - pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) - - pub(super) shorteventid_authchain: Arc, - - /// RoomId + EventId -> outlier PDU. - /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) eventid_outlierpdu: Arc, - pub(super) softfailedeventids: Arc, - - /// RoomId + EventId -> Parent PDU EventId. - pub(super) referencedevents: Arc, - - pub(super) pdu_cache: Mutex, Arc>>, - pub(super) shorteventid_cache: Mutex>>, - pub(super) auth_chain_cache: Mutex, Arc>>>, - pub(super) eventidshort_cache: Mutex, u64>>, - pub(super) statekeyshort_cache: Mutex>, - pub(super) shortstatekey_cache: Mutex>, - pub(super) our_real_users_cache: RwLock, Arc>>>>, - pub(super) appservice_in_room_cache: RwLock, HashMap>>, - pub(super) lazy_load_waiting: - Mutex, Box, Box, u64), HashSet>>>, - pub(super) stateinfo_cache: Mutex< - LruCache< - u64, - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - >, - >, - pub(super) lasttimelinecount_cache: Mutex, u64>>, -} - -impl Rooms { - /// Returns true if a given room version is supported - #[tracing::instrument(skip(self, db))] - pub fn is_supported_version(&self, db: &Database, room_version: &RoomVersionId) -> bool { - db.globals.supported_room_versions().contains(room_version) - } - - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? 
- .pop() - .expect("there is always one layer") - .1; - let mut result = BTreeMap::new(); - let mut i = 0; - for compressed in full_state.into_iter() { - let parsed = self.parse_compressed_state_event(compressed)?; - result.insert(parsed.0, parsed.1); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - Ok(result) - } - - #[tracing::instrument(skip(self))] - pub async fn state_full( - &self, - shortstatehash: u64, - ) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - let mut result = HashMap::new(); - let mut i = 0; - for compressed in full_state { - let (_, eventid) = self.parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.get_pdu(&eventid)? { - result.insert( - ( - pdu.kind.to_string().into(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - ); - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get_id( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { - Some(s) => s, - None => return Ok(None), - }; - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .into_iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - self.parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) - } - - /// Returns the state hash for this pdu. - pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.eventid_shorteventid - .get(event_id.as_bytes())? - .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) - }) - .transpose() - }) - } - - /// Returns the last state hash key added to the db for the given room. - #[tracing::instrument(skip(self))] - pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - /// This fetches auth events from the current state. - #[tracing::instrument(skip(self))] - pub fn get_auth_events( - &self, - room_id: &RoomId, - kind: &RoomEventType, - sender: &UserId, - state_key: Option<&str>, - content: &serde_json::value::RawValue, - ) -> Result>> { - let shortstatehash = - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - current_shortstatehash - } else { - return Ok(HashMap::new()); - }; - - let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) - .expect("content is a valid JSON object"); - - let mut sauthevents = auth_events - .into_iter() - .filter_map(|(event_type, state_key)| { - self.get_shortstatekey(&event_type.to_string().into(), &state_key) - .ok() - .flatten() - .map(|s| (s, (event_type, state_key))) - }) - .collect::>(); - - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - Ok(full_state - .into_iter() - .filter_map(|compressed| self.parse_compressed_state_event(compressed).ok()) - .filter_map(|(shortstatekey, event_id)| { - sauthevents.remove(&shortstatekey).map(|k| (k, event_id)) - }) - .filter_map(|(k, event_id)| self.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu))) - .collect()) - } - - /// Generate a new StateHash. - /// - /// A unique hash made from hashing all PDU ids of the state joined with 0xff. - fn calculate_hash(&self, bytes_list: &[&[u8]]) -> StateHashId { - // We only hash the pdu's event ids, not the whole pdu - let bytes = bytes_list.join(&0xff); - let hash = digest::digest(&digest::SHA256, &bytes); - hash.as_ref().into() - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. - self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() - } - - /// Force the creation of a new StateHash and insert it into the db. - /// - /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. 
- #[tracing::instrument(skip(self, new_state_ids_compressed, db))] - pub fn force_state( - &self, - room_id: &RoomId, - new_state_ids_compressed: HashSet, - db: &Database, - ) -> Result<()> { - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - for event_id in statediffnew.into_iter().filter_map(|new| { - self.parse_compressed_state_event(new) - .ok() - .map(|(_, id)| id) - }) { - let pdu = match self.get_pdu_json(&event_id)? { - Some(pdu) => pdu, - None => continue, - }; - - if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { - continue; - } - - let pdu: PduEvent = match serde_json::from_str( - &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), - ) { - Ok(pdu) => pdu, - Err(_) => continue, - }; - - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let membership = match serde_json::from_str::(pdu.content.get()) { - Ok(e) => e.membership, - Err(_) => continue, - }; - - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, - }; - - let user_id = match UserId::parse(state_key) { - Ok(id) => id, - Err(_) => continue, - }; - - self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; - } - - self.update_joined_count(room_id, db)?; - - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. - #[tracing::instrument(skip(self))] - pub fn load_shortstatehash_info( - &self, - shortstatehash: u64, - ) -> Result< - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - > { - if let Some(r) = self - .stateinfo_cache - .lock() - .unwrap() - .get_mut(&shortstatehash) - { - return Ok(r.clone()); - } - - let value = self - .shortstatehash_statediff - .get(&shortstatehash.to_be_bytes())? 
- .ok_or_else(|| Error::bad_database("State hash does not exist"))?; - let parent = - utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - - let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); - - let mut i = size_of::(); - while let Some(v) = value.get(i..i + 2 * size_of::()) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i += size_of::(); - continue; - } - if add_mode { - added.insert(v.try_into().expect("we checked the size above")); - } else { - removed.insert(v.try_into().expect("we checked the size above")); - } - i += 2 * size_of::(); - } - - if parent != 0_u64 { - let mut response = self.load_shortstatehash_info(parent)?; - let mut state = response.last().unwrap().1.clone(); - state.extend(added.iter().copied()); - for r in &removed { - state.remove(r); - } - - response.push((shortstatehash, state, added, removed)); - - Ok(response) - } else { - let response = vec![(shortstatehash, added.clone(), added, removed)]; - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); - Ok(response) - } - } - - pub fn compress_state_event( - &self, - shortstatekey: u64, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - let mut v = shortstatekey.to_be_bytes().to_vec(); - v.extend_from_slice( - &self - .get_or_create_shorteventid(event_id, globals)? - .to_be_bytes(), - ); - Ok(v.try_into().expect("we checked the size above")) - } - - /// Returns shortstatekey, event id - pub fn parse_compressed_state_event( - &self, - compressed_event: CompressedStateEvent, - ) -> Result<(u64, Arc)> { - Ok(( - utils::u64_from_bytes(&compressed_event[0..size_of::()]) - .expect("bytes have right length"), - self.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]) - .expect("bytes have right length"), - )?, - )) - } - - /// Creates a new shortstatehash that often is just a diff to an already existing - /// shortstatehash and therefore very efficient. - /// - /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer - /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 - /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's - /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. - /// - /// * `shortstatehash` - Shortstatehash of this state - /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid - /// * `statediffremoved` - Removed from base. 
Each vec is shortstatekey+shorteventid - /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer - /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer - #[tracing::instrument(skip( - self, - statediffnew, - statediffremoved, - diff_to_sibling, - parent_states - ))] - pub fn save_state_from_diff( - &self, - shortstatehash: u64, - statediffnew: HashSet, - statediffremoved: HashSet, - diff_to_sibling: usize, - mut parent_states: Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - ) -> Result<()> { - let diffsum = statediffnew.len() + statediffremoved.len(); - - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().unwrap(); - - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - - return Ok(()); - } - - if parent_states.is_empty() { - // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - - return Ok(()); - }; - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. We replace a layer above - - let parent = parent_states.pop().unwrap(); - let parent_diff = parent.2.len() + parent.3.len(); - - if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { - // Diff too big, we replace above layer(s) - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. 
We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.to_be_bytes().to_vec(); - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed[..]); - } - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - } - - Ok(()) - } - - /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash( - &self, - state_hash: &StateHashId, - globals: &super::globals::Globals, - ) -> Result<(u64, bool)> { - Ok(match self.statehash_shortstatehash.get(state_hash)? { - Some(shortstatehash) => ( - utils::u64_from_bytes(&shortstatehash) - .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, - true, - ), - None => { - let shortstatehash = globals.next_count()?; - self.statehash_shortstatehash - .insert(state_hash, &shortstatehash.to_be_bytes())?; - (shortstatehash, false) - } - }) - } pub fn get_or_create_shorteventid( &self, @@ -767,16 +29,6 @@ impl Rooms { Ok(short) } - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { - self.roomid_shortroomid - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) - }) - .transpose() - } - pub fn get_shortstatekey( &self, event_type: &StateEventType, @@ -814,23 +66,6 @@ impl Rooms { Ok(short) } - pub fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - globals: &super::globals::Globals, - ) -> Result { - Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? { - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, - None => { - let short = globals.next_count()?; - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes())?; - short - } - }) - } - pub fn get_or_create_shortstatekey( &self, event_type: &StateEventType, @@ -940,2564 +175,24 @@ impl Rooms { Ok(result) } - /// Returns the full room state. - #[tracing::instrument(skip(self))] - pub async fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns the `count` of this pdu's id. 
- pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) - } - - /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - - /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_non_outlier_pdu_json( + /// Returns (shortstatehash, already_existed) + fn get_or_create_shortstatehash( &self, - event_id: &EventId, - ) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu's id. - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() + state_hash: &StateHashId, + globals: &super::globals::Globals, + ) -> Result<(u64, bool)> { + Ok(match self.statehash_shortstatehash.get(state_hash)? { + Some(shortstatehash) => ( + utils::u64_from_bytes(&shortstatehash) + .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, + true, + ), + None => { + let shortstatehash = globals.next_count()?; + self.statehash_shortstatehash + .insert(state_hash, &shortstatehash.to_be_bytes())?; + (shortstatehash, false) + } + }) } - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. 
- pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { - return Ok(Some(Arc::clone(p))); - } - - if let Some(pdu) = self - .eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - .map(Arc::new) - }) - .transpose()? - { - self.pdu_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), Arc::clone(&pdu)); - Ok(Some(pdu)) - } else { - Ok(None) - } - } - - /// Returns the pdu. - /// - /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Returns the pdu as a `BTreeMap`. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Removes a pdu and creates a new one with the same id. - #[tracing::instrument(skip(self))] - fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - if self.pduid_pdu.get(pdu_id)?.is_some() { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), - )?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "PDU does not exist.", - )) - } - } - - /// Returns the leaf pdus of a room. - #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - #[tracing::instrument(skip(self, room_id, event_ids))] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - /// Replace the leaves of a room. - /// - /// The provided `event_ids` become the new leaves, this allows a room to have multiple - /// `prev_events`. 
- #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves<'a>( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - /// Returns the pdu from the outlier tree. - pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - /// Append the PDU as an outlier. - /// - /// Any event given to this will be processed (state-res) on another thread. - #[tracing::instrument(skip(self, pdu))] - pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } - - #[tracing::instrument(skip(self))] - pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } - - /// Creates a new persisted data unit and adds it to a room. - /// - /// By this point the incoming event should be fully authenticated, no auth happens - /// in `append_pdu`. - /// - /// Returns pdu id - #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] - pub fn append_pdu<'a>( - &self, - pdu: &PduEvent, - mut pdu_json: CanonicalJsonObject, - leaves: impl IntoIterator + Debug, - db: &Database, - ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); - - // Make unsigned fields correct. This is not properly documented in the spec, but state - // events need to have previous content in the unsigned field, so clients can easily - // interpret things like membership changes - if let Some(state_key) = &pdu.state_key { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) - { - if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) - .unwrap() - { - unsigned.insert( - "prev_content".to_owned(), - CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()) - .expect("event is valid, we just created it"), - ), - ); - } - } - } else { - error!("Invalid unsigned type in pdu."); - } - } - - // We must keep track of all events that have been referenced. 
- self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - self.replace_pdu_leaves(&pdu.room_id, leaves)?; - - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(pdu.room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - - let count1 = db.globals.next_count()?; - // Mark as read first so the sending client doesn't get a notification even if appending - // fails - self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; - self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; - - let count2 = db.globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&count2.to_be_bytes()); - - // There's a brief moment of time here where the count is updated but the pdu does not - // exist. This could theoretically lead to dropped pdus, but it's extremely rare - // - // Update: We fixed this using insert_lock - - self.pduid_pdu.insert( - &pdu_id, - &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - self.lasttimelinecount_cache - .lock() - .unwrap() - .insert(pdu.room_id.clone(), count2); - - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; - - drop(insert_lock); - - // See if the event matches any known pushers - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - let sync_pdu = pdu.to_sync_room_event(); - - let mut notifies = Vec::new(); - let mut highlights = Vec::new(); - - for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { - // Don't notify the user of their own events - if user == &pdu.sender { - continue; - } - - let rules_for_user = db - .account_data - .get( - None, - user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? - .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); - - let mut highlight = false; - let mut notify = false; - - for action in pusher::get_actions( - user, - &rules_for_user, - &power_levels, - &sync_pdu, - &pdu.room_id, - db, - )? 
{ - match action { - Action::DontNotify => notify = false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => notify = true, - Action::SetTweak(Tweak::Highlight(true)) => { - highlight = true; - } - _ => {} - }; - } - - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(pdu.room_id.as_bytes()); - - if notify { - notifies.push(userroom_id.clone()); - } - - if highlight { - highlights.push(userroom_id); - } - - for senderkey in db.pusher.get_pusher_senderkeys(user) { - db.sending.send_push_pdu(&*pdu_id, senderkey)?; - } - } - - self.userroomid_notificationcount - .increment_batch(&mut notifies.into_iter())?; - self.userroomid_highlightcount - .increment_batch(&mut highlights.into_iter())?; - - match pdu.kind { - RoomEventType::RoomRedaction => { - if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(redact_id, pdu)?; - } - } - RoomEventType::RoomMember => { - if let Some(state_key) = &pdu.state_key { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - // if the state_key fails - let target_user_id = UserId::parse(state_key.clone()) - .expect("This state_key was previously validated"); - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - let invite_state = match content.membership { - MembershipState::Invite => { - let state = self.calculate_invite_state(pdu)?; - Some(state) - } - _ => None, - }; - - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( - &pdu.room_id, - &target_user_id, - content.membership, - &pdu.sender, - invite_state, - db, - true, - )?; - } - } - RoomEventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody<'a> { - #[serde(borrow)] - body: Option>, - } - - let content = serde_json::from_str::>(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - let mut batch = body - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= 50) - .map(str::to_lowercase) - .map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xff); - key.extend_from_slice(&pdu_id); - (key, Vec::new()) - }); - - self.tokenids.insert_batch(&mut batch)?; - - let admin_room = self.id_from_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )?; - let server_user = format!("@conduit:{}", db.globals.server_name()); - - let to_conduit = body.starts_with(&format!("{}: ", server_user)); - - // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let from_conduit = - pdu.sender == server_user && db.globals.emergency_password().is_none(); - - if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - db.admin.process_message(body.to_string()); - } - } - } - _ => {} - } - - Ok(pdu_id) - } - - #[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - match self - .lasttimelinecount_cache - .lock() - .unwrap() - .entry(room_id.to_owned()) - { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - 
.pdus_until(&sender_user, &room_id, u64::MAX)? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .map(|(pduid, _)| self.pdu_count(&pduid)) - .next() - { - Ok(*v.insert(last_count?)) - } else { - Ok(0) - } - } - hash_map::Entry::Occupied(o) => Ok(*o.get()), - } - } - - #[tracing::instrument(skip(self))] - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - self.userroomid_highlightcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid notification count in db.")) - }) - .unwrap_or(Ok(0)) - } - - #[tracing::instrument(skip(self))] - pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_highlightcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid highlight count in db.")) - }) - .unwrap_or(Ok(0)) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, state_ids_compressed, globals))] - pub fn set_event_state( - &self, - event_id: &EventId, - room_id: &RoomId, - state_ids_compressed: HashSet, - globals: &super::globals::Globals, - ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &state_ids_compressed - .iter() - .map(|s| &s[..]) - .collect::>(), - ); - - let (shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, globals)?; - - if !already_existed { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (state_ids_compressed, HashSet::new()) - }; - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 1_000_000, // high number because no state will be based on this one - states_parents, - )?; - } - - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. 
- /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, new_pdu, globals))] - pub fn append_to_state( - &self, - new_pdu: &PduEvent, - globals: &super::globals::Globals, - ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; - - if let Some(p) = previous_shortstatehash { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; - } - - if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let shortstatekey = self.get_or_create_shortstatekey( - &new_pdu.kind.to_string().into(), - state_key, - globals, - )?; - - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; - - let replaces = states_parents - .last() - .map(|info| { - info.1 - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); - - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } - - // TODO: statehash with deterministic inputs - let shortstatehash = globals.next_count()?; - - let mut statediffnew = HashSet::new(); - statediffnew.insert(new); - - let mut statediffremoved = HashSet::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) - } - } - - #[tracing::instrument(skip(self, invite_event))] - pub fn calculate_invite_state( - &self, - invite_event: &PduEvent, - ) -> Result>> { - let mut state = Vec::new(); - // Add recommended events - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomCanonicalAlias, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomMember, - invite_event.sender.as_str(), - )? 
{ - state.push(e.to_stripped_state_event()); - } - - state.push(invite_event.to_stripped_state_event()); - Ok(state) - } - - #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - pub fn associate_token_shortstatehash( - &self, - room_id: &RoomId, - token: u64, - shortstatehash: u64, - ) -> Result<()> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .insert(&key, &shortstatehash.to_be_bytes()) - } - - pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") - }) - }) - .transpose() - } - - /// Creates a new persisted data unit and adds it to a room. - #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex - ) -> Result> { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - } = pdu_builder; - - let prev_events = self - .get_pdu_leaves(room_id)? - .into_iter() - .take(20) - .collect::>(); - - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - - let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = unsigned.unwrap_or_default(); - if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - self.room_state_get(room_id, &event_type.to_string().into(), state_key)? 
- { - unsigned.insert( - "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), - ); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), - ); - } - } - - let mut pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key, - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_ref().to_owned()), - ); - - match ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) { - Ok(_) => {} - Err(e) => { - return match e { - ruma::signatures::Error::PduSize => Err(Error::BadRequest( - ErrorKind::TooLarge, - "Message is too long", - )), - _ => Err(Error::BadRequest( - ErrorKind::Unknown, - "Signing event failed", - )), - } - } - } - - // Generate event id - pdu.event_id = EventId::parse_arc(format!( - "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - pdu_json.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), - ); - - // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; - - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. 
- let statehashid = self.append_to_state(&pdu, &db.globals)?; - - let pdu_id = self.append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - iter::once(&*pdu.event_id), - db, - )?; - - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; - - let mut servers: HashSet> = - self.room_servers(room_id).filter_map(|r| r.ok()).collect(); - - // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - servers.insert(Box::from(state_key_uid.server_name())); - } - } - - // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(db.globals.server_name()); - - db.sending.send_pdu(servers.into_iter(), &pdu_id)?; - - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - if let Some(appservice_uid) = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }) - { - if state_key_uid == &appservice_uid { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - } - } - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - Ok(pdu.event_id) - } - - /// Returns an iterator over all PDUs in a room. 
- #[tracing::instrument(skip(self))] - pub fn all_pdus<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result, PduEvent)>> + 'a> { - self.pdus_since(user_id, room_id, 0) - } - - /// Returns an iterator over all events in a room that happened after the event with id `since` - /// in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_since<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - since: u64, - ) -> Result, PduEvent)>> + 'a> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Skip the first pdu if it's exactly at since, because we sent that last time - let mut first_pdu_id = prefix.clone(); - first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(&first_pdu_id, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their tokens in a room that happened before the - /// event with id `until` in reverse-chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_until<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - until: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their token in a room that happened after the event - /// with id `from` in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_after<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - from: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Replace a PDU with the redacted form. - #[tracing::instrument(skip(self, reason))] - pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { - if let Some(pdu_id) = self.get_pdu_id(event_id)? { - let mut pdu = self - .get_pdu_from_id(&pdu_id)? 
- .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact(reason)?; - self.replace_pdu(&pdu_id, &pdu)?; - } - // If event does not exist, just noop - Ok(()) - } - - /// Update current membership data. - #[tracing::instrument(skip(self, last_state, db))] - pub fn update_membership( - &self, - room_id: &RoomId, - user_id: &UserId, - membership: MembershipState, - sender: &UserId, - last_state: Option>>, - db: &Database, - update_joined_count: bool, - ) -> Result<()> { - // Keep track what remote users exist by adding them as "deactivated" users - if user_id.server_name() != db.globals.server_name() { - db.users.create(user_id, None)?; - // TODO: displayname, avatar url - } - - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(user_id.server_name().as_bytes()); - - let mut serverroom_id = user_id.server_name().as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - match &membership { - MembershipState::Join => { - // Check if the user never joined this room - if !self.once_joined(user_id, room_id)? { - // Add the user ID to the join list then - self.roomuseroncejoinedids.insert(&userroom_id, &[])?; - - // Check if the room has a predecessor - if let Some(predecessor) = self - .room_state_get(room_id, &StateEventType::RoomCreate, "")? - .and_then(|create| serde_json::from_str(create.content.get()).ok()) - .and_then(|content: RoomCreateEventContent| content.predecessor) - { - // Copy user settings from predecessor to the current room: - // - Push rules - // - // TODO: finish this once push rules are implemented. - // - // let mut push_rules_event_content: PushRulesEvent = account_data - // .get( - // None, - // user_id, - // EventType::PushRules, - // )?; - // - // NOTE: find where `predecessor.room_id` match - // and update to `room_id`. - // - // account_data - // .update( - // None, - // user_id, - // EventType::PushRules, - // &push_rules_event_content, - // globals, - // ) - // .ok(); - - // Copy old tags to new room - if let Some(tag_event) = db.account_data.get::( - Some(&predecessor.room_id), - user_id, - RoomAccountDataEventType::Tag, - )? { - db.account_data - .update( - Some(room_id), - user_id, - RoomAccountDataEventType::Tag, - &tag_event, - &db.globals, - ) - .ok(); - }; - - // Copy direct chat flag - if let Some(mut direct_event) = db.account_data.get::( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - )? 
{ - let mut room_ids_updated = false; - - for room_ids in direct_event.content.0.values_mut() { - if room_ids.iter().any(|r| r == &predecessor.room_id) { - room_ids.push(room_id.to_owned()); - room_ids_updated = true; - } - } - - if room_ids_updated { - db.account_data.update( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - &direct_event, - &db.globals, - )?; - } - }; - } - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_joined.insert(&userroom_id, &[])?; - self.roomuserid_joined.insert(&roomuser_id, &[])?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Invite => { - // We want to know if the sender is ignored by the receiver - let is_ignored = db - .account_data - .get::( - None, // Ignored users are in global account data - user_id, // Receiver - GlobalAccountDataEventType::IgnoredUserList - .to_string() - .into(), - )? - .map_or(false, |ignored| { - ignored - .content - .ignored_users - .iter() - .any(|user| user == sender) - }); - - if is_ignored { - return Ok(()); - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_invitestate.insert( - &userroom_id, - &serde_json::to_vec(&last_state.unwrap_or_default()) - .expect("state to bytes always works"), - )?; - self.roomuserid_invitecount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Leave | MembershipState::Ban => { - if update_joined_count - && self - .room_members(room_id) - .chain(self.room_members_invited(room_id)) - .filter_map(|r| r.ok()) - .all(|u| u.server_name() != user_id.server_name()) - { - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - self.userroomid_leftstate.insert( - &userroom_id, - &serde_json::to_vec(&Vec::>::new()).unwrap(), - )?; // TODO - self.roomuserid_leftcount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - } - _ => {} - } - - if update_joined_count { - self.update_joined_count(room_id, db)?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn update_joined_count(&self, room_id: &RoomId, db: &Database) -> Result<()> { - let mut joinedcount = 0_u64; - let mut invitedcount = 0_u64; - let mut joined_servers = HashSet::new(); - let mut real_users = HashSet::new(); - - for joined in self.room_members(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(joined.server_name().to_owned()); - if joined.server_name() == db.globals.server_name() - && !db.users.is_deactivated(&joined).unwrap_or(true) - { - real_users.insert(joined); - } - joinedcount += 1; - } - - for invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(invited.server_name().to_owned()); - invitedcount += 1; - } - - self.roomid_joinedcount - 
.insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?; - - self.roomid_invitedcount - .insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?; - - self.our_real_users_cache - .write() - .unwrap() - .insert(room_id.to_owned(), Arc::new(real_users)); - - for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { - if !joined_servers.remove(&old_joined_server) { - // Server not in room anymore - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(old_joined_server.as_bytes()); - - let mut serverroom_id = old_joined_server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - } - - // Now only new servers are in joined_servers anymore - for server in joined_servers { - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(server.as_bytes()); - - let mut serverroom_id = server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - - self.appservice_in_room_cache - .write() - .unwrap() - .remove(room_id); - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn get_our_real_users( - &self, - room_id: &RoomId, - db: &Database, - ) -> Result>>> { - let maybe = self - .our_real_users_cache - .read() - .unwrap() - .get(room_id) - .cloned(); - if let Some(users) = maybe { - Ok(users) - } else { - self.update_joined_count(room_id, db)?; - Ok(Arc::clone( - self.our_real_users_cache - .read() - .unwrap() - .get(room_id) - .unwrap(), - )) - } - } - - #[tracing::instrument(skip(self, room_id, appservice, db))] - pub fn appservice_in_room( - &self, - room_id: &RoomId, - appservice: &(String, serde_yaml::Value), - db: &Database, - ) -> Result { - let maybe = self - .appservice_in_room_cache - .read() - .unwrap() - .get(room_id) - .and_then(|map| map.get(&appservice.0)) - .copied(); - - if let Some(b) = maybe { - Ok(b) - } else if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - - let bridge_user_id = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }); - - let in_room = bridge_user_id - .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) - || self.room_members(room_id).any(|userid| { - userid.map_or(false, |userid| { - users.iter().any(|r| r.is_match(userid.as_str())) - }) - }); - - self.appservice_in_room_cache - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default() - .insert(appservice.0.clone(), in_room); - - Ok(in_room) - } else { - Ok(false) - } - } - - // Make a user leave all their joined rooms - #[tracing::instrument(skip(self, db))] - pub async fn leave_all_rooms(&self, user_id: &UserId, db: &Database) -> Result<()> { - let all_rooms = db - .rooms - .rooms_joined(user_id) - .chain(db.rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) - .collect::>(); - - for room_id in all_rooms { - let room_id = match room_id { - Ok(room_id) => room_id, - 
Err(_) => continue, - }; - - let _ = self.leave_room(user_id, &room_id, db).await; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - pub async fn leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - // Ask a remote server if we don't have this room - if !self.exists(room_id)? && room_id.server_name() != db.globals.server_name() { - if let Err(e) = self.remote_leave_room(user_id, room_id, db).await { - warn!("Failed to leave room {} remotely: {}", user_id, e); - // Don't tell the client about this error - } - - let last_state = self - .invite_state(user_id, room_id)? - .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?; - - // We always drop the invite, we can't rely on other servers - self.update_membership( - room_id, - user_id, - MembershipState::Leave, - user_id, - last_state, - db, - true, - )?; - } else { - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - let mut event: RoomMemberEventContent = serde_json::from_str( - self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot leave a room you are not a member of.", - ))? - .content - .get(), - ) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = MembershipState::Leave; - - self.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - user_id, - room_id, - db, - &state_lock, - )?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - async fn remote_leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - let mut make_leave_response_and_server = Err(Error::BadServerResponse( - "No server available to assist in leaving.", - )); - - let invite_state = db - .rooms - .invite_state(user_id, room_id)? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "User is not invited.", - ))?; - - let servers: HashSet<_> = invite_state - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); - - for remote_server in servers { - let make_leave_response = db - .sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, - ) - .await; - - make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); - - if make_leave_response_and_server.is_ok() { - break; - } - } - - let (make_leave_response, remote_server) = make_leave_response_and_server?; - - let room_version_id = match make_leave_response.room_version { - Some(version) if self.is_supported_version(&db, &version) => version, - _ => return Err(Error::BadServerResponse("Room version is not supported")), - }; - - let mut leave_event_stub = - serde_json::from_str::(make_leave_response.event.get()).map_err( - |_| Error::BadServerResponse("Invalid make_leave event json received from server."), - )?; - - // TODO: Is origin needed? 
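// A minimal sketch, under the assumption that `utils::millis_since_unix_epoch()` (used
// just below to fill `origin_server_ts`) simply returns milliseconds since the UNIX
// epoch as a u64; the real helper in `utils` may differ in detail.
use std::time::{SystemTime, UNIX_EPOCH};

fn millis_since_unix_epoch() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .expect("time is after the UNIX epoch")
        .as_millis() as u64
}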
- leave_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - leave_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms - leave_event_stub.remove("event_id"); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut leave_event_stub, - &room_version_id, - ) - .expect("event is valid, we just created it"); - - // Generate event id - let event_id = EventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - // Add event_id back - leave_event_stub.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - - // It has enough fields to be called a proper event now - let leave_event = leave_event_stub; - - db.sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::create_leave_event::v2::Request { - room_id, - event_id: &event_id, - pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), - }, - ) - .await?; - - Ok(()) - } - - /// Makes a user forget a room. - #[tracing::instrument(skip(self))] - pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - - Ok(()) - } - - #[tracing::instrument(skip(self, globals))] - pub fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId>, - globals: &super::globals::Globals, - ) -> Result<()> { - if let Some(room_id) = room_id { - // New alias - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - } else { - // room_id=None means remove alias - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result>> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_aliases<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? - .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { - if public { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } else { - self.publicroomids.remove(room_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn iter_ids(&self) -> impl Iterator>> + '_ { - self.roomid_shortroomid.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn search_pdus<'a>( - &'a self, - room_id: &RoomId, - search_string: &str, - ) -> Result> + 'a, Vec)>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - let prefix_clone = prefix.clone(); - - let words: Vec<_> = search_string - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .map(str::to_lowercase) - .collect(); - - let iterators = words.clone().into_iter().map(move |word| { - let mut prefix2 = prefix.clone(); - prefix2.extend_from_slice(word.as_bytes()); - prefix2.push(0xff); - - let mut last_possible_id = prefix2.clone(); - last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.tokenids - .iter_from(&last_possible_id, true) // Newest pdus first - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) - }); - - Ok(utils::common_elements(iterators, |a, b| { - // We compare b with a because we reversed the iterator earlier - b.cmp(a) - }) - .map(|iter| { - ( - iter.map(move |id| { - let mut pduid = prefix_clone.clone(); - pduid.extend_from_slice(&id); - pduid - }), - words, - ) - })) - } - - #[tracing::instrument(skip(self))] - pub fn get_shared_rooms<'a>( - &'a self, - users: Vec>, - ) -> Result>> + 'a> { - let iterators = users.into_iter().map(move |user_id| { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_joined - .scan_prefix(prefix) - .map(|(key, _)| { - let roomid_index = key - .iter() - .enumerate() - .find(|(_, &b)| b == 0xff) - .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? 
- .0 - + 1; // +1 because the room id starts AFTER the separator - - let room_id = key[roomid_index..].to_vec(); - - Ok::<_, Error>(room_id) - }) - .filter_map(|r| r.ok()) - }); - - // We use the default compare function because keys are sorted correctly (not reversed) - Ok(utils::common_elements(iterators, Ord::cmp) - .expect("users is not empty") - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { - Error::bad_database("Invalid RoomId bytes in userroomid_joined") - })?) - .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - })) - } - - /// Returns an iterator of all servers participating in this room. - #[tracing::instrument(skip(self))] - pub fn room_servers<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomserverids.scan_prefix(prefix).map(|(key, _)| { - ServerName::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Server name in roomserverids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.serverroomids.get(&key).map(|o| o.is_some()) - } - - /// Returns an iterator of all rooms a server participates in (as far as we know). - #[tracing::instrument(skip(self))] - pub fn server_rooms<'a>( - &'a self, - server: &ServerName, - ) -> impl Iterator>> + 'a { - let mut prefix = server.as_bytes().to_vec(); - prefix.push(0xff); - - self.serverroomids.scan_prefix(prefix).map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) - }) - } - - /// Returns an iterator over all joined members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { - self.roomid_joinedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { - self.roomid_invitedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - /// Returns an iterator over all User IDs who ever joined a room. 
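// A self-contained sketch of the key encoding used by the membership trees scanned
// below: keys are `room_id ++ 0xff ++ user_id`, and the user id is recovered by taking
// the last 0xff-separated segment. The byte 0xff never occurs in UTF-8 text, which is
// what makes it a safe separator. The literal ids are illustrative.
fn roomuser_key(room_id: &str, user_id: &str) -> Vec<u8> {
    let mut key = room_id.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(user_id.as_bytes());
    key
}

fn user_from_key(key: &[u8]) -> Option<String> {
    key.rsplit(|&b| b == 0xff)
        .next() // the last segment is the user id
        .map(|bytes| String::from_utf8_lossy(bytes).into_owned())
}

fn main() {
    let key = roomuser_key("!room:server.tld", "@alice:server.tld");
    assert_eq!(user_from_key(&key).as_deref(), Some("@alice:server.tld"));
}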
- #[tracing::instrument(skip(self))] - pub fn room_useroncejoined<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuseroncejoinedids - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in room_useroncejoined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) - }) - } - - /// Returns an iterator over all invited members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members_invited<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_invitecount - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_invitecount - .get(&key)? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid invitecount in db.") - })?)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_leftcount - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid leftcount in db.")) - }) - .transpose() - } - - pub fn is_disabled(&self, room_id: &RoomId) -> Result { - Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) - } - - /// Returns an iterator over all rooms this user joined. - #[tracing::instrument(skip(self))] - pub fn rooms_joined<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator>> + 'a { - self.userroomid_joined - .scan_prefix(user_id.as_bytes().to_vec()) - .map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) - }) - } - - /// Returns an iterator over all rooms a user was invited to. 
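// A minimal sketch of the value format read by `rooms_invited` and `invite_state`
// below: the stored bytes are a serde_json-encoded array of stripped state events, so
// loading them back is a single `from_slice`. The event JSON here is illustrative, not
// taken from the patch.
fn decode_invite_state(value: &[u8]) -> Result<Vec<serde_json::Value>, serde_json::Error> {
    serde_json::from_slice(value)
}

fn main() -> Result<(), serde_json::Error> {
    let stored = serde_json::to_vec(&vec![serde_json::json!({
        "type": "m.room.member",
        "state_key": "@alice:server.tld",
        "content": { "membership": "invite" }
    })])?;
    assert_eq!(decode_invite_state(&stored)?.len(), 1);
    Ok(())
}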
- #[tracing::instrument(skip(self))] - pub fn rooms_invited<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_invitestate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn invite_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_invitestate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok(state) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn left_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_leftstate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok(state) - }) - .transpose() - } - - /// Returns an iterator over all rooms a user left. - #[tracing::instrument(skip(self))] - pub fn rooms_left<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_leftstate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_left(&self, user_id: &UserId, 
room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( - &'a self, - key: &[u64], - ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); - } - - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? - .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - }) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } - } - - Ok(None) - } - - #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db - if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - )?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_mark_sent( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - lazy_load: HashSet>, - count: u64, - ) { - self.lazy_load_waiting.lock().unwrap().insert( - ( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - count, - ), - lazy_load, - ); - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } - - /// Returns the room's version. 
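// A standalone sketch of what `get_room_version` below does conceptually: the version
// is the `room_version` field of the `m.room.create` event's content, defaulting to
// "1" when the field is absent (as the Matrix spec prescribes). A plain
// serde_json::Value stands in for ruma's RoomCreateEventContent here.
fn room_version_from_create_content(content: &str) -> Result<String, serde_json::Error> {
    let content: serde_json::Value = serde_json::from_str(content)?;
    Ok(content
        .get("room_version")
        .and_then(|v| v.as_str())
        .unwrap_or("1")
        .to_owned())
}

fn main() -> Result<(), serde_json::Error> {
    assert_eq!(
        room_version_from_create_content(r#"{"creator":"@a:b","room_version":"6"}"#)?,
        "6"
    );
    assert_eq!(room_version_from_create_content(r#"{"creator":"@a:b"}"#)?, "1");
    Ok(())
}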
- #[tracing::instrument(skip(self))] - pub fn get_room_version(&self, room_id: &RoomId) -> Result { - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - let room_version = create_event_content - .map(|create_event| create_event.room_version) - .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; - Ok(room_version) - } -} diff --git a/src/service/rooms/state.rs b/src/service/rooms/state.rs index 4ad815e3..4c75467f 100644 --- a/src/service/rooms/state.rs +++ b/src/service/rooms/state.rs @@ -1,145 +1,3 @@ -mod edus; - -pub use edus::RoomEdus; - -use crate::{ - pdu::{EventHash, PduBuilder}, - utils, Database, Error, PduEvent, Result, -}; -use lru_cache::LruCache; -use regex::Regex; -use ring::digest; -use ruma::{ - api::{client::error::ErrorKind, federation}, - events::{ - direct::DirectEvent, - ignored_user_list::IgnoredUserListEvent, - push_rules::PushRulesEvent, - room::{ - create::RoomCreateEventContent, - member::{MembershipState, RoomMemberEventContent}, - power_levels::RoomPowerLevelsEventContent, - }, - tag::TagEvent, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, RoomEventType, StateEventType, - }, - push::{Action, Ruleset, Tweak}, - serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res::{self, RoomVersion, StateMap}, - uint, DeviceId, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, -}; -use serde::Deserialize; -use serde_json::value::to_raw_value; -use std::{ - borrow::Cow, - collections::{hash_map, BTreeMap, HashMap, HashSet}, - fmt::Debug, - iter, - mem::size_of, - sync::{Arc, Mutex, RwLock}, -}; -use tokio::sync::MutexGuard; -use tracing::{error, warn}; - -use super::{abstraction::Tree, pusher}; - -/// The unique identifier of each state group. -/// -/// This is created when a state group is added to the database by -/// hashing the entire state. -pub type StateHashId = Vec; -pub type CompressedStateEvent = [u8; 2 * size_of::()]; - -pub struct Rooms { - pub edus: RoomEdus, - pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count - pub(super) eventid_pduid: Arc, - pub(super) roomid_pduleaves: Arc, - pub(super) alias_roomid: Arc, - pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count - pub(super) publicroomids: Arc, - - pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount - - /// Participating servers in a room. 
- pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName - pub(super) serverroomids: Arc, // ServerRoomId = ServerName + RoomId - - pub(super) userroomid_joined: Arc, - pub(super) roomuserid_joined: Arc, - pub(super) roomid_joinedcount: Arc, - pub(super) roomid_invitedcount: Arc, - pub(super) roomuseroncejoinedids: Arc, - pub(super) userroomid_invitestate: Arc, // InviteState = Vec> - pub(super) roomuserid_invitecount: Arc, // InviteCount = Count - pub(super) userroomid_leftstate: Arc, - pub(super) roomuserid_leftcount: Arc, - - pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled - - pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId - - pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 - pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 - - /// Remember the current state hash of a room. - pub(super) roomid_shortstatehash: Arc, - pub(super) roomsynctoken_shortstatehash: Arc, - /// Remember the state hash at events in the past. - pub(super) shorteventid_shortstatehash: Arc, - /// StateKey = EventType + StateKey, ShortStateKey = Count - pub(super) statekey_shortstatekey: Arc, - pub(super) shortstatekey_statekey: Arc, - - pub(super) roomid_shortroomid: Arc, - - pub(super) shorteventid_eventid: Arc, - pub(super) eventid_shorteventid: Arc, - - pub(super) statehash_shortstatehash: Arc, - pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) - - pub(super) shorteventid_authchain: Arc, - - /// RoomId + EventId -> outlier PDU. - /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) eventid_outlierpdu: Arc, - pub(super) softfailedeventids: Arc, - - /// RoomId + EventId -> Parent PDU EventId. - pub(super) referencedevents: Arc, - - pub(super) pdu_cache: Mutex, Arc>>, - pub(super) shorteventid_cache: Mutex>>, - pub(super) auth_chain_cache: Mutex, Arc>>>, - pub(super) eventidshort_cache: Mutex, u64>>, - pub(super) statekeyshort_cache: Mutex>, - pub(super) shortstatekey_cache: Mutex>, - pub(super) our_real_users_cache: RwLock, Arc>>>>, - pub(super) appservice_in_room_cache: RwLock, HashMap>>, - pub(super) lazy_load_waiting: - Mutex, Box, Box, u64), HashSet>>>, - pub(super) stateinfo_cache: Mutex< - LruCache< - u64, - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - >, - >, - pub(super) lasttimelinecount_cache: Mutex, u64>>, -} - -impl Rooms { - /// Returns true if a given room version is supported - #[tracing::instrument(skip(self, db))] - pub fn is_supported_version(&self, db: &Database, room_version: &RoomVersionId) -> bool { - db.globals.supported_room_versions().contains(room_version) - } /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. @@ -270,101 +128,6 @@ impl Rooms { }) } - /// This fetches auth events from the current state. - #[tracing::instrument(skip(self))] - pub fn get_auth_events( - &self, - room_id: &RoomId, - kind: &RoomEventType, - sender: &UserId, - state_key: Option<&str>, - content: &serde_json::value::RawValue, - ) -> Result>> { - let shortstatehash = - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - current_shortstatehash - } else { - return Ok(HashMap::new()); - }; - - let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) - .expect("content is a valid JSON object"); - - let mut sauthevents = auth_events - .into_iter() - .filter_map(|(event_type, state_key)| { - self.get_shortstatekey(&event_type.to_string().into(), &state_key) - .ok() - .flatten() - .map(|s| (s, (event_type, state_key))) - }) - .collect::>(); - - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - Ok(full_state - .into_iter() - .filter_map(|compressed| self.parse_compressed_state_event(compressed).ok()) - .filter_map(|(shortstatekey, event_id)| { - sauthevents.remove(&shortstatekey).map(|k| (k, event_id)) - }) - .filter_map(|(k, event_id)| self.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu))) - .collect()) - } - - /// Generate a new StateHash. - /// - /// A unique hash made from hashing all PDU ids of the state joined with 0xff. - fn calculate_hash(&self, bytes_list: &[&[u8]]) -> StateHashId { - // We only hash the pdu's event ids, not the whole pdu - let bytes = bytes_list.join(&0xff); - let hash = digest::digest(&digest::SHA256, &bytes); - hash.as_ref().into() - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. - self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() - } - /// Force the creation of a new StateHash and insert it into the db. /// /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. @@ -474,472 +237,6 @@ impl Rooms { Ok(()) } - /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. - #[tracing::instrument(skip(self))] - pub fn load_shortstatehash_info( - &self, - shortstatehash: u64, - ) -> Result< - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - > { - if let Some(r) = self - .stateinfo_cache - .lock() - .unwrap() - .get_mut(&shortstatehash) - { - return Ok(r.clone()); - } - - let value = self - .shortstatehash_statediff - .get(&shortstatehash.to_be_bytes())? 
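// A self-contained sketch of the two on-disk layouts parsed just below. A compressed
// state event is 16 bytes: big-endian shortstatekey followed by big-endian
// shorteventid. A state diff value is the 8-byte parent shortstatehash (0 = no
// parent), the added entries, an 8-byte zero marker, then the removed entries. The
// numbers in `main` are illustrative only.
use std::mem::size_of;

type Compressed = [u8; 2 * size_of::<u64>()];

fn compress(shortstatekey: u64, shorteventid: u64) -> Compressed {
    let mut v = [0u8; 2 * size_of::<u64>()];
    v[..8].copy_from_slice(&shortstatekey.to_be_bytes());
    v[8..].copy_from_slice(&shorteventid.to_be_bytes());
    v
}

fn parse_diff(value: &[u8]) -> (u64, Vec<Compressed>, Vec<Compressed>) {
    let parent = u64::from_be_bytes(value[..8].try_into().unwrap());
    let (mut added, mut removed) = (Vec::new(), Vec::new());
    let mut add_mode = true;
    let mut i = size_of::<u64>();
    while let Some(v) = value.get(i..i + 2 * size_of::<u64>()) {
        if add_mode && v.starts_with(&0_u64.to_be_bytes()) {
            // The 8-byte zero marker separates added entries from removed ones.
            add_mode = false;
            i += size_of::<u64>();
            continue;
        }
        let entry: Compressed = v.try_into().unwrap();
        if add_mode { added.push(entry); } else { removed.push(entry); }
        i += 2 * size_of::<u64>();
    }
    (parent, added, removed)
}

fn main() {
    let mut value = 0_u64.to_be_bytes().to_vec(); // parent = 0, i.e. a full state layer
    value.extend_from_slice(&compress(3, 42)); // added
    value.extend_from_slice(&0_u64.to_be_bytes()); // marker
    value.extend_from_slice(&compress(3, 41)); // removed
    let (parent, added, removed) = parse_diff(&value);
    assert_eq!((parent, added.len(), removed.len()), (0, 1, 1));
}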
- .ok_or_else(|| Error::bad_database("State hash does not exist"))?; - let parent = - utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - - let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); - - let mut i = size_of::(); - while let Some(v) = value.get(i..i + 2 * size_of::()) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i += size_of::(); - continue; - } - if add_mode { - added.insert(v.try_into().expect("we checked the size above")); - } else { - removed.insert(v.try_into().expect("we checked the size above")); - } - i += 2 * size_of::(); - } - - if parent != 0_u64 { - let mut response = self.load_shortstatehash_info(parent)?; - let mut state = response.last().unwrap().1.clone(); - state.extend(added.iter().copied()); - for r in &removed { - state.remove(r); - } - - response.push((shortstatehash, state, added, removed)); - - Ok(response) - } else { - let response = vec![(shortstatehash, added.clone(), added, removed)]; - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); - Ok(response) - } - } - - pub fn compress_state_event( - &self, - shortstatekey: u64, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - let mut v = shortstatekey.to_be_bytes().to_vec(); - v.extend_from_slice( - &self - .get_or_create_shorteventid(event_id, globals)? - .to_be_bytes(), - ); - Ok(v.try_into().expect("we checked the size above")) - } - - /// Returns shortstatekey, event id - pub fn parse_compressed_state_event( - &self, - compressed_event: CompressedStateEvent, - ) -> Result<(u64, Arc)> { - Ok(( - utils::u64_from_bytes(&compressed_event[0..size_of::()]) - .expect("bytes have right length"), - self.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]) - .expect("bytes have right length"), - )?, - )) - } - - /// Creates a new shortstatehash that often is just a diff to an already existing - /// shortstatehash and therefore very efficient. - /// - /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer - /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 - /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's - /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. - /// - /// * `shortstatehash` - Shortstatehash of this state - /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid - /// * `statediffremoved` - Removed from base. 
Each vec is shortstatekey+shorteventid - /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer - /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer - #[tracing::instrument(skip( - self, - statediffnew, - statediffremoved, - diff_to_sibling, - parent_states - ))] - pub fn save_state_from_diff( - &self, - shortstatehash: u64, - statediffnew: HashSet, - statediffremoved: HashSet, - diff_to_sibling: usize, - mut parent_states: Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - ) -> Result<()> { - let diffsum = statediffnew.len() + statediffremoved.len(); - - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().unwrap(); - - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - - return Ok(()); - } - - if parent_states.is_empty() { - // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - - return Ok(()); - }; - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. We replace a layer above - - let parent = parent_states.pop().unwrap(); - let parent_diff = parent.2.len() + parent.3.len(); - - if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { - // Diff too big, we replace above layer(s) - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. 
We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.to_be_bytes().to_vec(); - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed[..]); - } - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - } - - Ok(()) - } - - /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash( - &self, - state_hash: &StateHashId, - globals: &super::globals::Globals, - ) -> Result<(u64, bool)> { - Ok(match self.statehash_shortstatehash.get(state_hash)? { - Some(shortstatehash) => ( - utils::u64_from_bytes(&shortstatehash) - .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, - true, - ), - None => { - let shortstatehash = globals.next_count()?; - self.statehash_shortstatehash - .insert(state_hash, &shortstatehash.to_be_bytes())?; - (shortstatehash, false) - } - }) - } - - pub fn get_or_create_shorteventid( - &self, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { - return Ok(*short); - } - - let short = match self.eventid_shorteventid.get(event_id.as_bytes())? { - Some(shorteventid) => utils::u64_from_bytes(&shorteventid) - .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, - None => { - let shorteventid = globals.next_count()?; - self.eventid_shorteventid - .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; - shorteventid - } - }; - - self.eventidshort_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), short); - - Ok(short) - } - - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { - self.roomid_shortroomid - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) - }) - .transpose() - } - - pub fn get_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - ) -> Result> { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(Some(*short)); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = self - .statekey_shortstatekey - .get(&statekey)? - .map(|shortstatekey| { - utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) - }) - .transpose()?; - - if let Some(s) = short { - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), s); - } - - Ok(short) - } - - pub fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - globals: &super::globals::Globals, - ) -> Result { - Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? 
{ - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, - None => { - let short = globals.next_count()?; - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes())?; - short - } - }) - } - - pub fn get_or_create_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(*short); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = match self.statekey_shortstatekey.get(&statekey)? { - Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, - None => { - let shortstatekey = globals.next_count()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes())?; - self.shortstatekey_statekey - .insert(&shortstatekey.to_be_bytes(), &statekey)?; - shortstatekey - } - }; - - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), short); - - Ok(short) - } - - pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - if let Some(id) = self - .shorteventid_cache - .lock() - .unwrap() - .get_mut(&shorteventid) - { - return Ok(Arc::clone(id)); - } - - let bytes = self - .shorteventid_eventid - .get(&shorteventid.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - - let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; - - self.shorteventid_cache - .lock() - .unwrap() - .insert(shorteventid, Arc::clone(&event_id)); - - Ok(event_id) - } - - pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { - if let Some(id) = self - .shortstatekey_cache - .lock() - .unwrap() - .get_mut(&shortstatekey) - { - return Ok(id.clone()); - } - - let bytes = self - .shortstatekey_statekey - .get(&shortstatekey.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; - - let mut parts = bytes.splitn(2, |&b| b == 0xff); - let eventtype_bytes = parts.next().expect("split always returns one entry"); - let statekey_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; - - let event_type = - StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { - Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; - - let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { - Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") - })?; - - let result = (event_type, state_key); - - self.shortstatekey_cache - .lock() - .unwrap() - .insert(shortstatekey, result.clone()); - - Ok(result) - } - /// Returns the full room state. #[tracing::instrument(skip(self))] pub async fn room_state_full( @@ -983,185 +280,6 @@ impl Rooms { } } - /// Returns the `count` of this pdu's id. 
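// A minimal sketch of the extraction `pdu_count` below performs: a PduId is
// shortroomid ++ count (two big-endian u64s), so the count is decoded from the final
// 8 bytes of the key.
use std::mem::size_of;

fn pdu_count(pdu_id: &[u8]) -> Option<u64> {
    let tail = pdu_id.get(pdu_id.len().checked_sub(size_of::<u64>())?..)?;
    Some(u64::from_be_bytes(tail.try_into().ok()?))
}

fn main() {
    let mut pdu_id = 7_u64.to_be_bytes().to_vec(); // shortroomid
    pdu_id.extend_from_slice(&42_u64.to_be_bytes()); // count
    assert_eq!(pdu_count(&pdu_id), Some(42));
}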
- pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) - } - - /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - - /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu's id. - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { - return Ok(Some(Arc::clone(p))); - } - - if let Some(pdu) = self - .eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - .map(Arc::new) - }) - .transpose()? 
- { - self.pdu_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), Arc::clone(&pdu)); - Ok(Some(pdu)) - } else { - Ok(None) - } - } - - /// Returns the pdu. - /// - /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Returns the pdu as a `BTreeMap`. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Removes a pdu and creates a new one with the same id. - #[tracing::instrument(skip(self))] - fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - if self.pduid_pdu.get(pdu_id)?.is_some() { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), - )?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "PDU does not exist.", - )) - } - } - /// Returns the leaf pdus of a room. #[tracing::instrument(skip(self))] pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { @@ -1179,17 +297,6 @@ impl Rooms { .collect() } - #[tracing::instrument(skip(self, room_id, event_ids))] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - /// Replace the leaves of a room. /// /// The provided `event_ids` become the new leaves, this allows a room to have multiple @@ -1216,377 +323,7 @@ impl Rooms { Ok(()) } - #[tracing::instrument(skip(self))] - pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - /// Returns the pdu from the outlier tree. - pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - /// Append the PDU as an outlier. - /// - /// Any event given to this will be processed (state-res) on another thread. - #[tracing::instrument(skip(self, pdu))] - pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } - - #[tracing::instrument(skip(self))] - pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } - - /// Creates a new persisted data unit and adds it to a room. - /// - /// By this point the incoming event should be fully authenticated, no auth happens - /// in `append_pdu`. 
- /// - /// Returns pdu id - #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] - pub fn append_pdu<'a>( - &self, - pdu: &PduEvent, - mut pdu_json: CanonicalJsonObject, - leaves: impl IntoIterator + Debug, - db: &Database, - ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); - - // Make unsigned fields correct. This is not properly documented in the spec, but state - // events need to have previous content in the unsigned field, so clients can easily - // interpret things like membership changes - if let Some(state_key) = &pdu.state_key { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) - { - if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) - .unwrap() - { - unsigned.insert( - "prev_content".to_owned(), - CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()) - .expect("event is valid, we just created it"), - ), - ); - } - } - } else { - error!("Invalid unsigned type in pdu."); - } - } - - // We must keep track of all events that have been referenced. - self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - self.replace_pdu_leaves(&pdu.room_id, leaves)?; - - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(pdu.room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - - let count1 = db.globals.next_count()?; - // Mark as read first so the sending client doesn't get a notification even if appending - // fails - self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; - self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; - - let count2 = db.globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&count2.to_be_bytes()); - - // There's a brief moment of time here where the count is updated but the pdu does not - // exist. This could theoretically lead to dropped pdus, but it's extremely rare - // - // Update: We fixed this using insert_lock - - self.pduid_pdu.insert( - &pdu_id, - &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - self.lasttimelinecount_cache - .lock() - .unwrap() - .insert(pdu.room_id.clone(), count2); - - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; - - drop(insert_lock); - - // See if the event matches any known pushers - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - let sync_pdu = pdu.to_sync_room_event(); - - let mut notifies = Vec::new(); - let mut highlights = Vec::new(); - - for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { - // Don't notify the user of their own events - if user == &pdu.sender { - continue; - } - - let rules_for_user = db - .account_data - .get( - None, - user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? 
- .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); - - let mut highlight = false; - let mut notify = false; - - for action in pusher::get_actions( - user, - &rules_for_user, - &power_levels, - &sync_pdu, - &pdu.room_id, - db, - )? { - match action { - Action::DontNotify => notify = false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => notify = true, - Action::SetTweak(Tweak::Highlight(true)) => { - highlight = true; - } - _ => {} - }; - } - - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(pdu.room_id.as_bytes()); - - if notify { - notifies.push(userroom_id.clone()); - } - - if highlight { - highlights.push(userroom_id); - } - - for senderkey in db.pusher.get_pusher_senderkeys(user) { - db.sending.send_push_pdu(&*pdu_id, senderkey)?; - } - } - - self.userroomid_notificationcount - .increment_batch(&mut notifies.into_iter())?; - self.userroomid_highlightcount - .increment_batch(&mut highlights.into_iter())?; - - match pdu.kind { - RoomEventType::RoomRedaction => { - if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(redact_id, pdu)?; - } - } - RoomEventType::RoomMember => { - if let Some(state_key) = &pdu.state_key { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - // if the state_key fails - let target_user_id = UserId::parse(state_key.clone()) - .expect("This state_key was previously validated"); - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - let invite_state = match content.membership { - MembershipState::Invite => { - let state = self.calculate_invite_state(pdu)?; - Some(state) - } - _ => None, - }; - - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( - &pdu.room_id, - &target_user_id, - content.membership, - &pdu.sender, - invite_state, - db, - true, - )?; - } - } - RoomEventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody<'a> { - #[serde(borrow)] - body: Option>, - } - - let content = serde_json::from_str::>(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - let mut batch = body - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= 50) - .map(str::to_lowercase) - .map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xff); - key.extend_from_slice(&pdu_id); - (key, Vec::new()) - }); - - self.tokenids.insert_batch(&mut batch)?; - - let admin_room = self.id_from_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )?; - let server_user = format!("@conduit:{}", db.globals.server_name()); - - let to_conduit = body.starts_with(&format!("{}: ", server_user)); - - // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let from_conduit = - pdu.sender == server_user && db.globals.emergency_password().is_none(); - - if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - db.admin.process_message(body.to_string()); - } - } - } - _ => {} - } - - Ok(pdu_id) - } - - 
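// Aside (not part of the patch): a minimal sketch of the PDU-ID layout that append_pdu above
// relies on. A PDU ID is the room's short ID followed by a value from the server-wide
// monotonic counter (next_count in the removed code), both as big-endian u64 bytes. Because
// those bytes sort lexicographically in the same order as the counters, all events of one room
// can be read in chronological order with a simple prefix scan over an ordered map. The
// BTreeMap here is a stand-in for the real key-value tree; this is a sketch, not the actual
// storage layer.
use std::collections::BTreeMap;

fn pdu_id(shortroomid: u64, count: u64) -> Vec<u8> {
    let mut id = shortroomid.to_be_bytes().to_vec();
    id.extend_from_slice(&count.to_be_bytes());
    id
}

fn main() {
    let mut pduid_pdu: BTreeMap<Vec<u8>, String> = BTreeMap::new();
    pduid_pdu.insert(pdu_id(7, 1), "first event".to_owned());
    pduid_pdu.insert(pdu_id(7, 2), "second event".to_owned());
    pduid_pdu.insert(pdu_id(8, 1), "event in another room".to_owned());

    // Prefix scan: only keys starting with room 7's short ID are visited, oldest first.
    let prefix = 7u64.to_be_bytes();
    for (key, value) in pduid_pdu.range(prefix.to_vec()..) {
        if !key.starts_with(&prefix) {
            break;
        }
        println!("{value}");
    }
}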
#[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - match self - .lasttimelinecount_cache - .lock() - .unwrap() - .entry(room_id.to_owned()) - { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - .pdus_until(&sender_user, &room_id, u64::MAX)? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .map(|(pduid, _)| self.pdu_count(&pduid)) - .next() - { - Ok(*v.insert(last_count?)) - } else { - Ok(0) - } - } - hash_map::Entry::Occupied(o) => Ok(*o.get()), - } - } - - #[tracing::instrument(skip(self))] - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - self.userroomid_highlightcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid notification count in db.")) - }) - .unwrap_or(Ok(0)) - } - - #[tracing::instrument(skip(self))] - pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_highlightcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid highlight count in db.")) - }) - .unwrap_or(Ok(0)) - } - - /// Generates a new StateHash and associates it with the incoming event. + /// Generates a new StateHash and associates it with the incoming event. /// /// This adds all current state events (not including the incoming event) /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. @@ -1770,1734 +507,3 @@ impl Rooms { Ok(()) } - - pub fn associate_token_shortstatehash( - &self, - room_id: &RoomId, - token: u64, - shortstatehash: u64, - ) -> Result<()> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .insert(&key, &shortstatehash.to_be_bytes()) - } - - pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") - }) - }) - .transpose() - } - - /// Creates a new persisted data unit and adds it to a room. 
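// Aside (not part of the patch): a small illustrative sketch of the composite-key convention
// used by the notification and highlight counters above. Keys concatenate the user ID and the
// room ID with a 0xff separator (0xff never occurs in valid UTF-8, so it cannot collide with
// the identifier bytes), letting a single ordered tree hold one u64 per (user, room) pair with
// all of a user's keys grouped under a common prefix. The BTreeMap is a simplified stand-in,
// not the real tree type.
use std::collections::BTreeMap;

fn userroom_key(user_id: &str, room_id: &str) -> Vec<u8> {
    let mut key = user_id.as_bytes().to_vec();
    key.push(0xff); // separator byte that cannot appear inside the UTF-8 identifiers
    key.extend_from_slice(room_id.as_bytes());
    key
}

fn main() {
    let mut notification_counts: BTreeMap<Vec<u8>, u64> = BTreeMap::new();
    let key = userroom_key("@alice:example.org", "!room:example.org");

    // Increment on a new notification, then reset, mirroring reset_notification_counts above.
    *notification_counts.entry(key.clone()).or_insert(0) += 1;
    notification_counts.insert(key.clone(), 0);

    assert_eq!(notification_counts[&key], 0);
}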
- #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex - ) -> Result> { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - } = pdu_builder; - - let prev_events = self - .get_pdu_leaves(room_id)? - .into_iter() - .take(20) - .collect::>(); - - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - - let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = unsigned.unwrap_or_default(); - if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - self.room_state_get(room_id, &event_type.to_string().into(), state_key)? - { - unsigned.insert( - "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), - ); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), - ); - } - } - - let mut pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key, - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_ref().to_owned()), - ); - - match ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) { - Ok(_) => {} - Err(e) => { - return match e { - 
ruma::signatures::Error::PduSize => Err(Error::BadRequest( - ErrorKind::TooLarge, - "Message is too long", - )), - _ => Err(Error::BadRequest( - ErrorKind::Unknown, - "Signing event failed", - )), - } - } - } - - // Generate event id - pdu.event_id = EventId::parse_arc(format!( - "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - pdu_json.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), - ); - - // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; - - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. - let statehashid = self.append_to_state(&pdu, &db.globals)?; - - let pdu_id = self.append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - iter::once(&*pdu.event_id), - db, - )?; - - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; - - let mut servers: HashSet> = - self.room_servers(room_id).filter_map(|r| r.ok()).collect(); - - // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - servers.insert(Box::from(state_key_uid.server_name())); - } - } - - // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(db.globals.server_name()); - - db.sending.send_pdu(servers.into_iter(), &pdu_id)?; - - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. 
- if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - if let Some(appservice_uid) = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }) - { - if state_key_uid == &appservice_uid { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - } - } - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - Ok(pdu.event_id) - } - - /// Returns an iterator over all PDUs in a room. - #[tracing::instrument(skip(self))] - pub fn all_pdus<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result, PduEvent)>> + 'a> { - self.pdus_since(user_id, room_id, 0) - } - - /// Returns an iterator over all events in a room that happened after the event with id `since` - /// in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_since<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - since: u64, - ) -> Result, PduEvent)>> + 'a> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Skip the first pdu if it's exactly at since, because we sent that last time - let mut first_pdu_id = prefix.clone(); - first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(&first_pdu_id, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their tokens in a room that happened before the - /// event with id `until` in reverse-chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_until<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - until: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? 
- .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their token in a room that happened after the event - /// with id `from` in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_after<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - from: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Replace a PDU with the redacted form. - #[tracing::instrument(skip(self, reason))] - pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { - if let Some(pdu_id) = self.get_pdu_id(event_id)? { - let mut pdu = self - .get_pdu_from_id(&pdu_id)? - .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact(reason)?; - self.replace_pdu(&pdu_id, &pdu)?; - } - // If event does not exist, just noop - Ok(()) - } - - /// Update current membership data. - #[tracing::instrument(skip(self, last_state, db))] - pub fn update_membership( - &self, - room_id: &RoomId, - user_id: &UserId, - membership: MembershipState, - sender: &UserId, - last_state: Option>>, - db: &Database, - update_joined_count: bool, - ) -> Result<()> { - // Keep track what remote users exist by adding them as "deactivated" users - if user_id.server_name() != db.globals.server_name() { - db.users.create(user_id, None)?; - // TODO: displayname, avatar url - } - - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(user_id.server_name().as_bytes()); - - let mut serverroom_id = user_id.server_name().as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - match &membership { - MembershipState::Join => { - // Check if the user never joined this room - if !self.once_joined(user_id, room_id)? { - // Add the user ID to the join list then - self.roomuseroncejoinedids.insert(&userroom_id, &[])?; - - // Check if the room has a predecessor - if let Some(predecessor) = self - .room_state_get(room_id, &StateEventType::RoomCreate, "")? 
- .and_then(|create| serde_json::from_str(create.content.get()).ok()) - .and_then(|content: RoomCreateEventContent| content.predecessor) - { - // Copy user settings from predecessor to the current room: - // - Push rules - // - // TODO: finish this once push rules are implemented. - // - // let mut push_rules_event_content: PushRulesEvent = account_data - // .get( - // None, - // user_id, - // EventType::PushRules, - // )?; - // - // NOTE: find where `predecessor.room_id` match - // and update to `room_id`. - // - // account_data - // .update( - // None, - // user_id, - // EventType::PushRules, - // &push_rules_event_content, - // globals, - // ) - // .ok(); - - // Copy old tags to new room - if let Some(tag_event) = db.account_data.get::( - Some(&predecessor.room_id), - user_id, - RoomAccountDataEventType::Tag, - )? { - db.account_data - .update( - Some(room_id), - user_id, - RoomAccountDataEventType::Tag, - &tag_event, - &db.globals, - ) - .ok(); - }; - - // Copy direct chat flag - if let Some(mut direct_event) = db.account_data.get::( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - )? { - let mut room_ids_updated = false; - - for room_ids in direct_event.content.0.values_mut() { - if room_ids.iter().any(|r| r == &predecessor.room_id) { - room_ids.push(room_id.to_owned()); - room_ids_updated = true; - } - } - - if room_ids_updated { - db.account_data.update( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - &direct_event, - &db.globals, - )?; - } - }; - } - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_joined.insert(&userroom_id, &[])?; - self.roomuserid_joined.insert(&roomuser_id, &[])?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Invite => { - // We want to know if the sender is ignored by the receiver - let is_ignored = db - .account_data - .get::( - None, // Ignored users are in global account data - user_id, // Receiver - GlobalAccountDataEventType::IgnoredUserList - .to_string() - .into(), - )? 
- .map_or(false, |ignored| { - ignored - .content - .ignored_users - .iter() - .any(|user| user == sender) - }); - - if is_ignored { - return Ok(()); - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_invitestate.insert( - &userroom_id, - &serde_json::to_vec(&last_state.unwrap_or_default()) - .expect("state to bytes always works"), - )?; - self.roomuserid_invitecount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Leave | MembershipState::Ban => { - if update_joined_count - && self - .room_members(room_id) - .chain(self.room_members_invited(room_id)) - .filter_map(|r| r.ok()) - .all(|u| u.server_name() != user_id.server_name()) - { - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - self.userroomid_leftstate.insert( - &userroom_id, - &serde_json::to_vec(&Vec::>::new()).unwrap(), - )?; // TODO - self.roomuserid_leftcount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - } - _ => {} - } - - if update_joined_count { - self.update_joined_count(room_id, db)?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn update_joined_count(&self, room_id: &RoomId, db: &Database) -> Result<()> { - let mut joinedcount = 0_u64; - let mut invitedcount = 0_u64; - let mut joined_servers = HashSet::new(); - let mut real_users = HashSet::new(); - - for joined in self.room_members(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(joined.server_name().to_owned()); - if joined.server_name() == db.globals.server_name() - && !db.users.is_deactivated(&joined).unwrap_or(true) - { - real_users.insert(joined); - } - joinedcount += 1; - } - - for invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(invited.server_name().to_owned()); - invitedcount += 1; - } - - self.roomid_joinedcount - .insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?; - - self.roomid_invitedcount - .insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?; - - self.our_real_users_cache - .write() - .unwrap() - .insert(room_id.to_owned(), Arc::new(real_users)); - - for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { - if !joined_servers.remove(&old_joined_server) { - // Server not in room anymore - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(old_joined_server.as_bytes()); - - let mut serverroom_id = old_joined_server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - } - - // Now only new servers are in joined_servers anymore - for server in joined_servers { - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(server.as_bytes()); - - let mut serverroom_id = server.as_bytes().to_vec(); - serverroom_id.push(0xff); - 
serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - - self.appservice_in_room_cache - .write() - .unwrap() - .remove(room_id); - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn get_our_real_users( - &self, - room_id: &RoomId, - db: &Database, - ) -> Result>>> { - let maybe = self - .our_real_users_cache - .read() - .unwrap() - .get(room_id) - .cloned(); - if let Some(users) = maybe { - Ok(users) - } else { - self.update_joined_count(room_id, db)?; - Ok(Arc::clone( - self.our_real_users_cache - .read() - .unwrap() - .get(room_id) - .unwrap(), - )) - } - } - - #[tracing::instrument(skip(self, room_id, appservice, db))] - pub fn appservice_in_room( - &self, - room_id: &RoomId, - appservice: &(String, serde_yaml::Value), - db: &Database, - ) -> Result { - let maybe = self - .appservice_in_room_cache - .read() - .unwrap() - .get(room_id) - .and_then(|map| map.get(&appservice.0)) - .copied(); - - if let Some(b) = maybe { - Ok(b) - } else if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - - let bridge_user_id = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }); - - let in_room = bridge_user_id - .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) - || self.room_members(room_id).any(|userid| { - userid.map_or(false, |userid| { - users.iter().any(|r| r.is_match(userid.as_str())) - }) - }); - - self.appservice_in_room_cache - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default() - .insert(appservice.0.clone(), in_room); - - Ok(in_room) - } else { - Ok(false) - } - } - - // Make a user leave all their joined rooms - #[tracing::instrument(skip(self, db))] - pub async fn leave_all_rooms(&self, user_id: &UserId, db: &Database) -> Result<()> { - let all_rooms = db - .rooms - .rooms_joined(user_id) - .chain(db.rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) - .collect::>(); - - for room_id in all_rooms { - let room_id = match room_id { - Ok(room_id) => room_id, - Err(_) => continue, - }; - - let _ = self.leave_room(user_id, &room_id, db).await; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - pub async fn leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - // Ask a remote server if we don't have this room - if !self.exists(room_id)? && room_id.server_name() != db.globals.server_name() { - if let Err(e) = self.remote_leave_room(user_id, room_id, db).await { - warn!("Failed to leave room {} remotely: {}", user_id, e); - // Don't tell the client about this error - } - - let last_state = self - .invite_state(user_id, room_id)? 
- .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?; - - // We always drop the invite, we can't rely on other servers - self.update_membership( - room_id, - user_id, - MembershipState::Leave, - user_id, - last_state, - db, - true, - )?; - } else { - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - let mut event: RoomMemberEventContent = serde_json::from_str( - self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot leave a room you are not a member of.", - ))? - .content - .get(), - ) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = MembershipState::Leave; - - self.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - user_id, - room_id, - db, - &state_lock, - )?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - async fn remote_leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - let mut make_leave_response_and_server = Err(Error::BadServerResponse( - "No server available to assist in leaving.", - )); - - let invite_state = db - .rooms - .invite_state(user_id, room_id)? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "User is not invited.", - ))?; - - let servers: HashSet<_> = invite_state - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); - - for remote_server in servers { - let make_leave_response = db - .sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, - ) - .await; - - make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); - - if make_leave_response_and_server.is_ok() { - break; - } - } - - let (make_leave_response, remote_server) = make_leave_response_and_server?; - - let room_version_id = match make_leave_response.room_version { - Some(version) if self.is_supported_version(&db, &version) => version, - _ => return Err(Error::BadServerResponse("Room version is not supported")), - }; - - let mut leave_event_stub = - serde_json::from_str::(make_leave_response.event.get()).map_err( - |_| Error::BadServerResponse("Invalid make_leave event json received from server."), - )?; - - // TODO: Is origin needed? 
- leave_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - leave_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms - leave_event_stub.remove("event_id"); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut leave_event_stub, - &room_version_id, - ) - .expect("event is valid, we just created it"); - - // Generate event id - let event_id = EventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - // Add event_id back - leave_event_stub.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - - // It has enough fields to be called a proper event now - let leave_event = leave_event_stub; - - db.sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::create_leave_event::v2::Request { - room_id, - event_id: &event_id, - pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), - }, - ) - .await?; - - Ok(()) - } - - /// Makes a user forget a room. - #[tracing::instrument(skip(self))] - pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - - Ok(()) - } - - #[tracing::instrument(skip(self, globals))] - pub fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId>, - globals: &super::globals::Globals, - ) -> Result<()> { - if let Some(room_id) = room_id { - // New alias - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - } else { - // room_id=None means remove alias - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result>> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_aliases<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? - .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { - if public { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } else { - self.publicroomids.remove(room_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn iter_ids(&self) -> impl Iterator>> + '_ { - self.roomid_shortroomid.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn search_pdus<'a>( - &'a self, - room_id: &RoomId, - search_string: &str, - ) -> Result> + 'a, Vec)>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - let prefix_clone = prefix.clone(); - - let words: Vec<_> = search_string - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .map(str::to_lowercase) - .collect(); - - let iterators = words.clone().into_iter().map(move |word| { - let mut prefix2 = prefix.clone(); - prefix2.extend_from_slice(word.as_bytes()); - prefix2.push(0xff); - - let mut last_possible_id = prefix2.clone(); - last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.tokenids - .iter_from(&last_possible_id, true) // Newest pdus first - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) - }); - - Ok(utils::common_elements(iterators, |a, b| { - // We compare b with a because we reversed the iterator earlier - b.cmp(a) - }) - .map(|iter| { - ( - iter.map(move |id| { - let mut pduid = prefix_clone.clone(); - pduid.extend_from_slice(&id); - pduid - }), - words, - ) - })) - } - - #[tracing::instrument(skip(self))] - pub fn get_shared_rooms<'a>( - &'a self, - users: Vec>, - ) -> Result>> + 'a> { - let iterators = users.into_iter().map(move |user_id| { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_joined - .scan_prefix(prefix) - .map(|(key, _)| { - let roomid_index = key - .iter() - .enumerate() - .find(|(_, &b)| b == 0xff) - .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? 
- .0 - + 1; // +1 because the room id starts AFTER the separator - - let room_id = key[roomid_index..].to_vec(); - - Ok::<_, Error>(room_id) - }) - .filter_map(|r| r.ok()) - }); - - // We use the default compare function because keys are sorted correctly (not reversed) - Ok(utils::common_elements(iterators, Ord::cmp) - .expect("users is not empty") - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { - Error::bad_database("Invalid RoomId bytes in userroomid_joined") - })?) - .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - })) - } - - /// Returns an iterator of all servers participating in this room. - #[tracing::instrument(skip(self))] - pub fn room_servers<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomserverids.scan_prefix(prefix).map(|(key, _)| { - ServerName::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Server name in roomserverids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.serverroomids.get(&key).map(|o| o.is_some()) - } - - /// Returns an iterator of all rooms a server participates in (as far as we know). - #[tracing::instrument(skip(self))] - pub fn server_rooms<'a>( - &'a self, - server: &ServerName, - ) -> impl Iterator>> + 'a { - let mut prefix = server.as_bytes().to_vec(); - prefix.push(0xff); - - self.serverroomids.scan_prefix(prefix).map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) - }) - } - - /// Returns an iterator over all joined members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { - self.roomid_joinedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { - self.roomid_invitedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - /// Returns an iterator over all User IDs who ever joined a room. 
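// Aside (not part of the patch): a hedged sketch of the inverted-index idea behind the
// tokenids tree and search_pdus above. Message bodies are split on non-alphanumeric
// characters and lowercased, each word is stored against the PDU it came from, and a search
// intersects the PDU sets of all query words. The alias `Index` and the BTree types are
// simplified placeholders assuming a single room; they are not Conduit's actual trees.
use std::collections::{BTreeMap, BTreeSet};

type Index = BTreeMap<String, BTreeSet<u64>>; // word -> set of PDU counts

fn index_body(index: &mut Index, pdu_count: u64, body: &str) {
    for word in body
        .split_terminator(|c: char| !c.is_alphanumeric())
        .filter(|w| !w.is_empty())
        .map(str::to_lowercase)
    {
        index.entry(word).or_default().insert(pdu_count);
    }
}

fn search(index: &Index, query: &str) -> BTreeSet<u64> {
    let mut words = query
        .split_terminator(|c: char| !c.is_alphanumeric())
        .filter(|w| !w.is_empty())
        .map(str::to_lowercase);
    let mut result = words
        .next()
        .and_then(|w| index.get(&w).cloned())
        .unwrap_or_default();
    for word in words {
        let set = index.get(&word).cloned().unwrap_or_default();
        let next: BTreeSet<u64> = result.intersection(&set).cloned().collect();
        result = next;
    }
    result
}

fn main() {
    let mut index = Index::new();
    index_body(&mut index, 1, "Hello world");
    index_body(&mut index, 2, "hello conduit");
    assert_eq!(search(&index, "hello world"), BTreeSet::from([1u64]));
}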
- #[tracing::instrument(skip(self))] - pub fn room_useroncejoined<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuseroncejoinedids - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in room_useroncejoined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) - }) - } - - /// Returns an iterator over all invited members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members_invited<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_invitecount - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_invitecount - .get(&key)? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid invitecount in db.") - })?)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_leftcount - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid leftcount in db.")) - }) - .transpose() - } - - pub fn is_disabled(&self, room_id: &RoomId) -> Result { - Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) - } - - /// Returns an iterator over all rooms this user joined. - #[tracing::instrument(skip(self))] - pub fn rooms_joined<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator>> + 'a { - self.userroomid_joined - .scan_prefix(user_id.as_bytes().to_vec()) - .map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) - }) - } - - /// Returns an iterator over all rooms a user was invited to. 
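// Aside (not part of the patch): a minimal sketch of the key-parsing pattern shared by the
// membership iterators above. Composite keys have the shape "prefix 0xff suffix", and the
// interesting identifier is recovered by taking everything after the last 0xff and re-parsing
// it as UTF-8. Plain Strings stand in for the real ID types; this is illustrative only.
fn suffix_after_last_separator(key: &[u8]) -> Option<String> {
    let suffix = key
        .rsplit(|&b| b == 0xff)
        .next()
        .expect("rsplit always returns at least one element");
    String::from_utf8(suffix.to_vec()).ok()
}

fn main() {
    let mut key = b"!room:example.org".to_vec();
    key.push(0xff);
    key.extend_from_slice(b"@alice:example.org");

    assert_eq!(
        suffix_after_last_separator(&key).as_deref(),
        Some("@alice:example.org")
    );
}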
- #[tracing::instrument(skip(self))] - pub fn rooms_invited<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_invitestate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn invite_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_invitestate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok(state) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn left_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_leftstate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok(state) - }) - .transpose() - } - - /// Returns an iterator over all rooms a user left. - #[tracing::instrument(skip(self))] - pub fn rooms_left<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_leftstate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_left(&self, user_id: &UserId, 
room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( - &'a self, - key: &[u64], - ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); - } - - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? - .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - }) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } - } - - Ok(None) - } - - #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db - if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - )?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_mark_sent( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - lazy_load: HashSet>, - count: u64, - ) { - self.lazy_load_waiting.lock().unwrap().insert( - ( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - count, - ), - lazy_load, - ); - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } - - /// Returns the room's version. 
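// Aside (not part of the patch): a simplified sketch of the two-phase lazy-loading
// bookkeeping above. Member IDs included in a sync response are first parked in memory under
// (user, device, room, token), and only written to the persistent "already sent" set once the
// client's next request with that token proves the response arrived. The HashMap/HashSet
// fields stand in for the mutex-guarded map and the lazyloadedids tree; this is a sketch
// under those assumptions, not the real implementation.
use std::collections::{HashMap, HashSet};

#[derive(Default)]
struct LazyLoad {
    waiting: HashMap<(String, String, String, u64), HashSet<String>>,
    sent: HashSet<(String, String, String, String)>, // (user, device, room, member)
}

impl LazyLoad {
    fn mark_sent(&mut self, user: &str, device: &str, room: &str, token: u64, members: HashSet<String>) {
        self.waiting
            .insert((user.into(), device.into(), room.into(), token), members);
    }

    // Called when the client syncs again with `since == token`: the previous response landed.
    fn confirm_delivery(&mut self, user: &str, device: &str, room: &str, token: u64) {
        if let Some(members) = self
            .waiting
            .remove(&(user.into(), device.into(), room.into(), token))
        {
            for member in members {
                self.sent.insert((user.into(), device.into(), room.into(), member));
            }
        }
    }

    fn was_sent_before(&self, user: &str, device: &str, room: &str, member: &str) -> bool {
        self.sent
            .contains(&(user.into(), device.into(), room.into(), member.into()))
    }
}

fn main() {
    let mut ll = LazyLoad::default();
    ll.mark_sent("@a:x", "DEV", "!r:x", 5, HashSet::from(["@b:x".to_owned()]));
    assert!(!ll.was_sent_before("@a:x", "DEV", "!r:x", "@b:x"));
    ll.confirm_delivery("@a:x", "DEV", "!r:x", 5);
    assert!(ll.was_sent_before("@a:x", "DEV", "!r:x", "@b:x"));
}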
- #[tracing::instrument(skip(self))] - pub fn get_room_version(&self, room_id: &RoomId) -> Result { - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - let room_version = create_event_content - .map(|create_event| create_event.room_version) - .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; - Ok(room_version) - } -} diff --git a/src/service/rooms/state_cache.rs b/src/service/rooms/state_cache.rs index 4ad815e3..e7f457e6 100644 --- a/src/service/rooms/state_cache.rs +++ b/src/service/rooms/state_cache.rs @@ -1,2220 +1,3 @@ -mod edus; - -pub use edus::RoomEdus; - -use crate::{ - pdu::{EventHash, PduBuilder}, - utils, Database, Error, PduEvent, Result, -}; -use lru_cache::LruCache; -use regex::Regex; -use ring::digest; -use ruma::{ - api::{client::error::ErrorKind, federation}, - events::{ - direct::DirectEvent, - ignored_user_list::IgnoredUserListEvent, - push_rules::PushRulesEvent, - room::{ - create::RoomCreateEventContent, - member::{MembershipState, RoomMemberEventContent}, - power_levels::RoomPowerLevelsEventContent, - }, - tag::TagEvent, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, RoomEventType, StateEventType, - }, - push::{Action, Ruleset, Tweak}, - serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res::{self, RoomVersion, StateMap}, - uint, DeviceId, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, -}; -use serde::Deserialize; -use serde_json::value::to_raw_value; -use std::{ - borrow::Cow, - collections::{hash_map, BTreeMap, HashMap, HashSet}, - fmt::Debug, - iter, - mem::size_of, - sync::{Arc, Mutex, RwLock}, -}; -use tokio::sync::MutexGuard; -use tracing::{error, warn}; - -use super::{abstraction::Tree, pusher}; - -/// The unique identifier of each state group. -/// -/// This is created when a state group is added to the database by -/// hashing the entire state. -pub type StateHashId = Vec; -pub type CompressedStateEvent = [u8; 2 * size_of::()]; - -pub struct Rooms { - pub edus: RoomEdus, - pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count - pub(super) eventid_pduid: Arc, - pub(super) roomid_pduleaves: Arc, - pub(super) alias_roomid: Arc, - pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count - pub(super) publicroomids: Arc, - - pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount - - /// Participating servers in a room. 
- pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName - pub(super) serverroomids: Arc, // ServerRoomId = ServerName + RoomId - - pub(super) userroomid_joined: Arc, - pub(super) roomuserid_joined: Arc, - pub(super) roomid_joinedcount: Arc, - pub(super) roomid_invitedcount: Arc, - pub(super) roomuseroncejoinedids: Arc, - pub(super) userroomid_invitestate: Arc, // InviteState = Vec> - pub(super) roomuserid_invitecount: Arc, // InviteCount = Count - pub(super) userroomid_leftstate: Arc, - pub(super) roomuserid_leftcount: Arc, - - pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled - - pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId - - pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 - pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 - - /// Remember the current state hash of a room. - pub(super) roomid_shortstatehash: Arc, - pub(super) roomsynctoken_shortstatehash: Arc, - /// Remember the state hash at events in the past. - pub(super) shorteventid_shortstatehash: Arc, - /// StateKey = EventType + StateKey, ShortStateKey = Count - pub(super) statekey_shortstatekey: Arc, - pub(super) shortstatekey_statekey: Arc, - - pub(super) roomid_shortroomid: Arc, - - pub(super) shorteventid_eventid: Arc, - pub(super) eventid_shorteventid: Arc, - - pub(super) statehash_shortstatehash: Arc, - pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) - - pub(super) shorteventid_authchain: Arc, - - /// RoomId + EventId -> outlier PDU. - /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) eventid_outlierpdu: Arc, - pub(super) softfailedeventids: Arc, - - /// RoomId + EventId -> Parent PDU EventId. - pub(super) referencedevents: Arc, - - pub(super) pdu_cache: Mutex, Arc>>, - pub(super) shorteventid_cache: Mutex>>, - pub(super) auth_chain_cache: Mutex, Arc>>>, - pub(super) eventidshort_cache: Mutex, u64>>, - pub(super) statekeyshort_cache: Mutex>, - pub(super) shortstatekey_cache: Mutex>, - pub(super) our_real_users_cache: RwLock, Arc>>>>, - pub(super) appservice_in_room_cache: RwLock, HashMap>>, - pub(super) lazy_load_waiting: - Mutex, Box, Box, u64), HashSet>>>, - pub(super) stateinfo_cache: Mutex< - LruCache< - u64, - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - >, - >, - pub(super) lasttimelinecount_cache: Mutex, u64>>, -} - -impl Rooms { - /// Returns true if a given room version is supported - #[tracing::instrument(skip(self, db))] - pub fn is_supported_version(&self, db: &Database, room_version: &RoomVersionId) -> bool { - db.globals.supported_room_versions().contains(room_version) - } - - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? 
- .pop() - .expect("there is always one layer") - .1; - let mut result = BTreeMap::new(); - let mut i = 0; - for compressed in full_state.into_iter() { - let parsed = self.parse_compressed_state_event(compressed)?; - result.insert(parsed.0, parsed.1); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - Ok(result) - } - - #[tracing::instrument(skip(self))] - pub async fn state_full( - &self, - shortstatehash: u64, - ) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - let mut result = HashMap::new(); - let mut i = 0; - for compressed in full_state { - let (_, eventid) = self.parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.get_pdu(&eventid)? { - result.insert( - ( - pdu.kind.to_string().into(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - ); - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get_id( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { - Some(s) => s, - None => return Ok(None), - }; - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .into_iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - self.parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) - } - - /// Returns the state hash for this pdu. - pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.eventid_shorteventid - .get(event_id.as_bytes())? - .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) - }) - .transpose() - }) - } - - /// Returns the last state hash key added to the db for the given room. - #[tracing::instrument(skip(self))] - pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - /// This fetches auth events from the current state. - #[tracing::instrument(skip(self))] - pub fn get_auth_events( - &self, - room_id: &RoomId, - kind: &RoomEventType, - sender: &UserId, - state_key: Option<&str>, - content: &serde_json::value::RawValue, - ) -> Result>> { - let shortstatehash = - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - current_shortstatehash - } else { - return Ok(HashMap::new()); - }; - - let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) - .expect("content is a valid JSON object"); - - let mut sauthevents = auth_events - .into_iter() - .filter_map(|(event_type, state_key)| { - self.get_shortstatekey(&event_type.to_string().into(), &state_key) - .ok() - .flatten() - .map(|s| (s, (event_type, state_key))) - }) - .collect::>(); - - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - Ok(full_state - .into_iter() - .filter_map(|compressed| self.parse_compressed_state_event(compressed).ok()) - .filter_map(|(shortstatekey, event_id)| { - sauthevents.remove(&shortstatekey).map(|k| (k, event_id)) - }) - .filter_map(|(k, event_id)| self.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu))) - .collect()) - } - - /// Generate a new StateHash. - /// - /// A unique hash made from hashing all PDU ids of the state joined with 0xff. - fn calculate_hash(&self, bytes_list: &[&[u8]]) -> StateHashId { - // We only hash the pdu's event ids, not the whole pdu - let bytes = bytes_list.join(&0xff); - let hash = digest::digest(&digest::SHA256, &bytes); - hash.as_ref().into() - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. - self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() - } - - /// Force the creation of a new StateHash and insert it into the db. - /// - /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. 
- #[tracing::instrument(skip(self, new_state_ids_compressed, db))] - pub fn force_state( - &self, - room_id: &RoomId, - new_state_ids_compressed: HashSet, - db: &Database, - ) -> Result<()> { - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - for event_id in statediffnew.into_iter().filter_map(|new| { - self.parse_compressed_state_event(new) - .ok() - .map(|(_, id)| id) - }) { - let pdu = match self.get_pdu_json(&event_id)? { - Some(pdu) => pdu, - None => continue, - }; - - if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { - continue; - } - - let pdu: PduEvent = match serde_json::from_str( - &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), - ) { - Ok(pdu) => pdu, - Err(_) => continue, - }; - - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let membership = match serde_json::from_str::(pdu.content.get()) { - Ok(e) => e.membership, - Err(_) => continue, - }; - - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, - }; - - let user_id = match UserId::parse(state_key) { - Ok(id) => id, - Err(_) => continue, - }; - - self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; - } - - self.update_joined_count(room_id, db)?; - - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. - #[tracing::instrument(skip(self))] - pub fn load_shortstatehash_info( - &self, - shortstatehash: u64, - ) -> Result< - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - > { - if let Some(r) = self - .stateinfo_cache - .lock() - .unwrap() - .get_mut(&shortstatehash) - { - return Ok(r.clone()); - } - - let value = self - .shortstatehash_statediff - .get(&shortstatehash.to_be_bytes())? 
- .ok_or_else(|| Error::bad_database("State hash does not exist"))?; - let parent = - utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - - let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); - - let mut i = size_of::(); - while let Some(v) = value.get(i..i + 2 * size_of::()) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i += size_of::(); - continue; - } - if add_mode { - added.insert(v.try_into().expect("we checked the size above")); - } else { - removed.insert(v.try_into().expect("we checked the size above")); - } - i += 2 * size_of::(); - } - - if parent != 0_u64 { - let mut response = self.load_shortstatehash_info(parent)?; - let mut state = response.last().unwrap().1.clone(); - state.extend(added.iter().copied()); - for r in &removed { - state.remove(r); - } - - response.push((shortstatehash, state, added, removed)); - - Ok(response) - } else { - let response = vec![(shortstatehash, added.clone(), added, removed)]; - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); - Ok(response) - } - } - - pub fn compress_state_event( - &self, - shortstatekey: u64, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - let mut v = shortstatekey.to_be_bytes().to_vec(); - v.extend_from_slice( - &self - .get_or_create_shorteventid(event_id, globals)? - .to_be_bytes(), - ); - Ok(v.try_into().expect("we checked the size above")) - } - - /// Returns shortstatekey, event id - pub fn parse_compressed_state_event( - &self, - compressed_event: CompressedStateEvent, - ) -> Result<(u64, Arc)> { - Ok(( - utils::u64_from_bytes(&compressed_event[0..size_of::()]) - .expect("bytes have right length"), - self.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]) - .expect("bytes have right length"), - )?, - )) - } - - /// Creates a new shortstatehash that often is just a diff to an already existing - /// shortstatehash and therefore very efficient. - /// - /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer - /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 - /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's - /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. - /// - /// * `shortstatehash` - Shortstatehash of this state - /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid - /// * `statediffremoved` - Removed from base. 
Each vec is shortstatekey+shorteventid - /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer - /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer - #[tracing::instrument(skip( - self, - statediffnew, - statediffremoved, - diff_to_sibling, - parent_states - ))] - pub fn save_state_from_diff( - &self, - shortstatehash: u64, - statediffnew: HashSet, - statediffremoved: HashSet, - diff_to_sibling: usize, - mut parent_states: Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - ) -> Result<()> { - let diffsum = statediffnew.len() + statediffremoved.len(); - - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().unwrap(); - - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - - return Ok(()); - } - - if parent_states.is_empty() { - // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - - return Ok(()); - }; - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. We replace a layer above - - let parent = parent_states.pop().unwrap(); - let parent_diff = parent.2.len() + parent.3.len(); - - if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { - // Diff too big, we replace above layer(s) - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. 
We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.to_be_bytes().to_vec(); - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed[..]); - } - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - } - - Ok(()) - } - - /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash( - &self, - state_hash: &StateHashId, - globals: &super::globals::Globals, - ) -> Result<(u64, bool)> { - Ok(match self.statehash_shortstatehash.get(state_hash)? { - Some(shortstatehash) => ( - utils::u64_from_bytes(&shortstatehash) - .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, - true, - ), - None => { - let shortstatehash = globals.next_count()?; - self.statehash_shortstatehash - .insert(state_hash, &shortstatehash.to_be_bytes())?; - (shortstatehash, false) - } - }) - } - - pub fn get_or_create_shorteventid( - &self, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { - return Ok(*short); - } - - let short = match self.eventid_shorteventid.get(event_id.as_bytes())? { - Some(shorteventid) => utils::u64_from_bytes(&shorteventid) - .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, - None => { - let shorteventid = globals.next_count()?; - self.eventid_shorteventid - .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; - shorteventid - } - }; - - self.eventidshort_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), short); - - Ok(short) - } - - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { - self.roomid_shortroomid - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) - }) - .transpose() - } - - pub fn get_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - ) -> Result> { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(Some(*short)); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = self - .statekey_shortstatekey - .get(&statekey)? - .map(|shortstatekey| { - utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) - }) - .transpose()?; - - if let Some(s) = short { - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), s); - } - - Ok(short) - } - - pub fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - globals: &super::globals::Globals, - ) -> Result { - Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? 
{ - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, - None => { - let short = globals.next_count()?; - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes())?; - short - } - }) - } - - pub fn get_or_create_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(*short); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = match self.statekey_shortstatekey.get(&statekey)? { - Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, - None => { - let shortstatekey = globals.next_count()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes())?; - self.shortstatekey_statekey - .insert(&shortstatekey.to_be_bytes(), &statekey)?; - shortstatekey - } - }; - - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), short); - - Ok(short) - } - - pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - if let Some(id) = self - .shorteventid_cache - .lock() - .unwrap() - .get_mut(&shorteventid) - { - return Ok(Arc::clone(id)); - } - - let bytes = self - .shorteventid_eventid - .get(&shorteventid.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - - let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; - - self.shorteventid_cache - .lock() - .unwrap() - .insert(shorteventid, Arc::clone(&event_id)); - - Ok(event_id) - } - - pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { - if let Some(id) = self - .shortstatekey_cache - .lock() - .unwrap() - .get_mut(&shortstatekey) - { - return Ok(id.clone()); - } - - let bytes = self - .shortstatekey_statekey - .get(&shortstatekey.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; - - let mut parts = bytes.splitn(2, |&b| b == 0xff); - let eventtype_bytes = parts.next().expect("split always returns one entry"); - let statekey_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; - - let event_type = - StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { - Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; - - let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { - Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") - })?; - - let result = (event_type, state_key); - - self.shortstatekey_cache - .lock() - .unwrap() - .insert(shortstatekey, result.clone()); - - Ok(result) - } - - /// Returns the full room state. - #[tracing::instrument(skip(self))] - pub async fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns the `count` of this pdu's id. - pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) - } - - /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - - /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu's id. - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? 
- .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { - return Ok(Some(Arc::clone(p))); - } - - if let Some(pdu) = self - .eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - .map(Arc::new) - }) - .transpose()? - { - self.pdu_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), Arc::clone(&pdu)); - Ok(Some(pdu)) - } else { - Ok(None) - } - } - - /// Returns the pdu. - /// - /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Returns the pdu as a `BTreeMap`. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Removes a pdu and creates a new one with the same id. - #[tracing::instrument(skip(self))] - fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - if self.pduid_pdu.get(pdu_id)?.is_some() { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), - )?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "PDU does not exist.", - )) - } - } - - /// Returns the leaf pdus of a room. - #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - #[tracing::instrument(skip(self, room_id, event_ids))] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - /// Replace the leaves of a room. - /// - /// The provided `event_ids` become the new leaves, this allows a room to have multiple - /// `prev_events`. 
- #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves<'a>( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - /// Returns the pdu from the outlier tree. - pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - /// Append the PDU as an outlier. - /// - /// Any event given to this will be processed (state-res) on another thread. - #[tracing::instrument(skip(self, pdu))] - pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } - - #[tracing::instrument(skip(self))] - pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } - - /// Creates a new persisted data unit and adds it to a room. - /// - /// By this point the incoming event should be fully authenticated, no auth happens - /// in `append_pdu`. - /// - /// Returns pdu id - #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] - pub fn append_pdu<'a>( - &self, - pdu: &PduEvent, - mut pdu_json: CanonicalJsonObject, - leaves: impl IntoIterator + Debug, - db: &Database, - ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); - - // Make unsigned fields correct. This is not properly documented in the spec, but state - // events need to have previous content in the unsigned field, so clients can easily - // interpret things like membership changes - if let Some(state_key) = &pdu.state_key { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) - { - if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) - .unwrap() - { - unsigned.insert( - "prev_content".to_owned(), - CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()) - .expect("event is valid, we just created it"), - ), - ); - } - } - } else { - error!("Invalid unsigned type in pdu."); - } - } - - // We must keep track of all events that have been referenced. 
- self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - self.replace_pdu_leaves(&pdu.room_id, leaves)?; - - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(pdu.room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - - let count1 = db.globals.next_count()?; - // Mark as read first so the sending client doesn't get a notification even if appending - // fails - self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; - self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; - - let count2 = db.globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&count2.to_be_bytes()); - - // There's a brief moment of time here where the count is updated but the pdu does not - // exist. This could theoretically lead to dropped pdus, but it's extremely rare - // - // Update: We fixed this using insert_lock - - self.pduid_pdu.insert( - &pdu_id, - &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - self.lasttimelinecount_cache - .lock() - .unwrap() - .insert(pdu.room_id.clone(), count2); - - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; - - drop(insert_lock); - - // See if the event matches any known pushers - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - let sync_pdu = pdu.to_sync_room_event(); - - let mut notifies = Vec::new(); - let mut highlights = Vec::new(); - - for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { - // Don't notify the user of their own events - if user == &pdu.sender { - continue; - } - - let rules_for_user = db - .account_data - .get( - None, - user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? - .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); - - let mut highlight = false; - let mut notify = false; - - for action in pusher::get_actions( - user, - &rules_for_user, - &power_levels, - &sync_pdu, - &pdu.room_id, - db, - )? 
{ - match action { - Action::DontNotify => notify = false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => notify = true, - Action::SetTweak(Tweak::Highlight(true)) => { - highlight = true; - } - _ => {} - }; - } - - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(pdu.room_id.as_bytes()); - - if notify { - notifies.push(userroom_id.clone()); - } - - if highlight { - highlights.push(userroom_id); - } - - for senderkey in db.pusher.get_pusher_senderkeys(user) { - db.sending.send_push_pdu(&*pdu_id, senderkey)?; - } - } - - self.userroomid_notificationcount - .increment_batch(&mut notifies.into_iter())?; - self.userroomid_highlightcount - .increment_batch(&mut highlights.into_iter())?; - - match pdu.kind { - RoomEventType::RoomRedaction => { - if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(redact_id, pdu)?; - } - } - RoomEventType::RoomMember => { - if let Some(state_key) = &pdu.state_key { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - // if the state_key fails - let target_user_id = UserId::parse(state_key.clone()) - .expect("This state_key was previously validated"); - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - let invite_state = match content.membership { - MembershipState::Invite => { - let state = self.calculate_invite_state(pdu)?; - Some(state) - } - _ => None, - }; - - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( - &pdu.room_id, - &target_user_id, - content.membership, - &pdu.sender, - invite_state, - db, - true, - )?; - } - } - RoomEventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody<'a> { - #[serde(borrow)] - body: Option>, - } - - let content = serde_json::from_str::>(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - let mut batch = body - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= 50) - .map(str::to_lowercase) - .map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xff); - key.extend_from_slice(&pdu_id); - (key, Vec::new()) - }); - - self.tokenids.insert_batch(&mut batch)?; - - let admin_room = self.id_from_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )?; - let server_user = format!("@conduit:{}", db.globals.server_name()); - - let to_conduit = body.starts_with(&format!("{}: ", server_user)); - - // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let from_conduit = - pdu.sender == server_user && db.globals.emergency_password().is_none(); - - if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - db.admin.process_message(body.to_string()); - } - } - } - _ => {} - } - - Ok(pdu_id) - } - - #[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - match self - .lasttimelinecount_cache - .lock() - .unwrap() - .entry(room_id.to_owned()) - { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - 
.pdus_until(&sender_user, &room_id, u64::MAX)? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .map(|(pduid, _)| self.pdu_count(&pduid)) - .next() - { - Ok(*v.insert(last_count?)) - } else { - Ok(0) - } - } - hash_map::Entry::Occupied(o) => Ok(*o.get()), - } - } - - #[tracing::instrument(skip(self))] - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - self.userroomid_highlightcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid notification count in db.")) - }) - .unwrap_or(Ok(0)) - } - - #[tracing::instrument(skip(self))] - pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_highlightcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid highlight count in db.")) - }) - .unwrap_or(Ok(0)) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, state_ids_compressed, globals))] - pub fn set_event_state( - &self, - event_id: &EventId, - room_id: &RoomId, - state_ids_compressed: HashSet, - globals: &super::globals::Globals, - ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &state_ids_compressed - .iter() - .map(|s| &s[..]) - .collect::>(), - ); - - let (shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, globals)?; - - if !already_existed { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (state_ids_compressed, HashSet::new()) - }; - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 1_000_000, // high number because no state will be based on this one - states_parents, - )?; - } - - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. 
- /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, new_pdu, globals))] - pub fn append_to_state( - &self, - new_pdu: &PduEvent, - globals: &super::globals::Globals, - ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; - - if let Some(p) = previous_shortstatehash { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; - } - - if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let shortstatekey = self.get_or_create_shortstatekey( - &new_pdu.kind.to_string().into(), - state_key, - globals, - )?; - - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; - - let replaces = states_parents - .last() - .map(|info| { - info.1 - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); - - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } - - // TODO: statehash with deterministic inputs - let shortstatehash = globals.next_count()?; - - let mut statediffnew = HashSet::new(); - statediffnew.insert(new); - - let mut statediffremoved = HashSet::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) - } - } - - #[tracing::instrument(skip(self, invite_event))] - pub fn calculate_invite_state( - &self, - invite_event: &PduEvent, - ) -> Result>> { - let mut state = Vec::new(); - // Add recommended events - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomCanonicalAlias, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomMember, - invite_event.sender.as_str(), - )? 
{ - state.push(e.to_stripped_state_event()); - } - - state.push(invite_event.to_stripped_state_event()); - Ok(state) - } - - #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - pub fn associate_token_shortstatehash( - &self, - room_id: &RoomId, - token: u64, - shortstatehash: u64, - ) -> Result<()> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .insert(&key, &shortstatehash.to_be_bytes()) - } - - pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") - }) - }) - .transpose() - } - - /// Creates a new persisted data unit and adds it to a room. - #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex - ) -> Result> { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - } = pdu_builder; - - let prev_events = self - .get_pdu_leaves(room_id)? - .into_iter() - .take(20) - .collect::>(); - - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - - let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = unsigned.unwrap_or_default(); - if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - self.room_state_get(room_id, &event_type.to_string().into(), state_key)? 
- { - unsigned.insert( - "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), - ); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), - ); - } - } - - let mut pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key, - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_ref().to_owned()), - ); - - match ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) { - Ok(_) => {} - Err(e) => { - return match e { - ruma::signatures::Error::PduSize => Err(Error::BadRequest( - ErrorKind::TooLarge, - "Message is too long", - )), - _ => Err(Error::BadRequest( - ErrorKind::Unknown, - "Signing event failed", - )), - } - } - } - - // Generate event id - pdu.event_id = EventId::parse_arc(format!( - "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - pdu_json.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), - ); - - // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; - - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. 
- let statehashid = self.append_to_state(&pdu, &db.globals)?; - - let pdu_id = self.append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - iter::once(&*pdu.event_id), - db, - )?; - - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; - - let mut servers: HashSet> = - self.room_servers(room_id).filter_map(|r| r.ok()).collect(); - - // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - servers.insert(Box::from(state_key_uid.server_name())); - } - } - - // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(db.globals.server_name()); - - db.sending.send_pdu(servers.into_iter(), &pdu_id)?; - - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - if let Some(appservice_uid) = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }) - { - if state_key_uid == &appservice_uid { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - } - } - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - Ok(pdu.event_id) - } - - /// Returns an iterator over all PDUs in a room. 
- #[tracing::instrument(skip(self))] - pub fn all_pdus<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result, PduEvent)>> + 'a> { - self.pdus_since(user_id, room_id, 0) - } - - /// Returns an iterator over all events in a room that happened after the event with id `since` - /// in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_since<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - since: u64, - ) -> Result, PduEvent)>> + 'a> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Skip the first pdu if it's exactly at since, because we sent that last time - let mut first_pdu_id = prefix.clone(); - first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(&first_pdu_id, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their tokens in a room that happened before the - /// event with id `until` in reverse-chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_until<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - until: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their token in a room that happened after the event - /// with id `from` in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_after<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - from: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Replace a PDU with the redacted form. - #[tracing::instrument(skip(self, reason))] - pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { - if let Some(pdu_id) = self.get_pdu_id(event_id)? { - let mut pdu = self - .get_pdu_from_id(&pdu_id)? 
- .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact(reason)?; - self.replace_pdu(&pdu_id, &pdu)?; - } - // If event does not exist, just noop - Ok(()) - } /// Update current membership data. #[tracing::instrument(skip(self, last_state, db))] @@ -2569,211 +352,6 @@ impl Rooms { } } - // Make a user leave all their joined rooms - #[tracing::instrument(skip(self, db))] - pub async fn leave_all_rooms(&self, user_id: &UserId, db: &Database) -> Result<()> { - let all_rooms = db - .rooms - .rooms_joined(user_id) - .chain(db.rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) - .collect::>(); - - for room_id in all_rooms { - let room_id = match room_id { - Ok(room_id) => room_id, - Err(_) => continue, - }; - - let _ = self.leave_room(user_id, &room_id, db).await; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - pub async fn leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - // Ask a remote server if we don't have this room - if !self.exists(room_id)? && room_id.server_name() != db.globals.server_name() { - if let Err(e) = self.remote_leave_room(user_id, room_id, db).await { - warn!("Failed to leave room {} remotely: {}", user_id, e); - // Don't tell the client about this error - } - - let last_state = self - .invite_state(user_id, room_id)? - .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?; - - // We always drop the invite, we can't rely on other servers - self.update_membership( - room_id, - user_id, - MembershipState::Leave, - user_id, - last_state, - db, - true, - )?; - } else { - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - let mut event: RoomMemberEventContent = serde_json::from_str( - self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot leave a room you are not a member of.", - ))? - .content - .get(), - ) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = MembershipState::Leave; - - self.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - user_id, - room_id, - db, - &state_lock, - )?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - async fn remote_leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - let mut make_leave_response_and_server = Err(Error::BadServerResponse( - "No server available to assist in leaving.", - )); - - let invite_state = db - .rooms - .invite_state(user_id, room_id)? 
- .ok_or(Error::BadRequest( - ErrorKind::BadState, - "User is not invited.", - ))?; - - let servers: HashSet<_> = invite_state - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); - - for remote_server in servers { - let make_leave_response = db - .sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, - ) - .await; - - make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); - - if make_leave_response_and_server.is_ok() { - break; - } - } - - let (make_leave_response, remote_server) = make_leave_response_and_server?; - - let room_version_id = match make_leave_response.room_version { - Some(version) if self.is_supported_version(&db, &version) => version, - _ => return Err(Error::BadServerResponse("Room version is not supported")), - }; - - let mut leave_event_stub = - serde_json::from_str::(make_leave_response.event.get()).map_err( - |_| Error::BadServerResponse("Invalid make_leave event json received from server."), - )?; - - // TODO: Is origin needed? - leave_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - leave_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms - leave_event_stub.remove("event_id"); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut leave_event_stub, - &room_version_id, - ) - .expect("event is valid, we just created it"); - - // Generate event id - let event_id = EventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - // Add event_id back - leave_event_stub.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - - // It has enough fields to be called a proper event now - let leave_event = leave_event_stub; - - db.sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::create_leave_event::v2::Request { - room_id, - event_id: &event_id, - pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), - }, - ) - .await?; - - Ok(()) - } - /// Makes a user forget a room. 
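// Illustrative sketch, not part of this patch: remote_leave_room above picks
// which servers to ask for a leave event by collecting the server names of the
// senders of the stripped invite-state events. A simplified standalone version
// of that extraction over plain user-id strings (the real code parses proper
// UserId values instead of splitting strings):
use std::collections::HashSet;

fn candidate_servers<'a>(
    invite_state_senders: impl IntoIterator<Item = &'a str>,
) -> HashSet<String> {
    invite_state_senders
        .into_iter()
        // A Matrix user id looks like "@localpart:server.name"; everything
        // after the first ':' is the server name.
        .filter_map(|sender| sender.split_once(':').map(|(_, server)| server.to_owned()))
        .collect()
}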
#[tracing::instrument(skip(self))] pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { @@ -2791,198 +369,6 @@ impl Rooms { Ok(()) } - #[tracing::instrument(skip(self, globals))] - pub fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId>, - globals: &super::globals::Globals, - ) -> Result<()> { - if let Some(room_id) = room_id { - // New alias - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - } else { - // room_id=None means remove alias - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result>> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_aliases<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? - .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { - if public { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } else { - self.publicroomids.remove(room_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn iter_ids(&self) -> impl Iterator>> + '_ { - self.roomid_shortroomid.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn search_pdus<'a>( - &'a self, - room_id: &RoomId, - search_string: &str, - ) -> Result> + 'a, Vec)>> { - let prefix = self - .get_shortroomid(room_id)? 
- .expect("room exists") - .to_be_bytes() - .to_vec(); - let prefix_clone = prefix.clone(); - - let words: Vec<_> = search_string - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .map(str::to_lowercase) - .collect(); - - let iterators = words.clone().into_iter().map(move |word| { - let mut prefix2 = prefix.clone(); - prefix2.extend_from_slice(word.as_bytes()); - prefix2.push(0xff); - - let mut last_possible_id = prefix2.clone(); - last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.tokenids - .iter_from(&last_possible_id, true) // Newest pdus first - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) - }); - - Ok(utils::common_elements(iterators, |a, b| { - // We compare b with a because we reversed the iterator earlier - b.cmp(a) - }) - .map(|iter| { - ( - iter.map(move |id| { - let mut pduid = prefix_clone.clone(); - pduid.extend_from_slice(&id); - pduid - }), - words, - ) - })) - } - - #[tracing::instrument(skip(self))] - pub fn get_shared_rooms<'a>( - &'a self, - users: Vec>, - ) -> Result>> + 'a> { - let iterators = users.into_iter().map(move |user_id| { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_joined - .scan_prefix(prefix) - .map(|(key, _)| { - let roomid_index = key - .iter() - .enumerate() - .find(|(_, &b)| b == 0xff) - .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? - .0 - + 1; // +1 because the room id starts AFTER the separator - - let room_id = key[roomid_index..].to_vec(); - - Ok::<_, Error>(room_id) - }) - .filter_map(|r| r.ok()) - }); - - // We use the default compare function because keys are sorted correctly (not reversed) - Ok(utils::common_elements(iterators, Ord::cmp) - .expect("users is not empty") - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { - Error::bad_database("Invalid RoomId bytes in userroomid_joined") - })?) - .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - })) - } - /// Returns an iterator of all servers participating in this room. #[tracing::instrument(skip(self))] pub fn room_servers<'a>( @@ -3166,10 +552,6 @@ impl Rooms { .transpose() } - pub fn is_disabled(&self, room_id: &RoomId) -> Result { - Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) - } - /// Returns an iterator over all rooms this user joined. #[tracing::instrument(skip(self))] pub fn rooms_joined<'a>( @@ -3333,171 +715,3 @@ impl Rooms { Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) } - #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( - &'a self, - key: &[u64], - ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); - } - - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? 
- .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - }) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } - } - - Ok(None) - } - - #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db - if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - )?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_mark_sent( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - lazy_load: HashSet>, - count: u64, - ) { - self.lazy_load_waiting.lock().unwrap().insert( - ( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - count, - ), - lazy_load, - ); - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } - - /// Returns the room's version. 
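// Illustrative sketch, not part of this patch: the lazy-load bookkeeping above
// stores one flat database key per (user, device, room, lazy-loaded user)
// tuple by joining the components with a 0xff separator byte; 0xff never
// appears inside valid UTF-8, so the parts stay unambiguous. A standalone key
// builder for that scheme:
fn build_composite_key(parts: &[&[u8]]) -> Vec<u8> {
    let mut key = Vec::new();
    for (i, part) in parts.iter().enumerate() {
        if i != 0 {
            key.push(0xff); // separator between components
        }
        key.extend_from_slice(part);
    }
    key
}

// Usage mirroring lazy_load_was_sent_before (identifiers are placeholders):
// let key = build_composite_key(&[
//     user_id.as_bytes(), device_id.as_bytes(), room_id.as_bytes(), ll_user.as_bytes(),
// ]);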
- #[tracing::instrument(skip(self))] - pub fn get_room_version(&self, room_id: &RoomId) -> Result { - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - let room_version = create_event_content - .map(|create_event| create_event.room_version) - .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; - Ok(room_version) - } -} diff --git a/src/service/rooms/state_compressor.rs b/src/service/rooms/state_compressor.rs index 4ad815e3..a56c0f5f 100644 --- a/src/service/rooms/state_compressor.rs +++ b/src/service/rooms/state_compressor.rs @@ -1,478 +1,3 @@ -mod edus; - -pub use edus::RoomEdus; - -use crate::{ - pdu::{EventHash, PduBuilder}, - utils, Database, Error, PduEvent, Result, -}; -use lru_cache::LruCache; -use regex::Regex; -use ring::digest; -use ruma::{ - api::{client::error::ErrorKind, federation}, - events::{ - direct::DirectEvent, - ignored_user_list::IgnoredUserListEvent, - push_rules::PushRulesEvent, - room::{ - create::RoomCreateEventContent, - member::{MembershipState, RoomMemberEventContent}, - power_levels::RoomPowerLevelsEventContent, - }, - tag::TagEvent, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, RoomEventType, StateEventType, - }, - push::{Action, Ruleset, Tweak}, - serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res::{self, RoomVersion, StateMap}, - uint, DeviceId, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, -}; -use serde::Deserialize; -use serde_json::value::to_raw_value; -use std::{ - borrow::Cow, - collections::{hash_map, BTreeMap, HashMap, HashSet}, - fmt::Debug, - iter, - mem::size_of, - sync::{Arc, Mutex, RwLock}, -}; -use tokio::sync::MutexGuard; -use tracing::{error, warn}; - -use super::{abstraction::Tree, pusher}; - -/// The unique identifier of each state group. -/// -/// This is created when a state group is added to the database by -/// hashing the entire state. -pub type StateHashId = Vec; -pub type CompressedStateEvent = [u8; 2 * size_of::()]; - -pub struct Rooms { - pub edus: RoomEdus, - pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count - pub(super) eventid_pduid: Arc, - pub(super) roomid_pduleaves: Arc, - pub(super) alias_roomid: Arc, - pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count - pub(super) publicroomids: Arc, - - pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount - - /// Participating servers in a room. 
- pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName - pub(super) serverroomids: Arc, // ServerRoomId = ServerName + RoomId - - pub(super) userroomid_joined: Arc, - pub(super) roomuserid_joined: Arc, - pub(super) roomid_joinedcount: Arc, - pub(super) roomid_invitedcount: Arc, - pub(super) roomuseroncejoinedids: Arc, - pub(super) userroomid_invitestate: Arc, // InviteState = Vec> - pub(super) roomuserid_invitecount: Arc, // InviteCount = Count - pub(super) userroomid_leftstate: Arc, - pub(super) roomuserid_leftcount: Arc, - - pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled - - pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId - - pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 - pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 - - /// Remember the current state hash of a room. - pub(super) roomid_shortstatehash: Arc, - pub(super) roomsynctoken_shortstatehash: Arc, - /// Remember the state hash at events in the past. - pub(super) shorteventid_shortstatehash: Arc, - /// StateKey = EventType + StateKey, ShortStateKey = Count - pub(super) statekey_shortstatekey: Arc, - pub(super) shortstatekey_statekey: Arc, - - pub(super) roomid_shortroomid: Arc, - - pub(super) shorteventid_eventid: Arc, - pub(super) eventid_shorteventid: Arc, - - pub(super) statehash_shortstatehash: Arc, - pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) - - pub(super) shorteventid_authchain: Arc, - - /// RoomId + EventId -> outlier PDU. - /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) eventid_outlierpdu: Arc, - pub(super) softfailedeventids: Arc, - - /// RoomId + EventId -> Parent PDU EventId. - pub(super) referencedevents: Arc, - - pub(super) pdu_cache: Mutex, Arc>>, - pub(super) shorteventid_cache: Mutex>>, - pub(super) auth_chain_cache: Mutex, Arc>>>, - pub(super) eventidshort_cache: Mutex, u64>>, - pub(super) statekeyshort_cache: Mutex>, - pub(super) shortstatekey_cache: Mutex>, - pub(super) our_real_users_cache: RwLock, Arc>>>>, - pub(super) appservice_in_room_cache: RwLock, HashMap>>, - pub(super) lazy_load_waiting: - Mutex, Box, Box, u64), HashSet>>>, - pub(super) stateinfo_cache: Mutex< - LruCache< - u64, - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - >, - >, - pub(super) lasttimelinecount_cache: Mutex, u64>>, -} - -impl Rooms { - /// Returns true if a given room version is supported - #[tracing::instrument(skip(self, db))] - pub fn is_supported_version(&self, db: &Database, room_version: &RoomVersionId) -> bool { - db.globals.supported_room_versions().contains(room_version) - } - - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? 
- .pop() - .expect("there is always one layer") - .1; - let mut result = BTreeMap::new(); - let mut i = 0; - for compressed in full_state.into_iter() { - let parsed = self.parse_compressed_state_event(compressed)?; - result.insert(parsed.0, parsed.1); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - Ok(result) - } - - #[tracing::instrument(skip(self))] - pub async fn state_full( - &self, - shortstatehash: u64, - ) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - let mut result = HashMap::new(); - let mut i = 0; - for compressed in full_state { - let (_, eventid) = self.parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.get_pdu(&eventid)? { - result.insert( - ( - pdu.kind.to_string().into(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - ); - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get_id( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { - Some(s) => s, - None => return Ok(None), - }; - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .into_iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - self.parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) - } - - /// Returns the state hash for this pdu. - pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.eventid_shorteventid - .get(event_id.as_bytes())? - .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) - }) - .transpose() - }) - } - - /// Returns the last state hash key added to the db for the given room. - #[tracing::instrument(skip(self))] - pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - /// This fetches auth events from the current state. - #[tracing::instrument(skip(self))] - pub fn get_auth_events( - &self, - room_id: &RoomId, - kind: &RoomEventType, - sender: &UserId, - state_key: Option<&str>, - content: &serde_json::value::RawValue, - ) -> Result>> { - let shortstatehash = - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - current_shortstatehash - } else { - return Ok(HashMap::new()); - }; - - let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) - .expect("content is a valid JSON object"); - - let mut sauthevents = auth_events - .into_iter() - .filter_map(|(event_type, state_key)| { - self.get_shortstatekey(&event_type.to_string().into(), &state_key) - .ok() - .flatten() - .map(|s| (s, (event_type, state_key))) - }) - .collect::>(); - - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - Ok(full_state - .into_iter() - .filter_map(|compressed| self.parse_compressed_state_event(compressed).ok()) - .filter_map(|(shortstatekey, event_id)| { - sauthevents.remove(&shortstatekey).map(|k| (k, event_id)) - }) - .filter_map(|(k, event_id)| self.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu))) - .collect()) - } - - /// Generate a new StateHash. - /// - /// A unique hash made from hashing all PDU ids of the state joined with 0xff. - fn calculate_hash(&self, bytes_list: &[&[u8]]) -> StateHashId { - // We only hash the pdu's event ids, not the whole pdu - let bytes = bytes_list.join(&0xff); - let hash = digest::digest(&digest::SHA256, &bytes); - hash.as_ref().into() - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. - self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() - } - - /// Force the creation of a new StateHash and insert it into the db. - /// - /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. 
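// Illustrative sketch, not part of this patch: `calculate_hash` above derives
// the room's state hash by joining the entries with a 0xff separator and
// taking a SHA-256 digest via `ring`, the same crate this module already
// imports. A standalone equivalent:
use ring::digest;

fn calculate_state_hash(entries: &[&[u8]]) -> Vec<u8> {
    // Join the byte slices with 0xff, then hash the concatenation.
    let joined = entries.join(&0xff);
    digest::digest(&digest::SHA256, &joined).as_ref().to_vec()
}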
- #[tracing::instrument(skip(self, new_state_ids_compressed, db))] - pub fn force_state( - &self, - room_id: &RoomId, - new_state_ids_compressed: HashSet, - db: &Database, - ) -> Result<()> { - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - for event_id in statediffnew.into_iter().filter_map(|new| { - self.parse_compressed_state_event(new) - .ok() - .map(|(_, id)| id) - }) { - let pdu = match self.get_pdu_json(&event_id)? { - Some(pdu) => pdu, - None => continue, - }; - - if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { - continue; - } - - let pdu: PduEvent = match serde_json::from_str( - &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), - ) { - Ok(pdu) => pdu, - Err(_) => continue, - }; - - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let membership = match serde_json::from_str::(pdu.content.get()) { - Ok(e) => e.membership, - Err(_) => continue, - }; - - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, - }; - - let user_id = match UserId::parse(state_key) { - Ok(id) => id, - Err(_) => continue, - }; - - self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; - } - - self.update_joined_count(room_id, db)?; - - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - - Ok(()) - } /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. #[tracing::instrument(skip(self))] @@ -716,2788 +241,61 @@ impl Rooms { Ok(()) } - /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash( - &self, - state_hash: &StateHashId, - globals: &super::globals::Globals, - ) -> Result<(u64, bool)> { - Ok(match self.statehash_shortstatehash.get(state_hash)? 
{ - Some(shortstatehash) => ( - utils::u64_from_bytes(&shortstatehash) - .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, - true, - ), - None => { - let shortstatehash = globals.next_count()?; - self.statehash_shortstatehash - .insert(state_hash, &shortstatehash.to_be_bytes())?; - (shortstatehash, false) - } - }) - } - - pub fn get_or_create_shorteventid( - &self, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { - return Ok(*short); - } - - let short = match self.eventid_shorteventid.get(event_id.as_bytes())? { - Some(shorteventid) => utils::u64_from_bytes(&shorteventid) - .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, - None => { - let shorteventid = globals.next_count()?; - self.eventid_shorteventid - .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; - shorteventid - } - }; - - self.eventidshort_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), short); - - Ok(short) - } - - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { - self.roomid_shortroomid - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) - }) - .transpose() - } - - pub fn get_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - ) -> Result> { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(Some(*short)); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = self - .statekey_shortstatekey - .get(&statekey)? - .map(|shortstatekey| { - utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) - }) - .transpose()?; - - if let Some(s) = short { - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), s); + #[tracing::instrument(skip(self))] + pub fn get_auth_chain_from_cache<'a>( + &'a self, + key: &[u64], + ) -> Result>>> { + // Check RAM cache + if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { + return Ok(Some(Arc::clone(result))); } - Ok(short) - } - - pub fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - globals: &super::globals::Globals, - ) -> Result { - Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? { - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, - None => { - let short = globals.next_count()?; - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes())?; - short - } - }) - } - - pub fn get_or_create_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(*short); - } + // Check DB cache + if key.len() == 1 { + if let Some(chain) = + self.shorteventid_authchain + .get(&key[0].to_be_bytes())? 
+ .map(|chain| { + chain + .chunks_exact(size_of::()) + .map(|chunk| { + utils::u64_from_bytes(chunk).expect("byte length is correct") + }) + .collect() + }) + { + let chain = Arc::new(chain); - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); + // Cache in RAM + self.auth_chain_cache + .lock() + .unwrap() + .insert(vec![key[0]], Arc::clone(&chain)); - let short = match self.statekey_shortstatekey.get(&statekey)? { - Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, - None => { - let shortstatekey = globals.next_count()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes())?; - self.shortstatekey_statekey - .insert(&shortstatekey.to_be_bytes(), &statekey)?; - shortstatekey + return Ok(Some(chain)); } - }; - - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), short); - - Ok(short) - } - - pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - if let Some(id) = self - .shorteventid_cache - .lock() - .unwrap() - .get_mut(&shorteventid) - { - return Ok(Arc::clone(id)); - } - - let bytes = self - .shorteventid_eventid - .get(&shorteventid.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - - let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; - - self.shorteventid_cache - .lock() - .unwrap() - .insert(shorteventid, Arc::clone(&event_id)); - - Ok(event_id) - } - - pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { - if let Some(id) = self - .shortstatekey_cache - .lock() - .unwrap() - .get_mut(&shortstatekey) - { - return Ok(id.clone()); } - let bytes = self - .shortstatekey_statekey - .get(&shortstatekey.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; - - let mut parts = bytes.splitn(2, |&b| b == 0xff); - let eventtype_bytes = parts.next().expect("split always returns one entry"); - let statekey_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; - - let event_type = - StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { - Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; - - let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { - Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") - })?; - - let result = (event_type, state_key); - - self.shortstatekey_cache - .lock() - .unwrap() - .insert(shortstatekey, result.clone()); - - Ok(result) - } - - /// Returns the full room state. - #[tracing::instrument(skip(self))] - pub async fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). 
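// Illustrative sketch, not part of this patch: the get_or_create_short*
// helpers above intern long identifiers (event ids, state keys, room ids) as
// sequential u64 "short" ids and keep the mapping in both directions so either
// side can be resolved cheaply. A minimal in-memory version of the pattern:
use std::collections::HashMap;

#[derive(Default)]
struct Interner {
    forward: HashMap<String, u64>,
    backward: HashMap<u64, String>,
    next: u64,
}

impl Interner {
    fn get_or_create(&mut self, id: &str) -> u64 {
        if let Some(short) = self.forward.get(id) {
            return *short;
        }
        self.next += 1; // the real code takes this from globals.next_count()
        let short = self.next;
        self.forward.insert(id.to_owned(), short);
        self.backward.insert(short, id.to_owned());
        short
    }

    fn resolve(&self, short: u64) -> Option<&str> {
        self.backward.get(&short).map(String::as_str)
    }
}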
- #[tracing::instrument(skip(self))] - pub fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } + Ok(None) } - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). #[tracing::instrument(skip(self))] - pub fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) + pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { + // Persist in db + if key.len() == 1 { + self.shorteventid_authchain.insert( + &key[0].to_be_bytes(), + &chain + .iter() + .flat_map(|s| s.to_be_bytes().to_vec()) + .collect::>(), + )?; } - } - - /// Returns the `count` of this pdu's id. - pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) - } - - /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - - /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } + // Cache in RAM + self.auth_chain_cache.lock().unwrap().insert(key, chain); - /// Returns the json of a pdu. - pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() + Ok(()) } - /// Returns the json of a pdu. - pub fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu's id. - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. 
- pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { - return Ok(Some(Arc::clone(p))); - } - - if let Some(pdu) = self - .eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - .map(Arc::new) - }) - .transpose()? - { - self.pdu_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), Arc::clone(&pdu)); - Ok(Some(pdu)) - } else { - Ok(None) - } - } - - /// Returns the pdu. - /// - /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Returns the pdu as a `BTreeMap`. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Removes a pdu and creates a new one with the same id. - #[tracing::instrument(skip(self))] - fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - if self.pduid_pdu.get(pdu_id)?.is_some() { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), - )?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "PDU does not exist.", - )) - } - } - - /// Returns the leaf pdus of a room. - #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - #[tracing::instrument(skip(self, room_id, event_ids))] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - /// Replace the leaves of a room. - /// - /// The provided `event_ids` become the new leaves, this allows a room to have multiple - /// `prev_events`. 
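// Illustrative sketch, not part of this patch: `get_pdu` above is a
// read-through cache: consult the in-memory LruCache first, fall back to the
// database, then populate the cache with the loaded value. A standalone
// version of the pattern, assuming the same `lru_cache` crate this module
// already depends on and a caller-supplied `load_from_db` closure in place of
// the real tree lookups:
use lru_cache::LruCache;
use std::sync::Mutex;

struct ReadThroughCache {
    inner: Mutex<LruCache<String, String>>,
}

impl ReadThroughCache {
    fn new(capacity: usize) -> Self {
        ReadThroughCache {
            inner: Mutex::new(LruCache::new(capacity)),
        }
    }

    fn get(&self, key: &str, load_from_db: impl Fn(&str) -> Option<String>) -> Option<String> {
        if let Some(hit) = self.inner.lock().unwrap().get_mut(key) {
            return Some(hit.clone());
        }
        let value = load_from_db(key)?;
        self.inner
            .lock()
            .unwrap()
            .insert(key.to_owned(), value.clone());
        Some(value)
    }
}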
- #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves<'a>( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - /// Returns the pdu from the outlier tree. - pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - /// Append the PDU as an outlier. - /// - /// Any event given to this will be processed (state-res) on another thread. - #[tracing::instrument(skip(self, pdu))] - pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } - - #[tracing::instrument(skip(self))] - pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } - - /// Creates a new persisted data unit and adds it to a room. - /// - /// By this point the incoming event should be fully authenticated, no auth happens - /// in `append_pdu`. - /// - /// Returns pdu id - #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] - pub fn append_pdu<'a>( - &self, - pdu: &PduEvent, - mut pdu_json: CanonicalJsonObject, - leaves: impl IntoIterator + Debug, - db: &Database, - ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); - - // Make unsigned fields correct. This is not properly documented in the spec, but state - // events need to have previous content in the unsigned field, so clients can easily - // interpret things like membership changes - if let Some(state_key) = &pdu.state_key { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) - { - if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) - .unwrap() - { - unsigned.insert( - "prev_content".to_owned(), - CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()) - .expect("event is valid, we just created it"), - ), - ); - } - } - } else { - error!("Invalid unsigned type in pdu."); - } - } - - // We must keep track of all events that have been referenced. 
- self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - self.replace_pdu_leaves(&pdu.room_id, leaves)?; - - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(pdu.room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - - let count1 = db.globals.next_count()?; - // Mark as read first so the sending client doesn't get a notification even if appending - // fails - self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; - self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; - - let count2 = db.globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&count2.to_be_bytes()); - - // There's a brief moment of time here where the count is updated but the pdu does not - // exist. This could theoretically lead to dropped pdus, but it's extremely rare - // - // Update: We fixed this using insert_lock - - self.pduid_pdu.insert( - &pdu_id, - &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - self.lasttimelinecount_cache - .lock() - .unwrap() - .insert(pdu.room_id.clone(), count2); - - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; - - drop(insert_lock); - - // See if the event matches any known pushers - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - let sync_pdu = pdu.to_sync_room_event(); - - let mut notifies = Vec::new(); - let mut highlights = Vec::new(); - - for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { - // Don't notify the user of their own events - if user == &pdu.sender { - continue; - } - - let rules_for_user = db - .account_data - .get( - None, - user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? - .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); - - let mut highlight = false; - let mut notify = false; - - for action in pusher::get_actions( - user, - &rules_for_user, - &power_levels, - &sync_pdu, - &pdu.room_id, - db, - )? 
{ - match action { - Action::DontNotify => notify = false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => notify = true, - Action::SetTweak(Tweak::Highlight(true)) => { - highlight = true; - } - _ => {} - }; - } - - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(pdu.room_id.as_bytes()); - - if notify { - notifies.push(userroom_id.clone()); - } - - if highlight { - highlights.push(userroom_id); - } - - for senderkey in db.pusher.get_pusher_senderkeys(user) { - db.sending.send_push_pdu(&*pdu_id, senderkey)?; - } - } - - self.userroomid_notificationcount - .increment_batch(&mut notifies.into_iter())?; - self.userroomid_highlightcount - .increment_batch(&mut highlights.into_iter())?; - - match pdu.kind { - RoomEventType::RoomRedaction => { - if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(redact_id, pdu)?; - } - } - RoomEventType::RoomMember => { - if let Some(state_key) = &pdu.state_key { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - // if the state_key fails - let target_user_id = UserId::parse(state_key.clone()) - .expect("This state_key was previously validated"); - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - let invite_state = match content.membership { - MembershipState::Invite => { - let state = self.calculate_invite_state(pdu)?; - Some(state) - } - _ => None, - }; - - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( - &pdu.room_id, - &target_user_id, - content.membership, - &pdu.sender, - invite_state, - db, - true, - )?; - } - } - RoomEventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody<'a> { - #[serde(borrow)] - body: Option>, - } - - let content = serde_json::from_str::>(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - let mut batch = body - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= 50) - .map(str::to_lowercase) - .map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xff); - key.extend_from_slice(&pdu_id); - (key, Vec::new()) - }); - - self.tokenids.insert_batch(&mut batch)?; - - let admin_room = self.id_from_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )?; - let server_user = format!("@conduit:{}", db.globals.server_name()); - - let to_conduit = body.starts_with(&format!("{}: ", server_user)); - - // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let from_conduit = - pdu.sender == server_user && db.globals.emergency_password().is_none(); - - if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - db.admin.process_message(body.to_string()); - } - } - } - _ => {} - } - - Ok(pdu_id) - } - - #[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - match self - .lasttimelinecount_cache - .lock() - .unwrap() - .entry(room_id.to_owned()) - { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - 
.pdus_until(&sender_user, &room_id, u64::MAX)? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .map(|(pduid, _)| self.pdu_count(&pduid)) - .next() - { - Ok(*v.insert(last_count?)) - } else { - Ok(0) - } - } - hash_map::Entry::Occupied(o) => Ok(*o.get()), - } - } - - #[tracing::instrument(skip(self))] - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - self.userroomid_highlightcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid notification count in db.")) - }) - .unwrap_or(Ok(0)) - } - - #[tracing::instrument(skip(self))] - pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_highlightcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid highlight count in db.")) - }) - .unwrap_or(Ok(0)) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, state_ids_compressed, globals))] - pub fn set_event_state( - &self, - event_id: &EventId, - room_id: &RoomId, - state_ids_compressed: HashSet, - globals: &super::globals::Globals, - ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &state_ids_compressed - .iter() - .map(|s| &s[..]) - .collect::>(), - ); - - let (shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, globals)?; - - if !already_existed { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (state_ids_compressed, HashSet::new()) - }; - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 1_000_000, // high number because no state will be based on this one - states_parents, - )?; - } - - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. 
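// Illustrative sketch, not part of this patch: the compressed state entries
// handled by set_event_state above pack a short state key and a short event id
// into one fixed 16-byte value (two big-endian u64 halves), so whole room
// state snapshots can be stored and diffed as flat byte sets. A standalone
// pack/unpack pair for that layout:
type Compressed = [u8; 16];

fn compress(shortstatekey: u64, shorteventid: u64) -> Compressed {
    let mut out = [0u8; 16];
    out[..8].copy_from_slice(&shortstatekey.to_be_bytes());
    out[8..].copy_from_slice(&shorteventid.to_be_bytes());
    out
}

fn decompress(compressed: &Compressed) -> (u64, u64) {
    let mut statekey = [0u8; 8];
    let mut eventid = [0u8; 8];
    statekey.copy_from_slice(&compressed[..8]);
    eventid.copy_from_slice(&compressed[8..]);
    (u64::from_be_bytes(statekey), u64::from_be_bytes(eventid))
}

// In the real code the second half is a short event id that gets resolved back
// to the full EventId through the shorteventid_eventid table.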
- /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, new_pdu, globals))] - pub fn append_to_state( - &self, - new_pdu: &PduEvent, - globals: &super::globals::Globals, - ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; - - if let Some(p) = previous_shortstatehash { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; - } - - if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let shortstatekey = self.get_or_create_shortstatekey( - &new_pdu.kind.to_string().into(), - state_key, - globals, - )?; - - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; - - let replaces = states_parents - .last() - .map(|info| { - info.1 - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); - - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } - - // TODO: statehash with deterministic inputs - let shortstatehash = globals.next_count()?; - - let mut statediffnew = HashSet::new(); - statediffnew.insert(new); - - let mut statediffremoved = HashSet::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) - } - } - - #[tracing::instrument(skip(self, invite_event))] - pub fn calculate_invite_state( - &self, - invite_event: &PduEvent, - ) -> Result>> { - let mut state = Vec::new(); - // Add recommended events - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomCanonicalAlias, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomMember, - invite_event.sender.as_str(), - )? 
{ - state.push(e.to_stripped_state_event()); - } - - state.push(invite_event.to_stripped_state_event()); - Ok(state) - } - - #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - pub fn associate_token_shortstatehash( - &self, - room_id: &RoomId, - token: u64, - shortstatehash: u64, - ) -> Result<()> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .insert(&key, &shortstatehash.to_be_bytes()) - } - - pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") - }) - }) - .transpose() - } - - /// Creates a new persisted data unit and adds it to a room. - #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex - ) -> Result> { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - } = pdu_builder; - - let prev_events = self - .get_pdu_leaves(room_id)? - .into_iter() - .take(20) - .collect::>(); - - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - - let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = unsigned.unwrap_or_default(); - if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - self.room_state_get(room_id, &event_type.to_string().into(), state_key)? 
- { - unsigned.insert( - "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), - ); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), - ); - } - } - - let mut pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key, - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_ref().to_owned()), - ); - - match ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) { - Ok(_) => {} - Err(e) => { - return match e { - ruma::signatures::Error::PduSize => Err(Error::BadRequest( - ErrorKind::TooLarge, - "Message is too long", - )), - _ => Err(Error::BadRequest( - ErrorKind::Unknown, - "Signing event failed", - )), - } - } - } - - // Generate event id - pdu.event_id = EventId::parse_arc(format!( - "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - pdu_json.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), - ); - - // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; - - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. 
- let statehashid = self.append_to_state(&pdu, &db.globals)?; - - let pdu_id = self.append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - iter::once(&*pdu.event_id), - db, - )?; - - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; - - let mut servers: HashSet> = - self.room_servers(room_id).filter_map(|r| r.ok()).collect(); - - // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - servers.insert(Box::from(state_key_uid.server_name())); - } - } - - // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(db.globals.server_name()); - - db.sending.send_pdu(servers.into_iter(), &pdu_id)?; - - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - if let Some(appservice_uid) = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }) - { - if state_key_uid == &appservice_uid { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - } - } - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - Ok(pdu.event_id) - } - - /// Returns an iterator over all PDUs in a room. 
- #[tracing::instrument(skip(self))] - pub fn all_pdus<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result, PduEvent)>> + 'a> { - self.pdus_since(user_id, room_id, 0) - } - - /// Returns an iterator over all events in a room that happened after the event with id `since` - /// in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_since<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - since: u64, - ) -> Result, PduEvent)>> + 'a> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Skip the first pdu if it's exactly at since, because we sent that last time - let mut first_pdu_id = prefix.clone(); - first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(&first_pdu_id, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their tokens in a room that happened before the - /// event with id `until` in reverse-chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_until<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - until: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their token in a room that happened after the event - /// with id `from` in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_after<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - from: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Replace a PDU with the redacted form. - #[tracing::instrument(skip(self, reason))] - pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { - if let Some(pdu_id) = self.get_pdu_id(event_id)? { - let mut pdu = self - .get_pdu_from_id(&pdu_id)? 
- .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact(reason)?; - self.replace_pdu(&pdu_id, &pdu)?; - } - // If event does not exist, just noop - Ok(()) - } - - /// Update current membership data. - #[tracing::instrument(skip(self, last_state, db))] - pub fn update_membership( - &self, - room_id: &RoomId, - user_id: &UserId, - membership: MembershipState, - sender: &UserId, - last_state: Option>>, - db: &Database, - update_joined_count: bool, - ) -> Result<()> { - // Keep track what remote users exist by adding them as "deactivated" users - if user_id.server_name() != db.globals.server_name() { - db.users.create(user_id, None)?; - // TODO: displayname, avatar url - } - - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(user_id.server_name().as_bytes()); - - let mut serverroom_id = user_id.server_name().as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - match &membership { - MembershipState::Join => { - // Check if the user never joined this room - if !self.once_joined(user_id, room_id)? { - // Add the user ID to the join list then - self.roomuseroncejoinedids.insert(&userroom_id, &[])?; - - // Check if the room has a predecessor - if let Some(predecessor) = self - .room_state_get(room_id, &StateEventType::RoomCreate, "")? - .and_then(|create| serde_json::from_str(create.content.get()).ok()) - .and_then(|content: RoomCreateEventContent| content.predecessor) - { - // Copy user settings from predecessor to the current room: - // - Push rules - // - // TODO: finish this once push rules are implemented. - // - // let mut push_rules_event_content: PushRulesEvent = account_data - // .get( - // None, - // user_id, - // EventType::PushRules, - // )?; - // - // NOTE: find where `predecessor.room_id` match - // and update to `room_id`. - // - // account_data - // .update( - // None, - // user_id, - // EventType::PushRules, - // &push_rules_event_content, - // globals, - // ) - // .ok(); - - // Copy old tags to new room - if let Some(tag_event) = db.account_data.get::( - Some(&predecessor.room_id), - user_id, - RoomAccountDataEventType::Tag, - )? { - db.account_data - .update( - Some(room_id), - user_id, - RoomAccountDataEventType::Tag, - &tag_event, - &db.globals, - ) - .ok(); - }; - - // Copy direct chat flag - if let Some(mut direct_event) = db.account_data.get::( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - )? 
{ - let mut room_ids_updated = false; - - for room_ids in direct_event.content.0.values_mut() { - if room_ids.iter().any(|r| r == &predecessor.room_id) { - room_ids.push(room_id.to_owned()); - room_ids_updated = true; - } - } - - if room_ids_updated { - db.account_data.update( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - &direct_event, - &db.globals, - )?; - } - }; - } - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_joined.insert(&userroom_id, &[])?; - self.roomuserid_joined.insert(&roomuser_id, &[])?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Invite => { - // We want to know if the sender is ignored by the receiver - let is_ignored = db - .account_data - .get::( - None, // Ignored users are in global account data - user_id, // Receiver - GlobalAccountDataEventType::IgnoredUserList - .to_string() - .into(), - )? - .map_or(false, |ignored| { - ignored - .content - .ignored_users - .iter() - .any(|user| user == sender) - }); - - if is_ignored { - return Ok(()); - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_invitestate.insert( - &userroom_id, - &serde_json::to_vec(&last_state.unwrap_or_default()) - .expect("state to bytes always works"), - )?; - self.roomuserid_invitecount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Leave | MembershipState::Ban => { - if update_joined_count - && self - .room_members(room_id) - .chain(self.room_members_invited(room_id)) - .filter_map(|r| r.ok()) - .all(|u| u.server_name() != user_id.server_name()) - { - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - self.userroomid_leftstate.insert( - &userroom_id, - &serde_json::to_vec(&Vec::>::new()).unwrap(), - )?; // TODO - self.roomuserid_leftcount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - } - _ => {} - } - - if update_joined_count { - self.update_joined_count(room_id, db)?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn update_joined_count(&self, room_id: &RoomId, db: &Database) -> Result<()> { - let mut joinedcount = 0_u64; - let mut invitedcount = 0_u64; - let mut joined_servers = HashSet::new(); - let mut real_users = HashSet::new(); - - for joined in self.room_members(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(joined.server_name().to_owned()); - if joined.server_name() == db.globals.server_name() - && !db.users.is_deactivated(&joined).unwrap_or(true) - { - real_users.insert(joined); - } - joinedcount += 1; - } - - for invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(invited.server_name().to_owned()); - invitedcount += 1; - } - - self.roomid_joinedcount - 
.insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?; - - self.roomid_invitedcount - .insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?; - - self.our_real_users_cache - .write() - .unwrap() - .insert(room_id.to_owned(), Arc::new(real_users)); - - for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { - if !joined_servers.remove(&old_joined_server) { - // Server not in room anymore - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(old_joined_server.as_bytes()); - - let mut serverroom_id = old_joined_server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - } - - // Now only new servers are in joined_servers anymore - for server in joined_servers { - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(server.as_bytes()); - - let mut serverroom_id = server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - - self.appservice_in_room_cache - .write() - .unwrap() - .remove(room_id); - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn get_our_real_users( - &self, - room_id: &RoomId, - db: &Database, - ) -> Result>>> { - let maybe = self - .our_real_users_cache - .read() - .unwrap() - .get(room_id) - .cloned(); - if let Some(users) = maybe { - Ok(users) - } else { - self.update_joined_count(room_id, db)?; - Ok(Arc::clone( - self.our_real_users_cache - .read() - .unwrap() - .get(room_id) - .unwrap(), - )) - } - } - - #[tracing::instrument(skip(self, room_id, appservice, db))] - pub fn appservice_in_room( - &self, - room_id: &RoomId, - appservice: &(String, serde_yaml::Value), - db: &Database, - ) -> Result { - let maybe = self - .appservice_in_room_cache - .read() - .unwrap() - .get(room_id) - .and_then(|map| map.get(&appservice.0)) - .copied(); - - if let Some(b) = maybe { - Ok(b) - } else if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - - let bridge_user_id = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }); - - let in_room = bridge_user_id - .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) - || self.room_members(room_id).any(|userid| { - userid.map_or(false, |userid| { - users.iter().any(|r| r.is_match(userid.as_str())) - }) - }); - - self.appservice_in_room_cache - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default() - .insert(appservice.0.clone(), in_room); - - Ok(in_room) - } else { - Ok(false) - } - } - - // Make a user leave all their joined rooms - #[tracing::instrument(skip(self, db))] - pub async fn leave_all_rooms(&self, user_id: &UserId, db: &Database) -> Result<()> { - let all_rooms = db - .rooms - .rooms_joined(user_id) - .chain(db.rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) - .collect::>(); - - for room_id in all_rooms { - let room_id = match room_id { - Ok(room_id) => room_id, - 
Err(_) => continue, - }; - - let _ = self.leave_room(user_id, &room_id, db).await; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - pub async fn leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - // Ask a remote server if we don't have this room - if !self.exists(room_id)? && room_id.server_name() != db.globals.server_name() { - if let Err(e) = self.remote_leave_room(user_id, room_id, db).await { - warn!("Failed to leave room {} remotely: {}", user_id, e); - // Don't tell the client about this error - } - - let last_state = self - .invite_state(user_id, room_id)? - .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?; - - // We always drop the invite, we can't rely on other servers - self.update_membership( - room_id, - user_id, - MembershipState::Leave, - user_id, - last_state, - db, - true, - )?; - } else { - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - let mut event: RoomMemberEventContent = serde_json::from_str( - self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot leave a room you are not a member of.", - ))? - .content - .get(), - ) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = MembershipState::Leave; - - self.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - user_id, - room_id, - db, - &state_lock, - )?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - async fn remote_leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - let mut make_leave_response_and_server = Err(Error::BadServerResponse( - "No server available to assist in leaving.", - )); - - let invite_state = db - .rooms - .invite_state(user_id, room_id)? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "User is not invited.", - ))?; - - let servers: HashSet<_> = invite_state - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); - - for remote_server in servers { - let make_leave_response = db - .sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, - ) - .await; - - make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); - - if make_leave_response_and_server.is_ok() { - break; - } - } - - let (make_leave_response, remote_server) = make_leave_response_and_server?; - - let room_version_id = match make_leave_response.room_version { - Some(version) if self.is_supported_version(&db, &version) => version, - _ => return Err(Error::BadServerResponse("Room version is not supported")), - }; - - let mut leave_event_stub = - serde_json::from_str::(make_leave_response.event.get()).map_err( - |_| Error::BadServerResponse("Invalid make_leave event json received from server."), - )?; - - // TODO: Is origin needed? 
- leave_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - leave_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms - leave_event_stub.remove("event_id"); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut leave_event_stub, - &room_version_id, - ) - .expect("event is valid, we just created it"); - - // Generate event id - let event_id = EventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - // Add event_id back - leave_event_stub.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - - // It has enough fields to be called a proper event now - let leave_event = leave_event_stub; - - db.sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::create_leave_event::v2::Request { - room_id, - event_id: &event_id, - pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), - }, - ) - .await?; - - Ok(()) - } - - /// Makes a user forget a room. - #[tracing::instrument(skip(self))] - pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - - Ok(()) - } - - #[tracing::instrument(skip(self, globals))] - pub fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId>, - globals: &super::globals::Globals, - ) -> Result<()> { - if let Some(room_id) = room_id { - // New alias - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - } else { - // room_id=None means remove alias - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result>> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_aliases<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? - .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { - if public { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } else { - self.publicroomids.remove(room_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn iter_ids(&self) -> impl Iterator>> + '_ { - self.roomid_shortroomid.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn search_pdus<'a>( - &'a self, - room_id: &RoomId, - search_string: &str, - ) -> Result> + 'a, Vec)>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - let prefix_clone = prefix.clone(); - - let words: Vec<_> = search_string - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .map(str::to_lowercase) - .collect(); - - let iterators = words.clone().into_iter().map(move |word| { - let mut prefix2 = prefix.clone(); - prefix2.extend_from_slice(word.as_bytes()); - prefix2.push(0xff); - - let mut last_possible_id = prefix2.clone(); - last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.tokenids - .iter_from(&last_possible_id, true) // Newest pdus first - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) - }); - - Ok(utils::common_elements(iterators, |a, b| { - // We compare b with a because we reversed the iterator earlier - b.cmp(a) - }) - .map(|iter| { - ( - iter.map(move |id| { - let mut pduid = prefix_clone.clone(); - pduid.extend_from_slice(&id); - pduid - }), - words, - ) - })) - } - - #[tracing::instrument(skip(self))] - pub fn get_shared_rooms<'a>( - &'a self, - users: Vec>, - ) -> Result>> + 'a> { - let iterators = users.into_iter().map(move |user_id| { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_joined - .scan_prefix(prefix) - .map(|(key, _)| { - let roomid_index = key - .iter() - .enumerate() - .find(|(_, &b)| b == 0xff) - .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? 
- .0 - + 1; // +1 because the room id starts AFTER the separator - - let room_id = key[roomid_index..].to_vec(); - - Ok::<_, Error>(room_id) - }) - .filter_map(|r| r.ok()) - }); - - // We use the default compare function because keys are sorted correctly (not reversed) - Ok(utils::common_elements(iterators, Ord::cmp) - .expect("users is not empty") - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { - Error::bad_database("Invalid RoomId bytes in userroomid_joined") - })?) - .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - })) - } - - /// Returns an iterator of all servers participating in this room. - #[tracing::instrument(skip(self))] - pub fn room_servers<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomserverids.scan_prefix(prefix).map(|(key, _)| { - ServerName::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Server name in roomserverids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.serverroomids.get(&key).map(|o| o.is_some()) - } - - /// Returns an iterator of all rooms a server participates in (as far as we know). - #[tracing::instrument(skip(self))] - pub fn server_rooms<'a>( - &'a self, - server: &ServerName, - ) -> impl Iterator>> + 'a { - let mut prefix = server.as_bytes().to_vec(); - prefix.push(0xff); - - self.serverroomids.scan_prefix(prefix).map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) - }) - } - - /// Returns an iterator over all joined members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { - self.roomid_joinedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { - self.roomid_invitedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - /// Returns an iterator over all User IDs who ever joined a room. 
- #[tracing::instrument(skip(self))] - pub fn room_useroncejoined<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuseroncejoinedids - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in room_useroncejoined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) - }) - } - - /// Returns an iterator over all invited members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members_invited<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_invitecount - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_invitecount - .get(&key)? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid invitecount in db.") - })?)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_leftcount - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid leftcount in db.")) - }) - .transpose() - } - - pub fn is_disabled(&self, room_id: &RoomId) -> Result { - Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) - } - - /// Returns an iterator over all rooms this user joined. - #[tracing::instrument(skip(self))] - pub fn rooms_joined<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator>> + 'a { - self.userroomid_joined - .scan_prefix(user_id.as_bytes().to_vec()) - .map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) - }) - } - - /// Returns an iterator over all rooms a user was invited to. 
- #[tracing::instrument(skip(self))] - pub fn rooms_invited<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_invitestate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn invite_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_invitestate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok(state) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn left_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_leftstate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok(state) - }) - .transpose() - } - - /// Returns an iterator over all rooms a user left. - #[tracing::instrument(skip(self))] - pub fn rooms_left<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_leftstate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_left(&self, user_id: &UserId, 
room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( - &'a self, - key: &[u64], - ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); - } - - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? - .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - }) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } - } - - Ok(None) - } - - #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db - if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - )?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_mark_sent( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - lazy_load: HashSet>, - count: u64, - ) { - self.lazy_load_waiting.lock().unwrap().insert( - ( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - count, - ), - lazy_load, - ); - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } - - /// Returns the room's version. 
- #[tracing::instrument(skip(self))] - pub fn get_room_version(&self, room_id: &RoomId) -> Result { - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - let room_version = create_event_content - .map(|create_event| create_event.room_version) - .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; - Ok(room_version) - } -} diff --git a/src/service/rooms/timeline.rs b/src/service/rooms/timeline.rs index 4ad815e3..fd93344c 100644 --- a/src/service/rooms/timeline.rs +++ b/src/service/rooms/timeline.rs @@ -1,347 +1,3 @@ -mod edus; - -pub use edus::RoomEdus; - -use crate::{ - pdu::{EventHash, PduBuilder}, - utils, Database, Error, PduEvent, Result, -}; -use lru_cache::LruCache; -use regex::Regex; -use ring::digest; -use ruma::{ - api::{client::error::ErrorKind, federation}, - events::{ - direct::DirectEvent, - ignored_user_list::IgnoredUserListEvent, - push_rules::PushRulesEvent, - room::{ - create::RoomCreateEventContent, - member::{MembershipState, RoomMemberEventContent}, - power_levels::RoomPowerLevelsEventContent, - }, - tag::TagEvent, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, RoomEventType, StateEventType, - }, - push::{Action, Ruleset, Tweak}, - serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res::{self, RoomVersion, StateMap}, - uint, DeviceId, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, -}; -use serde::Deserialize; -use serde_json::value::to_raw_value; -use std::{ - borrow::Cow, - collections::{hash_map, BTreeMap, HashMap, HashSet}, - fmt::Debug, - iter, - mem::size_of, - sync::{Arc, Mutex, RwLock}, -}; -use tokio::sync::MutexGuard; -use tracing::{error, warn}; - -use super::{abstraction::Tree, pusher}; - -/// The unique identifier of each state group. -/// -/// This is created when a state group is added to the database by -/// hashing the entire state. -pub type StateHashId = Vec; -pub type CompressedStateEvent = [u8; 2 * size_of::()]; - -pub struct Rooms { - pub edus: RoomEdus, - pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count - pub(super) eventid_pduid: Arc, - pub(super) roomid_pduleaves: Arc, - pub(super) alias_roomid: Arc, - pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count - pub(super) publicroomids: Arc, - - pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount - - /// Participating servers in a room. 
- pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName - pub(super) serverroomids: Arc, // ServerRoomId = ServerName + RoomId - - pub(super) userroomid_joined: Arc, - pub(super) roomuserid_joined: Arc, - pub(super) roomid_joinedcount: Arc, - pub(super) roomid_invitedcount: Arc, - pub(super) roomuseroncejoinedids: Arc, - pub(super) userroomid_invitestate: Arc, // InviteState = Vec> - pub(super) roomuserid_invitecount: Arc, // InviteCount = Count - pub(super) userroomid_leftstate: Arc, - pub(super) roomuserid_leftcount: Arc, - - pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled - - pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId - - pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 - pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 - - /// Remember the current state hash of a room. - pub(super) roomid_shortstatehash: Arc, - pub(super) roomsynctoken_shortstatehash: Arc, - /// Remember the state hash at events in the past. - pub(super) shorteventid_shortstatehash: Arc, - /// StateKey = EventType + StateKey, ShortStateKey = Count - pub(super) statekey_shortstatekey: Arc, - pub(super) shortstatekey_statekey: Arc, - - pub(super) roomid_shortroomid: Arc, - - pub(super) shorteventid_eventid: Arc, - pub(super) eventid_shorteventid: Arc, - - pub(super) statehash_shortstatehash: Arc, - pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) - - pub(super) shorteventid_authchain: Arc, - - /// RoomId + EventId -> outlier PDU. - /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) eventid_outlierpdu: Arc, - pub(super) softfailedeventids: Arc, - - /// RoomId + EventId -> Parent PDU EventId. - pub(super) referencedevents: Arc, - - pub(super) pdu_cache: Mutex, Arc>>, - pub(super) shorteventid_cache: Mutex>>, - pub(super) auth_chain_cache: Mutex, Arc>>>, - pub(super) eventidshort_cache: Mutex, u64>>, - pub(super) statekeyshort_cache: Mutex>, - pub(super) shortstatekey_cache: Mutex>, - pub(super) our_real_users_cache: RwLock, Arc>>>>, - pub(super) appservice_in_room_cache: RwLock, HashMap>>, - pub(super) lazy_load_waiting: - Mutex, Box, Box, u64), HashSet>>>, - pub(super) stateinfo_cache: Mutex< - LruCache< - u64, - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - >, - >, - pub(super) lasttimelinecount_cache: Mutex, u64>>, -} - -impl Rooms { - /// Returns true if a given room version is supported - #[tracing::instrument(skip(self, db))] - pub fn is_supported_version(&self, db: &Database, room_version: &RoomVersionId) -> bool { - db.globals.supported_room_versions().contains(room_version) - } - - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? 
- .pop() - .expect("there is always one layer") - .1; - let mut result = BTreeMap::new(); - let mut i = 0; - for compressed in full_state.into_iter() { - let parsed = self.parse_compressed_state_event(compressed)?; - result.insert(parsed.0, parsed.1); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - Ok(result) - } - - #[tracing::instrument(skip(self))] - pub async fn state_full( - &self, - shortstatehash: u64, - ) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - let mut result = HashMap::new(); - let mut i = 0; - for compressed in full_state { - let (_, eventid) = self.parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.get_pdu(&eventid)? { - result.insert( - ( - pdu.kind.to_string().into(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - ); - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get_id( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { - Some(s) => s, - None => return Ok(None), - }; - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .into_iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - self.parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) - } - - /// Returns the state hash for this pdu. - pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.eventid_shorteventid - .get(event_id.as_bytes())? - .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) - }) - .transpose() - }) - } - - /// Returns the last state hash key added to the db for the given room. - #[tracing::instrument(skip(self))] - pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - /// This fetches auth events from the current state. - #[tracing::instrument(skip(self))] - pub fn get_auth_events( - &self, - room_id: &RoomId, - kind: &RoomEventType, - sender: &UserId, - state_key: Option<&str>, - content: &serde_json::value::RawValue, - ) -> Result>> { - let shortstatehash = - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - current_shortstatehash - } else { - return Ok(HashMap::new()); - }; - - let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) - .expect("content is a valid JSON object"); - - let mut sauthevents = auth_events - .into_iter() - .filter_map(|(event_type, state_key)| { - self.get_shortstatekey(&event_type.to_string().into(), &state_key) - .ok() - .flatten() - .map(|s| (s, (event_type, state_key))) - }) - .collect::>(); - - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - Ok(full_state - .into_iter() - .filter_map(|compressed| self.parse_compressed_state_event(compressed).ok()) - .filter_map(|(shortstatekey, event_id)| { - sauthevents.remove(&shortstatekey).map(|k| (k, event_id)) - }) - .filter_map(|(k, event_id)| self.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu))) - .collect()) - } - - /// Generate a new StateHash. - /// - /// A unique hash made from hashing all PDU ids of the state joined with 0xff. - fn calculate_hash(&self, bytes_list: &[&[u8]]) -> StateHashId { - // We only hash the pdu's event ids, not the whole pdu - let bytes = bytes_list.join(&0xff); - let hash = digest::digest(&digest::SHA256, &bytes); - hash.as_ref().into() - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } /// Checks if a room exists. #[tracing::instrument(skip(self))] @@ -365,688 +21,97 @@ impl Rooms { .transpose() } - /// Force the creation of a new StateHash and insert it into the db. - /// - /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. - #[tracing::instrument(skip(self, new_state_ids_compressed, db))] - pub fn force_state( - &self, - room_id: &RoomId, - new_state_ids_compressed: HashSet, - db: &Database, - ) -> Result<()> { - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - for event_id in statediffnew.into_iter().filter_map(|new| { - self.parse_compressed_state_event(new) - .ok() - .map(|(_, id)| id) - }) { - let pdu = match self.get_pdu_json(&event_id)? 
{ - Some(pdu) => pdu, - None => continue, - }; - - if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { - continue; - } - - let pdu: PduEvent = match serde_json::from_str( - &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), - ) { - Ok(pdu) => pdu, - Err(_) => continue, - }; - - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let membership = match serde_json::from_str::(pdu.content.get()) { - Ok(e) => e.membership, - Err(_) => continue, - }; - - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, - }; - - let user_id = match UserId::parse(state_key) { - Ok(id) => id, - Err(_) => continue, - }; - - self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; - } - - self.update_joined_count(room_id, db)?; - - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. #[tracing::instrument(skip(self))] - pub fn load_shortstatehash_info( - &self, - shortstatehash: u64, - ) -> Result< - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - > { - if let Some(r) = self - .stateinfo_cache + pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { + match self + .lasttimelinecount_cache .lock() .unwrap() - .get_mut(&shortstatehash) + .entry(room_id.to_owned()) { - return Ok(r.clone()); - } - - let value = self - .shortstatehash_statediff - .get(&shortstatehash.to_be_bytes())? - .ok_or_else(|| Error::bad_database("State hash does not exist"))?; - let parent = - utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - - let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); - - let mut i = size_of::(); - while let Some(v) = value.get(i..i + 2 * size_of::()) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i += size_of::(); - continue; - } - if add_mode { - added.insert(v.try_into().expect("we checked the size above")); - } else { - removed.insert(v.try_into().expect("we checked the size above")); - } - i += 2 * size_of::(); - } - - if parent != 0_u64 { - let mut response = self.load_shortstatehash_info(parent)?; - let mut state = response.last().unwrap().1.clone(); - state.extend(added.iter().copied()); - for r in &removed { - state.remove(r); - } - - response.push((shortstatehash, state, added, removed)); - - Ok(response) - } else { - let response = vec![(shortstatehash, added.clone(), added, removed)]; - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); - Ok(response) - } - } - - pub fn compress_state_event( - &self, - shortstatekey: u64, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - let mut v = shortstatekey.to_be_bytes().to_vec(); - v.extend_from_slice( - &self - .get_or_create_shorteventid(event_id, globals)? 
- .to_be_bytes(), - ); - Ok(v.try_into().expect("we checked the size above")) - } - - /// Returns shortstatekey, event id - pub fn parse_compressed_state_event( - &self, - compressed_event: CompressedStateEvent, - ) -> Result<(u64, Arc)> { - Ok(( - utils::u64_from_bytes(&compressed_event[0..size_of::()]) - .expect("bytes have right length"), - self.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]) - .expect("bytes have right length"), - )?, - )) - } - - /// Creates a new shortstatehash that often is just a diff to an already existing - /// shortstatehash and therefore very efficient. - /// - /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer - /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 - /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's - /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. - /// - /// * `shortstatehash` - Shortstatehash of this state - /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid - /// * `statediffremoved` - Removed from base. Each vec is shortstatekey+shorteventid - /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer - /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer - #[tracing::instrument(skip( - self, - statediffnew, - statediffremoved, - diff_to_sibling, - parent_states - ))] - pub fn save_state_from_diff( - &self, - shortstatehash: u64, - statediffnew: HashSet, - statediffremoved: HashSet, - diff_to_sibling: usize, - mut parent_states: Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - ) -> Result<()> { - let diffsum = statediffnew.len() + statediffremoved.len(); - - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().unwrap(); - - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); + hash_map::Entry::Vacant(v) => { + if let Some(last_count) = self + .pdus_until(&sender_user, &room_id, u64::MAX)? + .filter_map(|r| { + // Filter out buggy events + if r.is_err() { + error!("Bad pdu in pdus_since: {:?}", r); + } + r.ok() + }) + .map(|(pduid, _)| self.pdu_count(&pduid)) + .next() + { + Ok(*v.insert(last_count?)) + } else { + Ok(0) } - // Else it was removed in the parent and we added it again. 
We can forget this change } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - - return Ok(()); + hash_map::Entry::Occupied(o) => Ok(*o.get()), } + } - if parent_states.is_empty() { - // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - - return Ok(()); - }; - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. We replace a layer above - - let parent = parent_states.pop().unwrap(); - let parent_diff = parent.2.len() + parent.3.len(); - - if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { - // Diff too big, we replace above layer(s) - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.to_be_bytes().to_vec(); - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed[..]); - } - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - } + // TODO Is this the same as the function above? + #[tracing::instrument(skip(self))] + pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { + let prefix = self + .get_shortroomid(room_id)? + .expect("room exists") + .to_be_bytes() + .to_vec(); - Ok(()) - } + let mut last_possible_key = prefix.clone(); + last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash( - &self, - state_hash: &StateHashId, - globals: &super::globals::Globals, - ) -> Result<(u64, bool)> { - Ok(match self.statehash_shortstatehash.get(state_hash)? 
{ - Some(shortstatehash) => ( - utils::u64_from_bytes(&shortstatehash) - .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, - true, - ), - None => { - let shortstatehash = globals.next_count()?; - self.statehash_shortstatehash - .insert(state_hash, &shortstatehash.to_be_bytes())?; - (shortstatehash, false) - } - }) + self.pduid_pdu + .iter_from(&last_possible_key, true) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .next() + .map(|b| self.pdu_count(&b.0)) + .transpose() + .map(|op| op.unwrap_or_default()) } - pub fn get_or_create_shorteventid( - &self, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { - return Ok(*short); - } - - let short = match self.eventid_shorteventid.get(event_id.as_bytes())? { - Some(shorteventid) => utils::u64_from_bytes(&shorteventid) - .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, - None => { - let shorteventid = globals.next_count()?; - self.eventid_shorteventid - .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; - shorteventid - } - }; - self.eventidshort_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), short); - Ok(short) + /// Returns the `count` of this pdu's id. + pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { + self.eventid_pduid + .get(event_id.as_bytes())? + .map(|pdu_id| self.pdu_count(&pdu_id)) + .transpose() } - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { - self.roomid_shortroomid - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) + /// Returns the json of a pdu. + pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { + self.eventid_pduid + .get(event_id.as_bytes())? + .map_or_else( + || self.eventid_outlierpdu.get(event_id.as_bytes()), + |pduid| { + Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { + Error::bad_database("Invalid pduid in eventid_pduid.") + })?)) + }, + )? + .map(|pdu| { + serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) }) .transpose() } - pub fn get_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - ) -> Result> { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(Some(*short)); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = self - .statekey_shortstatekey - .get(&statekey)? - .map(|shortstatekey| { - utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) + /// Returns the json of a pdu. + pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { + self.eventid_outlierpdu + .get(event_id.as_bytes())? + .map(|pdu| { + serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) }) - .transpose()?; - - if let Some(s) = short { - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), s); - } - - Ok(short) + .transpose() } - pub fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - globals: &super::globals::Globals, - ) -> Result { - Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? 
{ - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, - None => { - let short = globals.next_count()?; - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes())?; - short - } - }) - } - - pub fn get_or_create_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(*short); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = match self.statekey_shortstatekey.get(&statekey)? { - Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, - None => { - let shortstatekey = globals.next_count()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes())?; - self.shortstatekey_statekey - .insert(&shortstatekey.to_be_bytes(), &statekey)?; - shortstatekey - } - }; - - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), short); - - Ok(short) - } - - pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - if let Some(id) = self - .shorteventid_cache - .lock() - .unwrap() - .get_mut(&shorteventid) - { - return Ok(Arc::clone(id)); - } - - let bytes = self - .shorteventid_eventid - .get(&shorteventid.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - - let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; - - self.shorteventid_cache - .lock() - .unwrap() - .insert(shorteventid, Arc::clone(&event_id)); - - Ok(event_id) - } - - pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { - if let Some(id) = self - .shortstatekey_cache - .lock() - .unwrap() - .get_mut(&shortstatekey) - { - return Ok(id.clone()); - } - - let bytes = self - .shortstatekey_statekey - .get(&shortstatekey.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; - - let mut parts = bytes.splitn(2, |&b| b == 0xff); - let eventtype_bytes = parts.next().expect("split always returns one entry"); - let statekey_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; - - let event_type = - StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { - Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; - - let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { - Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") - })?; - - let result = (event_type, state_key); - - self.shortstatekey_cache - .lock() - .unwrap() - .insert(shortstatekey, result.clone()); - - Ok(result) - } - - /// Returns the full room state. - #[tracing::instrument(skip(self))] - pub async fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns the `count` of this pdu's id. - pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) - } - - /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - - /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_non_outlier_pdu_json( + /// Returns the json of a pdu. + pub fn get_non_outlier_pdu_json( &self, event_id: &EventId, ) -> Result> { @@ -1145,6 +210,12 @@ impl Rooms { }) } + /// Returns the `count` of this pdu's id. + pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { + utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) + .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) + } + /// Removes a pdu and creates a new one with the same id. #[tracing::instrument(skip(self))] fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { @@ -1162,99 +233,6 @@ impl Rooms { } } - /// Returns the leaf pdus of a room. 
- #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - #[tracing::instrument(skip(self, room_id, event_ids))] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - /// Replace the leaves of a room. - /// - /// The provided `event_ids` become the new leaves, this allows a room to have multiple - /// `prev_events`. - #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves<'a>( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - /// Returns the pdu from the outlier tree. - pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - /// Append the PDU as an outlier. - /// - /// Any event given to this will be processed (state-res) on another thread. - #[tracing::instrument(skip(self, pdu))] - pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } - - #[tracing::instrument(skip(self))] - pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } - /// Creates a new persisted data unit and adds it to a room. /// /// By this point the incoming event should be fully authenticated, no auth happens @@ -1512,321 +490,31 @@ impl Rooms { Ok(pdu_id) } - #[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - match self - .lasttimelinecount_cache - .lock() - .unwrap() - .entry(room_id.to_owned()) - { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - .pdus_until(&sender_user, &room_id, u64::MAX)? 
- .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .map(|(pduid, _)| self.pdu_count(&pduid)) - .next() - { - Ok(*v.insert(last_count?)) - } else { - Ok(0) - } - } - hash_map::Entry::Occupied(o) => Ok(*o.get()), - } - } - - #[tracing::instrument(skip(self))] - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - self.userroomid_highlightcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid notification count in db.")) - }) - .unwrap_or(Ok(0)) - } - - #[tracing::instrument(skip(self))] - pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_highlightcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid highlight count in db.")) - }) - .unwrap_or(Ok(0)) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, state_ids_compressed, globals))] - pub fn set_event_state( + /// Creates a new persisted data unit and adds it to a room. + #[tracing::instrument(skip(self, db, _mutex_lock))] + pub fn build_and_append_pdu( &self, - event_id: &EventId, + pdu_builder: PduBuilder, + sender: &UserId, room_id: &RoomId, - state_ids_compressed: HashSet, - globals: &super::globals::Globals, - ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; + db: &Database, + _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex + ) -> Result> { + let PduBuilder { + event_type, + content, + unsigned, + state_key, + redacts, + } = pdu_builder; - let previous_shortstatehash = self.current_shortstatehash(room_id)?; + let prev_events = self + .get_pdu_leaves(room_id)? 
+ .into_iter() + .take(20) + .collect::>(); - let state_hash = self.calculate_hash( - &state_ids_compressed - .iter() - .map(|s| &s[..]) - .collect::>(), - ); - - let (shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, globals)?; - - if !already_existed { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (state_ids_compressed, HashSet::new()) - }; - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 1_000_000, // high number because no state will be based on this one - states_parents, - )?; - } - - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, new_pdu, globals))] - pub fn append_to_state( - &self, - new_pdu: &PduEvent, - globals: &super::globals::Globals, - ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; - - if let Some(p) = previous_shortstatehash { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; - } - - if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let shortstatekey = self.get_or_create_shortstatekey( - &new_pdu.kind.to_string().into(), - state_key, - globals, - )?; - - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; - - let replaces = states_parents - .last() - .map(|info| { - info.1 - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); - - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } - - // TODO: statehash with deterministic inputs - let shortstatehash = globals.next_count()?; - - let mut statediffnew = HashSet::new(); - statediffnew.insert(new); - - let mut statediffremoved = HashSet::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) - } - } - - #[tracing::instrument(skip(self, invite_event))] - pub fn calculate_invite_state( - &self, - invite_event: &PduEvent, - ) -> Result>> { - let mut state = Vec::new(); - // Add recommended events - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? 
- { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomCanonicalAlias, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomMember, - invite_event.sender.as_str(), - )? { - state.push(e.to_stripped_state_event()); - } - - state.push(invite_event.to_stripped_state_event()); - Ok(state) - } - - #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - pub fn associate_token_shortstatehash( - &self, - room_id: &RoomId, - token: u64, - shortstatehash: u64, - ) -> Result<()> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .insert(&key, &shortstatehash.to_be_bytes()) - } - - pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") - }) - }) - .transpose() - } - - /// Creates a new persisted data unit and adds it to a room. - #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex - ) -> Result> { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - } = pdu_builder; - - let prev_events = self - .get_pdu_leaves(room_id)? - .into_iter() - .take(20) - .collect::>(); - - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; + let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; let create_event_content: Option = create_event .as_ref() @@ -2216,1288 +904,3 @@ impl Rooms { Ok(()) } - /// Update current membership data. 
- #[tracing::instrument(skip(self, last_state, db))] - pub fn update_membership( - &self, - room_id: &RoomId, - user_id: &UserId, - membership: MembershipState, - sender: &UserId, - last_state: Option>>, - db: &Database, - update_joined_count: bool, - ) -> Result<()> { - // Keep track what remote users exist by adding them as "deactivated" users - if user_id.server_name() != db.globals.server_name() { - db.users.create(user_id, None)?; - // TODO: displayname, avatar url - } - - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(user_id.server_name().as_bytes()); - - let mut serverroom_id = user_id.server_name().as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - match &membership { - MembershipState::Join => { - // Check if the user never joined this room - if !self.once_joined(user_id, room_id)? { - // Add the user ID to the join list then - self.roomuseroncejoinedids.insert(&userroom_id, &[])?; - - // Check if the room has a predecessor - if let Some(predecessor) = self - .room_state_get(room_id, &StateEventType::RoomCreate, "")? - .and_then(|create| serde_json::from_str(create.content.get()).ok()) - .and_then(|content: RoomCreateEventContent| content.predecessor) - { - // Copy user settings from predecessor to the current room: - // - Push rules - // - // TODO: finish this once push rules are implemented. - // - // let mut push_rules_event_content: PushRulesEvent = account_data - // .get( - // None, - // user_id, - // EventType::PushRules, - // )?; - // - // NOTE: find where `predecessor.room_id` match - // and update to `room_id`. - // - // account_data - // .update( - // None, - // user_id, - // EventType::PushRules, - // &push_rules_event_content, - // globals, - // ) - // .ok(); - - // Copy old tags to new room - if let Some(tag_event) = db.account_data.get::( - Some(&predecessor.room_id), - user_id, - RoomAccountDataEventType::Tag, - )? { - db.account_data - .update( - Some(room_id), - user_id, - RoomAccountDataEventType::Tag, - &tag_event, - &db.globals, - ) - .ok(); - }; - - // Copy direct chat flag - if let Some(mut direct_event) = db.account_data.get::( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - )? 
{ - let mut room_ids_updated = false; - - for room_ids in direct_event.content.0.values_mut() { - if room_ids.iter().any(|r| r == &predecessor.room_id) { - room_ids.push(room_id.to_owned()); - room_ids_updated = true; - } - } - - if room_ids_updated { - db.account_data.update( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - &direct_event, - &db.globals, - )?; - } - }; - } - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_joined.insert(&userroom_id, &[])?; - self.roomuserid_joined.insert(&roomuser_id, &[])?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Invite => { - // We want to know if the sender is ignored by the receiver - let is_ignored = db - .account_data - .get::( - None, // Ignored users are in global account data - user_id, // Receiver - GlobalAccountDataEventType::IgnoredUserList - .to_string() - .into(), - )? - .map_or(false, |ignored| { - ignored - .content - .ignored_users - .iter() - .any(|user| user == sender) - }); - - if is_ignored { - return Ok(()); - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_invitestate.insert( - &userroom_id, - &serde_json::to_vec(&last_state.unwrap_or_default()) - .expect("state to bytes always works"), - )?; - self.roomuserid_invitecount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Leave | MembershipState::Ban => { - if update_joined_count - && self - .room_members(room_id) - .chain(self.room_members_invited(room_id)) - .filter_map(|r| r.ok()) - .all(|u| u.server_name() != user_id.server_name()) - { - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - self.userroomid_leftstate.insert( - &userroom_id, - &serde_json::to_vec(&Vec::>::new()).unwrap(), - )?; // TODO - self.roomuserid_leftcount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - } - _ => {} - } - - if update_joined_count { - self.update_joined_count(room_id, db)?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn update_joined_count(&self, room_id: &RoomId, db: &Database) -> Result<()> { - let mut joinedcount = 0_u64; - let mut invitedcount = 0_u64; - let mut joined_servers = HashSet::new(); - let mut real_users = HashSet::new(); - - for joined in self.room_members(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(joined.server_name().to_owned()); - if joined.server_name() == db.globals.server_name() - && !db.users.is_deactivated(&joined).unwrap_or(true) - { - real_users.insert(joined); - } - joinedcount += 1; - } - - for invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(invited.server_name().to_owned()); - invitedcount += 1; - } - - self.roomid_joinedcount - 
.insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?; - - self.roomid_invitedcount - .insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?; - - self.our_real_users_cache - .write() - .unwrap() - .insert(room_id.to_owned(), Arc::new(real_users)); - - for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { - if !joined_servers.remove(&old_joined_server) { - // Server not in room anymore - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(old_joined_server.as_bytes()); - - let mut serverroom_id = old_joined_server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - } - - // Now only new servers are in joined_servers anymore - for server in joined_servers { - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(server.as_bytes()); - - let mut serverroom_id = server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - - self.appservice_in_room_cache - .write() - .unwrap() - .remove(room_id); - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn get_our_real_users( - &self, - room_id: &RoomId, - db: &Database, - ) -> Result>>> { - let maybe = self - .our_real_users_cache - .read() - .unwrap() - .get(room_id) - .cloned(); - if let Some(users) = maybe { - Ok(users) - } else { - self.update_joined_count(room_id, db)?; - Ok(Arc::clone( - self.our_real_users_cache - .read() - .unwrap() - .get(room_id) - .unwrap(), - )) - } - } - - #[tracing::instrument(skip(self, room_id, appservice, db))] - pub fn appservice_in_room( - &self, - room_id: &RoomId, - appservice: &(String, serde_yaml::Value), - db: &Database, - ) -> Result { - let maybe = self - .appservice_in_room_cache - .read() - .unwrap() - .get(room_id) - .and_then(|map| map.get(&appservice.0)) - .copied(); - - if let Some(b) = maybe { - Ok(b) - } else if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - - let bridge_user_id = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }); - - let in_room = bridge_user_id - .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) - || self.room_members(room_id).any(|userid| { - userid.map_or(false, |userid| { - users.iter().any(|r| r.is_match(userid.as_str())) - }) - }); - - self.appservice_in_room_cache - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default() - .insert(appservice.0.clone(), in_room); - - Ok(in_room) - } else { - Ok(false) - } - } - - // Make a user leave all their joined rooms - #[tracing::instrument(skip(self, db))] - pub async fn leave_all_rooms(&self, user_id: &UserId, db: &Database) -> Result<()> { - let all_rooms = db - .rooms - .rooms_joined(user_id) - .chain(db.rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) - .collect::>(); - - for room_id in all_rooms { - let room_id = match room_id { - Ok(room_id) => room_id, - 
Err(_) => continue, - }; - - let _ = self.leave_room(user_id, &room_id, db).await; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - pub async fn leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - // Ask a remote server if we don't have this room - if !self.exists(room_id)? && room_id.server_name() != db.globals.server_name() { - if let Err(e) = self.remote_leave_room(user_id, room_id, db).await { - warn!("Failed to leave room {} remotely: {}", user_id, e); - // Don't tell the client about this error - } - - let last_state = self - .invite_state(user_id, room_id)? - .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?; - - // We always drop the invite, we can't rely on other servers - self.update_membership( - room_id, - user_id, - MembershipState::Leave, - user_id, - last_state, - db, - true, - )?; - } else { - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - let mut event: RoomMemberEventContent = serde_json::from_str( - self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot leave a room you are not a member of.", - ))? - .content - .get(), - ) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = MembershipState::Leave; - - self.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - user_id, - room_id, - db, - &state_lock, - )?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - async fn remote_leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - let mut make_leave_response_and_server = Err(Error::BadServerResponse( - "No server available to assist in leaving.", - )); - - let invite_state = db - .rooms - .invite_state(user_id, room_id)? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "User is not invited.", - ))?; - - let servers: HashSet<_> = invite_state - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); - - for remote_server in servers { - let make_leave_response = db - .sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, - ) - .await; - - make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); - - if make_leave_response_and_server.is_ok() { - break; - } - } - - let (make_leave_response, remote_server) = make_leave_response_and_server?; - - let room_version_id = match make_leave_response.room_version { - Some(version) if self.is_supported_version(&db, &version) => version, - _ => return Err(Error::BadServerResponse("Room version is not supported")), - }; - - let mut leave_event_stub = - serde_json::from_str::(make_leave_response.event.get()).map_err( - |_| Error::BadServerResponse("Invalid make_leave event json received from server."), - )?; - - // TODO: Is origin needed? 
- leave_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - leave_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms - leave_event_stub.remove("event_id"); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut leave_event_stub, - &room_version_id, - ) - .expect("event is valid, we just created it"); - - // Generate event id - let event_id = EventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - // Add event_id back - leave_event_stub.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - - // It has enough fields to be called a proper event now - let leave_event = leave_event_stub; - - db.sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::create_leave_event::v2::Request { - room_id, - event_id: &event_id, - pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), - }, - ) - .await?; - - Ok(()) - } - - /// Makes a user forget a room. - #[tracing::instrument(skip(self))] - pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - - Ok(()) - } - - #[tracing::instrument(skip(self, globals))] - pub fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId>, - globals: &super::globals::Globals, - ) -> Result<()> { - if let Some(room_id) = room_id { - // New alias - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - } else { - // room_id=None means remove alias - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result>> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_aliases<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? - .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { - if public { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } else { - self.publicroomids.remove(room_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn iter_ids(&self) -> impl Iterator>> + '_ { - self.roomid_shortroomid.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn search_pdus<'a>( - &'a self, - room_id: &RoomId, - search_string: &str, - ) -> Result> + 'a, Vec)>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - let prefix_clone = prefix.clone(); - - let words: Vec<_> = search_string - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .map(str::to_lowercase) - .collect(); - - let iterators = words.clone().into_iter().map(move |word| { - let mut prefix2 = prefix.clone(); - prefix2.extend_from_slice(word.as_bytes()); - prefix2.push(0xff); - - let mut last_possible_id = prefix2.clone(); - last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.tokenids - .iter_from(&last_possible_id, true) // Newest pdus first - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) - }); - - Ok(utils::common_elements(iterators, |a, b| { - // We compare b with a because we reversed the iterator earlier - b.cmp(a) - }) - .map(|iter| { - ( - iter.map(move |id| { - let mut pduid = prefix_clone.clone(); - pduid.extend_from_slice(&id); - pduid - }), - words, - ) - })) - } - - #[tracing::instrument(skip(self))] - pub fn get_shared_rooms<'a>( - &'a self, - users: Vec>, - ) -> Result>> + 'a> { - let iterators = users.into_iter().map(move |user_id| { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_joined - .scan_prefix(prefix) - .map(|(key, _)| { - let roomid_index = key - .iter() - .enumerate() - .find(|(_, &b)| b == 0xff) - .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? 
- .0 - + 1; // +1 because the room id starts AFTER the separator - - let room_id = key[roomid_index..].to_vec(); - - Ok::<_, Error>(room_id) - }) - .filter_map(|r| r.ok()) - }); - - // We use the default compare function because keys are sorted correctly (not reversed) - Ok(utils::common_elements(iterators, Ord::cmp) - .expect("users is not empty") - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { - Error::bad_database("Invalid RoomId bytes in userroomid_joined") - })?) - .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - })) - } - - /// Returns an iterator of all servers participating in this room. - #[tracing::instrument(skip(self))] - pub fn room_servers<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomserverids.scan_prefix(prefix).map(|(key, _)| { - ServerName::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Server name in roomserverids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.serverroomids.get(&key).map(|o| o.is_some()) - } - - /// Returns an iterator of all rooms a server participates in (as far as we know). - #[tracing::instrument(skip(self))] - pub fn server_rooms<'a>( - &'a self, - server: &ServerName, - ) -> impl Iterator>> + 'a { - let mut prefix = server.as_bytes().to_vec(); - prefix.push(0xff); - - self.serverroomids.scan_prefix(prefix).map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) - }) - } - - /// Returns an iterator over all joined members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { - self.roomid_joinedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { - self.roomid_invitedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - /// Returns an iterator over all User IDs who ever joined a room. 
- #[tracing::instrument(skip(self))] - pub fn room_useroncejoined<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuseroncejoinedids - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in room_useroncejoined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) - }) - } - - /// Returns an iterator over all invited members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members_invited<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_invitecount - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_invitecount - .get(&key)? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid invitecount in db.") - })?)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_leftcount - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid leftcount in db.")) - }) - .transpose() - } - - pub fn is_disabled(&self, room_id: &RoomId) -> Result { - Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) - } - - /// Returns an iterator over all rooms this user joined. - #[tracing::instrument(skip(self))] - pub fn rooms_joined<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator>> + 'a { - self.userroomid_joined - .scan_prefix(user_id.as_bytes().to_vec()) - .map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) - }) - } - - /// Returns an iterator over all rooms a user was invited to. 
- #[tracing::instrument(skip(self))] - pub fn rooms_invited<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_invitestate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn invite_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_invitestate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok(state) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn left_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_leftstate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok(state) - }) - .transpose() - } - - /// Returns an iterator over all rooms a user left. - #[tracing::instrument(skip(self))] - pub fn rooms_left<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_leftstate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_left(&self, user_id: &UserId, 
room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( - &'a self, - key: &[u64], - ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); - } - - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? - .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - }) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } - } - - Ok(None) - } - - #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db - if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - )?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_mark_sent( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - lazy_load: HashSet>, - count: u64, - ) { - self.lazy_load_waiting.lock().unwrap().insert( - ( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - count, - ), - lazy_load, - ); - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } - - /// Returns the room's version. 
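The auth-chain cache above persists a chain of short event IDs as nothing more than their big-endian bytes laid end to end, and decodes them again with fixed-size chunks. A self-contained sketch of that encoding (function names are illustrative only):

use std::mem::size_of;

// Encode a chain of shorteventids the way shorteventid_authchain stores it.
fn encode_chain(chain: &[u64]) -> Vec<u8> {
    chain.iter().flat_map(|id| id.to_be_bytes()).collect()
}

// Decode it again with 8-byte chunks, as the cache lookup above does.
fn decode_chain(bytes: &[u8]) -> Vec<u64> {
    bytes
        .chunks_exact(size_of::<u64>())
        .map(|chunk| {
            let mut buf = [0u8; size_of::<u64>()];
            buf.copy_from_slice(chunk);
            u64::from_be_bytes(buf)
        })
        .collect()
}

fn main() {
    let chain = vec![1u64, 42, 7];
    assert_eq!(decode_chain(&encode_chain(&chain)), chain);
}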
- #[tracing::instrument(skip(self))] - pub fn get_room_version(&self, room_id: &RoomId) -> Result { - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - let room_version = create_event_content - .map(|create_event| create_event.room_version) - .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; - Ok(room_version) - } -} diff --git a/src/service/rooms/user.rs b/src/service/rooms/user.rs index 4ad815e3..976ab5b3 100644 --- a/src/service/rooms/user.rs +++ b/src/service/rooms/user.rs @@ -1,2948 +1,77 @@ -mod edus; - -pub use edus::RoomEdus; - -use crate::{ - pdu::{EventHash, PduBuilder}, - utils, Database, Error, PduEvent, Result, -}; -use lru_cache::LruCache; -use regex::Regex; -use ring::digest; -use ruma::{ - api::{client::error::ErrorKind, federation}, - events::{ - direct::DirectEvent, - ignored_user_list::IgnoredUserListEvent, - push_rules::PushRulesEvent, - room::{ - create::RoomCreateEventContent, - member::{MembershipState, RoomMemberEventContent}, - power_levels::RoomPowerLevelsEventContent, - }, - tag::TagEvent, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, RoomEventType, StateEventType, - }, - push::{Action, Ruleset, Tweak}, - serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res::{self, RoomVersion, StateMap}, - uint, DeviceId, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, -}; -use serde::Deserialize; -use serde_json::value::to_raw_value; -use std::{ - borrow::Cow, - collections::{hash_map, BTreeMap, HashMap, HashSet}, - fmt::Debug, - iter, - mem::size_of, - sync::{Arc, Mutex, RwLock}, -}; -use tokio::sync::MutexGuard; -use tracing::{error, warn}; - -use super::{abstraction::Tree, pusher}; - -/// The unique identifier of each state group. -/// -/// This is created when a state group is added to the database by -/// hashing the entire state. -pub type StateHashId = Vec; -pub type CompressedStateEvent = [u8; 2 * size_of::()]; - -pub struct Rooms { - pub edus: RoomEdus, - pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count - pub(super) eventid_pduid: Arc, - pub(super) roomid_pduleaves: Arc, - pub(super) alias_roomid: Arc, - pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count - pub(super) publicroomids: Arc, - - pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount - - /// Participating servers in a room. 
- pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName - pub(super) serverroomids: Arc, // ServerRoomId = ServerName + RoomId - - pub(super) userroomid_joined: Arc, - pub(super) roomuserid_joined: Arc, - pub(super) roomid_joinedcount: Arc, - pub(super) roomid_invitedcount: Arc, - pub(super) roomuseroncejoinedids: Arc, - pub(super) userroomid_invitestate: Arc, // InviteState = Vec> - pub(super) roomuserid_invitecount: Arc, // InviteCount = Count - pub(super) userroomid_leftstate: Arc, - pub(super) roomuserid_leftcount: Arc, - - pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled - - pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId - - pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 - pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 - - /// Remember the current state hash of a room. - pub(super) roomid_shortstatehash: Arc, - pub(super) roomsynctoken_shortstatehash: Arc, - /// Remember the state hash at events in the past. - pub(super) shorteventid_shortstatehash: Arc, - /// StateKey = EventType + StateKey, ShortStateKey = Count - pub(super) statekey_shortstatekey: Arc, - pub(super) shortstatekey_statekey: Arc, - - pub(super) roomid_shortroomid: Arc, - - pub(super) shorteventid_eventid: Arc, - pub(super) eventid_shorteventid: Arc, - - pub(super) statehash_shortstatehash: Arc, - pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) - - pub(super) shorteventid_authchain: Arc, - - /// RoomId + EventId -> outlier PDU. - /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) eventid_outlierpdu: Arc, - pub(super) softfailedeventids: Arc, - - /// RoomId + EventId -> Parent PDU EventId. - pub(super) referencedevents: Arc, - - pub(super) pdu_cache: Mutex, Arc>>, - pub(super) shorteventid_cache: Mutex>>, - pub(super) auth_chain_cache: Mutex, Arc>>>, - pub(super) eventidshort_cache: Mutex, u64>>, - pub(super) statekeyshort_cache: Mutex>, - pub(super) shortstatekey_cache: Mutex>, - pub(super) our_real_users_cache: RwLock, Arc>>>>, - pub(super) appservice_in_room_cache: RwLock, HashMap>>, - pub(super) lazy_load_waiting: - Mutex, Box, Box, u64), HashSet>>>, - pub(super) stateinfo_cache: Mutex< - LruCache< - u64, - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - >, - >, - pub(super) lasttimelinecount_cache: Mutex, u64>>, -} - -impl Rooms { - /// Returns true if a given room version is supported - #[tracing::instrument(skip(self, db))] - pub fn is_supported_version(&self, db: &Database, room_version: &RoomVersionId) -> bool { - db.globals.supported_room_versions().contains(room_version) - } - - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? 
- .pop() - .expect("there is always one layer") - .1; - let mut result = BTreeMap::new(); - let mut i = 0; - for compressed in full_state.into_iter() { - let parsed = self.parse_compressed_state_event(compressed)?; - result.insert(parsed.0, parsed.1); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - Ok(result) - } - - #[tracing::instrument(skip(self))] - pub async fn state_full( - &self, - shortstatehash: u64, - ) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - let mut result = HashMap::new(); - let mut i = 0; - for compressed in full_state { - let (_, eventid) = self.parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.get_pdu(&eventid)? { - result.insert( - ( - pdu.kind.to_string().into(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - ); - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get_id( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { - Some(s) => s, - None => return Ok(None), - }; - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .into_iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - self.parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) - } - - /// Returns the state hash for this pdu. - pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.eventid_shorteventid - .get(event_id.as_bytes())? - .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) - }) - .transpose() - }) - } - - /// Returns the last state hash key added to the db for the given room. - #[tracing::instrument(skip(self))] - pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - /// This fetches auth events from the current state. - #[tracing::instrument(skip(self))] - pub fn get_auth_events( - &self, - room_id: &RoomId, - kind: &RoomEventType, - sender: &UserId, - state_key: Option<&str>, - content: &serde_json::value::RawValue, - ) -> Result>> { - let shortstatehash = - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - current_shortstatehash - } else { - return Ok(HashMap::new()); - }; - - let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) - .expect("content is a valid JSON object"); - - let mut sauthevents = auth_events - .into_iter() - .filter_map(|(event_type, state_key)| { - self.get_shortstatekey(&event_type.to_string().into(), &state_key) - .ok() - .flatten() - .map(|s| (s, (event_type, state_key))) - }) - .collect::>(); - - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - Ok(full_state - .into_iter() - .filter_map(|compressed| self.parse_compressed_state_event(compressed).ok()) - .filter_map(|(shortstatekey, event_id)| { - sauthevents.remove(&shortstatekey).map(|k| (k, event_id)) - }) - .filter_map(|(k, event_id)| self.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu))) - .collect()) - } - - /// Generate a new StateHash. - /// - /// A unique hash made from hashing all PDU ids of the state joined with 0xff. - fn calculate_hash(&self, bytes_list: &[&[u8]]) -> StateHashId { - // We only hash the pdu's event ids, not the whole pdu - let bytes = bytes_list.join(&0xff); - let hash = digest::digest(&digest::SHA256, &bytes); - hash.as_ref().into() - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. - self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() - } - - /// Force the creation of a new StateHash and insert it into the db. - /// - /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. 
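calculate_hash above derives a StateHashId by joining the entries with 0xff and hashing the result with SHA-256. A minimal sketch of that step, reusing the same ring digest call the surrounding code uses (the free function here is illustrative, not part of this codebase):

use ring::digest;

// Join the entries with the 0xff separator and hash them, as calculate_hash does.
fn state_hash(bytes_list: &[&[u8]]) -> Vec<u8> {
    let joined = bytes_list.join(&0xff_u8);
    digest::digest(&digest::SHA256, &joined).as_ref().to_vec()
}

fn main() {
    let a = [1u8, 2, 3];
    let b = [4u8, 5];
    // Different snapshots produce different ids; the output is always 32 bytes.
    assert_ne!(state_hash(&[&a[..], &b[..]]), state_hash(&[&b[..], &a[..]]));
    assert_eq!(state_hash(&[&a[..], &b[..]]).len(), 32);
}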
- #[tracing::instrument(skip(self, new_state_ids_compressed, db))] - pub fn force_state( - &self, - room_id: &RoomId, - new_state_ids_compressed: HashSet, - db: &Database, - ) -> Result<()> { - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - for event_id in statediffnew.into_iter().filter_map(|new| { - self.parse_compressed_state_event(new) - .ok() - .map(|(_, id)| id) - }) { - let pdu = match self.get_pdu_json(&event_id)? { - Some(pdu) => pdu, - None => continue, - }; - - if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { - continue; - } - - let pdu: PduEvent = match serde_json::from_str( - &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), - ) { - Ok(pdu) => pdu, - Err(_) => continue, - }; - - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let membership = match serde_json::from_str::(pdu.content.get()) { - Ok(e) => e.membership, - Err(_) => continue, - }; - - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, - }; - - let user_id = match UserId::parse(state_key) { - Ok(id) => id, - Err(_) => continue, - }; - - self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; - } - - self.update_joined_count(room_id, db)?; - - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. - #[tracing::instrument(skip(self))] - pub fn load_shortstatehash_info( - &self, - shortstatehash: u64, - ) -> Result< - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - > { - if let Some(r) = self - .stateinfo_cache - .lock() - .unwrap() - .get_mut(&shortstatehash) - { - return Ok(r.clone()); - } - - let value = self - .shortstatehash_statediff - .get(&shortstatehash.to_be_bytes())? 
- .ok_or_else(|| Error::bad_database("State hash does not exist"))?; - let parent = - utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - - let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); - - let mut i = size_of::(); - while let Some(v) = value.get(i..i + 2 * size_of::()) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i += size_of::(); - continue; - } - if add_mode { - added.insert(v.try_into().expect("we checked the size above")); - } else { - removed.insert(v.try_into().expect("we checked the size above")); - } - i += 2 * size_of::(); - } - - if parent != 0_u64 { - let mut response = self.load_shortstatehash_info(parent)?; - let mut state = response.last().unwrap().1.clone(); - state.extend(added.iter().copied()); - for r in &removed { - state.remove(r); - } - - response.push((shortstatehash, state, added, removed)); - - Ok(response) - } else { - let response = vec![(shortstatehash, added.clone(), added, removed)]; - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); - Ok(response) - } - } - - pub fn compress_state_event( - &self, - shortstatekey: u64, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - let mut v = shortstatekey.to_be_bytes().to_vec(); - v.extend_from_slice( - &self - .get_or_create_shorteventid(event_id, globals)? - .to_be_bytes(), - ); - Ok(v.try_into().expect("we checked the size above")) - } - - /// Returns shortstatekey, event id - pub fn parse_compressed_state_event( - &self, - compressed_event: CompressedStateEvent, - ) -> Result<(u64, Arc)> { - Ok(( - utils::u64_from_bytes(&compressed_event[0..size_of::()]) - .expect("bytes have right length"), - self.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]) - .expect("bytes have right length"), - )?, - )) - } - - /// Creates a new shortstatehash that often is just a diff to an already existing - /// shortstatehash and therefore very efficient. - /// - /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer - /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 - /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's - /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. - /// - /// * `shortstatehash` - Shortstatehash of this state - /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid - /// * `statediffremoved` - Removed from base. 
Each vec is shortstatekey+shorteventid - /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer - /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer - #[tracing::instrument(skip( - self, - statediffnew, - statediffremoved, - diff_to_sibling, - parent_states - ))] - pub fn save_state_from_diff( - &self, - shortstatehash: u64, - statediffnew: HashSet, - statediffremoved: HashSet, - diff_to_sibling: usize, - mut parent_states: Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - ) -> Result<()> { - let diffsum = statediffnew.len() + statediffremoved.len(); - - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().unwrap(); - - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - - return Ok(()); - } - - if parent_states.is_empty() { - // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - - return Ok(()); - }; - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. We replace a layer above - - let parent = parent_states.pop().unwrap(); - let parent_diff = parent.2.len() + parent.3.len(); - - if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { - // Diff too big, we replace above layer(s) - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. 
We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.to_be_bytes().to_vec(); - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed[..]); - } - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - } - - Ok(()) - } - - /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash( - &self, - state_hash: &StateHashId, - globals: &super::globals::Globals, - ) -> Result<(u64, bool)> { - Ok(match self.statehash_shortstatehash.get(state_hash)? { - Some(shortstatehash) => ( - utils::u64_from_bytes(&shortstatehash) - .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, - true, - ), - None => { - let shortstatehash = globals.next_count()?; - self.statehash_shortstatehash - .insert(state_hash, &shortstatehash.to_be_bytes())?; - (shortstatehash, false) - } - }) - } - - pub fn get_or_create_shorteventid( - &self, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { - return Ok(*short); - } - - let short = match self.eventid_shorteventid.get(event_id.as_bytes())? { - Some(shorteventid) => utils::u64_from_bytes(&shorteventid) - .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, - None => { - let shorteventid = globals.next_count()?; - self.eventid_shorteventid - .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; - shorteventid - } - }; - - self.eventidshort_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), short); - - Ok(short) - } - - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { - self.roomid_shortroomid - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) - }) - .transpose() - } - - pub fn get_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - ) -> Result> { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(Some(*short)); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = self - .statekey_shortstatekey - .get(&statekey)? - .map(|shortstatekey| { - utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) - }) - .transpose()?; - - if let Some(s) = short { - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), s); - } - - Ok(short) - } - - pub fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - globals: &super::globals::Globals, - ) -> Result { - Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? 
{ - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, - None => { - let short = globals.next_count()?; - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes())?; - short - } - }) - } - - pub fn get_or_create_shortstatekey( - &self, - event_type: &StateEventType, - state_key: &str, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(*short); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = match self.statekey_shortstatekey.get(&statekey)? { - Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, - None => { - let shortstatekey = globals.next_count()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes())?; - self.shortstatekey_statekey - .insert(&shortstatekey.to_be_bytes(), &statekey)?; - shortstatekey - } - }; - - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), short); - - Ok(short) - } - - pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - if let Some(id) = self - .shorteventid_cache - .lock() - .unwrap() - .get_mut(&shorteventid) - { - return Ok(Arc::clone(id)); - } - - let bytes = self - .shorteventid_eventid - .get(&shorteventid.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - - let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; - - self.shorteventid_cache - .lock() - .unwrap() - .insert(shorteventid, Arc::clone(&event_id)); - - Ok(event_id) - } - - pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { - if let Some(id) = self - .shortstatekey_cache - .lock() - .unwrap() - .get_mut(&shortstatekey) - { - return Ok(id.clone()); - } - - let bytes = self - .shortstatekey_statekey - .get(&shortstatekey.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; - - let mut parts = bytes.splitn(2, |&b| b == 0xff); - let eventtype_bytes = parts.next().expect("split always returns one entry"); - let statekey_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; - - let event_type = - StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { - Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; - - let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { - Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") - })?; - - let result = (event_type, state_key); - - self.shortstatekey_cache - .lock() - .unwrap() - .insert(shortstatekey, result.clone()); - - Ok(result) - } - - /// Returns the full room state. - #[tracing::instrument(skip(self))] - pub async fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns the `count` of this pdu's id. - pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) - } - - /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - - /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu's id. - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? 
- .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { - return Ok(Some(Arc::clone(p))); - } - - if let Some(pdu) = self - .eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - .map(Arc::new) - }) - .transpose()? - { - self.pdu_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), Arc::clone(&pdu)); - Ok(Some(pdu)) - } else { - Ok(None) - } - } - - /// Returns the pdu. - /// - /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Returns the pdu as a `BTreeMap`. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Removes a pdu and creates a new one with the same id. - #[tracing::instrument(skip(self))] - fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - if self.pduid_pdu.get(pdu_id)?.is_some() { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), - )?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "PDU does not exist.", - )) - } - } - - /// Returns the leaf pdus of a room. - #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - #[tracing::instrument(skip(self, room_id, event_ids))] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - /// Replace the leaves of a room. - /// - /// The provided `event_ids` become the new leaves, this allows a room to have multiple - /// `prev_events`. 
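pdu_count and the pduid_pdu lookups above rely on one fixed layout: a PDU id is the 8-byte big-endian shortroomid followed by the 8-byte big-endian count. A standalone sketch of building such an id and reading the count back out of its tail (names are illustrative):

use std::mem::size_of;

// PduId = ShortRoomId (8 bytes BE) + Count (8 bytes BE), as described above.
fn make_pdu_id(shortroomid: u64, count: u64) -> Vec<u8> {
    let mut pdu_id = shortroomid.to_be_bytes().to_vec();
    pdu_id.extend_from_slice(&count.to_be_bytes());
    pdu_id
}

// Read the count back from the last 8 bytes, mirroring pdu_count.
fn count_from_pdu_id(pdu_id: &[u8]) -> u64 {
    let mut buf = [0u8; size_of::<u64>()];
    buf.copy_from_slice(&pdu_id[pdu_id.len() - size_of::<u64>()..]);
    u64::from_be_bytes(buf)
}

fn main() {
    let pdu_id = make_pdu_id(5, 1234);
    assert_eq!(count_from_pdu_id(&pdu_id), 1234);
    // Big-endian counts keep ids ordered, so iterating a room's prefix walks the timeline.
}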
- #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves<'a>( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - /// Returns the pdu from the outlier tree. - pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - /// Append the PDU as an outlier. - /// - /// Any event given to this will be processed (state-res) on another thread. - #[tracing::instrument(skip(self, pdu))] - pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } - - #[tracing::instrument(skip(self))] - pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } - - /// Creates a new persisted data unit and adds it to a room. - /// - /// By this point the incoming event should be fully authenticated, no auth happens - /// in `append_pdu`. - /// - /// Returns pdu id - #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] - pub fn append_pdu<'a>( - &self, - pdu: &PduEvent, - mut pdu_json: CanonicalJsonObject, - leaves: impl IntoIterator + Debug, - db: &Database, - ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); - - // Make unsigned fields correct. This is not properly documented in the spec, but state - // events need to have previous content in the unsigned field, so clients can easily - // interpret things like membership changes - if let Some(state_key) = &pdu.state_key { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) - { - if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) - .unwrap() - { - unsigned.insert( - "prev_content".to_owned(), - CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()) - .expect("event is valid, we just created it"), - ), - ); - } - } - } else { - error!("Invalid unsigned type in pdu."); - } - } - - // We must keep track of all events that have been referenced. 
- self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - self.replace_pdu_leaves(&pdu.room_id, leaves)?; - - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(pdu.room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - - let count1 = db.globals.next_count()?; - // Mark as read first so the sending client doesn't get a notification even if appending - // fails - self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; - self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; - - let count2 = db.globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&count2.to_be_bytes()); - - // There's a brief moment of time here where the count is updated but the pdu does not - // exist. This could theoretically lead to dropped pdus, but it's extremely rare - // - // Update: We fixed this using insert_lock - - self.pduid_pdu.insert( - &pdu_id, - &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - self.lasttimelinecount_cache - .lock() - .unwrap() - .insert(pdu.room_id.clone(), count2); - - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; - - drop(insert_lock); - - // See if the event matches any known pushers - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - let sync_pdu = pdu.to_sync_room_event(); - - let mut notifies = Vec::new(); - let mut highlights = Vec::new(); - - for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { - // Don't notify the user of their own events - if user == &pdu.sender { - continue; - } - - let rules_for_user = db - .account_data - .get( - None, - user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? - .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); - - let mut highlight = false; - let mut notify = false; - - for action in pusher::get_actions( - user, - &rules_for_user, - &power_levels, - &sync_pdu, - &pdu.room_id, - db, - )? 
{ - match action { - Action::DontNotify => notify = false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => notify = true, - Action::SetTweak(Tweak::Highlight(true)) => { - highlight = true; - } - _ => {} - }; - } - - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(pdu.room_id.as_bytes()); - - if notify { - notifies.push(userroom_id.clone()); - } - - if highlight { - highlights.push(userroom_id); - } - - for senderkey in db.pusher.get_pusher_senderkeys(user) { - db.sending.send_push_pdu(&*pdu_id, senderkey)?; - } - } - - self.userroomid_notificationcount - .increment_batch(&mut notifies.into_iter())?; - self.userroomid_highlightcount - .increment_batch(&mut highlights.into_iter())?; - - match pdu.kind { - RoomEventType::RoomRedaction => { - if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(redact_id, pdu)?; - } - } - RoomEventType::RoomMember => { - if let Some(state_key) = &pdu.state_key { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - // if the state_key fails - let target_user_id = UserId::parse(state_key.clone()) - .expect("This state_key was previously validated"); - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - let invite_state = match content.membership { - MembershipState::Invite => { - let state = self.calculate_invite_state(pdu)?; - Some(state) - } - _ => None, - }; - - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( - &pdu.room_id, - &target_user_id, - content.membership, - &pdu.sender, - invite_state, - db, - true, - )?; - } - } - RoomEventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody<'a> { - #[serde(borrow)] - body: Option>, - } - - let content = serde_json::from_str::>(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - let mut batch = body - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= 50) - .map(str::to_lowercase) - .map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xff); - key.extend_from_slice(&pdu_id); - (key, Vec::new()) - }); - - self.tokenids.insert_batch(&mut batch)?; - - let admin_room = self.id_from_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )?; - let server_user = format!("@conduit:{}", db.globals.server_name()); - - let to_conduit = body.starts_with(&format!("{}: ", server_user)); - - // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let from_conduit = - pdu.sender == server_user && db.globals.emergency_password().is_none(); - - if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - db.admin.process_message(body.to_string()); - } - } - } - _ => {} - } - - Ok(pdu_id) - } - - #[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - match self - .lasttimelinecount_cache - .lock() - .unwrap() - .entry(room_id.to_owned()) - { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - 
.pdus_until(&sender_user, &room_id, u64::MAX)? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .map(|(pduid, _)| self.pdu_count(&pduid)) - .next() - { - Ok(*v.insert(last_count?)) - } else { - Ok(0) - } - } - hash_map::Entry::Occupied(o) => Ok(*o.get()), - } - } - - #[tracing::instrument(skip(self))] - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - self.userroomid_highlightcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid notification count in db.")) - }) - .unwrap_or(Ok(0)) - } #[tracing::instrument(skip(self))] - pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { + pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); userroom_id.extend_from_slice(room_id.as_bytes()); + self.userroomid_notificationcount + .insert(&userroom_id, &0_u64.to_be_bytes())?; self.userroomid_highlightcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid highlight count in db.")) - }) - .unwrap_or(Ok(0)) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. 
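set_event_state and append_to_state below operate on CompressedStateEvent values, which are just the 8-byte big-endian shortstatekey glued to the 8-byte big-endian shorteventid; the real parse_compressed_state_event then resolves the shorteventid back to an EventId. A minimal sketch of packing and unpacking that 16-byte form (helper names are illustrative):

use std::mem::size_of;

// CompressedStateEvent = shortstatekey (8 bytes BE) + shorteventid (8 bytes BE).
type Compressed = [u8; 2 * size_of::<u64>()];

fn compress(shortstatekey: u64, shorteventid: u64) -> Compressed {
    let mut out = [0u8; 2 * size_of::<u64>()];
    out[..size_of::<u64>()].copy_from_slice(&shortstatekey.to_be_bytes());
    out[size_of::<u64>()..].copy_from_slice(&shorteventid.to_be_bytes());
    out
}

fn parse(compressed: Compressed) -> (u64, u64) {
    let mut key = [0u8; size_of::<u64>()];
    let mut event = [0u8; size_of::<u64>()];
    key.copy_from_slice(&compressed[..size_of::<u64>()]);
    event.copy_from_slice(&compressed[size_of::<u64>()..]);
    (u64::from_be_bytes(key), u64::from_be_bytes(event))
}

fn main() {
    let packed = compress(7, 99);
    assert_eq!(parse(packed), (7, 99));
    // Because the shortstatekey comes first, a state key can be located with
    // starts_with(&shortstatekey.to_be_bytes()), as the state getters above do.
}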
- #[tracing::instrument(skip(self, state_ids_compressed, globals))] - pub fn set_event_state( - &self, - event_id: &EventId, - room_id: &RoomId, - state_ids_compressed: HashSet, - globals: &super::globals::Globals, - ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &state_ids_compressed - .iter() - .map(|s| &s[..]) - .collect::>(), - ); - - let (shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, globals)?; - - if !already_existed { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (state_ids_compressed, HashSet::new()) - }; - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 1_000_000, // high number because no state will be based on this one - states_parents, - )?; - } - - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, new_pdu, globals))] - pub fn append_to_state( - &self, - new_pdu: &PduEvent, - globals: &super::globals::Globals, - ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; - - if let Some(p) = previous_shortstatehash { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; - } - - if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let shortstatekey = self.get_or_create_shortstatekey( - &new_pdu.kind.to_string().into(), - state_key, - globals, - )?; - - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; - - let replaces = states_parents - .last() - .map(|info| { - info.1 - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); - - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } - - // TODO: statehash with deterministic inputs - let shortstatehash = globals.next_count()?; - - let mut statediffnew = HashSet::new(); - statediffnew.insert(new); - - let mut statediffremoved = HashSet::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) - } - } - - #[tracing::instrument(skip(self, invite_event))] - pub fn calculate_invite_state( - &self, - invite_event: &PduEvent, - ) -> Result>> { - let mut state = Vec::new(); - // 
Add recommended events - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomCanonicalAlias, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomMember, - invite_event.sender.as_str(), - )? { - state.push(e.to_stripped_state_event()); - } - - state.push(invite_event.to_stripped_state_event()); - Ok(state) - } - - #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - pub fn associate_token_shortstatehash( - &self, - room_id: &RoomId, - token: u64, - shortstatehash: u64, - ) -> Result<()> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .insert(&key, &shortstatehash.to_be_bytes()) - } - - pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") - }) - }) - .transpose() - } - - /// Creates a new persisted data unit and adds it to a room. - #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex - ) -> Result> { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - } = pdu_builder; - - let prev_events = self - .get_pdu_leaves(room_id)? 
- .into_iter() - .take(20) - .collect::>(); - - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - - let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = unsigned.unwrap_or_default(); - if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - self.room_state_get(room_id, &event_type.to_string().into(), state_key)? - { - unsigned.insert( - "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), - ); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), - ); - } - } - - let mut pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key, - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_ref().to_owned()), - ); - - match ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) { - Ok(_) => {} - Err(e) => { - return match e { - ruma::signatures::Error::PduSize => Err(Error::BadRequest( - ErrorKind::TooLarge, - "Message is too long", - )), - _ => Err(Error::BadRequest( - ErrorKind::Unknown, - "Signing event failed", - )), - } - } - } - - // Generate event id - pdu.event_id = EventId::parse_arc(format!( - "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid 
event ids"); - - pdu_json.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), - ); - - // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; - - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. - let statehashid = self.append_to_state(&pdu, &db.globals)?; - - let pdu_id = self.append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - iter::once(&*pdu.event_id), - db, - )?; - - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; - - let mut servers: HashSet> = - self.room_servers(room_id).filter_map(|r| r.ok()).collect(); - - // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - servers.insert(Box::from(state_key_uid.server_name())); - } - } - - // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(db.globals.server_name()); - - db.sending.send_pdu(servers.into_iter(), &pdu_id)?; - - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. 
- if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - if let Some(appservice_uid) = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }) - { - if state_key_uid == &appservice_uid { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - } - } - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - Ok(pdu.event_id) - } - - /// Returns an iterator over all PDUs in a room. - #[tracing::instrument(skip(self))] - pub fn all_pdus<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result, PduEvent)>> + 'a> { - self.pdus_since(user_id, room_id, 0) - } - - /// Returns an iterator over all events in a room that happened after the event with id `since` - /// in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_since<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - since: u64, - ) -> Result, PduEvent)>> + 'a> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Skip the first pdu if it's exactly at since, because we sent that last time - let mut first_pdu_id = prefix.clone(); - first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(&first_pdu_id, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their tokens in a room that happened before the - /// event with id `until` in reverse-chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_until<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - until: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? 
- .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their token in a room that happened after the event - /// with id `from` in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_after<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - from: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Replace a PDU with the redacted form. - #[tracing::instrument(skip(self, reason))] - pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { - if let Some(pdu_id) = self.get_pdu_id(event_id)? { - let mut pdu = self - .get_pdu_from_id(&pdu_id)? - .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact(reason)?; - self.replace_pdu(&pdu_id, &pdu)?; - } - // If event does not exist, just noop - Ok(()) - } - - /// Update current membership data. - #[tracing::instrument(skip(self, last_state, db))] - pub fn update_membership( - &self, - room_id: &RoomId, - user_id: &UserId, - membership: MembershipState, - sender: &UserId, - last_state: Option>>, - db: &Database, - update_joined_count: bool, - ) -> Result<()> { - // Keep track what remote users exist by adding them as "deactivated" users - if user_id.server_name() != db.globals.server_name() { - db.users.create(user_id, None)?; - // TODO: displayname, avatar url - } - - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(user_id.server_name().as_bytes()); - - let mut serverroom_id = user_id.server_name().as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - match &membership { - MembershipState::Join => { - // Check if the user never joined this room - if !self.once_joined(user_id, room_id)? { - // Add the user ID to the join list then - self.roomuseroncejoinedids.insert(&userroom_id, &[])?; - - // Check if the room has a predecessor - if let Some(predecessor) = self - .room_state_get(room_id, &StateEventType::RoomCreate, "")? 
- .and_then(|create| serde_json::from_str(create.content.get()).ok()) - .and_then(|content: RoomCreateEventContent| content.predecessor) - { - // Copy user settings from predecessor to the current room: - // - Push rules - // - // TODO: finish this once push rules are implemented. - // - // let mut push_rules_event_content: PushRulesEvent = account_data - // .get( - // None, - // user_id, - // EventType::PushRules, - // )?; - // - // NOTE: find where `predecessor.room_id` match - // and update to `room_id`. - // - // account_data - // .update( - // None, - // user_id, - // EventType::PushRules, - // &push_rules_event_content, - // globals, - // ) - // .ok(); - - // Copy old tags to new room - if let Some(tag_event) = db.account_data.get::( - Some(&predecessor.room_id), - user_id, - RoomAccountDataEventType::Tag, - )? { - db.account_data - .update( - Some(room_id), - user_id, - RoomAccountDataEventType::Tag, - &tag_event, - &db.globals, - ) - .ok(); - }; - - // Copy direct chat flag - if let Some(mut direct_event) = db.account_data.get::( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - )? { - let mut room_ids_updated = false; - - for room_ids in direct_event.content.0.values_mut() { - if room_ids.iter().any(|r| r == &predecessor.room_id) { - room_ids.push(room_id.to_owned()); - room_ids_updated = true; - } - } - - if room_ids_updated { - db.account_data.update( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - &direct_event, - &db.globals, - )?; - } - }; - } - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_joined.insert(&userroom_id, &[])?; - self.roomuserid_joined.insert(&roomuser_id, &[])?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Invite => { - // We want to know if the sender is ignored by the receiver - let is_ignored = db - .account_data - .get::( - None, // Ignored users are in global account data - user_id, // Receiver - GlobalAccountDataEventType::IgnoredUserList - .to_string() - .into(), - )? 
- .map_or(false, |ignored| { - ignored - .content - .ignored_users - .iter() - .any(|user| user == sender) - }); - - if is_ignored { - return Ok(()); - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_invitestate.insert( - &userroom_id, - &serde_json::to_vec(&last_state.unwrap_or_default()) - .expect("state to bytes always works"), - )?; - self.roomuserid_invitecount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Leave | MembershipState::Ban => { - if update_joined_count - && self - .room_members(room_id) - .chain(self.room_members_invited(room_id)) - .filter_map(|r| r.ok()) - .all(|u| u.server_name() != user_id.server_name()) - { - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - self.userroomid_leftstate.insert( - &userroom_id, - &serde_json::to_vec(&Vec::>::new()).unwrap(), - )?; // TODO - self.roomuserid_leftcount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - } - _ => {} - } - - if update_joined_count { - self.update_joined_count(room_id, db)?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn update_joined_count(&self, room_id: &RoomId, db: &Database) -> Result<()> { - let mut joinedcount = 0_u64; - let mut invitedcount = 0_u64; - let mut joined_servers = HashSet::new(); - let mut real_users = HashSet::new(); - - for joined in self.room_members(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(joined.server_name().to_owned()); - if joined.server_name() == db.globals.server_name() - && !db.users.is_deactivated(&joined).unwrap_or(true) - { - real_users.insert(joined); - } - joinedcount += 1; - } - - for invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(invited.server_name().to_owned()); - invitedcount += 1; - } - - self.roomid_joinedcount - .insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?; - - self.roomid_invitedcount - .insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?; - - self.our_real_users_cache - .write() - .unwrap() - .insert(room_id.to_owned(), Arc::new(real_users)); - - for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { - if !joined_servers.remove(&old_joined_server) { - // Server not in room anymore - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(old_joined_server.as_bytes()); - - let mut serverroom_id = old_joined_server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - } - - // Now only new servers are in joined_servers anymore - for server in joined_servers { - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(server.as_bytes()); - - let mut serverroom_id = server.as_bytes().to_vec(); - serverroom_id.push(0xff); - 
serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - - self.appservice_in_room_cache - .write() - .unwrap() - .remove(room_id); - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn get_our_real_users( - &self, - room_id: &RoomId, - db: &Database, - ) -> Result>>> { - let maybe = self - .our_real_users_cache - .read() - .unwrap() - .get(room_id) - .cloned(); - if let Some(users) = maybe { - Ok(users) - } else { - self.update_joined_count(room_id, db)?; - Ok(Arc::clone( - self.our_real_users_cache - .read() - .unwrap() - .get(room_id) - .unwrap(), - )) - } - } - - #[tracing::instrument(skip(self, room_id, appservice, db))] - pub fn appservice_in_room( - &self, - room_id: &RoomId, - appservice: &(String, serde_yaml::Value), - db: &Database, - ) -> Result { - let maybe = self - .appservice_in_room_cache - .read() - .unwrap() - .get(room_id) - .and_then(|map| map.get(&appservice.0)) - .copied(); - - if let Some(b) = maybe { - Ok(b) - } else if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - - let bridge_user_id = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }); - - let in_room = bridge_user_id - .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) - || self.room_members(room_id).any(|userid| { - userid.map_or(false, |userid| { - users.iter().any(|r| r.is_match(userid.as_str())) - }) - }); - - self.appservice_in_room_cache - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default() - .insert(appservice.0.clone(), in_room); - - Ok(in_room) - } else { - Ok(false) - } - } - - // Make a user leave all their joined rooms - #[tracing::instrument(skip(self, db))] - pub async fn leave_all_rooms(&self, user_id: &UserId, db: &Database) -> Result<()> { - let all_rooms = db - .rooms - .rooms_joined(user_id) - .chain(db.rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) - .collect::>(); - - for room_id in all_rooms { - let room_id = match room_id { - Ok(room_id) => room_id, - Err(_) => continue, - }; - - let _ = self.leave_room(user_id, &room_id, db).await; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - pub async fn leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - // Ask a remote server if we don't have this room - if !self.exists(room_id)? && room_id.server_name() != db.globals.server_name() { - if let Err(e) = self.remote_leave_room(user_id, room_id, db).await { - warn!("Failed to leave room {} remotely: {}", user_id, e); - // Don't tell the client about this error - } - - let last_state = self - .invite_state(user_id, room_id)? 
- .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?; - - // We always drop the invite, we can't rely on other servers - self.update_membership( - room_id, - user_id, - MembershipState::Leave, - user_id, - last_state, - db, - true, - )?; - } else { - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - let mut event: RoomMemberEventContent = serde_json::from_str( - self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot leave a room you are not a member of.", - ))? - .content - .get(), - ) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = MembershipState::Leave; - - self.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - user_id, - room_id, - db, - &state_lock, - )?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - async fn remote_leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - let mut make_leave_response_and_server = Err(Error::BadServerResponse( - "No server available to assist in leaving.", - )); - - let invite_state = db - .rooms - .invite_state(user_id, room_id)? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "User is not invited.", - ))?; - - let servers: HashSet<_> = invite_state - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); - - for remote_server in servers { - let make_leave_response = db - .sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, - ) - .await; - - make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); - - if make_leave_response_and_server.is_ok() { - break; - } - } - - let (make_leave_response, remote_server) = make_leave_response_and_server?; - - let room_version_id = match make_leave_response.room_version { - Some(version) if self.is_supported_version(&db, &version) => version, - _ => return Err(Error::BadServerResponse("Room version is not supported")), - }; - - let mut leave_event_stub = - serde_json::from_str::(make_leave_response.event.get()).map_err( - |_| Error::BadServerResponse("Invalid make_leave event json received from server."), - )?; - - // TODO: Is origin needed? 
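`remote_leave_room` above has to find homeservers that can help us leave a room we only hold an invite for: it takes the sender of each stripped invite-state event, keeps the server part of that user ID, and then tries `make_leave` against each candidate until one answers. A rough standalone version of the extraction step, using plain `serde_json` values instead of ruma's typed events; the sample event JSON is invented for illustration, and `serde_json` is assumed to be available as it is elsewhere in this codebase:

```rust
use std::collections::HashSet;

/// Sketch: pull candidate homeservers out of stripped invite-state events.
/// Each element of `stripped_events` is the raw JSON of one stripped event.
fn candidate_servers(stripped_events: &[&str]) -> HashSet<String> {
    stripped_events
        .iter()
        .filter_map(|raw| serde_json::from_str::<serde_json::Value>(raw).ok())
        .filter_map(|event| {
            event
                .get("sender")
                .and_then(|s| s.as_str())
                .map(str::to_owned)
        })
        // A Matrix user ID is `@localpart:server.name`; everything after the
        // first ':' is the server that produced the invite-state event.
        .filter_map(|sender| sender.split_once(':').map(|(_, s)| s.to_owned()))
        .collect()
}

fn main() {
    let events = [
        r#"{"type":"m.room.member","sender":"@bob:remote.example","content":{"membership":"invite"}}"#,
        r#"{"type":"m.room.create","sender":"@bob:remote.example","content":{}}"#,
    ];
    let servers = candidate_servers(&events);
    assert_eq!(servers.len(), 1);
    assert!(servers.contains("remote.example"));
}
```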
- leave_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - leave_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms - leave_event_stub.remove("event_id"); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut leave_event_stub, - &room_version_id, - ) - .expect("event is valid, we just created it"); - - // Generate event id - let event_id = EventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - // Add event_id back - leave_event_stub.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - - // It has enough fields to be called a proper event now - let leave_event = leave_event_stub; - - db.sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::create_leave_event::v2::Request { - room_id, - event_id: &event_id, - pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), - }, - ) - .await?; + .insert(&userroom_id, &0_u64.to_be_bytes())?; Ok(()) } - /// Makes a user forget a room. #[tracing::instrument(skip(self))] - pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { + pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); userroom_id.extend_from_slice(room_id.as_bytes()); - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - - Ok(()) - } - - #[tracing::instrument(skip(self, globals))] - pub fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId>, - globals: &super::globals::Globals, - ) -> Result<()> { - if let Some(room_id) = room_id { - // New alias - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - } else { - // room_id=None means remove alias - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result>> { - self.alias_roomid - .get(alias.alias().as_bytes())? + self.userroomid_notificationcount + .get(&userroom_id)? .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) + utils::u64_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Invalid notification count in db.")) }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_aliases<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? - .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { - if public { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } else { - self.publicroomids.remove(room_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) + .unwrap_or(Ok(0)) } #[tracing::instrument(skip(self))] - pub fn iter_ids(&self) -> impl Iterator>> + '_ { - self.roomid_shortroomid.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) - }) - } + pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); - #[tracing::instrument(skip(self))] - pub fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) + self.userroomid_highlightcount + .get(&userroom_id)? + .map(|bytes| { + utils::u64_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Invalid highlight count in db.")) + }) + .unwrap_or(Ok(0)) } - #[tracing::instrument(skip(self))] - pub fn search_pdus<'a>( - &'a self, + pub fn associate_token_shortstatehash( + &self, room_id: &RoomId, - search_string: &str, - ) -> Result> + 'a, Vec)>> { - let prefix = self - .get_shortroomid(room_id)? 
- .expect("room exists") - .to_be_bytes() - .to_vec(); - let prefix_clone = prefix.clone(); + token: u64, + shortstatehash: u64, + ) -> Result<()> { + let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - let words: Vec<_> = search_string - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .map(str::to_lowercase) - .collect(); + let mut key = shortroomid.to_be_bytes().to_vec(); + key.extend_from_slice(&token.to_be_bytes()); - let iterators = words.clone().into_iter().map(move |word| { - let mut prefix2 = prefix.clone(); - prefix2.extend_from_slice(word.as_bytes()); - prefix2.push(0xff); + self.roomsynctoken_shortstatehash + .insert(&key, &shortstatehash.to_be_bytes()) + } - let mut last_possible_id = prefix2.clone(); - last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); + pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { + let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - self.tokenids - .iter_from(&last_possible_id, true) // Newest pdus first - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) - }); + let mut key = shortroomid.to_be_bytes().to_vec(); + key.extend_from_slice(&token.to_be_bytes()); - Ok(utils::common_elements(iterators, |a, b| { - // We compare b with a because we reversed the iterator earlier - b.cmp(a) - }) - .map(|iter| { - ( - iter.map(move |id| { - let mut pduid = prefix_clone.clone(); - pduid.extend_from_slice(&id); - pduid - }), - words, - ) - })) + self.roomsynctoken_shortstatehash + .get(&key)? + .map(|bytes| { + utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") + }) + }) + .transpose() } #[tracing::instrument(skip(self))] @@ -2983,521 +112,3 @@ impl Rooms { })) } - /// Returns an iterator of all servers participating in this room. - #[tracing::instrument(skip(self))] - pub fn room_servers<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomserverids.scan_prefix(prefix).map(|(key, _)| { - ServerName::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Server name in roomserverids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.serverroomids.get(&key).map(|o| o.is_some()) - } - - /// Returns an iterator of all rooms a server participates in (as far as we know). - #[tracing::instrument(skip(self))] - pub fn server_rooms<'a>( - &'a self, - server: &ServerName, - ) -> impl Iterator>> + 'a { - let mut prefix = server.as_bytes().to_vec(); - prefix.push(0xff); - - self.serverroomids.scan_prefix(prefix).map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) - }) - } - - /// Returns an iterator over all joined members of a room. 
- #[tracing::instrument(skip(self))] - pub fn room_members<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { - self.roomid_joinedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { - self.roomid_invitedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - /// Returns an iterator over all User IDs who ever joined a room. - #[tracing::instrument(skip(self))] - pub fn room_useroncejoined<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuseroncejoinedids - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in room_useroncejoined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) - }) - } - - /// Returns an iterator over all invited members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members_invited<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_invitecount - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_invitecount - .get(&key)? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid invitecount in db.") - })?)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_leftcount - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid leftcount in db.")) - }) - .transpose() - } - - pub fn is_disabled(&self, room_id: &RoomId) -> Result { - Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) - } - - /// Returns an iterator over all rooms this user joined. 
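Almost every membership iterator in this stretch is a prefix scan over keys of the form `room_id ++ 0xff ++ user_id` (or the reverse), with the trailing entity recovered via `rsplit` on the `0xff` separator. A tiny self-contained illustration of that encode/decode round trip; the helper names are illustrative, not Conduit's:

```rust
/// Sketch: build a `room_id ++ 0xff ++ user_id` key, the shape scanned by the
/// roomuserid_* trees in the iterators above.
fn roomuser_key(room_id: &str, user_id: &str) -> Vec<u8> {
    let mut key = room_id.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(user_id.as_bytes());
    key
}

/// Recover the user ID: the last segment after the 0xff separator, mirroring
/// `key.rsplit(|&b| b == 0xff).next()` in the code above.
fn user_from_key(key: &[u8]) -> Option<String> {
    key.rsplit(|&b| b == 0xff)
        .next()
        .and_then(|bytes| String::from_utf8(bytes.to_vec()).ok())
}

fn main() {
    let key = roomuser_key("!room:example.org", "@alice:example.org");
    assert_eq!(user_from_key(&key).as_deref(), Some("@alice:example.org"));
}
```

The separator is safe because `0xff` can never occur inside valid UTF-8, so it cannot collide with any byte of a room or user ID.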
- #[tracing::instrument(skip(self))] - pub fn rooms_joined<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator>> + 'a { - self.userroomid_joined - .scan_prefix(user_id.as_bytes().to_vec()) - .map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) - }) - } - - /// Returns an iterator over all rooms a user was invited to. - #[tracing::instrument(skip(self))] - pub fn rooms_invited<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_invitestate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn invite_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_invitestate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok(state) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn left_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_leftstate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok(state) - }) - .transpose() - } - - /// Returns an iterator over all rooms a user left. 
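In the invited and left trees above, the key identifies the user/room pair while the value is just the JSON-serialized list of stripped state events that accompanied the invite (the leave path stores an empty list), so reading the context back is a single `serde_json` round trip. A simplified sketch of that storage convention, with `serde_json::Value` standing in for ruma's `Raw<AnyStrippedStateEvent>`:

```rust
use serde_json::Value;

/// Sketch: what `userroomid_invitestate` stores as its value: the stripped
/// state events serialized as one JSON array.
fn encode_invite_state(stripped: &[Value]) -> Vec<u8> {
    serde_json::to_vec(stripped).expect("serializing JSON values always works")
}

/// Sketch: what `invite_state` / `left_state` do when reading the value back.
fn decode_invite_state(bytes: &[u8]) -> Result<Vec<Value>, serde_json::Error> {
    serde_json::from_slice(bytes)
}

fn main() -> Result<(), serde_json::Error> {
    let stripped = vec![serde_json::json!({
        "type": "m.room.member",
        "sender": "@bob:remote.example",
        "state_key": "@alice:example.org",
        "content": { "membership": "invite" }
    })];
    let bytes = encode_invite_state(&stripped);
    assert_eq!(decode_invite_state(&bytes)?, stripped);
    Ok(())
}
```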
- #[tracing::instrument(skip(self))] - pub fn rooms_left<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_leftstate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( - &'a self, - key: &[u64], - ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); - } - - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? 
- .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - }) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } - } - - Ok(None) - } - - #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db - if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - )?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_mark_sent( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - lazy_load: HashSet>, - count: u64, - ) { - self.lazy_load_waiting.lock().unwrap().insert( - ( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - count, - ), - lazy_load, - ); - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } - - /// Returns the room's version. 
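The auth-chain cache just above persists a chain of `shorteventid`s as nothing more than their big-endian `u64` encodings concatenated back to back, and decodes them again with `chunks_exact`. A standalone round trip of that byte-level convention (only the encoding, not the RAM/DB caching logic):

```rust
use std::convert::TryInto;

/// Sketch: serialize an auth chain of shorteventids the way `cache_auth_chain`
/// does, each u64 as 8 big-endian bytes, concatenated.
fn encode_chain(chain: &[u64]) -> Vec<u8> {
    chain.iter().flat_map(|id| id.to_be_bytes()).collect()
}

/// Sketch: the inverse, as done in `get_auth_chain_from_cache`.
fn decode_chain(bytes: &[u8]) -> Vec<u64> {
    bytes
        .chunks_exact(std::mem::size_of::<u64>())
        .map(|chunk| u64::from_be_bytes(chunk.try_into().expect("chunk is 8 bytes")))
        .collect()
}

fn main() {
    let chain = vec![1_u64, 42, u64::MAX];
    let bytes = encode_chain(&chain);
    assert_eq!(bytes.len(), chain.len() * 8);
    assert_eq!(decode_chain(&bytes), chain);
}
```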
- #[tracing::instrument(skip(self))] - pub fn get_room_version(&self, room_id: &RoomId) -> Result { - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - let room_version = create_event_content - .map(|create_event| create_event.room_version) - .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; - Ok(room_version) - } -} diff --git a/src/database/sending.rs b/src/service/sending.rs similarity index 100% rename from src/database/sending.rs rename to src/service/sending.rs diff --git a/src/database/transaction_ids.rs b/src/service/transaction_ids.rs similarity index 100% rename from src/database/transaction_ids.rs rename to src/service/transaction_ids.rs diff --git a/src/database/uiaa.rs b/src/service/uiaa.rs similarity index 100% rename from src/database/uiaa.rs rename to src/service/uiaa.rs diff --git a/src/database/users.rs b/src/service/users.rs similarity index 100% rename from src/database/users.rs rename to src/service/users.rs diff --git a/src/error.rs b/src/utils/error.rs similarity index 100% rename from src/error.rs rename to src/utils/error.rs diff --git a/src/utils.rs b/src/utils/utils.rs similarity index 100% rename from src/utils.rs rename to src/utils/utils.rs From d0cbe46ff079998cf419a7f657da56f47432ffba Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 17:07:33 +0200 Subject: [PATCH 308/445] refactor: prepare splitting src/service/rooms/state.rs to src/service/rooms/state/mod.rs --- src/service/rooms/{state.rs => state/mod.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/rooms/{state.rs => state/mod.rs} (100%) diff --git a/src/service/rooms/state.rs b/src/service/rooms/state/mod.rs similarity index 100% rename from src/service/rooms/state.rs rename to src/service/rooms/state/mod.rs From 9efd9f06c6c274ad56bc70c9e176e7f59f74272c Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 17:07:33 +0200 Subject: [PATCH 309/445] refactor: prepare splitting src/service/rooms/state.rs to src/service/rooms/state/data.rs --- src/service/rooms/{state.rs => state/data.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/rooms/{state.rs => state/data.rs} (100%) diff --git a/src/service/rooms/state.rs b/src/service/rooms/state/data.rs similarity index 100% rename from src/service/rooms/state.rs rename to src/service/rooms/state/data.rs From 7d2b22f58de3059e5244bf0cc657d7703ff6f245 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 17:08:33 +0200 Subject: [PATCH 310/445] refactor: prepare splitting src/service/rooms/state.rs to src/service/rooms/state_accessor/mod.rs --- src/service/rooms/{state.rs => state_accessor/mod.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/rooms/{state.rs => state_accessor/mod.rs} (100%) diff --git a/src/service/rooms/state.rs b/src/service/rooms/state_accessor/mod.rs similarity index 100% rename from src/service/rooms/state.rs rename to src/service/rooms/state_accessor/mod.rs From e1d8c03e474883c6ffce09a1713241f5d5cb828f Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 17:09:15 +0200 Subject: [PATCH 311/445] refactor: prepare splitting src/api/server_server.rs to 
src/service/rooms/event_handler/mod.rs --- src/{api/server_server.rs => service/rooms/event_handler/mod.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{api/server_server.rs => service/rooms/event_handler/mod.rs} (100%) diff --git a/src/api/server_server.rs b/src/service/rooms/event_handler/mod.rs similarity index 100% rename from src/api/server_server.rs rename to src/service/rooms/event_handler/mod.rs From 57c92f80445ce908a47f6cc9745f7048b1f020cb Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 17:09:22 +0200 Subject: [PATCH 312/445] refactor: restore src/api/server_server.rs --- src/api/server_server.rs | 3644 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 3644 insertions(+) create mode 100644 src/api/server_server.rs diff --git a/src/api/server_server.rs b/src/api/server_server.rs new file mode 100644 index 00000000..6fa83e4c --- /dev/null +++ b/src/api/server_server.rs @@ -0,0 +1,3644 @@ +use crate::{ + client_server::{self, claim_keys_helper, get_keys_helper}, + database::{rooms::CompressedStateEvent, DatabaseGuard}, + pdu::EventHash, + utils, Database, Error, PduEvent, Result, Ruma, +}; +use axum::{response::IntoResponse, Json}; +use futures_util::{stream::FuturesUnordered, StreamExt}; +use get_profile_information::v1::ProfileField; +use http::header::{HeaderValue, AUTHORIZATION}; +use regex::Regex; +use ruma::{ + api::{ + client::error::{Error as RumaError, ErrorKind}, + federation::{ + authorization::get_event_authorization, + device::get_devices::{self, v1::UserDevice}, + directory::{get_public_rooms, get_public_rooms_filtered}, + discovery::{ + get_remote_server_keys, get_remote_server_keys_batch, + get_remote_server_keys_batch::v2::QueryCriteria, get_server_keys, + get_server_version, ServerSigningKeys, VerifyKey, + }, + event::{get_event, get_missing_events, get_room_state, get_room_state_ids}, + keys::{claim_keys, get_keys}, + membership::{ + create_invite, + create_join_event::{self, RoomState}, + prepare_join_event, + }, + query::{get_profile_information, get_room_information}, + transactions::{ + edu::{DeviceListUpdateContent, DirectDeviceContent, Edu, SigningKeyUpdateContent}, + send_transaction_message, + }, + }, + EndpointError, IncomingResponse, MatrixVersion, OutgoingRequest, OutgoingResponse, + SendAccessToken, + }, + directory::{IncomingFilter, IncomingRoomNetwork}, + events::{ + receipt::{ReceiptEvent, ReceiptEventContent}, + room::{ + create::RoomCreateEventContent, + join_rules::{JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, RoomMemberEventContent}, + server_acl::RoomServerAclEventContent, + }, + RoomEventType, StateEventType, + }, + int, + receipt::ReceiptType, + serde::{Base64, JsonObject, Raw}, + signatures::{CanonicalJsonObject, CanonicalJsonValue}, + state_res::{self, RoomVersion, StateMap}, + to_device::DeviceIdOrAllDevices, + uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, ServerName, + ServerSigningKeyId, +}; +use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; +use std::{ + collections::{btree_map, hash_map, BTreeMap, BTreeSet, HashMap, HashSet}, + fmt::Debug, + future::Future, + mem, + net::{IpAddr, SocketAddr}, + ops::Deref, + pin::Pin, + sync::{Arc, RwLock, RwLockWriteGuard}, + time::{Duration, Instant, SystemTime}, +}; +use tokio::sync::{MutexGuard, Semaphore}; +use tracing::{debug, error, info, trace, warn}; + +/// Wraps either an literal IP address plus port, or a hostname plus complement +/// (colon-plus-port if it was specified). 
+/// +/// Note: A `FedDest::Named` might contain an IP address in string form if there +/// was no port specified to construct a SocketAddr with. +/// +/// # Examples: +/// ```rust +/// # use conduit::server_server::FedDest; +/// # fn main() -> Result<(), std::net::AddrParseError> { +/// FedDest::Literal("198.51.100.3:8448".parse()?); +/// FedDest::Literal("[2001:db8::4:5]:443".parse()?); +/// FedDest::Named("matrix.example.org".to_owned(), "".to_owned()); +/// FedDest::Named("matrix.example.org".to_owned(), ":8448".to_owned()); +/// FedDest::Named("198.51.100.5".to_owned(), "".to_owned()); +/// # Ok(()) +/// # } +/// ``` +#[derive(Clone, Debug, PartialEq)] +pub enum FedDest { + Literal(SocketAddr), + Named(String, String), +} + +impl FedDest { + fn into_https_string(self) -> String { + match self { + Self::Literal(addr) => format!("https://{}", addr), + Self::Named(host, port) => format!("https://{}{}", host, port), + } + } + + fn into_uri_string(self) -> String { + match self { + Self::Literal(addr) => addr.to_string(), + Self::Named(host, ref port) => host + port, + } + } + + fn hostname(&self) -> String { + match &self { + Self::Literal(addr) => addr.ip().to_string(), + Self::Named(host, _) => host.clone(), + } + } + + fn port(&self) -> Option { + match &self { + Self::Literal(addr) => Some(addr.port()), + Self::Named(_, port) => port[1..].parse().ok(), + } + } +} + +#[tracing::instrument(skip(globals, request))] +pub(crate) async fn send_request( + globals: &crate::database::globals::Globals, + destination: &ServerName, + request: T, +) -> Result +where + T: Debug, +{ + if !globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let mut write_destination_to_cache = false; + + let cached_result = globals + .actual_destination_cache + .read() + .unwrap() + .get(destination) + .cloned(); + + let (actual_destination, host) = if let Some(result) = cached_result { + result + } else { + write_destination_to_cache = true; + + let result = find_actual_destination(globals, destination).await; + + (result.0, result.1.into_uri_string()) + }; + + let actual_destination_str = actual_destination.clone().into_https_string(); + + let mut http_request = request + .try_into_http_request::>( + &actual_destination_str, + SendAccessToken::IfRequired(""), + &[MatrixVersion::V1_0], + ) + .map_err(|e| { + warn!( + "Failed to find destination {}: {}", + actual_destination_str, e + ); + Error::BadServerResponse("Invalid destination") + })?; + + let mut request_map = serde_json::Map::new(); + + if !http_request.body().is_empty() { + request_map.insert( + "content".to_owned(), + serde_json::from_slice(http_request.body()) + .expect("body is valid json, we just created it"), + ); + }; + + request_map.insert("method".to_owned(), T::METADATA.method.to_string().into()); + request_map.insert( + "uri".to_owned(), + http_request + .uri() + .path_and_query() + .expect("all requests have a path") + .to_string() + .into(), + ); + request_map.insert("origin".to_owned(), globals.server_name().as_str().into()); + request_map.insert("destination".to_owned(), destination.as_str().into()); + + let mut request_json = + serde_json::from_value(request_map.into()).expect("valid JSON is valid BTreeMap"); + + ruma::signatures::sign_json( + globals.server_name().as_str(), + globals.keypair(), + &mut request_json, + ) + .expect("our request json is what ruma expects"); + + let request_json: serde_json::Map = + serde_json::from_slice(&serde_json::to_vec(&request_json).unwrap()).unwrap(); + + let 
signatures = request_json["signatures"] + .as_object() + .unwrap() + .values() + .map(|v| { + v.as_object() + .unwrap() + .iter() + .map(|(k, v)| (k, v.as_str().unwrap())) + }); + + for signature_server in signatures { + for s in signature_server { + http_request.headers_mut().insert( + AUTHORIZATION, + HeaderValue::from_str(&format!( + "X-Matrix origin={},key=\"{}\",sig=\"{}\"", + globals.server_name(), + s.0, + s.1 + )) + .unwrap(), + ); + } + } + + let reqwest_request = reqwest::Request::try_from(http_request) + .expect("all http requests are valid reqwest requests"); + + let url = reqwest_request.url().clone(); + + let response = globals.federation_client().execute(reqwest_request).await; + + match response { + Ok(mut response) => { + // reqwest::Response -> http::Response conversion + let status = response.status(); + let mut http_response_builder = http::Response::builder() + .status(status) + .version(response.version()); + mem::swap( + response.headers_mut(), + http_response_builder + .headers_mut() + .expect("http::response::Builder is usable"), + ); + + let body = response.bytes().await.unwrap_or_else(|e| { + warn!("server error {}", e); + Vec::new().into() + }); // TODO: handle timeout + + if status != 200 { + warn!( + "{} {}: {}", + url, + status, + String::from_utf8_lossy(&body) + .lines() + .collect::>() + .join(" ") + ); + } + + let http_response = http_response_builder + .body(body) + .expect("reqwest body is valid http body"); + + if status == 200 { + let response = T::IncomingResponse::try_from_http_response(http_response); + if response.is_ok() && write_destination_to_cache { + globals.actual_destination_cache.write().unwrap().insert( + Box::::from(destination), + (actual_destination, host), + ); + } + + response.map_err(|e| { + warn!( + "Invalid 200 response from {} on: {} {}", + &destination, url, e + ); + Error::BadServerResponse("Server returned bad 200 response.") + }) + } else { + Err(Error::FederationError( + destination.to_owned(), + RumaError::try_from_http_response(http_response).map_err(|e| { + warn!( + "Invalid {} response from {} on: {} {}", + status, &destination, url, e + ); + Error::BadServerResponse("Server returned bad error response.") + })?, + )) + } + } + Err(e) => Err(e.into()), + } +} + +fn get_ip_with_port(destination_str: &str) -> Option { + if let Ok(destination) = destination_str.parse::() { + Some(FedDest::Literal(destination)) + } else if let Ok(ip_addr) = destination_str.parse::() { + Some(FedDest::Literal(SocketAddr::new(ip_addr, 8448))) + } else { + None + } +} + +fn add_port_to_hostname(destination_str: &str) -> FedDest { + let (host, port) = match destination_str.find(':') { + None => (destination_str, ":8448"), + Some(pos) => destination_str.split_at(pos), + }; + FedDest::Named(host.to_owned(), port.to_owned()) +} + +/// Returns: actual_destination, host header +/// Implemented according to the specification at https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names +/// Numbers in comments below refer to bullet points in linked section of specification +#[tracing::instrument(skip(globals))] +async fn find_actual_destination( + globals: &crate::database::globals::Globals, + destination: &'_ ServerName, +) -> (FedDest, FedDest) { + let destination_str = destination.as_str().to_owned(); + let mut hostname = destination_str.clone(); + let actual_destination = match get_ip_with_port(&destination_str) { + Some(host_port) => { + // 1: IP literal with provided or default port + host_port + } + None => { + if let Some(pos) = 
destination_str.find(':') { + // 2: Hostname with included port + let (host, port) = destination_str.split_at(pos); + FedDest::Named(host.to_owned(), port.to_owned()) + } else { + match request_well_known(globals, destination.as_str()).await { + // 3: A .well-known file is available + Some(delegated_hostname) => { + hostname = add_port_to_hostname(&delegated_hostname).into_uri_string(); + match get_ip_with_port(&delegated_hostname) { + Some(host_and_port) => host_and_port, // 3.1: IP literal in .well-known file + None => { + if let Some(pos) = delegated_hostname.find(':') { + // 3.2: Hostname with port in .well-known file + let (host, port) = delegated_hostname.split_at(pos); + FedDest::Named(host.to_owned(), port.to_owned()) + } else { + // Delegated hostname has no port in this branch + if let Some(hostname_override) = + query_srv_record(globals, &delegated_hostname).await + { + // 3.3: SRV lookup successful + let force_port = hostname_override.port(); + + if let Ok(override_ip) = globals + .dns_resolver() + .lookup_ip(hostname_override.hostname()) + .await + { + globals.tls_name_override.write().unwrap().insert( + delegated_hostname.clone(), + ( + override_ip.iter().collect(), + force_port.unwrap_or(8448), + ), + ); + } else { + warn!("Using SRV record, but could not resolve to IP"); + } + + if let Some(port) = force_port { + FedDest::Named(delegated_hostname, format!(":{}", port)) + } else { + add_port_to_hostname(&delegated_hostname) + } + } else { + // 3.4: No SRV records, just use the hostname from .well-known + add_port_to_hostname(&delegated_hostname) + } + } + } + } + } + // 4: No .well-known or an error occured + None => { + match query_srv_record(globals, &destination_str).await { + // 4: SRV record found + Some(hostname_override) => { + let force_port = hostname_override.port(); + + if let Ok(override_ip) = globals + .dns_resolver() + .lookup_ip(hostname_override.hostname()) + .await + { + globals.tls_name_override.write().unwrap().insert( + hostname.clone(), + (override_ip.iter().collect(), force_port.unwrap_or(8448)), + ); + } else { + warn!("Using SRV record, but could not resolve to IP"); + } + + if let Some(port) = force_port { + FedDest::Named(hostname.clone(), format!(":{}", port)) + } else { + add_port_to_hostname(&hostname) + } + } + // 5: No SRV record found + None => add_port_to_hostname(&destination_str), + } + } + } + } + } + }; + + // Can't use get_ip_with_port here because we don't want to add a port + // to an IP address if it wasn't specified + let hostname = if let Ok(addr) = hostname.parse::() { + FedDest::Literal(addr) + } else if let Ok(addr) = hostname.parse::() { + FedDest::Named(addr.to_string(), ":8448".to_owned()) + } else if let Some(pos) = hostname.find(':') { + let (host, port) = hostname.split_at(pos); + FedDest::Named(host.to_owned(), port.to_owned()) + } else { + FedDest::Named(hostname, ":8448".to_owned()) + }; + (actual_destination, hostname) +} + +#[tracing::instrument(skip(globals))] +async fn query_srv_record( + globals: &crate::database::globals::Globals, + hostname: &'_ str, +) -> Option { + if let Ok(Some(host_port)) = globals + .dns_resolver() + .srv_lookup(format!("_matrix._tcp.{}", hostname)) + .await + .map(|srv| { + srv.iter().next().map(|result| { + FedDest::Named( + result.target().to_string().trim_end_matches('.').to_owned(), + format!(":{}", result.port()), + ) + }) + }) + { + Some(host_port) + } else { + None + } +} + +#[tracing::instrument(skip(globals))] +async fn request_well_known( + globals: 
&crate::database::globals::Globals, + destination: &str, +) -> Option { + let body: serde_json::Value = serde_json::from_str( + &globals + .default_client() + .get(&format!( + "https://{}/.well-known/matrix/server", + destination + )) + .send() + .await + .ok()? + .text() + .await + .ok()?, + ) + .ok()?; + Some(body.get("m.server")?.as_str()?.to_owned()) +} + +/// # `GET /_matrix/federation/v1/version` +/// +/// Get version information on this server. +pub async fn get_server_version_route( + db: DatabaseGuard, + _body: Ruma, +) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + Ok(get_server_version::v1::Response { + server: Some(get_server_version::v1::Server { + name: Some("Conduit".to_owned()), + version: Some(env!("CARGO_PKG_VERSION").to_owned()), + }), + }) +} + +/// # `GET /_matrix/key/v2/server` +/// +/// Gets the public signing keys of this server. +/// +/// - Matrix does not support invalidating public keys, so the key returned by this will be valid +/// forever. +// Response type for this endpoint is Json because we need to calculate a signature for the response +pub async fn get_server_keys_route(db: DatabaseGuard) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let mut verify_keys: BTreeMap, VerifyKey> = BTreeMap::new(); + verify_keys.insert( + format!("ed25519:{}", db.globals.keypair().version()) + .try_into() + .expect("found invalid server signing keys in DB"), + VerifyKey { + key: Base64::new(db.globals.keypair().public_key().to_vec()), + }, + ); + let mut response = serde_json::from_slice( + get_server_keys::v2::Response { + server_key: Raw::new(&ServerSigningKeys { + server_name: db.globals.server_name().to_owned(), + verify_keys, + old_verify_keys: BTreeMap::new(), + signatures: BTreeMap::new(), + valid_until_ts: MilliSecondsSinceUnixEpoch::from_system_time( + SystemTime::now() + Duration::from_secs(86400 * 7), + ) + .expect("time is valid"), + }) + .expect("static conversion, no errors"), + } + .try_into_http_response::>() + .unwrap() + .body(), + ) + .unwrap(); + + ruma::signatures::sign_json( + db.globals.server_name().as_str(), + db.globals.keypair(), + &mut response, + ) + .unwrap(); + + Ok(Json(response)) +} + +/// # `GET /_matrix/key/v2/server/{keyId}` +/// +/// Gets the public signing keys of this server. +/// +/// - Matrix does not support invalidating public keys, so the key returned by this will be valid +/// forever. +pub async fn get_server_keys_deprecated_route(db: DatabaseGuard) -> impl IntoResponse { + get_server_keys_route(db).await +} + +/// # `POST /_matrix/federation/v1/publicRooms` +/// +/// Lists the public rooms on this server. +pub async fn get_public_rooms_filtered_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let response = client_server::get_public_rooms_filtered_helper( + &db, + None, + body.limit, + body.since.as_deref(), + &body.filter, + &body.room_network, + ) + .await?; + + Ok(get_public_rooms_filtered::v1::Response { + chunk: response.chunk, + prev_batch: response.prev_batch, + next_batch: response.next_batch, + total_room_count_estimate: response.total_room_count_estimate, + }) +} + +/// # `GET /_matrix/federation/v1/publicRooms` +/// +/// Lists the public rooms on this server. 
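+///
+/// - Unfiltered variant of the `POST /_matrix/federation/v1/publicRooms` endpoint
+///   above; both delegate to `client_server::get_public_rooms_filtered_helper`,
+///   this one with the default (empty) filter and the plain Matrix room network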
+pub async fn get_public_rooms_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let response = client_server::get_public_rooms_filtered_helper( + &db, + None, + body.limit, + body.since.as_deref(), + &IncomingFilter::default(), + &IncomingRoomNetwork::Matrix, + ) + .await?; + + Ok(get_public_rooms::v1::Response { + chunk: response.chunk, + prev_batch: response.prev_batch, + next_batch: response.next_batch, + total_room_count_estimate: response.total_room_count_estimate, + }) +} + +/// # `PUT /_matrix/federation/v1/send/{txnId}` +/// +/// Push EDUs and PDUs to this server. +pub async fn send_transaction_message_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + let mut resolved_map = BTreeMap::new(); + + let pub_key_map = RwLock::new(BTreeMap::new()); + + // This is all the auth_events that have been recursively fetched so they don't have to be + // deserialized over and over again. + // TODO: make this persist across requests but not in a DB Tree (in globals?) + // TODO: This could potentially also be some sort of trie (suffix tree) like structure so + // that once an auth event is known it would know (using indexes maybe) all of the auth + // events that it references. + // let mut auth_cache = EventMap::new(); + + for pdu in &body.pdus { + // We do not add the event_id field to the pdu here because of signature and hashes checks + let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu, &db) { + Ok(t) => t, + Err(_) => { + // Event could not be converted to canonical json + continue; + } + }; + + // 0. Check the server is in the room + let room_id = match value + .get("room_id") + .and_then(|id| RoomId::parse(id.as_str()?).ok()) + { + Some(id) => id, + None => { + // Event is invalid + resolved_map.insert(event_id, Err("Event needs a valid RoomId.".to_owned())); + continue; + } + }; + + acl_check(&sender_servername, &room_id, &db)?; + + let mutex = Arc::clone( + db.globals + .roomid_mutex_federation + .write() + .unwrap() + .entry(room_id.to_owned()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; + let start_time = Instant::now(); + resolved_map.insert( + event_id.clone(), + handle_incoming_pdu( + &sender_servername, + &event_id, + &room_id, + value, + true, + &db, + &pub_key_map, + ) + .await + .map(|_| ()), + ); + drop(mutex_lock); + + let elapsed = start_time.elapsed(); + warn!( + "Handling transaction of event {} took {}m{}s", + event_id, + elapsed.as_secs() / 60, + elapsed.as_secs() % 60 + ); + } + + for pdu in &resolved_map { + if let Err(e) = pdu.1 { + if e != "Room is unknown to this server." 
{ + warn!("Incoming PDU failed {:?}", pdu); + } + } + } + + for edu in body + .edus + .iter() + .filter_map(|edu| serde_json::from_str::(edu.json().get()).ok()) + { + match edu { + Edu::Presence(_) => {} + Edu::Receipt(receipt) => { + for (room_id, room_updates) in receipt.receipts { + for (user_id, user_updates) in room_updates.read { + if let Some((event_id, _)) = user_updates + .event_ids + .iter() + .filter_map(|id| { + db.rooms.get_pdu_count(id).ok().flatten().map(|r| (id, r)) + }) + .max_by_key(|(_, count)| *count) + { + let mut user_receipts = BTreeMap::new(); + user_receipts.insert(user_id.clone(), user_updates.data); + + let mut receipts = BTreeMap::new(); + receipts.insert(ReceiptType::Read, user_receipts); + + let mut receipt_content = BTreeMap::new(); + receipt_content.insert(event_id.to_owned(), receipts); + + let event = ReceiptEvent { + content: ReceiptEventContent(receipt_content), + room_id: room_id.clone(), + }; + db.rooms.edus.readreceipt_update( + &user_id, + &room_id, + event, + &db.globals, + )?; + } else { + // TODO fetch missing events + info!("No known event ids in read receipt: {:?}", user_updates); + } + } + } + } + Edu::Typing(typing) => { + if db.rooms.is_joined(&typing.user_id, &typing.room_id)? { + if typing.typing { + db.rooms.edus.typing_add( + &typing.user_id, + &typing.room_id, + 3000 + utils::millis_since_unix_epoch(), + &db.globals, + )?; + } else { + db.rooms.edus.typing_remove( + &typing.user_id, + &typing.room_id, + &db.globals, + )?; + } + } + } + Edu::DeviceListUpdate(DeviceListUpdateContent { user_id, .. }) => { + db.users + .mark_device_key_update(&user_id, &db.rooms, &db.globals)?; + } + Edu::DirectToDevice(DirectDeviceContent { + sender, + ev_type, + message_id, + messages, + }) => { + // Check if this is a new transaction id + if db + .transaction_ids + .existing_txnid(&sender, None, &message_id)? + .is_some() + { + continue; + } + + for (target_user_id, map) in &messages { + for (target_device_id_maybe, event) in map { + match target_device_id_maybe { + DeviceIdOrAllDevices::DeviceId(target_device_id) => { + db.users.add_to_device_event( + &sender, + target_user_id, + target_device_id, + &ev_type.to_string(), + event.deserialize_as().map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Event is invalid", + ) + })?, + &db.globals, + )? + } + + DeviceIdOrAllDevices::AllDevices => { + for target_device_id in db.users.all_device_ids(target_user_id) { + db.users.add_to_device_event( + &sender, + target_user_id, + &target_device_id?, + &ev_type.to_string(), + event.deserialize_as().map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Event is invalid", + ) + })?, + &db.globals, + )?; + } + } + } + } + } + + // Save transaction id with empty data + db.transaction_ids + .add_txnid(&sender, None, &message_id, &[])?; + } + Edu::SigningKeyUpdate(SigningKeyUpdateContent { + user_id, + master_key, + self_signing_key, + }) => { + if user_id.server_name() != sender_servername { + continue; + } + if let Some(master_key) = master_key { + db.users.add_cross_signing_keys( + &user_id, + &master_key, + &self_signing_key, + &None, + &db.rooms, + &db.globals, + )?; + } + } + Edu::_Custom(_) => {} + } + } + + db.flush()?; + + Ok(send_transaction_message::v1::Response { pdus: resolved_map }) +} + +/// An async function that can recursively call itself. +type AsyncRecursiveType<'a, T> = Pin + 'a + Send>>; + +/// When receiving an event one needs to: +/// 0. Check the server is in the room +/// 1. Skip the PDU if we already know about it +/// 2. 
Check signatures, otherwise drop +/// 3. Check content hash, redact if doesn't match +/// 4. Fetch any missing auth events doing all checks listed here starting at 1. These are not +/// timeline events +/// 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are +/// also rejected "due to auth events" +/// 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events +/// 7. Persist this event as an outlier +/// 8. If not timeline event: stop +/// 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline +/// events +/// 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities +/// doing all the checks in this list starting at 1. These are not timeline events +/// 11. Check the auth of the event passes based on the state of the event +/// 12. Ensure that the state is derived from the previous current state (i.e. we calculated by +/// doing state res where one of the inputs was a previously trusted set of state, don't just +/// trust a set of state we got from a remote) +/// 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" +/// it +/// 14. Use state resolution to find new room state +// We use some AsyncRecursiveType hacks here so we can call this async funtion recursively +#[tracing::instrument(skip(value, is_timeline_event, db, pub_key_map))] +pub(crate) async fn handle_incoming_pdu<'a>( + origin: &'a ServerName, + event_id: &'a EventId, + room_id: &'a RoomId, + value: BTreeMap, + is_timeline_event: bool, + db: &'a Database, + pub_key_map: &'a RwLock>>, +) -> Result>, String> { + match db.rooms.exists(room_id) { + Ok(true) => {} + _ => { + return Err("Room is unknown to this server.".to_owned()); + } + } + + match db.rooms.is_disabled(room_id) { + Ok(false) => {} + _ => { + return Err("Federation of this room is currently disabled on this server.".to_owned()); + } + } + + // 1. Skip the PDU if we already have it as a timeline event + if let Ok(Some(pdu_id)) = db.rooms.get_pdu_id(event_id) { + return Ok(Some(pdu_id.to_vec())); + } + + let create_event = db + .rooms + .room_state_get(room_id, &StateEventType::RoomCreate, "") + .map_err(|_| "Failed to ask database for event.".to_owned())? + .ok_or_else(|| "Failed to find create event in db.".to_owned())?; + + let first_pdu_in_room = db + .rooms + .first_pdu_in_room(room_id) + .map_err(|_| "Error loading first room event.".to_owned())? + .expect("Room exists"); + + let (incoming_pdu, val) = handle_outlier_pdu( + origin, + &create_event, + event_id, + room_id, + value, + db, + pub_key_map, + ) + .await?; + + // 8. if not timeline event: stop + if !is_timeline_event { + return Ok(None); + } + + if incoming_pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { + return Ok(None); + } + + // 9. Fetch any missing prev events doing all checks listed here starting at 1. 
These are timeline events + let mut graph: HashMap, _> = HashMap::new(); + let mut eventid_info = HashMap::new(); + let mut todo_outlier_stack: Vec> = incoming_pdu.prev_events.clone(); + + let mut amount = 0; + + while let Some(prev_event_id) = todo_outlier_stack.pop() { + if let Some((pdu, json_opt)) = fetch_and_handle_outliers( + db, + origin, + &[prev_event_id.clone()], + &create_event, + room_id, + pub_key_map, + ) + .await + .pop() + { + if amount > 100 { + // Max limit reached + warn!("Max prev event limit reached!"); + graph.insert(prev_event_id.clone(), HashSet::new()); + continue; + } + + if let Some(json) = + json_opt.or_else(|| db.rooms.get_outlier_pdu_json(&prev_event_id).ok().flatten()) + { + if pdu.origin_server_ts > first_pdu_in_room.origin_server_ts { + amount += 1; + for prev_prev in &pdu.prev_events { + if !graph.contains_key(prev_prev) { + todo_outlier_stack.push(dbg!(prev_prev.clone())); + } + } + + graph.insert( + prev_event_id.clone(), + pdu.prev_events.iter().cloned().collect(), + ); + } else { + // Time based check failed + graph.insert(prev_event_id.clone(), HashSet::new()); + } + + eventid_info.insert(prev_event_id.clone(), (pdu, json)); + } else { + // Get json failed + graph.insert(prev_event_id.clone(), HashSet::new()); + } + } else { + // Fetch and handle failed + graph.insert(prev_event_id.clone(), HashSet::new()); + } + } + + let sorted = state_res::lexicographical_topological_sort(dbg!(&graph), |event_id| { + // This return value is the key used for sorting events, + // events are then sorted by power level, time, + // and lexically by event_id. + println!("{}", event_id); + Ok(( + int!(0), + MilliSecondsSinceUnixEpoch( + eventid_info + .get(event_id) + .map_or_else(|| uint!(0), |info| info.0.origin_server_ts), + ), + )) + }) + .map_err(|_| "Error sorting prev events".to_owned())?; + + let mut errors = 0; + for prev_id in dbg!(sorted) { + match db.rooms.is_disabled(room_id) { + Ok(false) => {} + _ => { + return Err( + "Federation of this room is currently disabled on this server.".to_owned(), + ); + } + } + + if let Some((time, tries)) = db + .globals + .bad_event_ratelimiter + .read() + .unwrap() + .get(&*prev_id) + { + // Exponential backoff + let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + info!("Backing off from {}", prev_id); + continue; + } + } + + if errors >= 5 { + break; + } + if let Some((pdu, json)) = eventid_info.remove(&*prev_id) { + if pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { + continue; + } + + let start_time = Instant::now(); + db.globals + .roomid_federationhandletime + .write() + .unwrap() + .insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time)); + if let Err(e) = upgrade_outlier_to_timeline_pdu( + pdu, + json, + &create_event, + origin, + db, + room_id, + pub_key_map, + ) + .await + { + errors += 1; + warn!("Prev event {} failed: {}", prev_id, e); + match db + .globals + .bad_event_ratelimiter + .write() + .unwrap() + .entry((*prev_id).to_owned()) + { + hash_map::Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + } + hash_map::Entry::Occupied(mut e) => { + *e.get_mut() = (Instant::now(), e.get().1 + 1) + } + } + } + let elapsed = start_time.elapsed(); + db.globals + .roomid_federationhandletime + .write() + .unwrap() + .remove(&room_id.to_owned()); + warn!( + "Handling prev event {} took {}m{}s", 
+ prev_id, + elapsed.as_secs() / 60, + elapsed.as_secs() % 60 + ); + } + } + + let start_time = Instant::now(); + db.globals + .roomid_federationhandletime + .write() + .unwrap() + .insert(room_id.to_owned(), (event_id.to_owned(), start_time)); + let r = upgrade_outlier_to_timeline_pdu( + incoming_pdu, + val, + &create_event, + origin, + db, + room_id, + pub_key_map, + ) + .await; + db.globals + .roomid_federationhandletime + .write() + .unwrap() + .remove(&room_id.to_owned()); + + r +} + +#[tracing::instrument(skip(create_event, value, db, pub_key_map))] +fn handle_outlier_pdu<'a>( + origin: &'a ServerName, + create_event: &'a PduEvent, + event_id: &'a EventId, + room_id: &'a RoomId, + value: BTreeMap, + db: &'a Database, + pub_key_map: &'a RwLock>>, +) -> AsyncRecursiveType<'a, Result<(Arc, BTreeMap), String>> { + Box::pin(async move { + // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json + + // We go through all the signatures we see on the value and fetch the corresponding signing + // keys + fetch_required_signing_keys(&value, pub_key_map, db) + .await + .map_err(|e| e.to_string())?; + + // 2. Check signatures, otherwise drop + // 3. check content hash, redact if doesn't match + + let create_event_content: RoomCreateEventContent = + serde_json::from_str(create_event.content.get()).map_err(|e| { + warn!("Invalid create event: {}", e); + "Invalid create event in db.".to_owned() + })?; + + let room_version_id = &create_event_content.room_version; + let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); + + let mut val = match ruma::signatures::verify_event( + &*pub_key_map.read().map_err(|_| "RwLock is poisoned.")?, + &value, + room_version_id, + ) { + Err(e) => { + // Drop + warn!("Dropping bad event {}: {}", event_id, e); + return Err("Signature verification failed".to_owned()); + } + Ok(ruma::signatures::Verified::Signatures) => { + // Redact + warn!("Calculated hash does not match: {}", event_id); + match ruma::signatures::redact(&value, room_version_id) { + Ok(obj) => obj, + Err(_) => return Err("Redaction failed".to_owned()), + } + } + Ok(ruma::signatures::Verified::All) => value, + }; + + // Now that we have checked the signature and hashes we can add the eventID and convert + // to our PduEvent type + val.insert( + "event_id".to_owned(), + CanonicalJsonValue::String(event_id.as_str().to_owned()), + ); + let incoming_pdu = serde_json::from_value::( + serde_json::to_value(&val).expect("CanonicalJsonObj is a valid JsonValue"), + ) + .map_err(|_| "Event is not a valid PDU.".to_owned())?; + + // 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events + // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" + // EDIT: Step 5 is not applied anymore because it failed too often + warn!("Fetching auth events for {}", incoming_pdu.event_id); + fetch_and_handle_outliers( + db, + origin, + &incoming_pdu + .auth_events + .iter() + .map(|x| Arc::from(&**x)) + .collect::>(), + create_event, + room_id, + pub_key_map, + ) + .await; + + // 6. 
Reject "due to auth events" if the event doesn't pass auth based on the auth events + info!( + "Auth check for {} based on auth events", + incoming_pdu.event_id + ); + + // Build map of auth events + let mut auth_events = HashMap::new(); + for id in &incoming_pdu.auth_events { + let auth_event = match db.rooms.get_pdu(id).map_err(|e| e.to_string())? { + Some(e) => e, + None => { + warn!("Could not find auth event {}", id); + continue; + } + }; + + match auth_events.entry(( + auth_event.kind.to_string().into(), + auth_event + .state_key + .clone() + .expect("all auth events have state keys"), + )) { + hash_map::Entry::Vacant(v) => { + v.insert(auth_event); + } + hash_map::Entry::Occupied(_) => { + return Err( + "Auth event's type and state_key combination exists multiple times." + .to_owned(), + ) + } + } + } + + // The original create event must be in the auth events + if auth_events + .get(&(StateEventType::RoomCreate, "".to_owned())) + .map(|a| a.as_ref()) + != Some(create_event) + { + return Err("Incoming event refers to wrong create event.".to_owned()); + } + + if !state_res::event_auth::auth_check( + &room_version, + &incoming_pdu, + None::, // TODO: third party invite + |k, s| auth_events.get(&(k.to_string().into(), s.to_owned())), + ) + .map_err(|_e| "Auth check failed".to_owned())? + { + return Err("Event has failed auth check with auth events.".to_owned()); + } + + info!("Validation successful."); + + // 7. Persist the event as an outlier. + db.rooms + .add_pdu_outlier(&incoming_pdu.event_id, &val) + .map_err(|_| "Failed to add pdu as outlier.".to_owned())?; + info!("Added pdu as outlier."); + + Ok((Arc::new(incoming_pdu), val)) + }) +} + +#[tracing::instrument(skip(incoming_pdu, val, create_event, db, pub_key_map))] +async fn upgrade_outlier_to_timeline_pdu( + incoming_pdu: Arc, + val: BTreeMap, + create_event: &PduEvent, + origin: &ServerName, + db: &Database, + room_id: &RoomId, + pub_key_map: &RwLock>>, +) -> Result>, String> { + if let Ok(Some(pduid)) = db.rooms.get_pdu_id(&incoming_pdu.event_id) { + return Ok(Some(pduid)); + } + + if db + .rooms + .is_event_soft_failed(&incoming_pdu.event_id) + .map_err(|_| "Failed to ask db for soft fail".to_owned())? + { + return Err("Event has been soft failed".into()); + } + + info!("Upgrading {} to timeline pdu", incoming_pdu.event_id); + + let create_event_content: RoomCreateEventContent = + serde_json::from_str(create_event.content.get()).map_err(|e| { + warn!("Invalid create event: {}", e); + "Invalid create event in db.".to_owned() + })?; + + let room_version_id = &create_event_content.room_version; + let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); + + // 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities + // doing all the checks in this list starting at 1. These are not timeline events. 
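+ //
+ // In practice the state at the incoming event is derived in one of three ways,
+ // tried in order below:
+ //   a. exactly one prev_event with a known state snapshot: reuse that snapshot,
+ //      adding the prev_event itself if it was a state event,
+ //   b. several prev_events whose snapshots are all known: run state resolution
+ //      over those forks locally,
+ //   c. otherwise: ask the origin server via /state_ids and validate every
+ //      returned event before trusting it.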
+ + // TODO: if we know the prev_events of the incoming event we can avoid the request and build + // the state from a known point and resolve if > 1 prev_event + + info!("Requesting state at event"); + let mut state_at_incoming_event = None; + + if incoming_pdu.prev_events.len() == 1 { + let prev_event = &*incoming_pdu.prev_events[0]; + let prev_event_sstatehash = db + .rooms + .pdu_shortstatehash(prev_event) + .map_err(|_| "Failed talking to db".to_owned())?; + + let state = if let Some(shortstatehash) = prev_event_sstatehash { + Some(db.rooms.state_full_ids(shortstatehash).await) + } else { + None + }; + + if let Some(Ok(mut state)) = state { + info!("Using cached state"); + let prev_pdu = + db.rooms.get_pdu(prev_event).ok().flatten().ok_or_else(|| { + "Could not find prev event, but we know the state.".to_owned() + })?; + + if let Some(state_key) = &prev_pdu.state_key { + let shortstatekey = db + .rooms + .get_or_create_shortstatekey( + &prev_pdu.kind.to_string().into(), + state_key, + &db.globals, + ) + .map_err(|_| "Failed to create shortstatekey.".to_owned())?; + + state.insert(shortstatekey, Arc::from(prev_event)); + // Now it's the state after the pdu + } + + state_at_incoming_event = Some(state); + } + } else { + info!("Calculating state at event using state res"); + let mut extremity_sstatehashes = HashMap::new(); + + let mut okay = true; + for prev_eventid in &incoming_pdu.prev_events { + let prev_event = if let Ok(Some(pdu)) = db.rooms.get_pdu(prev_eventid) { + pdu + } else { + okay = false; + break; + }; + + let sstatehash = if let Ok(Some(s)) = db.rooms.pdu_shortstatehash(prev_eventid) { + s + } else { + okay = false; + break; + }; + + extremity_sstatehashes.insert(sstatehash, prev_event); + } + + if okay { + let mut fork_states = Vec::with_capacity(extremity_sstatehashes.len()); + let mut auth_chain_sets = Vec::with_capacity(extremity_sstatehashes.len()); + + for (sstatehash, prev_event) in extremity_sstatehashes { + let mut leaf_state: BTreeMap<_, _> = db + .rooms + .state_full_ids(sstatehash) + .await + .map_err(|_| "Failed to ask db for room state.".to_owned())?; + + if let Some(state_key) = &prev_event.state_key { + let shortstatekey = db + .rooms + .get_or_create_shortstatekey( + &prev_event.kind.to_string().into(), + state_key, + &db.globals, + ) + .map_err(|_| "Failed to create shortstatekey.".to_owned())?; + leaf_state.insert(shortstatekey, Arc::from(&*prev_event.event_id)); + // Now it's the state after the pdu + } + + let mut state = StateMap::with_capacity(leaf_state.len()); + let mut starting_events = Vec::with_capacity(leaf_state.len()); + + for (k, id) in leaf_state { + if let Ok((ty, st_key)) = db.rooms.get_statekey_from_short(k) { + // FIXME: Undo .to_string().into() when StateMap + // is updated to use StateEventType + state.insert((ty.to_string().into(), st_key), id.clone()); + } else { + warn!("Failed to get_statekey_from_short."); + } + starting_events.push(id); + } + + auth_chain_sets.push( + get_auth_chain(room_id, starting_events, db) + .await + .map_err(|_| "Failed to load auth chain.".to_owned())? 
+ .collect(), + ); + + fork_states.push(state); + } + + let lock = db.globals.stateres_mutex.lock(); + + let result = state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| { + let res = db.rooms.get_pdu(id); + if let Err(e) = &res { + error!("LOOK AT ME Failed to fetch event: {}", e); + } + res.ok().flatten() + }); + drop(lock); + + state_at_incoming_event = match result { + Ok(new_state) => Some( + new_state + .into_iter() + .map(|((event_type, state_key), event_id)| { + let shortstatekey = db + .rooms + .get_or_create_shortstatekey( + &event_type.to_string().into(), + &state_key, + &db.globals, + ) + .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; + Ok((shortstatekey, event_id)) + }) + .collect::>()?, + ), + Err(e) => { + warn!("State resolution on prev events failed, either an event could not be found or deserialization: {}", e); + None + } + } + } + } + + if state_at_incoming_event.is_none() { + info!("Calling /state_ids"); + // Call /state_ids to find out what the state at this pdu is. We trust the server's + // response to some extend, but we still do a lot of checks on the events + match db + .sending + .send_federation_request( + &db.globals, + origin, + get_room_state_ids::v1::Request { + room_id, + event_id: &incoming_pdu.event_id, + }, + ) + .await + { + Ok(res) => { + info!("Fetching state events at event."); + let state_vec = fetch_and_handle_outliers( + db, + origin, + &res.pdu_ids + .iter() + .map(|x| Arc::from(&**x)) + .collect::>(), + create_event, + room_id, + pub_key_map, + ) + .await; + + let mut state: BTreeMap<_, Arc> = BTreeMap::new(); + for (pdu, _) in state_vec { + let state_key = pdu + .state_key + .clone() + .ok_or_else(|| "Found non-state pdu in state events.".to_owned())?; + + let shortstatekey = db + .rooms + .get_or_create_shortstatekey( + &pdu.kind.to_string().into(), + &state_key, + &db.globals, + ) + .map_err(|_| "Failed to create shortstatekey.".to_owned())?; + + match state.entry(shortstatekey) { + btree_map::Entry::Vacant(v) => { + v.insert(Arc::from(&*pdu.event_id)); + } + btree_map::Entry::Occupied(_) => return Err( + "State event's type and state_key combination exists multiple times." + .to_owned(), + ), + } + } + + // The original create event must still be in the state + let create_shortstatekey = db + .rooms + .get_shortstatekey(&StateEventType::RoomCreate, "") + .map_err(|_| "Failed to talk to db.")? + .expect("Room exists"); + + if state.get(&create_shortstatekey).map(|id| id.as_ref()) + != Some(&create_event.event_id) + { + return Err("Incoming event refers to wrong create event.".to_owned()); + } + + state_at_incoming_event = Some(state); + } + Err(e) => { + warn!("Fetching state for event failed: {}", e); + return Err("Fetching state for event failed".into()); + } + }; + } + + let state_at_incoming_event = + state_at_incoming_event.expect("we always set this to some above"); + + info!("Starting auth check"); + // 11. 
Check the auth of the event passes based on the state of the event + let check_result = state_res::event_auth::auth_check( + &room_version, + &incoming_pdu, + None::, // TODO: third party invite + |k, s| { + db.rooms + .get_shortstatekey(&k.to_string().into(), s) + .ok() + .flatten() + .and_then(|shortstatekey| state_at_incoming_event.get(&shortstatekey)) + .and_then(|event_id| db.rooms.get_pdu(event_id).ok().flatten()) + }, + ) + .map_err(|_e| "Auth check failed.".to_owned())?; + + if !check_result { + return Err("Event has failed auth check with state at the event.".into()); + } + info!("Auth check succeeded"); + + // We start looking at current room state now, so lets lock the room + + let mutex_state = Arc::clone( + db.globals + .roomid_mutex_state + .write() + .unwrap() + .entry(room_id.to_owned()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + // Now we calculate the set of extremities this room has after the incoming event has been + // applied. We start with the previous extremities (aka leaves) + info!("Calculating extremities"); + let mut extremities = db + .rooms + .get_pdu_leaves(room_id) + .map_err(|_| "Failed to load room leaves".to_owned())?; + + // Remove any forward extremities that are referenced by this incoming event's prev_events + for prev_event in &incoming_pdu.prev_events { + if extremities.contains(prev_event) { + extremities.remove(prev_event); + } + } + + // Only keep those extremities were not referenced yet + extremities.retain(|id| !matches!(db.rooms.is_event_referenced(room_id, id), Ok(true))); + + info!("Compressing state at event"); + let state_ids_compressed = state_at_incoming_event + .iter() + .map(|(shortstatekey, id)| { + db.rooms + .compress_state_event(*shortstatekey, id, &db.globals) + .map_err(|_| "Failed to compress_state_event".to_owned()) + }) + .collect::>()?; + + // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it + info!("Starting soft fail auth check"); + + let auth_events = db + .rooms + .get_auth_events( + room_id, + &incoming_pdu.kind, + &incoming_pdu.sender, + incoming_pdu.state_key.as_deref(), + &incoming_pdu.content, + ) + .map_err(|_| "Failed to get_auth_events.".to_owned())?; + + let soft_fail = !state_res::event_auth::auth_check( + &room_version, + &incoming_pdu, + None::, + |k, s| auth_events.get(&(k.clone(), s.to_owned())), + ) + .map_err(|_e| "Auth check failed.".to_owned())?; + + if soft_fail { + append_incoming_pdu( + db, + &incoming_pdu, + val, + extremities.iter().map(Deref::deref), + state_ids_compressed, + soft_fail, + &state_lock, + ) + .map_err(|e| { + warn!("Failed to add pdu to db: {}", e); + "Failed to add pdu to db.".to_owned() + })?; + + // Soft fail, we keep the event as an outlier but don't add it to the timeline + warn!("Event was soft failed: {:?}", incoming_pdu); + db.rooms + .mark_event_soft_failed(&incoming_pdu.event_id) + .map_err(|_| "Failed to set soft failed flag".to_owned())?; + return Err("Event has been soft failed".into()); + } + + if incoming_pdu.state_key.is_some() { + info!("Loading current room state ids"); + let current_sstatehash = db + .rooms + .current_shortstatehash(room_id) + .map_err(|_| "Failed to load current state hash.".to_owned())? 
+ .expect("every room has state"); + + let current_state_ids = db + .rooms + .state_full_ids(current_sstatehash) + .await + .map_err(|_| "Failed to load room state.")?; + + info!("Preparing for stateres to derive new room state"); + let mut extremity_sstatehashes = HashMap::new(); + + info!("Loading extremities"); + for id in dbg!(&extremities) { + match db + .rooms + .get_pdu(id) + .map_err(|_| "Failed to ask db for pdu.".to_owned())? + { + Some(leaf_pdu) => { + extremity_sstatehashes.insert( + db.rooms + .pdu_shortstatehash(&leaf_pdu.event_id) + .map_err(|_| "Failed to ask db for pdu state hash.".to_owned())? + .ok_or_else(|| { + error!( + "Found extremity pdu with no statehash in db: {:?}", + leaf_pdu + ); + "Found pdu with no statehash in db.".to_owned() + })?, + leaf_pdu, + ); + } + _ => { + error!("Missing state snapshot for {:?}", id); + return Err("Missing state snapshot.".to_owned()); + } + } + } + + let mut fork_states = Vec::new(); + + // 12. Ensure that the state is derived from the previous current state (i.e. we calculated + // by doing state res where one of the inputs was a previously trusted set of state, + // don't just trust a set of state we got from a remote). + + // We do this by adding the current state to the list of fork states + extremity_sstatehashes.remove(¤t_sstatehash); + fork_states.push(current_state_ids); + + // We also add state after incoming event to the fork states + let mut state_after = state_at_incoming_event.clone(); + if let Some(state_key) = &incoming_pdu.state_key { + let shortstatekey = db + .rooms + .get_or_create_shortstatekey( + &incoming_pdu.kind.to_string().into(), + state_key, + &db.globals, + ) + .map_err(|_| "Failed to create shortstatekey.".to_owned())?; + + state_after.insert(shortstatekey, Arc::from(&*incoming_pdu.event_id)); + } + fork_states.push(state_after); + + let mut update_state = false; + // 14. Use state resolution to find new room state + let new_room_state = if fork_states.is_empty() { + return Err("State is empty.".to_owned()); + } else if fork_states.iter().skip(1).all(|f| &fork_states[0] == f) { + info!("State resolution trivial"); + // There was only one state, so it has to be the room's current state (because that is + // always included) + fork_states[0] + .iter() + .map(|(k, id)| { + db.rooms + .compress_state_event(*k, id, &db.globals) + .map_err(|_| "Failed to compress_state_event.".to_owned()) + }) + .collect::>()? + } else { + info!("Loading auth chains"); + // We do need to force an update to this room's state + update_state = true; + + let mut auth_chain_sets = Vec::new(); + for state in &fork_states { + auth_chain_sets.push( + get_auth_chain( + room_id, + state.iter().map(|(_, id)| id.clone()).collect(), + db, + ) + .await + .map_err(|_| "Failed to load auth chain.".to_owned())? 
+ .collect(), + ); + } + + info!("Loading fork states"); + + let fork_states: Vec<_> = fork_states + .into_iter() + .map(|map| { + map.into_iter() + .filter_map(|(k, id)| { + db.rooms + .get_statekey_from_short(k) + // FIXME: Undo .to_string().into() when StateMap + // is updated to use StateEventType + .map(|(ty, st_key)| ((ty.to_string().into(), st_key), id)) + .map_err(|e| warn!("Failed to get_statekey_from_short: {}", e)) + .ok() + }) + .collect::>() + }) + .collect(); + + info!("Resolving state"); + + let lock = db.globals.stateres_mutex.lock(); + let state = match state_res::resolve( + room_version_id, + &fork_states, + auth_chain_sets, + |id| { + let res = db.rooms.get_pdu(id); + if let Err(e) = &res { + error!("LOOK AT ME Failed to fetch event: {}", e); + } + res.ok().flatten() + }, + ) { + Ok(new_state) => new_state, + Err(_) => { + return Err("State resolution failed, either an event could not be found or deserialization".into()); + } + }; + + drop(lock); + + info!("State resolution done. Compressing state"); + + state + .into_iter() + .map(|((event_type, state_key), event_id)| { + let shortstatekey = db + .rooms + .get_or_create_shortstatekey( + &event_type.to_string().into(), + &state_key, + &db.globals, + ) + .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; + db.rooms + .compress_state_event(shortstatekey, &event_id, &db.globals) + .map_err(|_| "Failed to compress state event".to_owned()) + }) + .collect::>()? + }; + + // Set the new room state to the resolved state + if update_state { + info!("Forcing new room state"); + db.rooms + .force_state(room_id, new_room_state, db) + .map_err(|_| "Failed to set new room state.".to_owned())?; + } + } + + info!("Appending pdu to timeline"); + extremities.insert(incoming_pdu.event_id.clone()); + + // Now that the event has passed all auth it is added into the timeline. + // We use the `state_at_event` instead of `state_after` so we accurately + // represent the state for this event. + + let pdu_id = append_incoming_pdu( + db, + &incoming_pdu, + val, + extremities.iter().map(Deref::deref), + state_ids_compressed, + soft_fail, + &state_lock, + ) + .map_err(|e| { + warn!("Failed to add pdu to db: {}", e); + "Failed to add pdu to db.".to_owned() + })?; + + info!("Appended incoming pdu"); + + // Event has passed all auth/stateres checks + drop(state_lock); + Ok(pdu_id) +} + +/// Find the event and auth it. Once the event is validated (steps 1 - 8) +/// it is appended to the outliers Tree. +/// +/// Returns pdu and if we fetched it over federation the raw json. +/// +/// a. Look in the main timeline (pduid_pdu tree) +/// b. Look at outlier pdu tree +/// c. Ask origin server over federation +/// d. TODO: Ask other servers over federation? 
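+///
+/// Events that repeatedly fail to fetch or authenticate are backed off via the
+/// `bad_event_ratelimiter`: roughly five minutes times the square of the number
+/// of failed tries, capped at 24 hours. A minimal sketch of that schedule
+/// (illustrative only, not part of this function's API):
+///
+/// ```rust
+/// # use std::time::Duration;
+/// let tries: u32 = 3;
+/// let backoff = std::cmp::min(
+///     Duration::from_secs(5 * 60) * tries * tries,
+///     Duration::from_secs(60 * 60 * 24),
+/// );
+/// assert_eq!(backoff, Duration::from_secs(45 * 60));
+/// ```
+///
+/// When an event is fetched over federation its auth events are fetched and
+/// handled in the same pass, so `handle_outlier_pdu` does not recurse unboundedly.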
+#[tracing::instrument(skip_all)] +pub(crate) fn fetch_and_handle_outliers<'a>( + db: &'a Database, + origin: &'a ServerName, + events: &'a [Arc], + create_event: &'a PduEvent, + room_id: &'a RoomId, + pub_key_map: &'a RwLock>>, +) -> AsyncRecursiveType<'a, Vec<(Arc, Option>)>> { + Box::pin(async move { + let back_off = |id| match db.globals.bad_event_ratelimiter.write().unwrap().entry(id) { + hash_map::Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + } + hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), + }; + + let mut pdus = vec![]; + for id in events { + if let Some((time, tries)) = db.globals.bad_event_ratelimiter.read().unwrap().get(&**id) + { + // Exponential backoff + let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + info!("Backing off from {}", id); + continue; + } + } + + // a. Look in the main timeline (pduid_pdu tree) + // b. Look at outlier pdu tree + // (get_pdu_json checks both) + if let Ok(Some(local_pdu)) = db.rooms.get_pdu(id) { + trace!("Found {} in db", id); + pdus.push((local_pdu, None)); + continue; + } + + // c. Ask origin server over federation + // We also handle its auth chain here so we don't get a stack overflow in + // handle_outlier_pdu. + let mut todo_auth_events = vec![Arc::clone(id)]; + let mut events_in_reverse_order = Vec::new(); + let mut events_all = HashSet::new(); + let mut i = 0; + while let Some(next_id) = todo_auth_events.pop() { + if events_all.contains(&next_id) { + continue; + } + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + + if let Ok(Some(_)) = db.rooms.get_pdu(&next_id) { + trace!("Found {} in db", id); + continue; + } + + info!("Fetching {} over federation.", next_id); + match db + .sending + .send_federation_request( + &db.globals, + origin, + get_event::v1::Request { event_id: &next_id }, + ) + .await + { + Ok(res) => { + info!("Got {} over federation", next_id); + let (calculated_event_id, value) = + match crate::pdu::gen_event_id_canonical_json(&res.pdu, &db) { + Ok(t) => t, + Err(_) => { + back_off((*next_id).to_owned()); + continue; + } + }; + + if calculated_event_id != *next_id { + warn!("Server didn't return event id we requested: requested: {}, we got {}. 
Event: {:?}", + next_id, calculated_event_id, &res.pdu); + } + + if let Some(auth_events) = + value.get("auth_events").and_then(|c| c.as_array()) + { + for auth_event in auth_events { + if let Ok(auth_event) = + serde_json::from_value(auth_event.clone().into()) + { + let a: Arc = auth_event; + todo_auth_events.push(a); + } else { + warn!("Auth event id is not valid"); + } + } + } else { + warn!("Auth event list invalid"); + } + + events_in_reverse_order.push((next_id.clone(), value)); + events_all.insert(next_id); + } + Err(_) => { + warn!("Failed to fetch event: {}", next_id); + back_off((*next_id).to_owned()); + } + } + } + + for (next_id, value) in events_in_reverse_order.iter().rev() { + match handle_outlier_pdu( + origin, + create_event, + next_id, + room_id, + value.clone(), + db, + pub_key_map, + ) + .await + { + Ok((pdu, json)) => { + if next_id == id { + pdus.push((pdu, Some(json))); + } + } + Err(e) => { + warn!("Authentication of event {} failed: {:?}", next_id, e); + back_off((**next_id).to_owned()); + } + } + } + } + pdus + }) +} + +/// Search the DB for the signing keys of the given server, if we don't have them +/// fetch them from the server and save to our DB. +#[tracing::instrument(skip_all)] +pub(crate) async fn fetch_signing_keys( + db: &Database, + origin: &ServerName, + signature_ids: Vec, +) -> Result> { + let contains_all_ids = + |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); + + let permit = db + .globals + .servername_ratelimiter + .read() + .unwrap() + .get(origin) + .map(|s| Arc::clone(s).acquire_owned()); + + let permit = match permit { + Some(p) => p, + None => { + let mut write = db.globals.servername_ratelimiter.write().unwrap(); + let s = Arc::clone( + write + .entry(origin.to_owned()) + .or_insert_with(|| Arc::new(Semaphore::new(1))), + ); + + s.acquire_owned() + } + } + .await; + + let back_off = |id| match db + .globals + .bad_signature_ratelimiter + .write() + .unwrap() + .entry(id) + { + hash_map::Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + } + hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), + }; + + if let Some((time, tries)) = db + .globals + .bad_signature_ratelimiter + .read() + .unwrap() + .get(&signature_ids) + { + // Exponential backoff + let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + debug!("Backing off from {:?}", signature_ids); + return Err(Error::BadServerResponse("bad signature, still backing off")); + } + } + + trace!("Loading signing keys for {}", origin); + + let mut result: BTreeMap<_, _> = db + .globals + .signing_keys_for(origin)? 
+ .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect(); + + if contains_all_ids(&result) { + return Ok(result); + } + + debug!("Fetching signing keys for {} over federation", origin); + + if let Some(server_key) = db + .sending + .send_federation_request(&db.globals, origin, get_server_keys::v2::Request::new()) + .await + .ok() + .and_then(|resp| resp.server_key.deserialize().ok()) + { + db.globals.add_signing_key(origin, server_key.clone())?; + + result.extend( + server_key + .verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + result.extend( + server_key + .old_verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + + if contains_all_ids(&result) { + return Ok(result); + } + } + + for server in db.globals.trusted_servers() { + debug!("Asking {} for {}'s signing key", server, origin); + if let Some(server_keys) = db + .sending + .send_federation_request( + &db.globals, + server, + get_remote_server_keys::v2::Request::new( + origin, + MilliSecondsSinceUnixEpoch::from_system_time( + SystemTime::now() + .checked_add(Duration::from_secs(3600)) + .expect("SystemTime to large"), + ) + .expect("time is valid"), + ), + ) + .await + .ok() + .map(|resp| { + resp.server_keys + .into_iter() + .filter_map(|e| e.deserialize().ok()) + .collect::>() + }) + { + trace!("Got signing keys: {:?}", server_keys); + for k in server_keys { + db.globals.add_signing_key(origin, k.clone())?; + result.extend( + k.verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + result.extend( + k.old_verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + } + + if contains_all_ids(&result) { + return Ok(result); + } + } + } + + drop(permit); + + back_off(signature_ids); + + warn!("Failed to find public key for server: {}", origin); + Err(Error::BadServerResponse( + "Failed to find public key for server", + )) +} + +/// Append the incoming event setting the state snapshot to the state from the +/// server that sent the event. +#[tracing::instrument(skip_all)] +fn append_incoming_pdu<'a>( + db: &Database, + pdu: &PduEvent, + pdu_json: CanonicalJsonObject, + new_room_leaves: impl IntoIterator + Clone + Debug, + state_ids_compressed: HashSet, + soft_fail: bool, + _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex +) -> Result>> { + // We append to state before appending the pdu, so we don't have a moment in time with the + // pdu without it's state. This is okay because append_pdu can't fail. + db.rooms.set_event_state( + &pdu.event_id, + &pdu.room_id, + state_ids_compressed, + &db.globals, + )?; + + if soft_fail { + db.rooms + .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; + db.rooms.replace_pdu_leaves(&pdu.room_id, new_room_leaves)?; + return Ok(None); + } + + let pdu_id = db.rooms.append_pdu(pdu, pdu_json, new_room_leaves, db)?; + + for appservice in db.appservice.all()? { + if db.rooms.appservice_in_room(&pdu.room_id, &appservice, db)? 
{ + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + continue; + } + + if let Some(namespaces) = appservice.1.get("namespaces") { + let users = namespaces + .get("users") + .and_then(|users| users.as_sequence()) + .map_or_else(Vec::new, |users| { + users + .iter() + .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) + .collect::>() + }); + let aliases = namespaces + .get("aliases") + .and_then(|aliases| aliases.as_sequence()) + .map_or_else(Vec::new, |aliases| { + aliases + .iter() + .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) + .collect::>() + }); + let rooms = namespaces + .get("rooms") + .and_then(|rooms| rooms.as_sequence()); + + let matching_users = |users: &Regex| { + users.is_match(pdu.sender.as_str()) + || pdu.kind == RoomEventType::RoomMember + && pdu + .state_key + .as_ref() + .map_or(false, |state_key| users.is_match(state_key)) + }; + let matching_aliases = |aliases: &Regex| { + db.rooms + .room_aliases(&pdu.room_id) + .filter_map(|r| r.ok()) + .any(|room_alias| aliases.is_match(room_alias.as_str())) + }; + + if aliases.iter().any(matching_aliases) + || rooms.map_or(false, |rooms| rooms.contains(&pdu.room_id.as_str().into())) + || users.iter().any(matching_users) + { + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + } + } + } + + Ok(Some(pdu_id)) +} + +#[tracing::instrument(skip(starting_events, db))] +pub(crate) async fn get_auth_chain<'a>( + room_id: &RoomId, + starting_events: Vec>, + db: &'a Database, +) -> Result> + 'a> { + const NUM_BUCKETS: usize = 50; + + let mut buckets = vec![BTreeSet::new(); NUM_BUCKETS]; + + let mut i = 0; + for id in starting_events { + let short = db.rooms.get_or_create_shorteventid(&id, &db.globals)?; + let bucket_id = (short % NUM_BUCKETS as u64) as usize; + buckets[bucket_id].insert((short, id.clone())); + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + } + + let mut full_auth_chain = HashSet::new(); + + let mut hits = 0; + let mut misses = 0; + for chunk in buckets { + if chunk.is_empty() { + continue; + } + + let chunk_key: Vec = chunk.iter().map(|(short, _)| short).copied().collect(); + if let Some(cached) = db.rooms.get_auth_chain_from_cache(&chunk_key)? { + hits += 1; + full_auth_chain.extend(cached.iter().copied()); + continue; + } + misses += 1; + + let mut chunk_cache = HashSet::new(); + let mut hits2 = 0; + let mut misses2 = 0; + let mut i = 0; + for (sevent_id, event_id) in chunk { + if let Some(cached) = db.rooms.get_auth_chain_from_cache(&[sevent_id])? 
{ + hits2 += 1; + chunk_cache.extend(cached.iter().copied()); + } else { + misses2 += 1; + let auth_chain = Arc::new(get_auth_chain_inner(room_id, &event_id, db)?); + db.rooms + .cache_auth_chain(vec![sevent_id], Arc::clone(&auth_chain))?; + println!( + "cache missed event {} with auth chain len {}", + event_id, + auth_chain.len() + ); + chunk_cache.extend(auth_chain.iter()); + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + }; + } + println!( + "chunk missed with len {}, event hits2: {}, misses2: {}", + chunk_cache.len(), + hits2, + misses2 + ); + let chunk_cache = Arc::new(chunk_cache); + db.rooms + .cache_auth_chain(chunk_key, Arc::clone(&chunk_cache))?; + full_auth_chain.extend(chunk_cache.iter()); + } + + println!( + "total: {}, chunk hits: {}, misses: {}", + full_auth_chain.len(), + hits, + misses + ); + + Ok(full_auth_chain + .into_iter() + .filter_map(move |sid| db.rooms.get_eventid_from_short(sid).ok())) +} + +#[tracing::instrument(skip(event_id, db))] +fn get_auth_chain_inner( + room_id: &RoomId, + event_id: &EventId, + db: &Database, +) -> Result> { + let mut todo = vec![Arc::from(event_id)]; + let mut found = HashSet::new(); + + while let Some(event_id) = todo.pop() { + match db.rooms.get_pdu(&event_id) { + Ok(Some(pdu)) => { + if pdu.room_id != room_id { + return Err(Error::BadRequest(ErrorKind::Forbidden, "Evil event in db")); + } + for auth_event in &pdu.auth_events { + let sauthevent = db + .rooms + .get_or_create_shorteventid(auth_event, &db.globals)?; + + if !found.contains(&sauthevent) { + found.insert(sauthevent); + todo.push(auth_event.clone()); + } + } + } + Ok(None) => { + warn!("Could not find pdu mentioned in auth events: {}", event_id); + } + Err(e) => { + warn!("Could not load event in auth chain: {} {}", event_id, e); + } + } + } + + Ok(found) +} + +/// # `GET /_matrix/federation/v1/event/{eventId}` +/// +/// Retrieves a single event from the server. +/// +/// - Only works if a user of this server is currently invited or joined the room +pub async fn get_event_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + let event = db + .rooms + .get_pdu_json(&body.event_id)? + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; + + let room_id_str = event + .get("room_id") + .and_then(|val| val.as_str()) + .ok_or_else(|| Error::bad_database("Invalid event in database"))?; + + let room_id = <&RoomId>::try_from(room_id_str) + .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; + + if !db.rooms.server_in_room(sender_servername, room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Server is not in room", + )); + } + + Ok(get_event::v1::Response { + origin: db.globals.server_name().to_owned(), + origin_server_ts: MilliSecondsSinceUnixEpoch::now(), + pdu: PduEvent::convert_to_outgoing_federation_event(event), + }) +} + +/// # `POST /_matrix/federation/v1/get_missing_events/{roomId}` +/// +/// Retrieves events that the sender is missing. 
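+///
+/// - Walks backwards from the sender's `latest_events` through `prev_events`,
+///   skipping anything listed in `earliest_events`, until `limit` events have
+///   been collected
+/// - Rejects the request if an event belonging to a different room is encountered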
+pub async fn get_missing_events_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + if !db.rooms.server_in_room(sender_servername, &body.room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Server is not in room", + )); + } + + acl_check(sender_servername, &body.room_id, &db)?; + + let mut queued_events = body.latest_events.clone(); + let mut events = Vec::new(); + + let mut i = 0; + while i < queued_events.len() && events.len() < u64::from(body.limit) as usize { + if let Some(pdu) = db.rooms.get_pdu_json(&queued_events[i])? { + let room_id_str = pdu + .get("room_id") + .and_then(|val| val.as_str()) + .ok_or_else(|| Error::bad_database("Invalid event in database"))?; + + let event_room_id = <&RoomId>::try_from(room_id_str) + .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; + + if event_room_id != body.room_id { + warn!( + "Evil event detected: Event {} found while searching in room {}", + queued_events[i], body.room_id + ); + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Evil event detected", + )); + } + + if body.earliest_events.contains(&queued_events[i]) { + i += 1; + continue; + } + queued_events.extend_from_slice( + &serde_json::from_value::>>( + serde_json::to_value(pdu.get("prev_events").cloned().ok_or_else(|| { + Error::bad_database("Event in db has no prev_events field.") + })?) + .expect("canonical json is valid json value"), + ) + .map_err(|_| Error::bad_database("Invalid prev_events content in pdu in db."))?, + ); + events.push(PduEvent::convert_to_outgoing_federation_event(pdu)); + } + i += 1; + } + + Ok(get_missing_events::v1::Response { events }) +} + +/// # `GET /_matrix/federation/v1/event_auth/{roomId}/{eventId}` +/// +/// Retrieves the auth chain for a given event. +/// +/// - This does not include the event itself +pub async fn get_event_authorization_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + if !db.rooms.server_in_room(sender_servername, &body.room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Server is not in room.", + )); + } + + acl_check(sender_servername, &body.room_id, &db)?; + + let event = db + .rooms + .get_pdu_json(&body.event_id)? + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; + + let room_id_str = event + .get("room_id") + .and_then(|val| val.as_str()) + .ok_or_else(|| Error::bad_database("Invalid event in database"))?; + + let room_id = <&RoomId>::try_from(room_id_str) + .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; + + let auth_chain_ids = get_auth_chain(room_id, vec![Arc::from(&*body.event_id)], &db).await?; + + Ok(get_event_authorization::v1::Response { + auth_chain: auth_chain_ids + .filter_map(|id| db.rooms.get_pdu_json(&id).ok()?) + .map(PduEvent::convert_to_outgoing_federation_event) + .collect(), + }) +} + +/// # `GET /_matrix/federation/v1/state/{roomId}` +/// +/// Retrieves the current state of the room. 
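// A minimal sketch of what both state endpoints below do, with toy stand-in
// types instead of Conduit's shortstatehash machinery: resolve the event to
// the state snapshot that was current at that event, then enumerate the
// snapshot's (event type, state key) -> event id entries. `/state` returns the
// full PDUs for those ids, `/state_ids` only the ids themselves.
struct ToyStateStore {
    // event id -> snapshot id ("shortstatehash" in the real code)
    event_to_snapshot: std::collections::HashMap<String, u64>,
    // snapshot id -> ((event type, state key) -> event id)
    snapshots: std::collections::HashMap<u64, std::collections::HashMap<(String, String), String>>,
}

impl ToyStateStore {
    // Event ids making up the room state at `event_id`; None mirrors the
    // handlers' "Pdu state not found." error.
    fn state_ids_at(&self, event_id: &str) -> Option<Vec<String>> {
        let snapshot_id = self.event_to_snapshot.get(event_id)?;
        Some(self.snapshots.get(snapshot_id)?.values().cloned().collect())
    }
}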
+pub async fn get_room_state_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + if !db.rooms.server_in_room(sender_servername, &body.room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Server is not in room.", + )); + } + + acl_check(sender_servername, &body.room_id, &db)?; + + let shortstatehash = db + .rooms + .pdu_shortstatehash(&body.event_id)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Pdu state not found.", + ))?; + + let pdus = db + .rooms + .state_full_ids(shortstatehash) + .await? + .into_iter() + .map(|(_, id)| { + PduEvent::convert_to_outgoing_federation_event( + db.rooms.get_pdu_json(&id).unwrap().unwrap(), + ) + }) + .collect(); + + let auth_chain_ids = + get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db).await?; + + Ok(get_room_state::v1::Response { + auth_chain: auth_chain_ids + .map(|id| { + db.rooms.get_pdu_json(&id).map(|maybe_json| { + PduEvent::convert_to_outgoing_federation_event(maybe_json.unwrap()) + }) + }) + .filter_map(|r| r.ok()) + .collect(), + pdus, + }) +} + +/// # `GET /_matrix/federation/v1/state_ids/{roomId}` +/// +/// Retrieves the current state of the room. +pub async fn get_room_state_ids_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + if !db.rooms.server_in_room(sender_servername, &body.room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Server is not in room.", + )); + } + + acl_check(sender_servername, &body.room_id, &db)?; + + let shortstatehash = db + .rooms + .pdu_shortstatehash(&body.event_id)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Pdu state not found.", + ))?; + + let pdu_ids = db + .rooms + .state_full_ids(shortstatehash) + .await? + .into_iter() + .map(|(_, id)| (*id).to_owned()) + .collect(); + + let auth_chain_ids = + get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db).await?; + + Ok(get_room_state_ids::v1::Response { + auth_chain_ids: auth_chain_ids.map(|id| (*id).to_owned()).collect(), + pdu_ids, + }) +} + +/// # `GET /_matrix/federation/v1/make_join/{roomId}/{userId}` +/// +/// Creates a join template. +pub async fn create_join_event_template_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + if !db.rooms.exists(&body.room_id)? 
{ + return Err(Error::BadRequest( + ErrorKind::NotFound, + "Room is unknown to this server.", + )); + } + + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + acl_check(sender_servername, &body.room_id, &db)?; + + // TODO: Conduit does not implement restricted join rules yet, we always reject + let join_rules_event = + db.rooms + .room_state_get(&body.room_id, &StateEventType::RoomJoinRules, "")?; + + let join_rules_event_content: Option = join_rules_event + .as_ref() + .map(|join_rules_event| { + serde_json::from_str(join_rules_event.content.get()).map_err(|e| { + warn!("Invalid join rules event: {}", e); + Error::bad_database("Invalid join rules event in db.") + }) + }) + .transpose()?; + + if let Some(join_rules_event_content) = join_rules_event_content { + if matches!( + join_rules_event_content.join_rule, + JoinRule::Restricted { .. } + ) { + return Err(Error::BadRequest( + ErrorKind::Unknown, + "Conduit does not support restricted rooms yet.", + )); + } + } + + let prev_events: Vec<_> = db + .rooms + .get_pdu_leaves(&body.room_id)? + .into_iter() + .take(20) + .collect(); + + let create_event = db + .rooms + .room_state_get(&body.room_id, &StateEventType::RoomCreate, "")?; + + let create_event_content: Option = create_event + .as_ref() + .map(|create_event| { + serde_json::from_str(create_event.content.get()).map_err(|e| { + warn!("Invalid create event: {}", e); + Error::bad_database("Invalid create event in db.") + }) + }) + .transpose()?; + + // If there was no create event yet, assume we are creating a room with the default version + // right now + let room_version_id = create_event_content + .map_or(db.globals.default_room_version(), |create_event| { + create_event.room_version + }); + let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); + + if !body.ver.contains(&room_version_id) { + return Err(Error::BadRequest( + ErrorKind::IncompatibleRoomVersion { + room_version: room_version_id, + }, + "Room version not supported.", + )); + } + + let content = to_raw_value(&RoomMemberEventContent { + avatar_url: None, + blurhash: None, + displayname: None, + is_direct: None, + membership: MembershipState::Join, + third_party_invite: None, + reason: None, + join_authorized_via_users_server: None, + }) + .expect("member event is valid value"); + + let state_key = body.user_id.to_string(); + let kind = StateEventType::RoomMember; + + let auth_events = db.rooms.get_auth_events( + &body.room_id, + &kind.to_string().into(), + &body.user_id, + Some(&state_key), + &content, + )?; + + // Our depth is the maximum depth of prev_events + 1 + let depth = prev_events + .iter() + .filter_map(|event_id| Some(db.rooms.get_pdu(event_id).ok()??.depth)) + .max() + .unwrap_or_else(|| uint!(0)) + + uint!(1); + + let mut unsigned = BTreeMap::new(); + + if let Some(prev_pdu) = db.rooms.room_state_get(&body.room_id, &kind, &state_key)? 
{ + unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone()); + unsigned.insert( + "prev_sender".to_owned(), + to_raw_value(&prev_pdu.sender).expect("UserId is valid"), + ); + } + + let pdu = PduEvent { + event_id: ruma::event_id!("$thiswillbefilledinlater").into(), + room_id: body.room_id.clone(), + sender: body.user_id.clone(), + origin_server_ts: utils::millis_since_unix_epoch() + .try_into() + .expect("time is valid"), + kind: kind.to_string().into(), + content, + state_key: Some(state_key), + prev_events, + depth, + auth_events: auth_events + .iter() + .map(|(_, pdu)| pdu.event_id.clone()) + .collect(), + redacts: None, + unsigned: if unsigned.is_empty() { + None + } else { + Some(to_raw_value(&unsigned).expect("to_raw_value always works")) + }, + hashes: EventHash { + sha256: "aaa".to_owned(), + }, + signatures: None, + }; + + let auth_check = state_res::auth_check( + &room_version, + &pdu, + None::, // TODO: third_party_invite + |k, s| auth_events.get(&(k.clone(), s.to_owned())), + ) + .map_err(|e| { + error!("{:?}", e); + Error::bad_database("Auth check failed.") + })?; + + if !auth_check { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Event is not authorized.", + )); + } + + // Hash and sign + let mut pdu_json = + utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); + + pdu_json.remove("event_id"); + + // Add origin because synapse likes that (and it's required in the spec) + pdu_json.insert( + "origin".to_owned(), + CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), + ); + + Ok(prepare_join_event::v1::Response { + room_version: Some(room_version_id), + event: to_raw_value(&pdu_json).expect("CanonicalJson can be serialized to JSON"), + }) +} + +async fn create_join_event( + db: &DatabaseGuard, + sender_servername: &ServerName, + room_id: &RoomId, + pdu: &RawJsonValue, +) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + if !db.rooms.exists(room_id)? { + return Err(Error::BadRequest( + ErrorKind::NotFound, + "Room is unknown to this server.", + )); + } + + acl_check(sender_servername, room_id, db)?; + + // TODO: Conduit does not implement restricted join rules yet, we always reject + let join_rules_event = db + .rooms + .room_state_get(room_id, &StateEventType::RoomJoinRules, "")?; + + let join_rules_event_content: Option = join_rules_event + .as_ref() + .map(|join_rules_event| { + serde_json::from_str(join_rules_event.content.get()).map_err(|e| { + warn!("Invalid join rules event: {}", e); + Error::bad_database("Invalid join rules event in db.") + }) + }) + .transpose()?; + + if let Some(join_rules_event_content) = join_rules_event_content { + if matches!( + join_rules_event_content.join_rule, + JoinRule::Restricted { .. } + ) { + return Err(Error::BadRequest( + ErrorKind::Unknown, + "Conduit does not support restricted rooms yet.", + )); + } + } + + // We need to return the state prior to joining, let's keep a reference to that here + let shortstatehash = db + .rooms + .current_shortstatehash(room_id)? 
+ .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Pdu state not found.", + ))?; + + let pub_key_map = RwLock::new(BTreeMap::new()); + // let mut auth_cache = EventMap::new(); + + // We do not add the event_id field to the pdu here because of signature and hashes checks + let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu, &db) { + Ok(t) => t, + Err(_) => { + // Event could not be converted to canonical json + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Could not convert event to canonical json.", + )); + } + }; + + let origin: Box = serde_json::from_value( + serde_json::to_value(value.get("origin").ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Event needs an origin field.", + ))?) + .expect("CanonicalJson is valid json value"), + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; + + let mutex = Arc::clone( + db.globals + .roomid_mutex_federation + .write() + .unwrap() + .entry(room_id.to_owned()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; + let pdu_id = handle_incoming_pdu(&origin, &event_id, room_id, value, true, db, &pub_key_map) + .await + .map_err(|e| { + warn!("Error while handling incoming send join PDU: {}", e); + Error::BadRequest( + ErrorKind::InvalidParam, + "Error while handling incoming PDU.", + ) + })? + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Could not accept incoming PDU as timeline event.", + ))?; + drop(mutex_lock); + + let state_ids = db.rooms.state_full_ids(shortstatehash).await?; + let auth_chain_ids = get_auth_chain( + room_id, + state_ids.iter().map(|(_, id)| id.clone()).collect(), + db, + ) + .await?; + + let servers = db + .rooms + .room_servers(room_id) + .filter_map(|r| r.ok()) + .filter(|server| &**server != db.globals.server_name()); + + db.sending.send_pdu(servers, &pdu_id)?; + + db.flush()?; + + Ok(RoomState { + auth_chain: auth_chain_ids + .filter_map(|id| db.rooms.get_pdu_json(&id).ok().flatten()) + .map(PduEvent::convert_to_outgoing_federation_event) + .collect(), + state: state_ids + .iter() + .filter_map(|(_, id)| db.rooms.get_pdu_json(id).ok().flatten()) + .map(PduEvent::convert_to_outgoing_federation_event) + .collect(), + }) +} + +/// # `PUT /_matrix/federation/v1/send_join/{roomId}/{eventId}` +/// +/// Submits a signed join event. +pub async fn create_join_event_v1_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + let room_state = create_join_event(&db, sender_servername, &body.room_id, &body.pdu).await?; + + Ok(create_join_event::v1::Response { room_state }) +} + +/// # `PUT /_matrix/federation/v2/send_join/{roomId}/{eventId}` +/// +/// Submits a signed join event. +pub async fn create_join_event_v2_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + let room_state = create_join_event(&db, sender_servername, &body.room_id, &body.pdu).await?; + + Ok(create_join_event::v2::Response { room_state }) +} + +/// # `PUT /_matrix/federation/v2/invite/{roomId}/{eventId}` +/// +/// Invites a remote user to a room. 
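// A small aside on the join template above: a newly built event's depth is the
// maximum depth of its prev_events plus one (and 1 when no prev event is
// known). A standalone sketch of that rule with plain integers instead of
// ruma's UInt:
fn next_depth(prev_event_depths: &[u64]) -> u64 {
    prev_event_depths.iter().copied().max().unwrap_or(0) + 1
}

#[test]
fn depth_is_max_of_prev_depths_plus_one() {
    assert_eq!(next_depth(&[4, 7, 6]), 8);
    assert_eq!(next_depth(&[]), 1);
}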
+pub async fn create_invite_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + acl_check(sender_servername, &body.room_id, &db)?; + + if !db.rooms.is_supported_version(&db, &body.room_version) { + return Err(Error::BadRequest( + ErrorKind::IncompatibleRoomVersion { + room_version: body.room_version.clone(), + }, + "Server does not support this room version.", + )); + } + + let mut signed_event = utils::to_canonical_object(&body.event) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invite event is invalid."))?; + + ruma::signatures::hash_and_sign_event( + db.globals.server_name().as_str(), + db.globals.keypair(), + &mut signed_event, + &body.room_version, + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Failed to sign event."))?; + + // Generate event id + let event_id = EventId::parse(format!( + "${}", + ruma::signatures::reference_hash(&signed_event, &body.room_version) + .expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are valid event ids"); + + // Add event_id back + signed_event.insert( + "event_id".to_owned(), + CanonicalJsonValue::String(event_id.into()), + ); + + let sender: Box<_> = serde_json::from_value( + signed_event + .get("sender") + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Event had no sender field.", + ))? + .clone() + .into(), + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "sender is not a user id."))?; + + let invited_user: Box<_> = serde_json::from_value( + signed_event + .get("state_key") + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Event had no state_key field.", + ))? + .clone() + .into(), + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "state_key is not a user id."))?; + + let mut invite_state = body.invite_room_state.clone(); + + let mut event: JsonObject = serde_json::from_str(body.event.get()) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event bytes."))?; + + event.insert("event_id".to_owned(), "$dummy".into()); + + let pdu: PduEvent = serde_json::from_value(event.into()).map_err(|e| { + warn!("Invalid invite event: {}", e); + Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event.") + })?; + + invite_state.push(pdu.to_stripped_state_event()); + + // If the room already exists, the remote server will notify us about the join via /send + if !db.rooms.exists(&pdu.room_id)? { + db.rooms.update_membership( + &body.room_id, + &invited_user, + MembershipState::Invite, + &sender, + Some(invite_state), + &db, + true, + )?; + } + + db.flush()?; + + Ok(create_invite::v2::Response { + event: PduEvent::convert_to_outgoing_federation_event(signed_event), + }) +} + +/// # `GET /_matrix/federation/v1/user/devices/{userId}` +/// +/// Gets information on all devices of the user. +pub async fn get_devices_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + Ok(get_devices::v1::Response { + user_id: body.user_id.clone(), + stream_id: db + .users + .get_devicelist_version(&body.user_id)? 
+ .unwrap_or(0) + .try_into() + .expect("version will not grow that large"), + devices: db + .users + .all_devices_metadata(&body.user_id) + .filter_map(|r| r.ok()) + .filter_map(|metadata| { + Some(UserDevice { + keys: db + .users + .get_device_keys(&body.user_id, &metadata.device_id) + .ok()??, + device_id: metadata.device_id, + device_display_name: metadata.display_name, + }) + }) + .collect(), + master_key: db + .users + .get_master_key(&body.user_id, |u| u.server_name() == sender_servername)?, + self_signing_key: db + .users + .get_self_signing_key(&body.user_id, |u| u.server_name() == sender_servername)?, + }) +} + +/// # `GET /_matrix/federation/v1/query/directory` +/// +/// Resolve a room alias to a room id. +pub async fn get_room_information_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let room_id = db + .rooms + .id_from_alias(&body.room_alias)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Room alias not found.", + ))?; + + Ok(get_room_information::v1::Response { + room_id, + servers: vec![db.globals.server_name().to_owned()], + }) +} + +/// # `GET /_matrix/federation/v1/query/profile` +/// +/// Gets information on a profile. +pub async fn get_profile_information_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let mut displayname = None; + let mut avatar_url = None; + let mut blurhash = None; + + match &body.field { + Some(ProfileField::DisplayName) => displayname = db.users.displayname(&body.user_id)?, + Some(ProfileField::AvatarUrl) => { + avatar_url = db.users.avatar_url(&body.user_id)?; + blurhash = db.users.blurhash(&body.user_id)? + } + // TODO: what to do with custom + Some(_) => {} + None => { + displayname = db.users.displayname(&body.user_id)?; + avatar_url = db.users.avatar_url(&body.user_id)?; + blurhash = db.users.blurhash(&body.user_id)?; + } + } + + Ok(get_profile_information::v1::Response { + blurhash, + displayname, + avatar_url, + }) +} + +/// # `POST /_matrix/federation/v1/user/keys/query` +/// +/// Gets devices and identity keys for the given users. +pub async fn get_keys_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let result = get_keys_helper( + None, + &body.device_keys, + |u| Some(u.server_name()) == body.sender_servername.as_deref(), + &db, + ) + .await?; + + db.flush()?; + + Ok(get_keys::v1::Response { + device_keys: result.device_keys, + master_keys: result.master_keys, + self_signing_keys: result.self_signing_keys, + }) +} + +/// # `POST /_matrix/federation/v1/user/keys/claim` +/// +/// Claims one-time keys. +pub async fn claim_keys_route( + db: DatabaseGuard, + body: Ruma, +) -> Result { + if !db.globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let result = claim_keys_helper(&body.one_time_keys, &db).await?; + + db.flush()?; + + Ok(claim_keys::v1::Response { + one_time_keys: result.one_time_keys, + }) +} + +#[tracing::instrument(skip_all)] +pub(crate) async fn fetch_required_signing_keys( + event: &BTreeMap, + pub_key_map: &RwLock>>, + db: &Database, +) -> Result<()> { + let signatures = event + .get("signatures") + .ok_or(Error::BadServerResponse( + "No signatures in server response pdu.", + ))? 
+ .as_object() + .ok_or(Error::BadServerResponse( + "Invalid signatures object in server response pdu.", + ))?; + + // We go through all the signatures we see on the value and fetch the corresponding signing + // keys + for (signature_server, signature) in signatures { + let signature_object = signature.as_object().ok_or(Error::BadServerResponse( + "Invalid signatures content object in server response pdu.", + ))?; + + let signature_ids = signature_object.keys().cloned().collect::>(); + + let fetch_res = fetch_signing_keys( + db, + signature_server.as_str().try_into().map_err(|_| { + Error::BadServerResponse("Invalid servername in signatures of server response pdu.") + })?, + signature_ids, + ) + .await; + + let keys = match fetch_res { + Ok(keys) => keys, + Err(_) => { + warn!("Signature verification failed: Could not fetch signing key.",); + continue; + } + }; + + pub_key_map + .write() + .map_err(|_| Error::bad_database("RwLock is poisoned."))? + .insert(signature_server.clone(), keys); + } + + Ok(()) +} + +// Gets a list of servers for which we don't have the signing key yet. We go over +// the PDUs and either cache the key or add it to the list that needs to be retrieved. +fn get_server_keys_from_cache( + pdu: &RawJsonValue, + servers: &mut BTreeMap, BTreeMap, QueryCriteria>>, + room_version: &RoomVersionId, + pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap>>, + db: &Database, +) -> Result<()> { + let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { + error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); + Error::BadServerResponse("Invalid PDU in server response") + })?; + + let event_id = format!( + "${}", + ruma::signatures::reference_hash(&value, room_version) + .expect("ruma can calculate reference hashes") + ); + let event_id = <&EventId>::try_from(event_id.as_str()) + .expect("ruma's reference hashes are valid event ids"); + + if let Some((time, tries)) = db + .globals + .bad_event_ratelimiter + .read() + .unwrap() + .get(event_id) + { + // Exponential backoff + let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + debug!("Backing off from {}", event_id); + return Err(Error::BadServerResponse("bad event, still backing off")); + } + } + + let signatures = value + .get("signatures") + .ok_or(Error::BadServerResponse( + "No signatures in server response pdu.", + ))? + .as_object() + .ok_or(Error::BadServerResponse( + "Invalid signatures object in server response pdu.", + ))?; + + for (signature_server, signature) in signatures { + let signature_object = signature.as_object().ok_or(Error::BadServerResponse( + "Invalid signatures content object in server response pdu.", + ))?; + + let signature_ids = signature_object.keys().cloned().collect::>(); + + let contains_all_ids = + |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); + + let origin = <&ServerName>::try_from(signature_server.as_str()).map_err(|_| { + Error::BadServerResponse("Invalid servername in signatures of server response pdu.") + })?; + + if servers.contains_key(origin) || pub_key_map.contains_key(origin.as_str()) { + continue; + } + + trace!("Loading signing keys for {}", origin); + + let result: BTreeMap<_, _> = db + .globals + .signing_keys_for(origin)? 
+ .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect(); + + if !contains_all_ids(&result) { + trace!("Signing key not loaded for {}", origin); + servers.insert(origin.to_owned(), BTreeMap::new()); + } + + pub_key_map.insert(origin.to_string(), result); + } + + Ok(()) +} + +pub(crate) async fn fetch_join_signing_keys( + event: &create_join_event::v2::Response, + room_version: &RoomVersionId, + pub_key_map: &RwLock>>, + db: &Database, +) -> Result<()> { + let mut servers: BTreeMap, BTreeMap, QueryCriteria>> = + BTreeMap::new(); + + { + let mut pkm = pub_key_map + .write() + .map_err(|_| Error::bad_database("RwLock is poisoned."))?; + + // Try to fetch keys, failure is okay + // Servers we couldn't find in the cache will be added to `servers` + for pdu in &event.room_state.state { + let _ = get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm, db); + } + for pdu in &event.room_state.auth_chain { + let _ = get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm, db); + } + + drop(pkm); + } + + if servers.is_empty() { + // We had all keys locally + return Ok(()); + } + + for server in db.globals.trusted_servers() { + trace!("Asking batch signing keys from trusted server {}", server); + if let Ok(keys) = db + .sending + .send_federation_request( + &db.globals, + server, + get_remote_server_keys_batch::v2::Request { + server_keys: servers.clone(), + }, + ) + .await + { + trace!("Got signing keys: {:?}", keys); + let mut pkm = pub_key_map + .write() + .map_err(|_| Error::bad_database("RwLock is poisoned."))?; + for k in keys.server_keys { + let k = k.deserialize().unwrap(); + + // TODO: Check signature from trusted server? + servers.remove(&k.server_name); + + let result = db + .globals + .add_signing_key(&k.server_name, k.clone())? + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect::>(); + + pkm.insert(k.server_name.to_string(), result); + } + } + + if servers.is_empty() { + return Ok(()); + } + } + + let mut futures: FuturesUnordered<_> = servers + .into_iter() + .map(|(server, _)| async move { + ( + db.sending + .send_federation_request( + &db.globals, + &server, + get_server_keys::v2::Request::new(), + ) + .await, + server, + ) + }) + .collect(); + + while let Some(result) = futures.next().await { + if let (Ok(get_keys_response), origin) = result { + let result: BTreeMap<_, _> = db + .globals + .add_signing_key(&origin, get_keys_response.server_key.deserialize().unwrap())? + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect(); + + pub_key_map + .write() + .map_err(|_| Error::bad_database("RwLock is poisoned."))? + .insert(origin.to_string(), result); + } + } + + Ok(()) +} + +/// Returns Ok if the acl allows the server +fn acl_check(server_name: &ServerName, room_id: &RoomId, db: &Database) -> Result<()> { + let acl_event = match db + .rooms + .room_state_get(room_id, &StateEventType::RoomServerAcl, "")? 
+ { + Some(acl) => acl, + None => return Ok(()), + }; + + let acl_event_content: RoomServerAclEventContent = + match serde_json::from_str(acl_event.content.get()) { + Ok(content) => content, + Err(_) => { + warn!("Invalid ACL event"); + return Ok(()); + } + }; + + if acl_event_content.is_allowed(server_name) { + Ok(()) + } else { + Err(Error::BadRequest( + ErrorKind::Forbidden, + "Server was denied by ACL", + )) + } +} + +#[cfg(test)] +mod tests { + use super::{add_port_to_hostname, get_ip_with_port, FedDest}; + + #[test] + fn ips_get_default_ports() { + assert_eq!( + get_ip_with_port("1.1.1.1"), + Some(FedDest::Literal("1.1.1.1:8448".parse().unwrap())) + ); + assert_eq!( + get_ip_with_port("dead:beef::"), + Some(FedDest::Literal("[dead:beef::]:8448".parse().unwrap())) + ); + } + + #[test] + fn ips_keep_custom_ports() { + assert_eq!( + get_ip_with_port("1.1.1.1:1234"), + Some(FedDest::Literal("1.1.1.1:1234".parse().unwrap())) + ); + assert_eq!( + get_ip_with_port("[dead::beef]:8933"), + Some(FedDest::Literal("[dead::beef]:8933".parse().unwrap())) + ); + } + + #[test] + fn hostnames_get_default_ports() { + assert_eq!( + add_port_to_hostname("example.com"), + FedDest::Named(String::from("example.com"), String::from(":8448")) + ) + } + + #[test] + fn hostnames_keep_custom_ports() { + assert_eq!( + add_port_to_hostname("example.com:1337"), + FedDest::Named(String::from("example.com"), String::from(":1337")) + ) + } +} From dcdbcc08519643b7501b1e5695a9f211703f8b41 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 20 Jun 2022 11:31:27 +0200 Subject: [PATCH 313/445] refactor: event handling code --- src/api/client_server/membership.rs | 123 +- src/api/server_server.rs | 1356 +-------- src/service/rooms/{alias.rs => alias/mod.rs} | 0 .../rooms/{directory.rs => directory/mod.rs} | 0 src/service/rooms/{edus.rs => edus/mod.rs} | 0 src/service/rooms/event_handler/mod.rs | 2692 +---------------- .../{lazy_loading.rs => lazy_loading/mod.rs} | 0 .../rooms/{metadata.rs => metadata/mod.rs} | 0 .../rooms/{outlier.rs => outlier/mod.rs} | 11 +- .../{pdu_metadata.rs => pdu_metadata/mod.rs} | 0 .../rooms/{search.rs => search/mod.rs} | 0 src/service/rooms/{short.rs => short/mod.rs} | 0 src/service/rooms/state/data.rs | 499 +-- src/service/rooms/state/mod.rs | 179 +- src/service/rooms/state_accessor/mod.rs | 349 --- .../{state_cache.rs => state_cache/mod.rs} | 0 .../mod.rs} | 0 .../rooms/{timeline.rs => timeline/mod.rs} | 246 +- src/service/rooms/{user.rs => user/mod.rs} | 0 19 files changed, 265 insertions(+), 5190 deletions(-) rename src/service/rooms/{alias.rs => alias/mod.rs} (100%) rename src/service/rooms/{directory.rs => directory/mod.rs} (100%) rename src/service/rooms/{edus.rs => edus/mod.rs} (100%) rename src/service/rooms/{lazy_loading.rs => lazy_loading/mod.rs} (100%) rename src/service/rooms/{metadata.rs => metadata/mod.rs} (100%) rename src/service/rooms/{outlier.rs => outlier/mod.rs} (65%) rename src/service/rooms/{pdu_metadata.rs => pdu_metadata/mod.rs} (100%) rename src/service/rooms/{search.rs => search/mod.rs} (100%) rename src/service/rooms/{short.rs => short/mod.rs} (100%) rename src/service/rooms/{state_cache.rs => state_cache/mod.rs} (100%) rename src/service/rooms/{state_compressor.rs => state_compressor/mod.rs} (100%) rename src/service/rooms/{timeline.rs => timeline/mod.rs} (93%) rename src/service/rooms/{user.rs => user/mod.rs} (100%) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index 4dda11ad..ecd26d1a 100644 --- 
a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -806,36 +806,6 @@ pub(crate) async fn invite_helper<'a>( ); let state_lock = mutex_state.lock().await; - let prev_events: Vec<_> = db - .rooms - .get_pdu_leaves(room_id)? - .into_iter() - .take(20) - .collect(); - - let create_event = db - .rooms - .room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = - RoomVersion::new(&room_version_id).expect("room version is supported"); - let content = to_raw_value(&RoomMemberEventContent { avatar_url: None, displayname: None, @@ -851,98 +821,7 @@ pub(crate) async fn invite_helper<'a>( let state_key = user_id.to_string(); let kind = StateEventType::RoomMember; - let auth_events = db.rooms.get_auth_events( - room_id, - &kind.to_string().into(), - sender_user, - Some(&state_key), - &content, - )?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(db.rooms.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = BTreeMap::new(); - - if let Some(prev_pdu) = db.rooms.room_state_get(room_id, &kind, &state_key)? { - unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone()); - unsigned.insert( - "prev_sender".to_owned(), - to_raw_value(&prev_pdu.sender).expect("UserId is valid"), - ); - } - - let pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender_user.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: kind.to_string().into(), - content, - state_key: Some(state_key), - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts: None, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - to_canonical_value(db.globals.server_name()) - .expect("server name is a valid CanonicalJsonValue"), - ); - - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) - .expect("event is valid, we just created it"); + let (pdu, pdu_json) = 
create_hash_and_sign_event(); let invite_room_state = db.rooms.calculate_invite_state(&pdu)?; diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 6fa83e4c..f60f735a 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -882,1163 +882,6 @@ pub async fn send_transaction_message_route( Ok(send_transaction_message::v1::Response { pdus: resolved_map }) } -/// An async function that can recursively call itself. -type AsyncRecursiveType<'a, T> = Pin + 'a + Send>>; - -/// When receiving an event one needs to: -/// 0. Check the server is in the room -/// 1. Skip the PDU if we already know about it -/// 2. Check signatures, otherwise drop -/// 3. Check content hash, redact if doesn't match -/// 4. Fetch any missing auth events doing all checks listed here starting at 1. These are not -/// timeline events -/// 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are -/// also rejected "due to auth events" -/// 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events -/// 7. Persist this event as an outlier -/// 8. If not timeline event: stop -/// 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline -/// events -/// 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities -/// doing all the checks in this list starting at 1. These are not timeline events -/// 11. Check the auth of the event passes based on the state of the event -/// 12. Ensure that the state is derived from the previous current state (i.e. we calculated by -/// doing state res where one of the inputs was a previously trusted set of state, don't just -/// trust a set of state we got from a remote) -/// 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" -/// it -/// 14. Use state resolution to find new room state -// We use some AsyncRecursiveType hacks here so we can call this async funtion recursively -#[tracing::instrument(skip(value, is_timeline_event, db, pub_key_map))] -pub(crate) async fn handle_incoming_pdu<'a>( - origin: &'a ServerName, - event_id: &'a EventId, - room_id: &'a RoomId, - value: BTreeMap, - is_timeline_event: bool, - db: &'a Database, - pub_key_map: &'a RwLock>>, -) -> Result>, String> { - match db.rooms.exists(room_id) { - Ok(true) => {} - _ => { - return Err("Room is unknown to this server.".to_owned()); - } - } - - match db.rooms.is_disabled(room_id) { - Ok(false) => {} - _ => { - return Err("Federation of this room is currently disabled on this server.".to_owned()); - } - } - - // 1. Skip the PDU if we already have it as a timeline event - if let Ok(Some(pdu_id)) = db.rooms.get_pdu_id(event_id) { - return Ok(Some(pdu_id.to_vec())); - } - - let create_event = db - .rooms - .room_state_get(room_id, &StateEventType::RoomCreate, "") - .map_err(|_| "Failed to ask database for event.".to_owned())? - .ok_or_else(|| "Failed to find create event in db.".to_owned())?; - - let first_pdu_in_room = db - .rooms - .first_pdu_in_room(room_id) - .map_err(|_| "Error loading first room event.".to_owned())? - .expect("Room exists"); - - let (incoming_pdu, val) = handle_outlier_pdu( - origin, - &create_event, - event_id, - room_id, - value, - db, - pub_key_map, - ) - .await?; - - // 8. if not timeline event: stop - if !is_timeline_event { - return Ok(None); - } - - if incoming_pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { - return Ok(None); - } - - // 9. 
Fetch any missing prev events doing all checks listed here starting at 1. These are timeline events - let mut graph: HashMap, _> = HashMap::new(); - let mut eventid_info = HashMap::new(); - let mut todo_outlier_stack: Vec> = incoming_pdu.prev_events.clone(); - - let mut amount = 0; - - while let Some(prev_event_id) = todo_outlier_stack.pop() { - if let Some((pdu, json_opt)) = fetch_and_handle_outliers( - db, - origin, - &[prev_event_id.clone()], - &create_event, - room_id, - pub_key_map, - ) - .await - .pop() - { - if amount > 100 { - // Max limit reached - warn!("Max prev event limit reached!"); - graph.insert(prev_event_id.clone(), HashSet::new()); - continue; - } - - if let Some(json) = - json_opt.or_else(|| db.rooms.get_outlier_pdu_json(&prev_event_id).ok().flatten()) - { - if pdu.origin_server_ts > first_pdu_in_room.origin_server_ts { - amount += 1; - for prev_prev in &pdu.prev_events { - if !graph.contains_key(prev_prev) { - todo_outlier_stack.push(dbg!(prev_prev.clone())); - } - } - - graph.insert( - prev_event_id.clone(), - pdu.prev_events.iter().cloned().collect(), - ); - } else { - // Time based check failed - graph.insert(prev_event_id.clone(), HashSet::new()); - } - - eventid_info.insert(prev_event_id.clone(), (pdu, json)); - } else { - // Get json failed - graph.insert(prev_event_id.clone(), HashSet::new()); - } - } else { - // Fetch and handle failed - graph.insert(prev_event_id.clone(), HashSet::new()); - } - } - - let sorted = state_res::lexicographical_topological_sort(dbg!(&graph), |event_id| { - // This return value is the key used for sorting events, - // events are then sorted by power level, time, - // and lexically by event_id. - println!("{}", event_id); - Ok(( - int!(0), - MilliSecondsSinceUnixEpoch( - eventid_info - .get(event_id) - .map_or_else(|| uint!(0), |info| info.0.origin_server_ts), - ), - )) - }) - .map_err(|_| "Error sorting prev events".to_owned())?; - - let mut errors = 0; - for prev_id in dbg!(sorted) { - match db.rooms.is_disabled(room_id) { - Ok(false) => {} - _ => { - return Err( - "Federation of this room is currently disabled on this server.".to_owned(), - ); - } - } - - if let Some((time, tries)) = db - .globals - .bad_event_ratelimiter - .read() - .unwrap() - .get(&*prev_id) - { - // Exponential backoff - let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); - if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { - min_elapsed_duration = Duration::from_secs(60 * 60 * 24); - } - - if time.elapsed() < min_elapsed_duration { - info!("Backing off from {}", prev_id); - continue; - } - } - - if errors >= 5 { - break; - } - if let Some((pdu, json)) = eventid_info.remove(&*prev_id) { - if pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { - continue; - } - - let start_time = Instant::now(); - db.globals - .roomid_federationhandletime - .write() - .unwrap() - .insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time)); - if let Err(e) = upgrade_outlier_to_timeline_pdu( - pdu, - json, - &create_event, - origin, - db, - room_id, - pub_key_map, - ) - .await - { - errors += 1; - warn!("Prev event {} failed: {}", prev_id, e); - match db - .globals - .bad_event_ratelimiter - .write() - .unwrap() - .entry((*prev_id).to_owned()) - { - hash_map::Entry::Vacant(e) => { - e.insert((Instant::now(), 1)); - } - hash_map::Entry::Occupied(mut e) => { - *e.get_mut() = (Instant::now(), e.get().1 + 1) - } - } - } - let elapsed = start_time.elapsed(); - db.globals - .roomid_federationhandletime - .write() - .unwrap() - 
.remove(&room_id.to_owned()); - warn!( - "Handling prev event {} took {}m{}s", - prev_id, - elapsed.as_secs() / 60, - elapsed.as_secs() % 60 - ); - } - } - - let start_time = Instant::now(); - db.globals - .roomid_federationhandletime - .write() - .unwrap() - .insert(room_id.to_owned(), (event_id.to_owned(), start_time)); - let r = upgrade_outlier_to_timeline_pdu( - incoming_pdu, - val, - &create_event, - origin, - db, - room_id, - pub_key_map, - ) - .await; - db.globals - .roomid_federationhandletime - .write() - .unwrap() - .remove(&room_id.to_owned()); - - r -} - -#[tracing::instrument(skip(create_event, value, db, pub_key_map))] -fn handle_outlier_pdu<'a>( - origin: &'a ServerName, - create_event: &'a PduEvent, - event_id: &'a EventId, - room_id: &'a RoomId, - value: BTreeMap, - db: &'a Database, - pub_key_map: &'a RwLock>>, -) -> AsyncRecursiveType<'a, Result<(Arc, BTreeMap), String>> { - Box::pin(async move { - // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json - - // We go through all the signatures we see on the value and fetch the corresponding signing - // keys - fetch_required_signing_keys(&value, pub_key_map, db) - .await - .map_err(|e| e.to_string())?; - - // 2. Check signatures, otherwise drop - // 3. check content hash, redact if doesn't match - - let create_event_content: RoomCreateEventContent = - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - "Invalid create event in db.".to_owned() - })?; - - let room_version_id = &create_event_content.room_version; - let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); - - let mut val = match ruma::signatures::verify_event( - &*pub_key_map.read().map_err(|_| "RwLock is poisoned.")?, - &value, - room_version_id, - ) { - Err(e) => { - // Drop - warn!("Dropping bad event {}: {}", event_id, e); - return Err("Signature verification failed".to_owned()); - } - Ok(ruma::signatures::Verified::Signatures) => { - // Redact - warn!("Calculated hash does not match: {}", event_id); - match ruma::signatures::redact(&value, room_version_id) { - Ok(obj) => obj, - Err(_) => return Err("Redaction failed".to_owned()), - } - } - Ok(ruma::signatures::Verified::All) => value, - }; - - // Now that we have checked the signature and hashes we can add the eventID and convert - // to our PduEvent type - val.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - let incoming_pdu = serde_json::from_value::( - serde_json::to_value(&val).expect("CanonicalJsonObj is a valid JsonValue"), - ) - .map_err(|_| "Event is not a valid PDU.".to_owned())?; - - // 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events - // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" - // EDIT: Step 5 is not applied anymore because it failed too often - warn!("Fetching auth events for {}", incoming_pdu.event_id); - fetch_and_handle_outliers( - db, - origin, - &incoming_pdu - .auth_events - .iter() - .map(|x| Arc::from(&**x)) - .collect::>(), - create_event, - room_id, - pub_key_map, - ) - .await; - - // 6. 
Reject "due to auth events" if the event doesn't pass auth based on the auth events - info!( - "Auth check for {} based on auth events", - incoming_pdu.event_id - ); - - // Build map of auth events - let mut auth_events = HashMap::new(); - for id in &incoming_pdu.auth_events { - let auth_event = match db.rooms.get_pdu(id).map_err(|e| e.to_string())? { - Some(e) => e, - None => { - warn!("Could not find auth event {}", id); - continue; - } - }; - - match auth_events.entry(( - auth_event.kind.to_string().into(), - auth_event - .state_key - .clone() - .expect("all auth events have state keys"), - )) { - hash_map::Entry::Vacant(v) => { - v.insert(auth_event); - } - hash_map::Entry::Occupied(_) => { - return Err( - "Auth event's type and state_key combination exists multiple times." - .to_owned(), - ) - } - } - } - - // The original create event must be in the auth events - if auth_events - .get(&(StateEventType::RoomCreate, "".to_owned())) - .map(|a| a.as_ref()) - != Some(create_event) - { - return Err("Incoming event refers to wrong create event.".to_owned()); - } - - if !state_res::event_auth::auth_check( - &room_version, - &incoming_pdu, - None::, // TODO: third party invite - |k, s| auth_events.get(&(k.to_string().into(), s.to_owned())), - ) - .map_err(|_e| "Auth check failed".to_owned())? - { - return Err("Event has failed auth check with auth events.".to_owned()); - } - - info!("Validation successful."); - - // 7. Persist the event as an outlier. - db.rooms - .add_pdu_outlier(&incoming_pdu.event_id, &val) - .map_err(|_| "Failed to add pdu as outlier.".to_owned())?; - info!("Added pdu as outlier."); - - Ok((Arc::new(incoming_pdu), val)) - }) -} - -#[tracing::instrument(skip(incoming_pdu, val, create_event, db, pub_key_map))] -async fn upgrade_outlier_to_timeline_pdu( - incoming_pdu: Arc, - val: BTreeMap, - create_event: &PduEvent, - origin: &ServerName, - db: &Database, - room_id: &RoomId, - pub_key_map: &RwLock>>, -) -> Result>, String> { - if let Ok(Some(pduid)) = db.rooms.get_pdu_id(&incoming_pdu.event_id) { - return Ok(Some(pduid)); - } - - if db - .rooms - .is_event_soft_failed(&incoming_pdu.event_id) - .map_err(|_| "Failed to ask db for soft fail".to_owned())? - { - return Err("Event has been soft failed".into()); - } - - info!("Upgrading {} to timeline pdu", incoming_pdu.event_id); - - let create_event_content: RoomCreateEventContent = - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - "Invalid create event in db.".to_owned() - })?; - - let room_version_id = &create_event_content.room_version; - let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); - - // 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities - // doing all the checks in this list starting at 1. These are not timeline events. 
- - // TODO: if we know the prev_events of the incoming event we can avoid the request and build - // the state from a known point and resolve if > 1 prev_event - - info!("Requesting state at event"); - let mut state_at_incoming_event = None; - - if incoming_pdu.prev_events.len() == 1 { - let prev_event = &*incoming_pdu.prev_events[0]; - let prev_event_sstatehash = db - .rooms - .pdu_shortstatehash(prev_event) - .map_err(|_| "Failed talking to db".to_owned())?; - - let state = if let Some(shortstatehash) = prev_event_sstatehash { - Some(db.rooms.state_full_ids(shortstatehash).await) - } else { - None - }; - - if let Some(Ok(mut state)) = state { - info!("Using cached state"); - let prev_pdu = - db.rooms.get_pdu(prev_event).ok().flatten().ok_or_else(|| { - "Could not find prev event, but we know the state.".to_owned() - })?; - - if let Some(state_key) = &prev_pdu.state_key { - let shortstatekey = db - .rooms - .get_or_create_shortstatekey( - &prev_pdu.kind.to_string().into(), - state_key, - &db.globals, - ) - .map_err(|_| "Failed to create shortstatekey.".to_owned())?; - - state.insert(shortstatekey, Arc::from(prev_event)); - // Now it's the state after the pdu - } - - state_at_incoming_event = Some(state); - } - } else { - info!("Calculating state at event using state res"); - let mut extremity_sstatehashes = HashMap::new(); - - let mut okay = true; - for prev_eventid in &incoming_pdu.prev_events { - let prev_event = if let Ok(Some(pdu)) = db.rooms.get_pdu(prev_eventid) { - pdu - } else { - okay = false; - break; - }; - - let sstatehash = if let Ok(Some(s)) = db.rooms.pdu_shortstatehash(prev_eventid) { - s - } else { - okay = false; - break; - }; - - extremity_sstatehashes.insert(sstatehash, prev_event); - } - - if okay { - let mut fork_states = Vec::with_capacity(extremity_sstatehashes.len()); - let mut auth_chain_sets = Vec::with_capacity(extremity_sstatehashes.len()); - - for (sstatehash, prev_event) in extremity_sstatehashes { - let mut leaf_state: BTreeMap<_, _> = db - .rooms - .state_full_ids(sstatehash) - .await - .map_err(|_| "Failed to ask db for room state.".to_owned())?; - - if let Some(state_key) = &prev_event.state_key { - let shortstatekey = db - .rooms - .get_or_create_shortstatekey( - &prev_event.kind.to_string().into(), - state_key, - &db.globals, - ) - .map_err(|_| "Failed to create shortstatekey.".to_owned())?; - leaf_state.insert(shortstatekey, Arc::from(&*prev_event.event_id)); - // Now it's the state after the pdu - } - - let mut state = StateMap::with_capacity(leaf_state.len()); - let mut starting_events = Vec::with_capacity(leaf_state.len()); - - for (k, id) in leaf_state { - if let Ok((ty, st_key)) = db.rooms.get_statekey_from_short(k) { - // FIXME: Undo .to_string().into() when StateMap - // is updated to use StateEventType - state.insert((ty.to_string().into(), st_key), id.clone()); - } else { - warn!("Failed to get_statekey_from_short."); - } - starting_events.push(id); - } - - auth_chain_sets.push( - get_auth_chain(room_id, starting_events, db) - .await - .map_err(|_| "Failed to load auth chain.".to_owned())? 
- .collect(), - ); - - fork_states.push(state); - } - - let lock = db.globals.stateres_mutex.lock(); - - let result = state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| { - let res = db.rooms.get_pdu(id); - if let Err(e) = &res { - error!("LOOK AT ME Failed to fetch event: {}", e); - } - res.ok().flatten() - }); - drop(lock); - - state_at_incoming_event = match result { - Ok(new_state) => Some( - new_state - .into_iter() - .map(|((event_type, state_key), event_id)| { - let shortstatekey = db - .rooms - .get_or_create_shortstatekey( - &event_type.to_string().into(), - &state_key, - &db.globals, - ) - .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; - Ok((shortstatekey, event_id)) - }) - .collect::>()?, - ), - Err(e) => { - warn!("State resolution on prev events failed, either an event could not be found or deserialization: {}", e); - None - } - } - } - } - - if state_at_incoming_event.is_none() { - info!("Calling /state_ids"); - // Call /state_ids to find out what the state at this pdu is. We trust the server's - // response to some extend, but we still do a lot of checks on the events - match db - .sending - .send_federation_request( - &db.globals, - origin, - get_room_state_ids::v1::Request { - room_id, - event_id: &incoming_pdu.event_id, - }, - ) - .await - { - Ok(res) => { - info!("Fetching state events at event."); - let state_vec = fetch_and_handle_outliers( - db, - origin, - &res.pdu_ids - .iter() - .map(|x| Arc::from(&**x)) - .collect::>(), - create_event, - room_id, - pub_key_map, - ) - .await; - - let mut state: BTreeMap<_, Arc> = BTreeMap::new(); - for (pdu, _) in state_vec { - let state_key = pdu - .state_key - .clone() - .ok_or_else(|| "Found non-state pdu in state events.".to_owned())?; - - let shortstatekey = db - .rooms - .get_or_create_shortstatekey( - &pdu.kind.to_string().into(), - &state_key, - &db.globals, - ) - .map_err(|_| "Failed to create shortstatekey.".to_owned())?; - - match state.entry(shortstatekey) { - btree_map::Entry::Vacant(v) => { - v.insert(Arc::from(&*pdu.event_id)); - } - btree_map::Entry::Occupied(_) => return Err( - "State event's type and state_key combination exists multiple times." - .to_owned(), - ), - } - } - - // The original create event must still be in the state - let create_shortstatekey = db - .rooms - .get_shortstatekey(&StateEventType::RoomCreate, "") - .map_err(|_| "Failed to talk to db.")? - .expect("Room exists"); - - if state.get(&create_shortstatekey).map(|id| id.as_ref()) - != Some(&create_event.event_id) - { - return Err("Incoming event refers to wrong create event.".to_owned()); - } - - state_at_incoming_event = Some(state); - } - Err(e) => { - warn!("Fetching state for event failed: {}", e); - return Err("Fetching state for event failed".into()); - } - }; - } - - let state_at_incoming_event = - state_at_incoming_event.expect("we always set this to some above"); - - info!("Starting auth check"); - // 11. 
Check the auth of the event passes based on the state of the event - let check_result = state_res::event_auth::auth_check( - &room_version, - &incoming_pdu, - None::, // TODO: third party invite - |k, s| { - db.rooms - .get_shortstatekey(&k.to_string().into(), s) - .ok() - .flatten() - .and_then(|shortstatekey| state_at_incoming_event.get(&shortstatekey)) - .and_then(|event_id| db.rooms.get_pdu(event_id).ok().flatten()) - }, - ) - .map_err(|_e| "Auth check failed.".to_owned())?; - - if !check_result { - return Err("Event has failed auth check with state at the event.".into()); - } - info!("Auth check succeeded"); - - // We start looking at current room state now, so lets lock the room - - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - // Now we calculate the set of extremities this room has after the incoming event has been - // applied. We start with the previous extremities (aka leaves) - info!("Calculating extremities"); - let mut extremities = db - .rooms - .get_pdu_leaves(room_id) - .map_err(|_| "Failed to load room leaves".to_owned())?; - - // Remove any forward extremities that are referenced by this incoming event's prev_events - for prev_event in &incoming_pdu.prev_events { - if extremities.contains(prev_event) { - extremities.remove(prev_event); - } - } - - // Only keep those extremities were not referenced yet - extremities.retain(|id| !matches!(db.rooms.is_event_referenced(room_id, id), Ok(true))); - - info!("Compressing state at event"); - let state_ids_compressed = state_at_incoming_event - .iter() - .map(|(shortstatekey, id)| { - db.rooms - .compress_state_event(*shortstatekey, id, &db.globals) - .map_err(|_| "Failed to compress_state_event".to_owned()) - }) - .collect::>()?; - - // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it - info!("Starting soft fail auth check"); - - let auth_events = db - .rooms - .get_auth_events( - room_id, - &incoming_pdu.kind, - &incoming_pdu.sender, - incoming_pdu.state_key.as_deref(), - &incoming_pdu.content, - ) - .map_err(|_| "Failed to get_auth_events.".to_owned())?; - - let soft_fail = !state_res::event_auth::auth_check( - &room_version, - &incoming_pdu, - None::, - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|_e| "Auth check failed.".to_owned())?; - - if soft_fail { - append_incoming_pdu( - db, - &incoming_pdu, - val, - extremities.iter().map(Deref::deref), - state_ids_compressed, - soft_fail, - &state_lock, - ) - .map_err(|e| { - warn!("Failed to add pdu to db: {}", e); - "Failed to add pdu to db.".to_owned() - })?; - - // Soft fail, we keep the event as an outlier but don't add it to the timeline - warn!("Event was soft failed: {:?}", incoming_pdu); - db.rooms - .mark_event_soft_failed(&incoming_pdu.event_id) - .map_err(|_| "Failed to set soft failed flag".to_owned())?; - return Err("Event has been soft failed".into()); - } - - if incoming_pdu.state_key.is_some() { - info!("Loading current room state ids"); - let current_sstatehash = db - .rooms - .current_shortstatehash(room_id) - .map_err(|_| "Failed to load current state hash.".to_owned())? 
- .expect("every room has state"); - - let current_state_ids = db - .rooms - .state_full_ids(current_sstatehash) - .await - .map_err(|_| "Failed to load room state.")?; - - info!("Preparing for stateres to derive new room state"); - let mut extremity_sstatehashes = HashMap::new(); - - info!("Loading extremities"); - for id in dbg!(&extremities) { - match db - .rooms - .get_pdu(id) - .map_err(|_| "Failed to ask db for pdu.".to_owned())? - { - Some(leaf_pdu) => { - extremity_sstatehashes.insert( - db.rooms - .pdu_shortstatehash(&leaf_pdu.event_id) - .map_err(|_| "Failed to ask db for pdu state hash.".to_owned())? - .ok_or_else(|| { - error!( - "Found extremity pdu with no statehash in db: {:?}", - leaf_pdu - ); - "Found pdu with no statehash in db.".to_owned() - })?, - leaf_pdu, - ); - } - _ => { - error!("Missing state snapshot for {:?}", id); - return Err("Missing state snapshot.".to_owned()); - } - } - } - - let mut fork_states = Vec::new(); - - // 12. Ensure that the state is derived from the previous current state (i.e. we calculated - // by doing state res where one of the inputs was a previously trusted set of state, - // don't just trust a set of state we got from a remote). - - // We do this by adding the current state to the list of fork states - extremity_sstatehashes.remove(¤t_sstatehash); - fork_states.push(current_state_ids); - - // We also add state after incoming event to the fork states - let mut state_after = state_at_incoming_event.clone(); - if let Some(state_key) = &incoming_pdu.state_key { - let shortstatekey = db - .rooms - .get_or_create_shortstatekey( - &incoming_pdu.kind.to_string().into(), - state_key, - &db.globals, - ) - .map_err(|_| "Failed to create shortstatekey.".to_owned())?; - - state_after.insert(shortstatekey, Arc::from(&*incoming_pdu.event_id)); - } - fork_states.push(state_after); - - let mut update_state = false; - // 14. Use state resolution to find new room state - let new_room_state = if fork_states.is_empty() { - return Err("State is empty.".to_owned()); - } else if fork_states.iter().skip(1).all(|f| &fork_states[0] == f) { - info!("State resolution trivial"); - // There was only one state, so it has to be the room's current state (because that is - // always included) - fork_states[0] - .iter() - .map(|(k, id)| { - db.rooms - .compress_state_event(*k, id, &db.globals) - .map_err(|_| "Failed to compress_state_event.".to_owned()) - }) - .collect::>()? - } else { - info!("Loading auth chains"); - // We do need to force an update to this room's state - update_state = true; - - let mut auth_chain_sets = Vec::new(); - for state in &fork_states { - auth_chain_sets.push( - get_auth_chain( - room_id, - state.iter().map(|(_, id)| id.clone()).collect(), - db, - ) - .await - .map_err(|_| "Failed to load auth chain.".to_owned())? 
- .collect(), - ); - } - - info!("Loading fork states"); - - let fork_states: Vec<_> = fork_states - .into_iter() - .map(|map| { - map.into_iter() - .filter_map(|(k, id)| { - db.rooms - .get_statekey_from_short(k) - // FIXME: Undo .to_string().into() when StateMap - // is updated to use StateEventType - .map(|(ty, st_key)| ((ty.to_string().into(), st_key), id)) - .map_err(|e| warn!("Failed to get_statekey_from_short: {}", e)) - .ok() - }) - .collect::>() - }) - .collect(); - - info!("Resolving state"); - - let lock = db.globals.stateres_mutex.lock(); - let state = match state_res::resolve( - room_version_id, - &fork_states, - auth_chain_sets, - |id| { - let res = db.rooms.get_pdu(id); - if let Err(e) = &res { - error!("LOOK AT ME Failed to fetch event: {}", e); - } - res.ok().flatten() - }, - ) { - Ok(new_state) => new_state, - Err(_) => { - return Err("State resolution failed, either an event could not be found or deserialization".into()); - } - }; - - drop(lock); - - info!("State resolution done. Compressing state"); - - state - .into_iter() - .map(|((event_type, state_key), event_id)| { - let shortstatekey = db - .rooms - .get_or_create_shortstatekey( - &event_type.to_string().into(), - &state_key, - &db.globals, - ) - .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; - db.rooms - .compress_state_event(shortstatekey, &event_id, &db.globals) - .map_err(|_| "Failed to compress state event".to_owned()) - }) - .collect::>()? - }; - - // Set the new room state to the resolved state - if update_state { - info!("Forcing new room state"); - db.rooms - .force_state(room_id, new_room_state, db) - .map_err(|_| "Failed to set new room state.".to_owned())?; - } - } - - info!("Appending pdu to timeline"); - extremities.insert(incoming_pdu.event_id.clone()); - - // Now that the event has passed all auth it is added into the timeline. - // We use the `state_at_event` instead of `state_after` so we accurately - // represent the state for this event. - - let pdu_id = append_incoming_pdu( - db, - &incoming_pdu, - val, - extremities.iter().map(Deref::deref), - state_ids_compressed, - soft_fail, - &state_lock, - ) - .map_err(|e| { - warn!("Failed to add pdu to db: {}", e); - "Failed to add pdu to db.".to_owned() - })?; - - info!("Appended incoming pdu"); - - // Event has passed all auth/stateres checks - drop(state_lock); - Ok(pdu_id) -} - -/// Find the event and auth it. Once the event is validated (steps 1 - 8) -/// it is appended to the outliers Tree. -/// -/// Returns pdu and if we fetched it over federation the raw json. -/// -/// a. Look in the main timeline (pduid_pdu tree) -/// b. Look at outlier pdu tree -/// c. Ask origin server over federation -/// d. TODO: Ask other servers over federation? 
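The outlier-fetching helper here backs off from event ids that keep failing to fetch: each retry must wait at least five minutes times the square of the previous attempt count, capped at one day. A minimal self-contained sketch of that back-off rule, with hypothetical names and plain std types (not code from this patch):

```rust
use std::time::{Duration, Instant};

/// Back-off rule applied per failing event id: after `tries` failed attempts,
/// wait at least 5 minutes * tries^2 before retrying, capped at 24 hours.
fn should_retry(last_failure: Instant, tries: u32) -> bool {
    let mut min_elapsed = Duration::from_secs(5 * 60) * tries * tries;
    if min_elapsed > Duration::from_secs(60 * 60 * 24) {
        min_elapsed = Duration::from_secs(60 * 60 * 24);
    }
    last_failure.elapsed() >= min_elapsed
}

fn main() {
    // A failure recorded just now with 3 prior tries is not retried immediately.
    assert!(!should_retry(Instant::now(), 3));
}
```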
-#[tracing::instrument(skip_all)] -pub(crate) fn fetch_and_handle_outliers<'a>( - db: &'a Database, - origin: &'a ServerName, - events: &'a [Arc], - create_event: &'a PduEvent, - room_id: &'a RoomId, - pub_key_map: &'a RwLock>>, -) -> AsyncRecursiveType<'a, Vec<(Arc, Option>)>> { - Box::pin(async move { - let back_off = |id| match db.globals.bad_event_ratelimiter.write().unwrap().entry(id) { - hash_map::Entry::Vacant(e) => { - e.insert((Instant::now(), 1)); - } - hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), - }; - - let mut pdus = vec![]; - for id in events { - if let Some((time, tries)) = db.globals.bad_event_ratelimiter.read().unwrap().get(&**id) - { - // Exponential backoff - let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); - if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { - min_elapsed_duration = Duration::from_secs(60 * 60 * 24); - } - - if time.elapsed() < min_elapsed_duration { - info!("Backing off from {}", id); - continue; - } - } - - // a. Look in the main timeline (pduid_pdu tree) - // b. Look at outlier pdu tree - // (get_pdu_json checks both) - if let Ok(Some(local_pdu)) = db.rooms.get_pdu(id) { - trace!("Found {} in db", id); - pdus.push((local_pdu, None)); - continue; - } - - // c. Ask origin server over federation - // We also handle its auth chain here so we don't get a stack overflow in - // handle_outlier_pdu. - let mut todo_auth_events = vec![Arc::clone(id)]; - let mut events_in_reverse_order = Vec::new(); - let mut events_all = HashSet::new(); - let mut i = 0; - while let Some(next_id) = todo_auth_events.pop() { - if events_all.contains(&next_id) { - continue; - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - - if let Ok(Some(_)) = db.rooms.get_pdu(&next_id) { - trace!("Found {} in db", id); - continue; - } - - info!("Fetching {} over federation.", next_id); - match db - .sending - .send_federation_request( - &db.globals, - origin, - get_event::v1::Request { event_id: &next_id }, - ) - .await - { - Ok(res) => { - info!("Got {} over federation", next_id); - let (calculated_event_id, value) = - match crate::pdu::gen_event_id_canonical_json(&res.pdu, &db) { - Ok(t) => t, - Err(_) => { - back_off((*next_id).to_owned()); - continue; - } - }; - - if calculated_event_id != *next_id { - warn!("Server didn't return event id we requested: requested: {}, we got {}. 
Event: {:?}", - next_id, calculated_event_id, &res.pdu); - } - - if let Some(auth_events) = - value.get("auth_events").and_then(|c| c.as_array()) - { - for auth_event in auth_events { - if let Ok(auth_event) = - serde_json::from_value(auth_event.clone().into()) - { - let a: Arc = auth_event; - todo_auth_events.push(a); - } else { - warn!("Auth event id is not valid"); - } - } - } else { - warn!("Auth event list invalid"); - } - - events_in_reverse_order.push((next_id.clone(), value)); - events_all.insert(next_id); - } - Err(_) => { - warn!("Failed to fetch event: {}", next_id); - back_off((*next_id).to_owned()); - } - } - } - - for (next_id, value) in events_in_reverse_order.iter().rev() { - match handle_outlier_pdu( - origin, - create_event, - next_id, - room_id, - value.clone(), - db, - pub_key_map, - ) - .await - { - Ok((pdu, json)) => { - if next_id == id { - pdus.push((pdu, Some(json))); - } - } - Err(e) => { - warn!("Authentication of event {} failed: {:?}", next_id, e); - back_off((**next_id).to_owned()); - } - } - } - } - pdus - }) -} - /// Search the DB for the signing keys of the given server, if we don't have them /// fetch them from the server and save to our DB. #[tracing::instrument(skip_all)] @@ -2204,92 +1047,6 @@ pub(crate) async fn fetch_signing_keys( )) } -/// Append the incoming event setting the state snapshot to the state from the -/// server that sent the event. -#[tracing::instrument(skip_all)] -fn append_incoming_pdu<'a>( - db: &Database, - pdu: &PduEvent, - pdu_json: CanonicalJsonObject, - new_room_leaves: impl IntoIterator + Clone + Debug, - state_ids_compressed: HashSet, - soft_fail: bool, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex -) -> Result>> { - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. - db.rooms.set_event_state( - &pdu.event_id, - &pdu.room_id, - state_ids_compressed, - &db.globals, - )?; - - if soft_fail { - db.rooms - .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - db.rooms.replace_pdu_leaves(&pdu.room_id, new_room_leaves)?; - return Ok(None); - } - - let pdu_id = db.rooms.append_pdu(pdu, pdu_json, new_room_leaves, db)?; - - for appservice in db.appservice.all()? { - if db.rooms.appservice_in_room(&pdu.room_id, &appservice, db)? 
{ - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - db.rooms - .room_aliases(&pdu.room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&pdu.room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - Ok(Some(pdu_id)) -} - #[tracing::instrument(skip(starting_events, db))] pub(crate) async fn get_auth_chain<'a>( room_id: &RoomId, @@ -2745,35 +1502,6 @@ pub async fn create_join_event_template_route( } } - let prev_events: Vec<_> = db - .rooms - .get_pdu_leaves(&body.room_id)? - .into_iter() - .take(20) - .collect(); - - let create_event = db - .rooms - .room_state_get(&body.room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default version - // right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - if !body.ver.contains(&room_version_id) { return Err(Error::BadRequest( ErrorKind::IncompatibleRoomVersion { @@ -2798,89 +1526,7 @@ pub async fn create_join_event_template_route( let state_key = body.user_id.to_string(); let kind = StateEventType::RoomMember; - let auth_events = db.rooms.get_auth_events( - &body.room_id, - &kind.to_string().into(), - &body.user_id, - Some(&state_key), - &content, - )?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(db.rooms.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = BTreeMap::new(); - - if let Some(prev_pdu) = db.rooms.room_state_get(&body.room_id, &kind, &state_key)? 
{ - unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone()); - unsigned.insert( - "prev_sender".to_owned(), - to_raw_value(&prev_pdu.sender).expect("UserId is valid"), - ); - } - - let pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: body.room_id.clone(), - sender: body.user_id.clone(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: kind.to_string().into(), - content, - state_key: Some(state_key), - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts: None, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); + let (pdu, pdu_json) = create_hash_and_sign_event(); Ok(prepare_join_event::v1::Response { room_version: Some(room_version_id), diff --git a/src/service/rooms/alias.rs b/src/service/rooms/alias/mod.rs similarity index 100% rename from src/service/rooms/alias.rs rename to src/service/rooms/alias/mod.rs diff --git a/src/service/rooms/directory.rs b/src/service/rooms/directory/mod.rs similarity index 100% rename from src/service/rooms/directory.rs rename to src/service/rooms/directory/mod.rs diff --git a/src/service/rooms/edus.rs b/src/service/rooms/edus/mod.rs similarity index 100% rename from src/service/rooms/edus.rs rename to src/service/rooms/edus/mod.rs diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 6fa83e4c..e59219b2 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -1,886 +1,3 @@ -use crate::{ - client_server::{self, claim_keys_helper, get_keys_helper}, - database::{rooms::CompressedStateEvent, DatabaseGuard}, - pdu::EventHash, - utils, Database, Error, PduEvent, Result, Ruma, -}; -use axum::{response::IntoResponse, Json}; -use futures_util::{stream::FuturesUnordered, StreamExt}; -use get_profile_information::v1::ProfileField; -use http::header::{HeaderValue, AUTHORIZATION}; -use regex::Regex; -use ruma::{ - api::{ - client::error::{Error as RumaError, ErrorKind}, - federation::{ - authorization::get_event_authorization, - device::get_devices::{self, v1::UserDevice}, - directory::{get_public_rooms, get_public_rooms_filtered}, - discovery::{ - get_remote_server_keys, get_remote_server_keys_batch, - get_remote_server_keys_batch::v2::QueryCriteria, get_server_keys, - get_server_version, ServerSigningKeys, VerifyKey, - }, - event::{get_event, get_missing_events, get_room_state, get_room_state_ids}, - keys::{claim_keys, get_keys}, - membership::{ - create_invite, - create_join_event::{self, RoomState}, - prepare_join_event, - }, - 
query::{get_profile_information, get_room_information}, - transactions::{ - edu::{DeviceListUpdateContent, DirectDeviceContent, Edu, SigningKeyUpdateContent}, - send_transaction_message, - }, - }, - EndpointError, IncomingResponse, MatrixVersion, OutgoingRequest, OutgoingResponse, - SendAccessToken, - }, - directory::{IncomingFilter, IncomingRoomNetwork}, - events::{ - receipt::{ReceiptEvent, ReceiptEventContent}, - room::{ - create::RoomCreateEventContent, - join_rules::{JoinRule, RoomJoinRulesEventContent}, - member::{MembershipState, RoomMemberEventContent}, - server_acl::RoomServerAclEventContent, - }, - RoomEventType, StateEventType, - }, - int, - receipt::ReceiptType, - serde::{Base64, JsonObject, Raw}, - signatures::{CanonicalJsonObject, CanonicalJsonValue}, - state_res::{self, RoomVersion, StateMap}, - to_device::DeviceIdOrAllDevices, - uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, ServerName, - ServerSigningKeyId, -}; -use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; -use std::{ - collections::{btree_map, hash_map, BTreeMap, BTreeSet, HashMap, HashSet}, - fmt::Debug, - future::Future, - mem, - net::{IpAddr, SocketAddr}, - ops::Deref, - pin::Pin, - sync::{Arc, RwLock, RwLockWriteGuard}, - time::{Duration, Instant, SystemTime}, -}; -use tokio::sync::{MutexGuard, Semaphore}; -use tracing::{debug, error, info, trace, warn}; - -/// Wraps either an literal IP address plus port, or a hostname plus complement -/// (colon-plus-port if it was specified). -/// -/// Note: A `FedDest::Named` might contain an IP address in string form if there -/// was no port specified to construct a SocketAddr with. -/// -/// # Examples: -/// ```rust -/// # use conduit::server_server::FedDest; -/// # fn main() -> Result<(), std::net::AddrParseError> { -/// FedDest::Literal("198.51.100.3:8448".parse()?); -/// FedDest::Literal("[2001:db8::4:5]:443".parse()?); -/// FedDest::Named("matrix.example.org".to_owned(), "".to_owned()); -/// FedDest::Named("matrix.example.org".to_owned(), ":8448".to_owned()); -/// FedDest::Named("198.51.100.5".to_owned(), "".to_owned()); -/// # Ok(()) -/// # } -/// ``` -#[derive(Clone, Debug, PartialEq)] -pub enum FedDest { - Literal(SocketAddr), - Named(String, String), -} - -impl FedDest { - fn into_https_string(self) -> String { - match self { - Self::Literal(addr) => format!("https://{}", addr), - Self::Named(host, port) => format!("https://{}{}", host, port), - } - } - - fn into_uri_string(self) -> String { - match self { - Self::Literal(addr) => addr.to_string(), - Self::Named(host, ref port) => host + port, - } - } - - fn hostname(&self) -> String { - match &self { - Self::Literal(addr) => addr.ip().to_string(), - Self::Named(host, _) => host.clone(), - } - } - - fn port(&self) -> Option { - match &self { - Self::Literal(addr) => Some(addr.port()), - Self::Named(_, port) => port[1..].parse().ok(), - } - } -} - -#[tracing::instrument(skip(globals, request))] -pub(crate) async fn send_request( - globals: &crate::database::globals::Globals, - destination: &ServerName, - request: T, -) -> Result -where - T: Debug, -{ - if !globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let mut write_destination_to_cache = false; - - let cached_result = globals - .actual_destination_cache - .read() - .unwrap() - .get(destination) - .cloned(); - - let (actual_destination, host) = if let Some(result) = cached_result { - result - } else { - write_destination_to_cache = true; - - let result = 
find_actual_destination(globals, destination).await; - - (result.0, result.1.into_uri_string()) - }; - - let actual_destination_str = actual_destination.clone().into_https_string(); - - let mut http_request = request - .try_into_http_request::>( - &actual_destination_str, - SendAccessToken::IfRequired(""), - &[MatrixVersion::V1_0], - ) - .map_err(|e| { - warn!( - "Failed to find destination {}: {}", - actual_destination_str, e - ); - Error::BadServerResponse("Invalid destination") - })?; - - let mut request_map = serde_json::Map::new(); - - if !http_request.body().is_empty() { - request_map.insert( - "content".to_owned(), - serde_json::from_slice(http_request.body()) - .expect("body is valid json, we just created it"), - ); - }; - - request_map.insert("method".to_owned(), T::METADATA.method.to_string().into()); - request_map.insert( - "uri".to_owned(), - http_request - .uri() - .path_and_query() - .expect("all requests have a path") - .to_string() - .into(), - ); - request_map.insert("origin".to_owned(), globals.server_name().as_str().into()); - request_map.insert("destination".to_owned(), destination.as_str().into()); - - let mut request_json = - serde_json::from_value(request_map.into()).expect("valid JSON is valid BTreeMap"); - - ruma::signatures::sign_json( - globals.server_name().as_str(), - globals.keypair(), - &mut request_json, - ) - .expect("our request json is what ruma expects"); - - let request_json: serde_json::Map = - serde_json::from_slice(&serde_json::to_vec(&request_json).unwrap()).unwrap(); - - let signatures = request_json["signatures"] - .as_object() - .unwrap() - .values() - .map(|v| { - v.as_object() - .unwrap() - .iter() - .map(|(k, v)| (k, v.as_str().unwrap())) - }); - - for signature_server in signatures { - for s in signature_server { - http_request.headers_mut().insert( - AUTHORIZATION, - HeaderValue::from_str(&format!( - "X-Matrix origin={},key=\"{}\",sig=\"{}\"", - globals.server_name(), - s.0, - s.1 - )) - .unwrap(), - ); - } - } - - let reqwest_request = reqwest::Request::try_from(http_request) - .expect("all http requests are valid reqwest requests"); - - let url = reqwest_request.url().clone(); - - let response = globals.federation_client().execute(reqwest_request).await; - - match response { - Ok(mut response) => { - // reqwest::Response -> http::Response conversion - let status = response.status(); - let mut http_response_builder = http::Response::builder() - .status(status) - .version(response.version()); - mem::swap( - response.headers_mut(), - http_response_builder - .headers_mut() - .expect("http::response::Builder is usable"), - ); - - let body = response.bytes().await.unwrap_or_else(|e| { - warn!("server error {}", e); - Vec::new().into() - }); // TODO: handle timeout - - if status != 200 { - warn!( - "{} {}: {}", - url, - status, - String::from_utf8_lossy(&body) - .lines() - .collect::>() - .join(" ") - ); - } - - let http_response = http_response_builder - .body(body) - .expect("reqwest body is valid http body"); - - if status == 200 { - let response = T::IncomingResponse::try_from_http_response(http_response); - if response.is_ok() && write_destination_to_cache { - globals.actual_destination_cache.write().unwrap().insert( - Box::::from(destination), - (actual_destination, host), - ); - } - - response.map_err(|e| { - warn!( - "Invalid 200 response from {} on: {} {}", - &destination, url, e - ); - Error::BadServerResponse("Server returned bad 200 response.") - }) - } else { - Err(Error::FederationError( - destination.to_owned(), - 
RumaError::try_from_http_response(http_response).map_err(|e| { - warn!( - "Invalid {} response from {} on: {} {}", - status, &destination, url, e - ); - Error::BadServerResponse("Server returned bad error response.") - })?, - )) - } - } - Err(e) => Err(e.into()), - } -} - -fn get_ip_with_port(destination_str: &str) -> Option { - if let Ok(destination) = destination_str.parse::() { - Some(FedDest::Literal(destination)) - } else if let Ok(ip_addr) = destination_str.parse::() { - Some(FedDest::Literal(SocketAddr::new(ip_addr, 8448))) - } else { - None - } -} - -fn add_port_to_hostname(destination_str: &str) -> FedDest { - let (host, port) = match destination_str.find(':') { - None => (destination_str, ":8448"), - Some(pos) => destination_str.split_at(pos), - }; - FedDest::Named(host.to_owned(), port.to_owned()) -} - -/// Returns: actual_destination, host header -/// Implemented according to the specification at https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names -/// Numbers in comments below refer to bullet points in linked section of specification -#[tracing::instrument(skip(globals))] -async fn find_actual_destination( - globals: &crate::database::globals::Globals, - destination: &'_ ServerName, -) -> (FedDest, FedDest) { - let destination_str = destination.as_str().to_owned(); - let mut hostname = destination_str.clone(); - let actual_destination = match get_ip_with_port(&destination_str) { - Some(host_port) => { - // 1: IP literal with provided or default port - host_port - } - None => { - if let Some(pos) = destination_str.find(':') { - // 2: Hostname with included port - let (host, port) = destination_str.split_at(pos); - FedDest::Named(host.to_owned(), port.to_owned()) - } else { - match request_well_known(globals, destination.as_str()).await { - // 3: A .well-known file is available - Some(delegated_hostname) => { - hostname = add_port_to_hostname(&delegated_hostname).into_uri_string(); - match get_ip_with_port(&delegated_hostname) { - Some(host_and_port) => host_and_port, // 3.1: IP literal in .well-known file - None => { - if let Some(pos) = delegated_hostname.find(':') { - // 3.2: Hostname with port in .well-known file - let (host, port) = delegated_hostname.split_at(pos); - FedDest::Named(host.to_owned(), port.to_owned()) - } else { - // Delegated hostname has no port in this branch - if let Some(hostname_override) = - query_srv_record(globals, &delegated_hostname).await - { - // 3.3: SRV lookup successful - let force_port = hostname_override.port(); - - if let Ok(override_ip) = globals - .dns_resolver() - .lookup_ip(hostname_override.hostname()) - .await - { - globals.tls_name_override.write().unwrap().insert( - delegated_hostname.clone(), - ( - override_ip.iter().collect(), - force_port.unwrap_or(8448), - ), - ); - } else { - warn!("Using SRV record, but could not resolve to IP"); - } - - if let Some(port) = force_port { - FedDest::Named(delegated_hostname, format!(":{}", port)) - } else { - add_port_to_hostname(&delegated_hostname) - } - } else { - // 3.4: No SRV records, just use the hostname from .well-known - add_port_to_hostname(&delegated_hostname) - } - } - } - } - } - // 4: No .well-known or an error occured - None => { - match query_srv_record(globals, &destination_str).await { - // 4: SRV record found - Some(hostname_override) => { - let force_port = hostname_override.port(); - - if let Ok(override_ip) = globals - .dns_resolver() - .lookup_ip(hostname_override.hostname()) - .await - { - globals.tls_name_override.write().unwrap().insert( - 
hostname.clone(), - (override_ip.iter().collect(), force_port.unwrap_or(8448)), - ); - } else { - warn!("Using SRV record, but could not resolve to IP"); - } - - if let Some(port) = force_port { - FedDest::Named(hostname.clone(), format!(":{}", port)) - } else { - add_port_to_hostname(&hostname) - } - } - // 5: No SRV record found - None => add_port_to_hostname(&destination_str), - } - } - } - } - } - }; - - // Can't use get_ip_with_port here because we don't want to add a port - // to an IP address if it wasn't specified - let hostname = if let Ok(addr) = hostname.parse::() { - FedDest::Literal(addr) - } else if let Ok(addr) = hostname.parse::() { - FedDest::Named(addr.to_string(), ":8448".to_owned()) - } else if let Some(pos) = hostname.find(':') { - let (host, port) = hostname.split_at(pos); - FedDest::Named(host.to_owned(), port.to_owned()) - } else { - FedDest::Named(hostname, ":8448".to_owned()) - }; - (actual_destination, hostname) -} - -#[tracing::instrument(skip(globals))] -async fn query_srv_record( - globals: &crate::database::globals::Globals, - hostname: &'_ str, -) -> Option { - if let Ok(Some(host_port)) = globals - .dns_resolver() - .srv_lookup(format!("_matrix._tcp.{}", hostname)) - .await - .map(|srv| { - srv.iter().next().map(|result| { - FedDest::Named( - result.target().to_string().trim_end_matches('.').to_owned(), - format!(":{}", result.port()), - ) - }) - }) - { - Some(host_port) - } else { - None - } -} - -#[tracing::instrument(skip(globals))] -async fn request_well_known( - globals: &crate::database::globals::Globals, - destination: &str, -) -> Option { - let body: serde_json::Value = serde_json::from_str( - &globals - .default_client() - .get(&format!( - "https://{}/.well-known/matrix/server", - destination - )) - .send() - .await - .ok()? - .text() - .await - .ok()?, - ) - .ok()?; - Some(body.get("m.server")?.as_str()?.to_owned()) -} - -/// # `GET /_matrix/federation/v1/version` -/// -/// Get version information on this server. -pub async fn get_server_version_route( - db: DatabaseGuard, - _body: Ruma, -) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - Ok(get_server_version::v1::Response { - server: Some(get_server_version::v1::Server { - name: Some("Conduit".to_owned()), - version: Some(env!("CARGO_PKG_VERSION").to_owned()), - }), - }) -} - -/// # `GET /_matrix/key/v2/server` -/// -/// Gets the public signing keys of this server. -/// -/// - Matrix does not support invalidating public keys, so the key returned by this will be valid -/// forever. 
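The key endpoint here publishes the server's signing keys as a map from key id to public key, where the key id combines the algorithm with a key version (`ed25519:{version}`). A minimal sketch of that map shape only; the version string and key bytes below are placeholders, not values from this codebase:

```rust
use std::collections::BTreeMap;

/// Shape of the published key map: "ed25519:{key_version}" -> unpadded-base64
/// public key, as served under /_matrix/key/v2/server.
fn verify_keys(key_version: &str, public_key_b64: &str) -> BTreeMap<String, String> {
    BTreeMap::from([(format!("ed25519:{}", key_version), public_key_b64.to_owned())])
}

fn main() {
    let keys = verify_keys("a_XRhW", "base64encodedpublickey");
    assert!(keys.contains_key("ed25519:a_XRhW"));
}
```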
-// Response type for this endpoint is Json because we need to calculate a signature for the response -pub async fn get_server_keys_route(db: DatabaseGuard) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let mut verify_keys: BTreeMap, VerifyKey> = BTreeMap::new(); - verify_keys.insert( - format!("ed25519:{}", db.globals.keypair().version()) - .try_into() - .expect("found invalid server signing keys in DB"), - VerifyKey { - key: Base64::new(db.globals.keypair().public_key().to_vec()), - }, - ); - let mut response = serde_json::from_slice( - get_server_keys::v2::Response { - server_key: Raw::new(&ServerSigningKeys { - server_name: db.globals.server_name().to_owned(), - verify_keys, - old_verify_keys: BTreeMap::new(), - signatures: BTreeMap::new(), - valid_until_ts: MilliSecondsSinceUnixEpoch::from_system_time( - SystemTime::now() + Duration::from_secs(86400 * 7), - ) - .expect("time is valid"), - }) - .expect("static conversion, no errors"), - } - .try_into_http_response::>() - .unwrap() - .body(), - ) - .unwrap(); - - ruma::signatures::sign_json( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut response, - ) - .unwrap(); - - Ok(Json(response)) -} - -/// # `GET /_matrix/key/v2/server/{keyId}` -/// -/// Gets the public signing keys of this server. -/// -/// - Matrix does not support invalidating public keys, so the key returned by this will be valid -/// forever. -pub async fn get_server_keys_deprecated_route(db: DatabaseGuard) -> impl IntoResponse { - get_server_keys_route(db).await -} - -/// # `POST /_matrix/federation/v1/publicRooms` -/// -/// Lists the public rooms on this server. -pub async fn get_public_rooms_filtered_route( - db: DatabaseGuard, - body: Ruma, -) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let response = client_server::get_public_rooms_filtered_helper( - &db, - None, - body.limit, - body.since.as_deref(), - &body.filter, - &body.room_network, - ) - .await?; - - Ok(get_public_rooms_filtered::v1::Response { - chunk: response.chunk, - prev_batch: response.prev_batch, - next_batch: response.next_batch, - total_room_count_estimate: response.total_room_count_estimate, - }) -} - -/// # `GET /_matrix/federation/v1/publicRooms` -/// -/// Lists the public rooms on this server. -pub async fn get_public_rooms_route( - db: DatabaseGuard, - body: Ruma, -) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let response = client_server::get_public_rooms_filtered_helper( - &db, - None, - body.limit, - body.since.as_deref(), - &IncomingFilter::default(), - &IncomingRoomNetwork::Matrix, - ) - .await?; - - Ok(get_public_rooms::v1::Response { - chunk: response.chunk, - prev_batch: response.prev_batch, - next_batch: response.next_batch, - total_room_count_estimate: response.total_room_count_estimate, - }) -} - -/// # `PUT /_matrix/federation/v1/send/{txnId}` -/// -/// Push EDUs and PDUs to this server. 
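The transaction handler here processes every PDU in the request independently and reports a per-event outcome in the response. A minimal sketch of that result map, assuming placeholder event ids; the error string mirrors the one the handler uses for events without a valid room id:

```rust
use std::collections::BTreeMap;

/// One entry per PDU in the transaction: event id -> Ok(()) on success, or a
/// human-readable reason when the event was rejected.
fn example_resolved_map() -> BTreeMap<String, Result<(), String>> {
    let mut resolved = BTreeMap::new();
    resolved.insert("$accepted_event_id".to_owned(), Ok(()));
    resolved.insert(
        "$rejected_event_id".to_owned(),
        Err("Event needs a valid RoomId.".to_owned()),
    );
    resolved
}

fn main() {
    let failures = example_resolved_map()
        .values()
        .filter(|r| r.is_err())
        .count();
    assert_eq!(failures, 1);
}
```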
-pub async fn send_transaction_message_route( - db: DatabaseGuard, - body: Ruma, -) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); - - let mut resolved_map = BTreeMap::new(); - - let pub_key_map = RwLock::new(BTreeMap::new()); - - // This is all the auth_events that have been recursively fetched so they don't have to be - // deserialized over and over again. - // TODO: make this persist across requests but not in a DB Tree (in globals?) - // TODO: This could potentially also be some sort of trie (suffix tree) like structure so - // that once an auth event is known it would know (using indexes maybe) all of the auth - // events that it references. - // let mut auth_cache = EventMap::new(); - - for pdu in &body.pdus { - // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu, &db) { - Ok(t) => t, - Err(_) => { - // Event could not be converted to canonical json - continue; - } - }; - - // 0. Check the server is in the room - let room_id = match value - .get("room_id") - .and_then(|id| RoomId::parse(id.as_str()?).ok()) - { - Some(id) => id, - None => { - // Event is invalid - resolved_map.insert(event_id, Err("Event needs a valid RoomId.".to_owned())); - continue; - } - }; - - acl_check(&sender_servername, &room_id, &db)?; - - let mutex = Arc::clone( - db.globals - .roomid_mutex_federation - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let mutex_lock = mutex.lock().await; - let start_time = Instant::now(); - resolved_map.insert( - event_id.clone(), - handle_incoming_pdu( - &sender_servername, - &event_id, - &room_id, - value, - true, - &db, - &pub_key_map, - ) - .await - .map(|_| ()), - ); - drop(mutex_lock); - - let elapsed = start_time.elapsed(); - warn!( - "Handling transaction of event {} took {}m{}s", - event_id, - elapsed.as_secs() / 60, - elapsed.as_secs() % 60 - ); - } - - for pdu in &resolved_map { - if let Err(e) = pdu.1 { - if e != "Room is unknown to this server." { - warn!("Incoming PDU failed {:?}", pdu); - } - } - } - - for edu in body - .edus - .iter() - .filter_map(|edu| serde_json::from_str::(edu.json().get()).ok()) - { - match edu { - Edu::Presence(_) => {} - Edu::Receipt(receipt) => { - for (room_id, room_updates) in receipt.receipts { - for (user_id, user_updates) in room_updates.read { - if let Some((event_id, _)) = user_updates - .event_ids - .iter() - .filter_map(|id| { - db.rooms.get_pdu_count(id).ok().flatten().map(|r| (id, r)) - }) - .max_by_key(|(_, count)| *count) - { - let mut user_receipts = BTreeMap::new(); - user_receipts.insert(user_id.clone(), user_updates.data); - - let mut receipts = BTreeMap::new(); - receipts.insert(ReceiptType::Read, user_receipts); - - let mut receipt_content = BTreeMap::new(); - receipt_content.insert(event_id.to_owned(), receipts); - - let event = ReceiptEvent { - content: ReceiptEventContent(receipt_content), - room_id: room_id.clone(), - }; - db.rooms.edus.readreceipt_update( - &user_id, - &room_id, - event, - &db.globals, - )?; - } else { - // TODO fetch missing events - info!("No known event ids in read receipt: {:?}", user_updates); - } - } - } - } - Edu::Typing(typing) => { - if db.rooms.is_joined(&typing.user_id, &typing.room_id)? 
{ - if typing.typing { - db.rooms.edus.typing_add( - &typing.user_id, - &typing.room_id, - 3000 + utils::millis_since_unix_epoch(), - &db.globals, - )?; - } else { - db.rooms.edus.typing_remove( - &typing.user_id, - &typing.room_id, - &db.globals, - )?; - } - } - } - Edu::DeviceListUpdate(DeviceListUpdateContent { user_id, .. }) => { - db.users - .mark_device_key_update(&user_id, &db.rooms, &db.globals)?; - } - Edu::DirectToDevice(DirectDeviceContent { - sender, - ev_type, - message_id, - messages, - }) => { - // Check if this is a new transaction id - if db - .transaction_ids - .existing_txnid(&sender, None, &message_id)? - .is_some() - { - continue; - } - - for (target_user_id, map) in &messages { - for (target_device_id_maybe, event) in map { - match target_device_id_maybe { - DeviceIdOrAllDevices::DeviceId(target_device_id) => { - db.users.add_to_device_event( - &sender, - target_user_id, - target_device_id, - &ev_type.to_string(), - event.deserialize_as().map_err(|_| { - Error::BadRequest( - ErrorKind::InvalidParam, - "Event is invalid", - ) - })?, - &db.globals, - )? - } - - DeviceIdOrAllDevices::AllDevices => { - for target_device_id in db.users.all_device_ids(target_user_id) { - db.users.add_to_device_event( - &sender, - target_user_id, - &target_device_id?, - &ev_type.to_string(), - event.deserialize_as().map_err(|_| { - Error::BadRequest( - ErrorKind::InvalidParam, - "Event is invalid", - ) - })?, - &db.globals, - )?; - } - } - } - } - } - - // Save transaction id with empty data - db.transaction_ids - .add_txnid(&sender, None, &message_id, &[])?; - } - Edu::SigningKeyUpdate(SigningKeyUpdateContent { - user_id, - master_key, - self_signing_key, - }) => { - if user_id.server_name() != sender_servername { - continue; - } - if let Some(master_key) = master_key { - db.users.add_cross_signing_keys( - &user_id, - &master_key, - &self_signing_key, - &None, - &db.rooms, - &db.globals, - )?; - } - } - Edu::_Custom(_) => {} - } - } - - db.flush()?; - - Ok(send_transaction_message::v1::Response { pdus: resolved_map }) -} /// An async function that can recursively call itself. type AsyncRecursiveType<'a, T> = Pin + 'a + Send>>; @@ -918,37 +35,25 @@ pub(crate) async fn handle_incoming_pdu<'a>( is_timeline_event: bool, db: &'a Database, pub_key_map: &'a RwLock>>, -) -> Result>, String> { - match db.rooms.exists(room_id) { - Ok(true) => {} - _ => { - return Err("Room is unknown to this server.".to_owned()); - } - } - - match db.rooms.is_disabled(room_id) { - Ok(false) => {} - _ => { - return Err("Federation of this room is currently disabled on this server.".to_owned()); - } - } +) -> Result>> { + db.rooms.exists(room_id)?.ok_or(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server"))?; + db.rooms.is_disabled(room_id)?.ok_or(Error::BadRequest(ErrorKind::Forbidden, "Federation of this room is currently disabled on this server."))?; + // 1. Skip the PDU if we already have it as a timeline event - if let Ok(Some(pdu_id)) = db.rooms.get_pdu_id(event_id) { - return Ok(Some(pdu_id.to_vec())); + if let Some(pdu_id) = db.rooms.get_pdu_id(event_id)? { + return Some(pdu_id.to_vec()); } let create_event = db .rooms - .room_state_get(room_id, &StateEventType::RoomCreate, "") - .map_err(|_| "Failed to ask database for event.".to_owned())? - .ok_or_else(|| "Failed to find create event in db.".to_owned())?; + .room_state_get(room_id, &StateEventType::RoomCreate, "")? 
+ .ok_or_else(|| Error::bad_database("Failed to find create event in db."))?; let first_pdu_in_room = db .rooms - .first_pdu_in_room(room_id) - .map_err(|_| "Error loading first room event.".to_owned())? - .expect("Room exists"); + .first_pdu_in_room(room_id)? + .ok_or_else(|| Error::bad_database("Failed to find first pdu in db."))?; let (incoming_pdu, val) = handle_outlier_pdu( origin, @@ -966,100 +71,26 @@ pub(crate) async fn handle_incoming_pdu<'a>( return Ok(None); } + // Skip old events if incoming_pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { return Ok(None); } // 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline events - let mut graph: HashMap, _> = HashMap::new(); - let mut eventid_info = HashMap::new(); - let mut todo_outlier_stack: Vec> = incoming_pdu.prev_events.clone(); + let sorted_prev_events = fetch_unknown_prev_events(incoming_pdu.prev_events.clone()); - let mut amount = 0; + let mut errors = 0; + for prev_id in dbg!(sorted) { + // Check for disabled again because it might have changed + db.rooms.is_disabled(room_id)?.ok_or(Error::BadRequest(ErrorKind::Forbidden, "Federation of + this room is currently disabled on this server."))?; - while let Some(prev_event_id) = todo_outlier_stack.pop() { - if let Some((pdu, json_opt)) = fetch_and_handle_outliers( - db, - origin, - &[prev_event_id.clone()], - &create_event, - room_id, - pub_key_map, - ) - .await - .pop() - { - if amount > 100 { - // Max limit reached - warn!("Max prev event limit reached!"); - graph.insert(prev_event_id.clone(), HashSet::new()); - continue; - } - - if let Some(json) = - json_opt.or_else(|| db.rooms.get_outlier_pdu_json(&prev_event_id).ok().flatten()) - { - if pdu.origin_server_ts > first_pdu_in_room.origin_server_ts { - amount += 1; - for prev_prev in &pdu.prev_events { - if !graph.contains_key(prev_prev) { - todo_outlier_stack.push(dbg!(prev_prev.clone())); - } - } - - graph.insert( - prev_event_id.clone(), - pdu.prev_events.iter().cloned().collect(), - ); - } else { - // Time based check failed - graph.insert(prev_event_id.clone(), HashSet::new()); - } - - eventid_info.insert(prev_event_id.clone(), (pdu, json)); - } else { - // Get json failed - graph.insert(prev_event_id.clone(), HashSet::new()); - } - } else { - // Fetch and handle failed - graph.insert(prev_event_id.clone(), HashSet::new()); - } - } - - let sorted = state_res::lexicographical_topological_sort(dbg!(&graph), |event_id| { - // This return value is the key used for sorting events, - // events are then sorted by power level, time, - // and lexically by event_id. 
- println!("{}", event_id); - Ok(( - int!(0), - MilliSecondsSinceUnixEpoch( - eventid_info - .get(event_id) - .map_or_else(|| uint!(0), |info| info.0.origin_server_ts), - ), - )) - }) - .map_err(|_| "Error sorting prev events".to_owned())?; - - let mut errors = 0; - for prev_id in dbg!(sorted) { - match db.rooms.is_disabled(room_id) { - Ok(false) => {} - _ => { - return Err( - "Federation of this room is currently disabled on this server.".to_owned(), - ); - } - } - - if let Some((time, tries)) = db - .globals - .bad_event_ratelimiter - .read() - .unwrap() - .get(&*prev_id) + if let Some((time, tries)) = db + .globals + .bad_event_ratelimiter + .read() + .unwrap() + .get(&*prev_id) { // Exponential backoff let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); @@ -1076,7 +107,9 @@ pub(crate) async fn handle_incoming_pdu<'a>( if errors >= 5 { break; } + if let Some((pdu, json)) = eventid_info.remove(&*prev_id) { + // Skip old events if pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { continue; } @@ -1087,6 +120,7 @@ pub(crate) async fn handle_incoming_pdu<'a>( .write() .unwrap() .insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time)); + if let Err(e) = upgrade_outlier_to_timeline_pdu( pdu, json, @@ -1130,6 +164,8 @@ pub(crate) async fn handle_incoming_pdu<'a>( } } + // Done with prev events, now handling the incoming event + let start_time = Instant::now(); db.globals .roomid_federationhandletime @@ -1171,16 +207,14 @@ fn handle_outlier_pdu<'a>( // We go through all the signatures we see on the value and fetch the corresponding signing // keys fetch_required_signing_keys(&value, pub_key_map, db) - .await - .map_err(|e| e.to_string())?; + .await?; // 2. Check signatures, otherwise drop // 3. check content hash, redact if doesn't match - let create_event_content: RoomCreateEventContent = serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - "Invalid create event in db.".to_owned() + error!("Invalid create event: {}", e); + Error::BadDatabase("Invalid create event in db") })?; let room_version_id = &create_event_content.room_version; @@ -1220,7 +254,7 @@ fn handle_outlier_pdu<'a>( // 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" - // EDIT: Step 5 is not applied anymore because it failed too often + // NOTE: Step 5 is not applied anymore because it failed too often warn!("Fetching auth events for {}", incoming_pdu.event_id); fetch_and_handle_outliers( db, @@ -1245,7 +279,7 @@ fn handle_outlier_pdu<'a>( // Build map of auth events let mut auth_events = HashMap::new(); for id in &incoming_pdu.auth_events { - let auth_event = match db.rooms.get_pdu(id).map_err(|e| e.to_string())? { + let auth_event = match db.rooms.get_pdu(id)? { Some(e) => e, None => { warn!("Could not find auth event {}", id); @@ -1264,10 +298,9 @@ fn handle_outlier_pdu<'a>( v.insert(auth_event); } hash_map::Entry::Occupied(_) => { - return Err( + return Err(Error::BadRequest(ErrorKind::InvalidParam, "Auth event's type and state_key combination exists multiple times." 
- .to_owned(), - ) + )); } } } @@ -1278,7 +311,7 @@ fn handle_outlier_pdu<'a>( .map(|a| a.as_ref()) != Some(create_event) { - return Err("Incoming event refers to wrong create event.".to_owned()); + return Err(Error::BadRequest(ErrorKind::InvalidParam("Incoming event refers to wrong create event."))); } if !state_res::event_auth::auth_check( @@ -1287,17 +320,17 @@ fn handle_outlier_pdu<'a>( None::, // TODO: third party invite |k, s| auth_events.get(&(k.to_string().into(), s.to_owned())), ) - .map_err(|_e| "Auth check failed".to_owned())? + .map_err(|e| {error!(e); Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed")})? { - return Err("Event has failed auth check with auth events.".to_owned()); + return Err(Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed")); } info!("Validation successful."); // 7. Persist the event as an outlier. db.rooms - .add_pdu_outlier(&incoming_pdu.event_id, &val) - .map_err(|_| "Failed to add pdu as outlier.".to_owned())?; + .add_pdu_outlier(&incoming_pdu.event_id, &val)?; + info!("Added pdu as outlier."); Ok((Arc::new(incoming_pdu), val)) @@ -1314,6 +347,7 @@ async fn upgrade_outlier_to_timeline_pdu( room_id: &RoomId, pub_key_map: &RwLock>>, ) -> Result>, String> { + // Skip the PDU if we already have it as a timeline event if let Ok(Some(pduid)) = db.rooms.get_pdu_id(&incoming_pdu.event_id) { return Ok(Some(pduid)); } @@ -1331,7 +365,7 @@ async fn upgrade_outlier_to_timeline_pdu( let create_event_content: RoomCreateEventContent = serde_json::from_str(create_event.content.get()).map_err(|e| { warn!("Invalid create event: {}", e); - "Invalid create event in db.".to_owned() + Error::BadDatabase("Invalid create event in db") })?; let room_version_id = &create_event_content.room_version; @@ -2039,1606 +1073,80 @@ pub(crate) fn fetch_and_handle_outliers<'a>( }) } -/// Search the DB for the signing keys of the given server, if we don't have them -/// fetch them from the server and save to our DB. 
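The signing-key lookup here only reaches out over federation when the locally cached keys do not cover every key id referenced in the request's signatures. A minimal self-contained sketch of that completeness check, string-typed for brevity and with hypothetical key ids:

```rust
use std::collections::BTreeMap;

/// True when every key id that appears in the request's signatures is already
/// present in the locally cached key map, so no network fetch is needed.
fn contains_all_ids(cached: &BTreeMap<String, String>, signature_ids: &[String]) -> bool {
    signature_ids.iter().all(|id| cached.contains_key(id))
}

fn main() {
    let mut cached = BTreeMap::new();
    cached.insert("ed25519:abc".to_owned(), "publickey".to_owned());
    assert!(contains_all_ids(&cached, &["ed25519:abc".to_owned()]));
    assert!(!contains_all_ids(&cached, &["ed25519:missing".to_owned()]));
}
```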
-#[tracing::instrument(skip_all)] -pub(crate) async fn fetch_signing_keys( - db: &Database, - origin: &ServerName, - signature_ids: Vec, -) -> Result> { - let contains_all_ids = - |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); - - let permit = db - .globals - .servername_ratelimiter - .read() - .unwrap() - .get(origin) - .map(|s| Arc::clone(s).acquire_owned()); - - let permit = match permit { - Some(p) => p, - None => { - let mut write = db.globals.servername_ratelimiter.write().unwrap(); - let s = Arc::clone( - write - .entry(origin.to_owned()) - .or_insert_with(|| Arc::new(Semaphore::new(1))), - ); - - s.acquire_owned() - } - } - .await; - - let back_off = |id| match db - .globals - .bad_signature_ratelimiter - .write() - .unwrap() - .entry(id) - { - hash_map::Entry::Vacant(e) => { - e.insert((Instant::now(), 1)); - } - hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), - }; - - if let Some((time, tries)) = db - .globals - .bad_signature_ratelimiter - .read() - .unwrap() - .get(&signature_ids) - { - // Exponential backoff - let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); - if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { - min_elapsed_duration = Duration::from_secs(60 * 60 * 24); - } - - if time.elapsed() < min_elapsed_duration { - debug!("Backing off from {:?}", signature_ids); - return Err(Error::BadServerResponse("bad signature, still backing off")); - } - } - - trace!("Loading signing keys for {}", origin); - let mut result: BTreeMap<_, _> = db - .globals - .signing_keys_for(origin)? - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect(); - if contains_all_ids(&result) { - return Ok(result); - } +fn fetch_unknown_prev_events(initial_set: Vec>) -> Vec> { + let mut graph: HashMap, _> = HashMap::new(); + let mut eventid_info = HashMap::new(); + let mut todo_outlier_stack: Vec> = initial_set; - debug!("Fetching signing keys for {} over federation", origin); + let mut amount = 0; - if let Some(server_key) = db - .sending - .send_federation_request(&db.globals, origin, get_server_keys::v2::Request::new()) + while let Some(prev_event_id) = todo_outlier_stack.pop() { + if let Some((pdu, json_opt)) = fetch_and_handle_outliers( + db, + origin, + &[prev_event_id.clone()], + &create_event, + room_id, + pub_key_map, + ) .await - .ok() - .and_then(|resp| resp.server_key.deserialize().ok()) - { - db.globals.add_signing_key(origin, server_key.clone())?; - - result.extend( - server_key - .verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - result.extend( - server_key - .old_verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - - if contains_all_ids(&result) { - return Ok(result); - } - } - - for server in db.globals.trusted_servers() { - debug!("Asking {} for {}'s signing key", server, origin); - if let Some(server_keys) = db - .sending - .send_federation_request( - &db.globals, - server, - get_remote_server_keys::v2::Request::new( - origin, - MilliSecondsSinceUnixEpoch::from_system_time( - SystemTime::now() - .checked_add(Duration::from_secs(3600)) - .expect("SystemTime to large"), - ) - .expect("time is valid"), - ), - ) - .await - .ok() - .map(|resp| { - resp.server_keys - .into_iter() - .filter_map(|e| e.deserialize().ok()) - .collect::>() - }) + .pop() { - trace!("Got signing keys: {:?}", server_keys); - for k in server_keys { - db.globals.add_signing_key(origin, k.clone())?; - result.extend( - k.verify_keys - .into_iter() - .map(|(k, v)| 
(k.to_string(), v.key)), - ); - result.extend( - k.old_verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - } - - if contains_all_ids(&result) { - return Ok(result); + if amount > 100 { + // Max limit reached + warn!("Max prev event limit reached!"); + graph.insert(prev_event_id.clone(), HashSet::new()); + continue; } - } - } - - drop(permit); - - back_off(signature_ids); - - warn!("Failed to find public key for server: {}", origin); - Err(Error::BadServerResponse( - "Failed to find public key for server", - )) -} - -/// Append the incoming event setting the state snapshot to the state from the -/// server that sent the event. -#[tracing::instrument(skip_all)] -fn append_incoming_pdu<'a>( - db: &Database, - pdu: &PduEvent, - pdu_json: CanonicalJsonObject, - new_room_leaves: impl IntoIterator + Clone + Debug, - state_ids_compressed: HashSet, - soft_fail: bool, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex -) -> Result>> { - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. - db.rooms.set_event_state( - &pdu.event_id, - &pdu.room_id, - state_ids_compressed, - &db.globals, - )?; - - if soft_fail { - db.rooms - .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - db.rooms.replace_pdu_leaves(&pdu.room_id, new_room_leaves)?; - return Ok(None); - } - - let pdu_id = db.rooms.append_pdu(pdu, pdu_json, new_room_leaves, db)?; - - for appservice in db.appservice.all()? { - if db.rooms.appservice_in_room(&pdu.room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - db.rooms - .room_aliases(&pdu.room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&pdu.room_id.as_str().into())) - || users.iter().any(matching_users) + if let Some(json) = + json_opt.or_else(|| db.rooms.get_outlier_pdu_json(&prev_event_id).ok().flatten()) { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - Ok(Some(pdu_id)) -} - -#[tracing::instrument(skip(starting_events, db))] -pub(crate) async fn get_auth_chain<'a>( - room_id: &RoomId, - starting_events: Vec>, - db: &'a Database, -) -> Result> + 'a> { - const NUM_BUCKETS: usize = 50; - - let mut buckets = vec![BTreeSet::new(); NUM_BUCKETS]; - - let mut i = 0; - for id in starting_events { - let short = db.rooms.get_or_create_shorteventid(&id, &db.globals)?; - let bucket_id = (short % NUM_BUCKETS as u64) as usize; - 
buckets[bucket_id].insert((short, id.clone())); - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - let mut full_auth_chain = HashSet::new(); - - let mut hits = 0; - let mut misses = 0; - for chunk in buckets { - if chunk.is_empty() { - continue; - } - - let chunk_key: Vec = chunk.iter().map(|(short, _)| short).copied().collect(); - if let Some(cached) = db.rooms.get_auth_chain_from_cache(&chunk_key)? { - hits += 1; - full_auth_chain.extend(cached.iter().copied()); - continue; - } - misses += 1; - - let mut chunk_cache = HashSet::new(); - let mut hits2 = 0; - let mut misses2 = 0; - let mut i = 0; - for (sevent_id, event_id) in chunk { - if let Some(cached) = db.rooms.get_auth_chain_from_cache(&[sevent_id])? { - hits2 += 1; - chunk_cache.extend(cached.iter().copied()); - } else { - misses2 += 1; - let auth_chain = Arc::new(get_auth_chain_inner(room_id, &event_id, db)?); - db.rooms - .cache_auth_chain(vec![sevent_id], Arc::clone(&auth_chain))?; - println!( - "cache missed event {} with auth chain len {}", - event_id, - auth_chain.len() - ); - chunk_cache.extend(auth_chain.iter()); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - }; - } - println!( - "chunk missed with len {}, event hits2: {}, misses2: {}", - chunk_cache.len(), - hits2, - misses2 - ); - let chunk_cache = Arc::new(chunk_cache); - db.rooms - .cache_auth_chain(chunk_key, Arc::clone(&chunk_cache))?; - full_auth_chain.extend(chunk_cache.iter()); - } - - println!( - "total: {}, chunk hits: {}, misses: {}", - full_auth_chain.len(), - hits, - misses - ); - - Ok(full_auth_chain - .into_iter() - .filter_map(move |sid| db.rooms.get_eventid_from_short(sid).ok())) -} + if pdu.origin_server_ts > first_pdu_in_room.origin_server_ts { + amount += 1; + for prev_prev in &pdu.prev_events { + if !graph.contains_key(prev_prev) { + todo_outlier_stack.push(dbg!(prev_prev.clone())); + } + } -#[tracing::instrument(skip(event_id, db))] -fn get_auth_chain_inner( - room_id: &RoomId, - event_id: &EventId, - db: &Database, -) -> Result> { - let mut todo = vec![Arc::from(event_id)]; - let mut found = HashSet::new(); - - while let Some(event_id) = todo.pop() { - match db.rooms.get_pdu(&event_id) { - Ok(Some(pdu)) => { - if pdu.room_id != room_id { - return Err(Error::BadRequest(ErrorKind::Forbidden, "Evil event in db")); + graph.insert( + prev_event_id.clone(), + pdu.prev_events.iter().cloned().collect(), + ); + } else { + // Time based check failed + graph.insert(prev_event_id.clone(), HashSet::new()); } - for auth_event in &pdu.auth_events { - let sauthevent = db - .rooms - .get_or_create_shorteventid(auth_event, &db.globals)?; - if !found.contains(&sauthevent) { - found.insert(sauthevent); - todo.push(auth_event.clone()); - } - } - } - Ok(None) => { - warn!("Could not find pdu mentioned in auth events: {}", event_id); - } - Err(e) => { - warn!("Could not load event in auth chain: {} {}", event_id, e); + eventid_info.insert(prev_event_id.clone(), (pdu, json)); + } else { + // Get json failed, so this was not fetched over federation + graph.insert(prev_event_id.clone(), HashSet::new()); } + } else { + // Fetch and handle failed + graph.insert(prev_event_id.clone(), HashSet::new()); } } - Ok(found) -} - -/// # `GET /_matrix/federation/v1/event/{eventId}` -/// -/// Retrieves a single event from the server. 
-/// -/// - Only works if a user of this server is currently invited or joined the room -pub async fn get_event_route( - db: DatabaseGuard, - body: Ruma, -) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); - - let event = db - .rooms - .get_pdu_json(&body.event_id)? - .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; - - let room_id_str = event - .get("room_id") - .and_then(|val| val.as_str()) - .ok_or_else(|| Error::bad_database("Invalid event in database"))?; - - let room_id = <&RoomId>::try_from(room_id_str) - .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - - if !db.rooms.server_in_room(sender_servername, room_id)? { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Server is not in room", - )); - } - - Ok(get_event::v1::Response { - origin: db.globals.server_name().to_owned(), - origin_server_ts: MilliSecondsSinceUnixEpoch::now(), - pdu: PduEvent::convert_to_outgoing_federation_event(event), + let sorted = state_res::lexicographical_topological_sort(dbg!(&graph), |event_id| { + // This return value is the key used for sorting events, + // events are then sorted by power level, time, + // and lexically by event_id. + println!("{}", event_id); + Ok(( + int!(0), + MilliSecondsSinceUnixEpoch( + eventid_info + .get(event_id) + .map_or_else(|| uint!(0), |info| info.0.origin_server_ts), + ), + )) }) -} - -/// # `POST /_matrix/federation/v1/get_missing_events/{roomId}` -/// -/// Retrieves events that the sender is missing. -pub async fn get_missing_events_route( - db: DatabaseGuard, - body: Ruma, -) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); - - if !db.rooms.server_in_room(sender_servername, &body.room_id)? { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Server is not in room", - )); - } - - acl_check(sender_servername, &body.room_id, &db)?; - - let mut queued_events = body.latest_events.clone(); - let mut events = Vec::new(); - - let mut i = 0; - while i < queued_events.len() && events.len() < u64::from(body.limit) as usize { - if let Some(pdu) = db.rooms.get_pdu_json(&queued_events[i])? { - let room_id_str = pdu - .get("room_id") - .and_then(|val| val.as_str()) - .ok_or_else(|| Error::bad_database("Invalid event in database"))?; - - let event_room_id = <&RoomId>::try_from(room_id_str) - .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - - if event_room_id != body.room_id { - warn!( - "Evil event detected: Event {} found while searching in room {}", - queued_events[i], body.room_id - ); - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Evil event detected", - )); - } + .map_err(|_| "Error sorting prev events".to_owned())?; - if body.earliest_events.contains(&queued_events[i]) { - i += 1; - continue; - } - queued_events.extend_from_slice( - &serde_json::from_value::>>( - serde_json::to_value(pdu.get("prev_events").cloned().ok_or_else(|| { - Error::bad_database("Event in db has no prev_events field.") - })?) 
- .expect("canonical json is valid json value"), - ) - .map_err(|_| Error::bad_database("Invalid prev_events content in pdu in db."))?, - ); - events.push(PduEvent::convert_to_outgoing_federation_event(pdu)); - } - i += 1; - } - - Ok(get_missing_events::v1::Response { events }) -} - -/// # `GET /_matrix/federation/v1/event_auth/{roomId}/{eventId}` -/// -/// Retrieves the auth chain for a given event. -/// -/// - This does not include the event itself -pub async fn get_event_authorization_route( - db: DatabaseGuard, - body: Ruma, -) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); - - if !db.rooms.server_in_room(sender_servername, &body.room_id)? { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Server is not in room.", - )); - } - - acl_check(sender_servername, &body.room_id, &db)?; - - let event = db - .rooms - .get_pdu_json(&body.event_id)? - .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; - - let room_id_str = event - .get("room_id") - .and_then(|val| val.as_str()) - .ok_or_else(|| Error::bad_database("Invalid event in database"))?; - - let room_id = <&RoomId>::try_from(room_id_str) - .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - - let auth_chain_ids = get_auth_chain(room_id, vec![Arc::from(&*body.event_id)], &db).await?; - - Ok(get_event_authorization::v1::Response { - auth_chain: auth_chain_ids - .filter_map(|id| db.rooms.get_pdu_json(&id).ok()?) - .map(PduEvent::convert_to_outgoing_federation_event) - .collect(), - }) -} - -/// # `GET /_matrix/federation/v1/state/{roomId}` -/// -/// Retrieves the current state of the room. -pub async fn get_room_state_route( - db: DatabaseGuard, - body: Ruma, -) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); - - if !db.rooms.server_in_room(sender_servername, &body.room_id)? { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Server is not in room.", - )); - } - - acl_check(sender_servername, &body.room_id, &db)?; - - let shortstatehash = db - .rooms - .pdu_shortstatehash(&body.event_id)? - .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Pdu state not found.", - ))?; - - let pdus = db - .rooms - .state_full_ids(shortstatehash) - .await? - .into_iter() - .map(|(_, id)| { - PduEvent::convert_to_outgoing_federation_event( - db.rooms.get_pdu_json(&id).unwrap().unwrap(), - ) - }) - .collect(); - - let auth_chain_ids = - get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db).await?; - - Ok(get_room_state::v1::Response { - auth_chain: auth_chain_ids - .map(|id| { - db.rooms.get_pdu_json(&id).map(|maybe_json| { - PduEvent::convert_to_outgoing_federation_event(maybe_json.unwrap()) - }) - }) - .filter_map(|r| r.ok()) - .collect(), - pdus, - }) -} - -/// # `GET /_matrix/federation/v1/state_ids/{roomId}` -/// -/// Retrieves the current state of the room. -pub async fn get_room_state_ids_route( - db: DatabaseGuard, - body: Ruma, -) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); - - if !db.rooms.server_in_room(sender_servername, &body.room_id)? 
{ - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Server is not in room.", - )); - } - - acl_check(sender_servername, &body.room_id, &db)?; - - let shortstatehash = db - .rooms - .pdu_shortstatehash(&body.event_id)? - .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Pdu state not found.", - ))?; - - let pdu_ids = db - .rooms - .state_full_ids(shortstatehash) - .await? - .into_iter() - .map(|(_, id)| (*id).to_owned()) - .collect(); - - let auth_chain_ids = - get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db).await?; - - Ok(get_room_state_ids::v1::Response { - auth_chain_ids: auth_chain_ids.map(|id| (*id).to_owned()).collect(), - pdu_ids, - }) -} - -/// # `GET /_matrix/federation/v1/make_join/{roomId}/{userId}` -/// -/// Creates a join template. -pub async fn create_join_event_template_route( - db: DatabaseGuard, - body: Ruma, -) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - if !db.rooms.exists(&body.room_id)? { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Room is unknown to this server.", - )); - } - - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); - - acl_check(sender_servername, &body.room_id, &db)?; - - // TODO: Conduit does not implement restricted join rules yet, we always reject - let join_rules_event = - db.rooms - .room_state_get(&body.room_id, &StateEventType::RoomJoinRules, "")?; - - let join_rules_event_content: Option = join_rules_event - .as_ref() - .map(|join_rules_event| { - serde_json::from_str(join_rules_event.content.get()).map_err(|e| { - warn!("Invalid join rules event: {}", e); - Error::bad_database("Invalid join rules event in db.") - }) - }) - .transpose()?; - - if let Some(join_rules_event_content) = join_rules_event_content { - if matches!( - join_rules_event_content.join_rule, - JoinRule::Restricted { .. } - ) { - return Err(Error::BadRequest( - ErrorKind::Unknown, - "Conduit does not support restricted rooms yet.", - )); - } - } - - let prev_events: Vec<_> = db - .rooms - .get_pdu_leaves(&body.room_id)? 
- .into_iter() - .take(20) - .collect(); - - let create_event = db - .rooms - .room_state_get(&body.room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default version - // right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - - if !body.ver.contains(&room_version_id) { - return Err(Error::BadRequest( - ErrorKind::IncompatibleRoomVersion { - room_version: room_version_id, - }, - "Room version not supported.", - )); - } - - let content = to_raw_value(&RoomMemberEventContent { - avatar_url: None, - blurhash: None, - displayname: None, - is_direct: None, - membership: MembershipState::Join, - third_party_invite: None, - reason: None, - join_authorized_via_users_server: None, - }) - .expect("member event is valid value"); - - let state_key = body.user_id.to_string(); - let kind = StateEventType::RoomMember; - - let auth_events = db.rooms.get_auth_events( - &body.room_id, - &kind.to_string().into(), - &body.user_id, - Some(&state_key), - &content, - )?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(db.rooms.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = BTreeMap::new(); - - if let Some(prev_pdu) = db.rooms.room_state_get(&body.room_id, &kind, &state_key)? 
{ - unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone()); - unsigned.insert( - "prev_sender".to_owned(), - to_raw_value(&prev_pdu.sender).expect("UserId is valid"), - ); - } - - let pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: body.room_id.clone(), - sender: body.user_id.clone(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: kind.to_string().into(), - content, - state_key: Some(state_key), - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts: None, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - - Ok(prepare_join_event::v1::Response { - room_version: Some(room_version_id), - event: to_raw_value(&pdu_json).expect("CanonicalJson can be serialized to JSON"), - }) -} - -async fn create_join_event( - db: &DatabaseGuard, - sender_servername: &ServerName, - room_id: &RoomId, - pdu: &RawJsonValue, -) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - if !db.rooms.exists(room_id)? { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Room is unknown to this server.", - )); - } - - acl_check(sender_servername, room_id, db)?; - - // TODO: Conduit does not implement restricted join rules yet, we always reject - let join_rules_event = db - .rooms - .room_state_get(room_id, &StateEventType::RoomJoinRules, "")?; - - let join_rules_event_content: Option = join_rules_event - .as_ref() - .map(|join_rules_event| { - serde_json::from_str(join_rules_event.content.get()).map_err(|e| { - warn!("Invalid join rules event: {}", e); - Error::bad_database("Invalid join rules event in db.") - }) - }) - .transpose()?; - - if let Some(join_rules_event_content) = join_rules_event_content { - if matches!( - join_rules_event_content.join_rule, - JoinRule::Restricted { .. } - ) { - return Err(Error::BadRequest( - ErrorKind::Unknown, - "Conduit does not support restricted rooms yet.", - )); - } - } - - // We need to return the state prior to joining, let's keep a reference to that here - let shortstatehash = db - .rooms - .current_shortstatehash(room_id)? 
- .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Pdu state not found.", - ))?; - - let pub_key_map = RwLock::new(BTreeMap::new()); - // let mut auth_cache = EventMap::new(); - - // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu, &db) { - Ok(t) => t, - Err(_) => { - // Event could not be converted to canonical json - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Could not convert event to canonical json.", - )); - } - }; - - let origin: Box = serde_json::from_value( - serde_json::to_value(value.get("origin").ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Event needs an origin field.", - ))?) - .expect("CanonicalJson is valid json value"), - ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; - - let mutex = Arc::clone( - db.globals - .roomid_mutex_federation - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let mutex_lock = mutex.lock().await; - let pdu_id = handle_incoming_pdu(&origin, &event_id, room_id, value, true, db, &pub_key_map) - .await - .map_err(|e| { - warn!("Error while handling incoming send join PDU: {}", e); - Error::BadRequest( - ErrorKind::InvalidParam, - "Error while handling incoming PDU.", - ) - })? - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Could not accept incoming PDU as timeline event.", - ))?; - drop(mutex_lock); - - let state_ids = db.rooms.state_full_ids(shortstatehash).await?; - let auth_chain_ids = get_auth_chain( - room_id, - state_ids.iter().map(|(_, id)| id.clone()).collect(), - db, - ) - .await?; - - let servers = db - .rooms - .room_servers(room_id) - .filter_map(|r| r.ok()) - .filter(|server| &**server != db.globals.server_name()); - - db.sending.send_pdu(servers, &pdu_id)?; - - db.flush()?; - - Ok(RoomState { - auth_chain: auth_chain_ids - .filter_map(|id| db.rooms.get_pdu_json(&id).ok().flatten()) - .map(PduEvent::convert_to_outgoing_federation_event) - .collect(), - state: state_ids - .iter() - .filter_map(|(_, id)| db.rooms.get_pdu_json(id).ok().flatten()) - .map(PduEvent::convert_to_outgoing_federation_event) - .collect(), - }) -} - -/// # `PUT /_matrix/federation/v1/send_join/{roomId}/{eventId}` -/// -/// Submits a signed join event. -pub async fn create_join_event_v1_route( - db: DatabaseGuard, - body: Ruma, -) -> Result { - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); - - let room_state = create_join_event(&db, sender_servername, &body.room_id, &body.pdu).await?; - - Ok(create_join_event::v1::Response { room_state }) -} - -/// # `PUT /_matrix/federation/v2/send_join/{roomId}/{eventId}` -/// -/// Submits a signed join event. -pub async fn create_join_event_v2_route( - db: DatabaseGuard, - body: Ruma, -) -> Result { - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); - - let room_state = create_join_event(&db, sender_servername, &body.room_id, &body.pdu).await?; - - Ok(create_join_event::v2::Response { room_state }) -} - -/// # `PUT /_matrix/federation/v2/invite/{roomId}/{eventId}` -/// -/// Invites a remote user to a room. 
-pub async fn create_invite_route( - db: DatabaseGuard, - body: Ruma, -) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); - - acl_check(sender_servername, &body.room_id, &db)?; - - if !db.rooms.is_supported_version(&db, &body.room_version) { - return Err(Error::BadRequest( - ErrorKind::IncompatibleRoomVersion { - room_version: body.room_version.clone(), - }, - "Server does not support this room version.", - )); - } - - let mut signed_event = utils::to_canonical_object(&body.event) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invite event is invalid."))?; - - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut signed_event, - &body.room_version, - ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Failed to sign event."))?; - - // Generate event id - let event_id = EventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&signed_event, &body.room_version) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - // Add event_id back - signed_event.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.into()), - ); - - let sender: Box<_> = serde_json::from_value( - signed_event - .get("sender") - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Event had no sender field.", - ))? - .clone() - .into(), - ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "sender is not a user id."))?; - - let invited_user: Box<_> = serde_json::from_value( - signed_event - .get("state_key") - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Event had no state_key field.", - ))? - .clone() - .into(), - ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "state_key is not a user id."))?; - - let mut invite_state = body.invite_room_state.clone(); - - let mut event: JsonObject = serde_json::from_str(body.event.get()) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event bytes."))?; - - event.insert("event_id".to_owned(), "$dummy".into()); - - let pdu: PduEvent = serde_json::from_value(event.into()).map_err(|e| { - warn!("Invalid invite event: {}", e); - Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event.") - })?; - - invite_state.push(pdu.to_stripped_state_event()); - - // If the room already exists, the remote server will notify us about the join via /send - if !db.rooms.exists(&pdu.room_id)? { - db.rooms.update_membership( - &body.room_id, - &invited_user, - MembershipState::Invite, - &sender, - Some(invite_state), - &db, - true, - )?; - } - - db.flush()?; - - Ok(create_invite::v2::Response { - event: PduEvent::convert_to_outgoing_federation_event(signed_event), - }) -} - -/// # `GET /_matrix/federation/v1/user/devices/{userId}` -/// -/// Gets information on all devices of the user. -pub async fn get_devices_route( - db: DatabaseGuard, - body: Ruma, -) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); - - Ok(get_devices::v1::Response { - user_id: body.user_id.clone(), - stream_id: db - .users - .get_devicelist_version(&body.user_id)? 
- .unwrap_or(0) - .try_into() - .expect("version will not grow that large"), - devices: db - .users - .all_devices_metadata(&body.user_id) - .filter_map(|r| r.ok()) - .filter_map(|metadata| { - Some(UserDevice { - keys: db - .users - .get_device_keys(&body.user_id, &metadata.device_id) - .ok()??, - device_id: metadata.device_id, - device_display_name: metadata.display_name, - }) - }) - .collect(), - master_key: db - .users - .get_master_key(&body.user_id, |u| u.server_name() == sender_servername)?, - self_signing_key: db - .users - .get_self_signing_key(&body.user_id, |u| u.server_name() == sender_servername)?, - }) -} - -/// # `GET /_matrix/federation/v1/query/directory` -/// -/// Resolve a room alias to a room id. -pub async fn get_room_information_route( - db: DatabaseGuard, - body: Ruma, -) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let room_id = db - .rooms - .id_from_alias(&body.room_alias)? - .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Room alias not found.", - ))?; - - Ok(get_room_information::v1::Response { - room_id, - servers: vec![db.globals.server_name().to_owned()], - }) -} - -/// # `GET /_matrix/federation/v1/query/profile` -/// -/// Gets information on a profile. -pub async fn get_profile_information_route( - db: DatabaseGuard, - body: Ruma, -) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let mut displayname = None; - let mut avatar_url = None; - let mut blurhash = None; - - match &body.field { - Some(ProfileField::DisplayName) => displayname = db.users.displayname(&body.user_id)?, - Some(ProfileField::AvatarUrl) => { - avatar_url = db.users.avatar_url(&body.user_id)?; - blurhash = db.users.blurhash(&body.user_id)? - } - // TODO: what to do with custom - Some(_) => {} - None => { - displayname = db.users.displayname(&body.user_id)?; - avatar_url = db.users.avatar_url(&body.user_id)?; - blurhash = db.users.blurhash(&body.user_id)?; - } - } - - Ok(get_profile_information::v1::Response { - blurhash, - displayname, - avatar_url, - }) -} - -/// # `POST /_matrix/federation/v1/user/keys/query` -/// -/// Gets devices and identity keys for the given users. -pub async fn get_keys_route( - db: DatabaseGuard, - body: Ruma, -) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let result = get_keys_helper( - None, - &body.device_keys, - |u| Some(u.server_name()) == body.sender_servername.as_deref(), - &db, - ) - .await?; - - db.flush()?; - - Ok(get_keys::v1::Response { - device_keys: result.device_keys, - master_keys: result.master_keys, - self_signing_keys: result.self_signing_keys, - }) -} - -/// # `POST /_matrix/federation/v1/user/keys/claim` -/// -/// Claims one-time keys. -pub async fn claim_keys_route( - db: DatabaseGuard, - body: Ruma, -) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let result = claim_keys_helper(&body.one_time_keys, &db).await?; - - db.flush()?; - - Ok(claim_keys::v1::Response { - one_time_keys: result.one_time_keys, - }) -} - -#[tracing::instrument(skip_all)] -pub(crate) async fn fetch_required_signing_keys( - event: &BTreeMap, - pub_key_map: &RwLock>>, - db: &Database, -) -> Result<()> { - let signatures = event - .get("signatures") - .ok_or(Error::BadServerResponse( - "No signatures in server response pdu.", - ))? 
- .as_object() - .ok_or(Error::BadServerResponse( - "Invalid signatures object in server response pdu.", - ))?; - - // We go through all the signatures we see on the value and fetch the corresponding signing - // keys - for (signature_server, signature) in signatures { - let signature_object = signature.as_object().ok_or(Error::BadServerResponse( - "Invalid signatures content object in server response pdu.", - ))?; - - let signature_ids = signature_object.keys().cloned().collect::>(); - - let fetch_res = fetch_signing_keys( - db, - signature_server.as_str().try_into().map_err(|_| { - Error::BadServerResponse("Invalid servername in signatures of server response pdu.") - })?, - signature_ids, - ) - .await; - - let keys = match fetch_res { - Ok(keys) => keys, - Err(_) => { - warn!("Signature verification failed: Could not fetch signing key.",); - continue; - } - }; - - pub_key_map - .write() - .map_err(|_| Error::bad_database("RwLock is poisoned."))? - .insert(signature_server.clone(), keys); - } - - Ok(()) -} - -// Gets a list of servers for which we don't have the signing key yet. We go over -// the PDUs and either cache the key or add it to the list that needs to be retrieved. -fn get_server_keys_from_cache( - pdu: &RawJsonValue, - servers: &mut BTreeMap, BTreeMap, QueryCriteria>>, - room_version: &RoomVersionId, - pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap>>, - db: &Database, -) -> Result<()> { - let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { - error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); - Error::BadServerResponse("Invalid PDU in server response") - })?; - - let event_id = format!( - "${}", - ruma::signatures::reference_hash(&value, room_version) - .expect("ruma can calculate reference hashes") - ); - let event_id = <&EventId>::try_from(event_id.as_str()) - .expect("ruma's reference hashes are valid event ids"); - - if let Some((time, tries)) = db - .globals - .bad_event_ratelimiter - .read() - .unwrap() - .get(event_id) - { - // Exponential backoff - let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); - if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { - min_elapsed_duration = Duration::from_secs(60 * 60 * 24); - } - - if time.elapsed() < min_elapsed_duration { - debug!("Backing off from {}", event_id); - return Err(Error::BadServerResponse("bad event, still backing off")); - } - } - - let signatures = value - .get("signatures") - .ok_or(Error::BadServerResponse( - "No signatures in server response pdu.", - ))? - .as_object() - .ok_or(Error::BadServerResponse( - "Invalid signatures object in server response pdu.", - ))?; - - for (signature_server, signature) in signatures { - let signature_object = signature.as_object().ok_or(Error::BadServerResponse( - "Invalid signatures content object in server response pdu.", - ))?; - - let signature_ids = signature_object.keys().cloned().collect::>(); - - let contains_all_ids = - |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); - - let origin = <&ServerName>::try_from(signature_server.as_str()).map_err(|_| { - Error::BadServerResponse("Invalid servername in signatures of server response pdu.") - })?; - - if servers.contains_key(origin) || pub_key_map.contains_key(origin.as_str()) { - continue; - } - - trace!("Loading signing keys for {}", origin); - - let result: BTreeMap<_, _> = db - .globals - .signing_keys_for(origin)? 
- .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect(); - - if !contains_all_ids(&result) { - trace!("Signing key not loaded for {}", origin); - servers.insert(origin.to_owned(), BTreeMap::new()); - } - - pub_key_map.insert(origin.to_string(), result); - } - - Ok(()) -} - -pub(crate) async fn fetch_join_signing_keys( - event: &create_join_event::v2::Response, - room_version: &RoomVersionId, - pub_key_map: &RwLock>>, - db: &Database, -) -> Result<()> { - let mut servers: BTreeMap, BTreeMap, QueryCriteria>> = - BTreeMap::new(); - - { - let mut pkm = pub_key_map - .write() - .map_err(|_| Error::bad_database("RwLock is poisoned."))?; - - // Try to fetch keys, failure is okay - // Servers we couldn't find in the cache will be added to `servers` - for pdu in &event.room_state.state { - let _ = get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm, db); - } - for pdu in &event.room_state.auth_chain { - let _ = get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm, db); - } - - drop(pkm); - } - - if servers.is_empty() { - // We had all keys locally - return Ok(()); - } - - for server in db.globals.trusted_servers() { - trace!("Asking batch signing keys from trusted server {}", server); - if let Ok(keys) = db - .sending - .send_federation_request( - &db.globals, - server, - get_remote_server_keys_batch::v2::Request { - server_keys: servers.clone(), - }, - ) - .await - { - trace!("Got signing keys: {:?}", keys); - let mut pkm = pub_key_map - .write() - .map_err(|_| Error::bad_database("RwLock is poisoned."))?; - for k in keys.server_keys { - let k = k.deserialize().unwrap(); - - // TODO: Check signature from trusted server? - servers.remove(&k.server_name); - - let result = db - .globals - .add_signing_key(&k.server_name, k.clone())? - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect::>(); - - pkm.insert(k.server_name.to_string(), result); - } - } - - if servers.is_empty() { - return Ok(()); - } - } - - let mut futures: FuturesUnordered<_> = servers - .into_iter() - .map(|(server, _)| async move { - ( - db.sending - .send_federation_request( - &db.globals, - &server, - get_server_keys::v2::Request::new(), - ) - .await, - server, - ) - }) - .collect(); - - while let Some(result) = futures.next().await { - if let (Ok(get_keys_response), origin) = result { - let result: BTreeMap<_, _> = db - .globals - .add_signing_key(&origin, get_keys_response.server_key.deserialize().unwrap())? - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect(); - - pub_key_map - .write() - .map_err(|_| Error::bad_database("RwLock is poisoned."))? - .insert(origin.to_string(), result); - } - } - - Ok(()) -} - -/// Returns Ok if the acl allows the server -fn acl_check(server_name: &ServerName, room_id: &RoomId, db: &Database) -> Result<()> { - let acl_event = match db - .rooms - .room_state_get(room_id, &StateEventType::RoomServerAcl, "")? 
- { - Some(acl) => acl, - None => return Ok(()), - }; - - let acl_event_content: RoomServerAclEventContent = - match serde_json::from_str(acl_event.content.get()) { - Ok(content) => content, - Err(_) => { - warn!("Invalid ACL event"); - return Ok(()); - } - }; - - if acl_event_content.is_allowed(server_name) { - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::Forbidden, - "Server was denied by ACL", - )) - } -} - -#[cfg(test)] -mod tests { - use super::{add_port_to_hostname, get_ip_with_port, FedDest}; - - #[test] - fn ips_get_default_ports() { - assert_eq!( - get_ip_with_port("1.1.1.1"), - Some(FedDest::Literal("1.1.1.1:8448".parse().unwrap())) - ); - assert_eq!( - get_ip_with_port("dead:beef::"), - Some(FedDest::Literal("[dead:beef::]:8448".parse().unwrap())) - ); - } - - #[test] - fn ips_keep_custom_ports() { - assert_eq!( - get_ip_with_port("1.1.1.1:1234"), - Some(FedDest::Literal("1.1.1.1:1234".parse().unwrap())) - ); - assert_eq!( - get_ip_with_port("[dead::beef]:8933"), - Some(FedDest::Literal("[dead::beef]:8933".parse().unwrap())) - ); - } - - #[test] - fn hostnames_get_default_ports() { - assert_eq!( - add_port_to_hostname("example.com"), - FedDest::Named(String::from("example.com"), String::from(":8448")) - ) - } - - #[test] - fn hostnames_keep_custom_ports() { - assert_eq!( - add_port_to_hostname("example.com:1337"), - FedDest::Named(String::from("example.com"), String::from(":1337")) - ) - } + sorted } diff --git a/src/service/rooms/lazy_loading.rs b/src/service/rooms/lazy_loading/mod.rs similarity index 100% rename from src/service/rooms/lazy_loading.rs rename to src/service/rooms/lazy_loading/mod.rs diff --git a/src/service/rooms/metadata.rs b/src/service/rooms/metadata/mod.rs similarity index 100% rename from src/service/rooms/metadata.rs rename to src/service/rooms/metadata/mod.rs diff --git a/src/service/rooms/outlier.rs b/src/service/rooms/outlier/mod.rs similarity index 65% rename from src/service/rooms/outlier.rs rename to src/service/rooms/outlier/mod.rs index afb0a147..340e93e4 100644 --- a/src/service/rooms/outlier.rs +++ b/src/service/rooms/outlier/mod.rs @@ -1,3 +1,12 @@ + /// Returns the pdu from the outlier tree. + pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { + self.eventid_outlierpdu + .get(event_id.as_bytes())? + .map_or(Ok(None), |pdu| { + serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) + }) + } + /// Returns the pdu from the outlier tree. pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { self.eventid_outlierpdu @@ -8,8 +17,6 @@ } /// Append the PDU as an outlier. - /// - /// Any event given to this will be processed (state-res) on another thread. 
     #[tracing::instrument(skip(self, pdu))]
     pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> {
         self.eventid_outlierpdu.insert(
diff --git a/src/service/rooms/pdu_metadata.rs b/src/service/rooms/pdu_metadata/mod.rs
similarity index 100%
rename from src/service/rooms/pdu_metadata.rs
rename to src/service/rooms/pdu_metadata/mod.rs
diff --git a/src/service/rooms/search.rs b/src/service/rooms/search/mod.rs
similarity index 100%
rename from src/service/rooms/search.rs
rename to src/service/rooms/search/mod.rs
diff --git a/src/service/rooms/short.rs b/src/service/rooms/short/mod.rs
similarity index 100%
rename from src/service/rooms/short.rs
rename to src/service/rooms/short/mod.rs
diff --git a/src/service/rooms/state/data.rs b/src/service/rooms/state/data.rs
index 4c75467f..4b42ca8e 100644
--- a/src/service/rooms/state/data.rs
+++ b/src/service/rooms/state/data.rs
@@ -1,120 +1,6 @@
-
-    /// Builds a StateMap by iterating over all keys that start
-    /// with state_hash, this gives the full state for the given state_hash.
-    #[tracing::instrument(skip(self))]
-    pub async fn state_full_ids(&self, shortstatehash: u64) -> Result<BTreeMap<u64, Arc<EventId>>> {
-        let full_state = self
-            .load_shortstatehash_info(shortstatehash)?
-            .pop()
-            .expect("there is always one layer")
-            .1;
-        let mut result = BTreeMap::new();
-        let mut i = 0;
-        for compressed in full_state.into_iter() {
-            let parsed = self.parse_compressed_state_event(compressed)?;
-            result.insert(parsed.0, parsed.1);
-
-            i += 1;
-            if i % 100 == 0 {
-                tokio::task::yield_now().await;
-            }
-        }
-        Ok(result)
-    }
-
-    #[tracing::instrument(skip(self))]
-    pub async fn state_full(
-        &self,
-        shortstatehash: u64,
-    ) -> Result<HashMap<(StateEventType, String), Arc<PduEvent>>> {
-        let full_state = self
-            .load_shortstatehash_info(shortstatehash)?
-            .pop()
-            .expect("there is always one layer")
-            .1;
-
-        let mut result = HashMap::new();
-        let mut i = 0;
-        for compressed in full_state {
-            let (_, eventid) = self.parse_compressed_state_event(compressed)?;
-            if let Some(pdu) = self.get_pdu(&eventid)? {
-                result.insert(
-                    (
-                        pdu.kind.to_string().into(),
-                        pdu.state_key
-                            .as_ref()
-                            .ok_or_else(|| Error::bad_database("State event has no state key."))?
-                            .clone(),
-                    ),
-                    pdu,
-                );
-            }
-
-            i += 1;
-            if i % 100 == 0 {
-                tokio::task::yield_now().await;
-            }
-        }
-
-        Ok(result)
-    }
-
-    /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`).
-    #[tracing::instrument(skip(self))]
-    pub fn state_get_id(
-        &self,
-        shortstatehash: u64,
-        event_type: &StateEventType,
-        state_key: &str,
-    ) -> Result<Option<Arc<EventId>>> {
-        let shortstatekey = match self.get_shortstatekey(event_type, state_key)? {
-            Some(s) => s,
-            None => return Ok(None),
-        };
-        let full_state = self
-            .load_shortstatehash_info(shortstatehash)?
-            .pop()
-            .expect("there is always one layer")
-            .1;
-        Ok(full_state
-            .into_iter()
-            .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes()))
-            .and_then(|compressed| {
-                self.parse_compressed_state_event(compressed)
-                    .ok()
-                    .map(|(_, id)| id)
-            }))
-    }
-
-    /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`).
-    #[tracing::instrument(skip(self))]
-    pub fn state_get(
-        &self,
-        shortstatehash: u64,
-        event_type: &StateEventType,
-        state_key: &str,
-    ) -> Result<Option<Arc<PduEvent>>> {
-        self.state_get_id(shortstatehash, event_type, state_key)?
-            .map_or(Ok(None), |event_id| self.get_pdu(&event_id))
-    }
-
-    /// Returns the state hash for this pdu.
-    pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result<Option<u64>> {
-        self.eventid_shorteventid
-            .get(event_id.as_bytes())?
- .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) - }) - .transpose() - }) - } +pub trait Data { + fn get_room_shortstatehash(room_id: &RoomId); +} /// Returns the last state hash key added to the db for the given room. #[tracing::instrument(skip(self))] @@ -128,382 +14,3 @@ }) } - /// Force the creation of a new StateHash and insert it into the db. - /// - /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. - #[tracing::instrument(skip(self, new_state_ids_compressed, db))] - pub fn force_state( - &self, - room_id: &RoomId, - new_state_ids_compressed: HashSet, - db: &Database, - ) -> Result<()> { - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - for event_id in statediffnew.into_iter().filter_map(|new| { - self.parse_compressed_state_event(new) - .ok() - .map(|(_, id)| id) - }) { - let pdu = match self.get_pdu_json(&event_id)? { - Some(pdu) => pdu, - None => continue, - }; - - if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { - continue; - } - - let pdu: PduEvent = match serde_json::from_str( - &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), - ) { - Ok(pdu) => pdu, - Err(_) => continue, - }; - - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let membership = match serde_json::from_str::(pdu.content.get()) { - Ok(e) => e.membership, - Err(_) => continue, - }; - - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, - }; - - let user_id = match UserId::parse(state_key) { - Ok(id) => id, - Err(_) => continue, - }; - - self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; - } - - self.update_joined_count(room_id, db)?; - - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Returns the full room state. - #[tracing::instrument(skip(self))] - pub async fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns the leaf pdus of a room. - #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - /// Replace the leaves of a room. - /// - /// The provided `event_ids` become the new leaves, this allows a room to have multiple - /// `prev_events`. - #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves<'a>( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. 
- #[tracing::instrument(skip(self, state_ids_compressed, globals))] - pub fn set_event_state( - &self, - event_id: &EventId, - room_id: &RoomId, - state_ids_compressed: HashSet, - globals: &super::globals::Globals, - ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &state_ids_compressed - .iter() - .map(|s| &s[..]) - .collect::>(), - ); - - let (shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, globals)?; - - if !already_existed { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (state_ids_compressed, HashSet::new()) - }; - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 1_000_000, // high number because no state will be based on this one - states_parents, - )?; - } - - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, new_pdu, globals))] - pub fn append_to_state( - &self, - new_pdu: &PduEvent, - globals: &super::globals::Globals, - ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; - - if let Some(p) = previous_shortstatehash { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; - } - - if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let shortstatekey = self.get_or_create_shortstatekey( - &new_pdu.kind.to_string().into(), - state_key, - globals, - )?; - - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; - - let replaces = states_parents - .last() - .map(|info| { - info.1 - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); - - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } - - // TODO: statehash with deterministic inputs - let shortstatehash = globals.next_count()?; - - let mut statediffnew = HashSet::new(); - statediffnew.insert(new); - - let mut statediffremoved = HashSet::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) - } - } - - #[tracing::instrument(skip(self, invite_event))] - pub fn calculate_invite_state( - &self, - invite_event: &PduEvent, - ) -> Result>> { - let mut state = Vec::new(); - // 
Add recommended events - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomCanonicalAlias, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomMember, - invite_event.sender.as_str(), - )? { - state.push(e.to_stripped_state_event()); - } - - state.push(invite_event.to_stripped_state_event()); - Ok(state) - } - - #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 4c75467f..eddfe9e0 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -1,133 +1,8 @@ +pub struct Service { + db: D, +} - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - let mut result = BTreeMap::new(); - let mut i = 0; - for compressed in full_state.into_iter() { - let parsed = self.parse_compressed_state_event(compressed)?; - result.insert(parsed.0, parsed.1); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - Ok(result) - } - - #[tracing::instrument(skip(self))] - pub async fn state_full( - &self, - shortstatehash: u64, - ) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - let mut result = HashMap::new(); - let mut i = 0; - for compressed in full_state { - let (_, eventid) = self.parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.get_pdu(&eventid)? { - result.insert( - ( - pdu.kind.to_string().into(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - ); - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get_id( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { - Some(s) => s, - None => return Ok(None), - }; - let full_state = self - .load_shortstatehash_info(shortstatehash)? 
- .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .into_iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - self.parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get( - &self, - shortstatehash: u64, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) - } - - /// Returns the state hash for this pdu. - pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.eventid_shorteventid - .get(event_id.as_bytes())? - .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) - }) - .transpose() - }) - } - - /// Returns the last state hash key added to the db for the given room. - #[tracing::instrument(skip(self))] - pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - +impl Service { /// Force the creation of a new StateHash and insert it into the db. /// /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. @@ -138,7 +13,7 @@ new_state_ids_compressed: HashSet, db: &Database, ) -> Result<()> { - let previous_shortstatehash = self.current_shortstatehash(room_id)?; + let previous_shortstatehash = self.d.current_shortstatehash(room_id)?; let state_hash = self.calculate_hash( &new_state_ids_compressed @@ -237,49 +112,6 @@ Ok(()) } - /// Returns the full room state. - #[tracing::instrument(skip(self))] - pub async fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get( - &self, - room_id: &RoomId, - event_type: &StateEventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - /// Returns the leaf pdus of a room. 
#[tracing::instrument(skip(self))] pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { @@ -507,3 +339,4 @@ Ok(()) } +} diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 4c75467f..ae26a7c4 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -1,4 +1,3 @@ - /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. #[tracing::instrument(skip(self))] @@ -116,127 +115,6 @@ }) } - /// Returns the last state hash key added to the db for the given room. - #[tracing::instrument(skip(self))] - pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - /// Force the creation of a new StateHash and insert it into the db. - /// - /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. - #[tracing::instrument(skip(self, new_state_ids_compressed, db))] - pub fn force_state( - &self, - room_id: &RoomId, - new_state_ids_compressed: HashSet, - db: &Database, - ) -> Result<()> { - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - for event_id in statediffnew.into_iter().filter_map(|new| { - self.parse_compressed_state_event(new) - .ok() - .map(|(_, id)| id) - }) { - let pdu = match self.get_pdu_json(&event_id)? 
{ - Some(pdu) => pdu, - None => continue, - }; - - if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { - continue; - } - - let pdu: PduEvent = match serde_json::from_str( - &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), - ) { - Ok(pdu) => pdu, - Err(_) => continue, - }; - - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let membership = match serde_json::from_str::(pdu.content.get()) { - Ok(e) => e.membership, - Err(_) => continue, - }; - - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, - }; - - let user_id = match UserId::parse(state_key) { - Ok(id) => id, - Err(_) => continue, - }; - - self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; - } - - self.update_joined_count(room_id, db)?; - - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - - Ok(()) - } - /// Returns the full room state. #[tracing::instrument(skip(self))] pub async fn room_state_full( @@ -280,230 +158,3 @@ } } - /// Returns the leaf pdus of a room. - #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - /// Replace the leaves of a room. - /// - /// The provided `event_ids` become the new leaves, this allows a room to have multiple - /// `prev_events`. - #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves<'a>( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. 
- #[tracing::instrument(skip(self, state_ids_compressed, globals))] - pub fn set_event_state( - &self, - event_id: &EventId, - room_id: &RoomId, - state_ids_compressed: HashSet, - globals: &super::globals::Globals, - ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &state_ids_compressed - .iter() - .map(|s| &s[..]) - .collect::>(), - ); - - let (shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, globals)?; - - if !already_existed { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (state_ids_compressed, HashSet::new()) - }; - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 1_000_000, // high number because no state will be based on this one - states_parents, - )?; - } - - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, new_pdu, globals))] - pub fn append_to_state( - &self, - new_pdu: &PduEvent, - globals: &super::globals::Globals, - ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; - - if let Some(p) = previous_shortstatehash { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; - } - - if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let shortstatekey = self.get_or_create_shortstatekey( - &new_pdu.kind.to_string().into(), - state_key, - globals, - )?; - - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; - - let replaces = states_parents - .last() - .map(|info| { - info.1 - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); - - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } - - // TODO: statehash with deterministic inputs - let shortstatehash = globals.next_count()?; - - let mut statediffnew = HashSet::new(); - statediffnew.insert(new); - - let mut statediffremoved = HashSet::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) - } - } - - #[tracing::instrument(skip(self, invite_event))] - pub fn calculate_invite_state( - &self, - invite_event: &PduEvent, - ) -> Result>> { - let mut state = Vec::new(); - // 
Add recommended events - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomCanonicalAlias, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomMember, - invite_event.sender.as_str(), - )? { - state.push(e.to_stripped_state_event()); - } - - state.push(invite_event.to_stripped_state_event()); - Ok(state) - } - - #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } diff --git a/src/service/rooms/state_cache.rs b/src/service/rooms/state_cache/mod.rs similarity index 100% rename from src/service/rooms/state_cache.rs rename to src/service/rooms/state_cache/mod.rs diff --git a/src/service/rooms/state_compressor.rs b/src/service/rooms/state_compressor/mod.rs similarity index 100% rename from src/service/rooms/state_compressor.rs rename to src/service/rooms/state_compressor/mod.rs diff --git a/src/service/rooms/timeline.rs b/src/service/rooms/timeline/mod.rs similarity index 93% rename from src/service/rooms/timeline.rs rename to src/service/rooms/timeline/mod.rs index fd93344c..6299b16c 100644 --- a/src/service/rooms/timeline.rs +++ b/src/service/rooms/timeline/mod.rs @@ -100,16 +100,6 @@ .transpose() } - /// Returns the json of a pdu. - pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - /// Returns the json of a pdu. pub fn get_non_outlier_pdu_json( &self, @@ -487,19 +477,94 @@ _ => {} } + for appservice in db.appservice.all()? { + if self.appservice_in_room(room_id, &appservice, db)? { + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + continue; + } + + // If the RoomMember event has a non-empty state_key, it is targeted at someone. + // If it is our appservice user, we send this PDU to it. 
+ if pdu.kind == RoomEventType::RoomMember { + if let Some(state_key_uid) = &pdu + .state_key + .as_ref() + .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) + { + if let Some(appservice_uid) = appservice + .1 + .get("sender_localpart") + .and_then(|string| string.as_str()) + .and_then(|string| { + UserId::parse_with_server_name(string, db.globals.server_name()).ok() + }) + { + if state_key_uid == &appservice_uid { + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + continue; + } + } + } + } + + if let Some(namespaces) = appservice.1.get("namespaces") { + let users = namespaces + .get("users") + .and_then(|users| users.as_sequence()) + .map_or_else(Vec::new, |users| { + users + .iter() + .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) + .collect::>() + }); + let aliases = namespaces + .get("aliases") + .and_then(|aliases| aliases.as_sequence()) + .map_or_else(Vec::new, |aliases| { + aliases + .iter() + .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) + .collect::>() + }); + let rooms = namespaces + .get("rooms") + .and_then(|rooms| rooms.as_sequence()); + + let matching_users = |users: &Regex| { + users.is_match(pdu.sender.as_str()) + || pdu.kind == RoomEventType::RoomMember + && pdu + .state_key + .as_ref() + .map_or(false, |state_key| users.is_match(state_key)) + }; + let matching_aliases = |aliases: &Regex| { + self.room_aliases(room_id) + .filter_map(|r| r.ok()) + .any(|room_alias| aliases.is_match(room_alias.as_str())) + }; + + if aliases.iter().any(matching_aliases) + || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) + || users.iter().any(matching_users) + { + db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + } + } + } + + Ok(pdu_id) } - /// Creates a new persisted data unit and adds it to a room. - #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex - ) -> Result> { + pub fn create_hash_and_sign_event( + &self, + pdu_builder: PduBuilder, + sender: &UserId, + room_id: &RoomId, + db: &Database, + _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + ) -> (PduEvent, CanonicalJsonObj) { let PduBuilder { event_type, content, @@ -508,13 +573,16 @@ redacts, } = pdu_builder; - let prev_events = self + let prev_events: Vec<_> = db + .rooms .get_pdu_leaves(room_id)? 
.into_iter() .take(20) - .collect::>(); + .collect(); - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; + let create_event = db + .rooms + .room_state_get(room_id, &StateEventType::RoomCreate, "")?; let create_event_content: Option = create_event .as_ref() @@ -532,7 +600,8 @@ .map_or(db.globals.default_room_version(), |create_event| { create_event.room_version }); - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); + let room_version = + RoomVersion::new(&room_version_id).expect("room version is supported"); let auth_events = self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; @@ -540,12 +609,13 @@ // Our depth is the maximum depth of prev_events + 1 let depth = prev_events .iter() - .filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth)) + .filter_map(|event_id| Some(db.rooms.get_pdu(event_id).ok()??.depth)) .max() .unwrap_or_else(|| uint!(0)) + uint!(1); let mut unsigned = unsigned.unwrap_or_default(); + if let Some(state_key) = &state_key { if let Some(prev_pdu) = self.room_state_get(room_id, &event_type.to_string().into(), state_key)? @@ -561,10 +631,10 @@ } } - let mut pdu = PduEvent { + let pdu = PduEvent { event_id: ruma::event_id!("$thiswillbefilledinlater").into(), room_id: room_id.to_owned(), - sender: sender.to_owned(), + sender: sender_user.to_owned(), origin_server_ts: utils::millis_since_unix_epoch() .try_into() .expect("time is valid"), @@ -616,7 +686,8 @@ // Add origin because synapse likes that (and it's required in the spec) pdu_json.insert( "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_ref().to_owned()), + to_canonical_value(db.globals.server_name()) + .expect("server name is a valid CanonicalJsonValue"), ); match ruma::signatures::hash_and_sign_event( @@ -655,6 +726,22 @@ // Generate short event id let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; + } + + /// Creates a new persisted data unit and adds it to a room. This function takes a + /// roomid_mutex_state, meaning that only this function is able to mutate the room state. + #[tracing::instrument(skip(self, db, _mutex_lock))] + pub fn build_and_append_pdu( + &self, + pdu_builder: PduBuilder, + sender: &UserId, + room_id: &RoomId, + db: &Database, + _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + ) -> Result> { + + let (pdu, pdu_json) = create_hash_and_sign_event()?; + // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. @@ -692,83 +779,40 @@ db.sending.send_pdu(servers.into_iter(), &pdu_id)?; - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. 
- if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - if let Some(appservice_uid) = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }) - { - if state_key_uid == &appservice_uid { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - } - } - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); + Ok(pdu.event_id) + } - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; + /// Append the incoming event setting the state snapshot to the state from the + /// server that sent the event. + #[tracing::instrument(skip_all)] + fn append_incoming_pdu<'a>( + db: &Database, + pdu: &PduEvent, + pdu_json: CanonicalJsonObject, + new_room_leaves: impl IntoIterator + Clone + Debug, + state_ids_compressed: HashSet, + soft_fail: bool, + _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + ) -> Result>> { + // We append to state before appending the pdu, so we don't have a moment in time with the + // pdu without it's state. This is okay because append_pdu can't fail. + db.rooms.set_event_state( + &pdu.event_id, + &pdu.room_id, + state_ids_compressed, + &db.globals, + )?; - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } + if soft_fail { + db.rooms + .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; + db.rooms.replace_pdu_leaves(&pdu.room_id, new_room_leaves)?; + return Ok(None); } - Ok(pdu.event_id) + let pdu_id = db.rooms.append_pdu(pdu, pdu_json, new_room_leaves, db)?; + + Ok(Some(pdu_id)) } /// Returns an iterator over all PDUs in a room. 
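The hunks above split the old build_and_append_pdu into two stages and add a separate append_incoming_pdu entry point for events received over federation. Roughly, the two paths now compose like this (a minimal sketch using the names from this diff; signatures are abbreviated and the elided context lines are assumed, so treat it as an illustration rather than the exact call sites):

    // Locally created events: build and sign first, then append while holding the
    // roomid_mutex_state guard, so only this path is able to mutate room state.
    let (pdu, pdu_json) =
        self.create_hash_and_sign_event(pdu_builder, sender, room_id, db, &state_lock)?;
    self.append_to_state(&pdu, &db.globals)?;            // state goes in first...
    let pdu_id = self.append_pdu(&pdu, pdu_json, [&*pdu.event_id], db)?; // ...then the event, which becomes the new leaf
    db.sending.send_pdu(servers.into_iter(), &pdu_id)?;

    // Incoming federation events: the sending server's state snapshot is attached via
    // set_event_state; soft-failed events only update references and leaves and are
    // never appended to the timeline.
    db.rooms
        .set_event_state(&pdu.event_id, &pdu.room_id, state_ids_compressed, &db.globals)?;
    if soft_fail {
        db.rooms.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?;
        db.rooms.replace_pdu_leaves(&pdu.room_id, new_room_leaves)?;
    } else {
        let pdu_id = db.rooms.append_pdu(&pdu, pdu_json, new_room_leaves, db)?;
    }

The appservice namespace matching (users, aliases and rooms regexes) is moved up into the preceding append_pdu hunk, so both paths go through the same appservice delivery logic.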
diff --git a/src/service/rooms/user.rs b/src/service/rooms/user/mod.rs similarity index 100% rename from src/service/rooms/user.rs rename to src/service/rooms/user/mod.rs From cc801528899dd37afcf7669ae5ebfeb050fc1eb1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Mon, 20 Jun 2022 12:08:58 +0200 Subject: [PATCH 314/445] refactor: split up force_state --- src/service/rooms/state/mod.rs | 54 ++------------------- src/service/rooms/state_compressor/mod.rs | 59 ++++++++++++++++++++++- 2 files changed, 62 insertions(+), 51 deletions(-) diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index eddfe9e0..da03ad4c 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -3,62 +3,16 @@ pub struct Service { } impl Service { - /// Force the creation of a new StateHash and insert it into the db. - /// - /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. + /// Set the room to the given statehash and update caches. #[tracing::instrument(skip(self, new_state_ids_compressed, db))] pub fn force_state( &self, room_id: &RoomId, - new_state_ids_compressed: HashSet, + shortstatehash: u64, + statediffnew :HashSet, + statediffremoved :HashSet, db: &Database, ) -> Result<()> { - let previous_shortstatehash = self.d.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; for event_id in statediffnew.into_iter().filter_map(|new| { self.parse_compressed_state_event(new) diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index a56c0f5f..197ce844 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -241,6 +241,64 @@ Ok(()) } + /// Returns the new shortstatehash + pub fn save_state( + room_id: &RoomId, + new_state_ids_compressed: HashSet, + ) -> Result<(u64, + HashSet, // added + HashSet)> // removed + { + let previous_shortstatehash = self.d.current_shortstatehash(room_id)?; + + let state_hash = self.calculate_hash( + &new_state_ids_compressed + .iter() + .map(|bytes| &bytes[..]) + .collect::>(), + ); + + let (new_shortstatehash, already_existed) = + self.get_or_create_shortstatehash(&state_hash, &db.globals)?; + + if Some(new_shortstatehash) == previous_shortstatehash { + return Ok(()); + } + + let states_parents = previous_shortstatehash + .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; + + let (statediffnew, 
statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() + { + let statediffnew: HashSet<_> = new_state_ids_compressed + .difference(&parent_stateinfo.1) + .copied() + .collect(); + + let statediffremoved: HashSet<_> = parent_stateinfo + .1 + .difference(&new_state_ids_compressed) + .copied() + .collect(); + + (statediffnew, statediffremoved) + } else { + (new_state_ids_compressed, HashSet::new()) + }; + + if !already_existed { + self.save_state_from_diff( + new_shortstatehash, + statediffnew.clone(), + statediffremoved, + 2, // every state change is 2 event changes on average + states_parents, + )?; + }; + + Ok((new_shortstatehash, statediffnew, statediffremoved)) + } + #[tracing::instrument(skip(self))] pub fn get_auth_chain_from_cache<'a>( &'a self, @@ -298,4 +356,3 @@ Ok(()) } - From 28644f236e7e8871b365696a3c03e43adbd19aaa Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:46:50 +0200 Subject: [PATCH 315/445] refactor: prepare src/database/key_value.rs from src/service/rooms/state/data.rs --- src/{service/rooms/state/data.rs => database/key_value.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service/rooms/state/data.rs => database/key_value.rs} (100%) diff --git a/src/service/rooms/state/data.rs b/src/database/key_value.rs similarity index 100% rename from src/service/rooms/state/data.rs rename to src/database/key_value.rs From 1442c64420345b7e6dff7e19cb628348d3d96c6a Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:46:50 +0200 Subject: [PATCH 316/445] refactor: restore src/service/rooms/state/data.rs --- src/service/rooms/state/data.rs | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) create mode 100644 src/service/rooms/state/data.rs diff --git a/src/service/rooms/state/data.rs b/src/service/rooms/state/data.rs new file mode 100644 index 00000000..4b42ca8e --- /dev/null +++ b/src/service/rooms/state/data.rs @@ -0,0 +1,16 @@ +pub trait Data { + fn get_room_shortstatehash(room_id: &RoomId); +} + + /// Returns the last state hash key added to the db for the given room. + #[tracing::instrument(skip(self))] + pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { + self.roomid_shortstatehash + .get(room_id.as_bytes())? 
+ .map_or(Ok(None), |bytes| { + Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") + })?)) + }) + } + From 33c0e0f430663e48c012dbb71d328dce5a2a14a8 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:46:57 +0200 Subject: [PATCH 317/445] refactor: prepare src/database/key_value.rs from src/service/rooms/alias/mod.rs --- src/{service/rooms/alias/mod.rs => database/key_value.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service/rooms/alias/mod.rs => database/key_value.rs} (100%) diff --git a/src/service/rooms/alias/mod.rs b/src/database/key_value.rs similarity index 100% rename from src/service/rooms/alias/mod.rs rename to src/database/key_value.rs From a2a327af7caf309a5ef7d95e2010e25d0e75d019 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:46:57 +0200 Subject: [PATCH 318/445] refactor: prepare src/database/key_value.rs from src/service/rooms/state/mod.rs --- src/{service/rooms/state/mod.rs => database/key_value.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service/rooms/state/mod.rs => database/key_value.rs} (100%) diff --git a/src/service/rooms/state/mod.rs b/src/database/key_value.rs similarity index 100% rename from src/service/rooms/state/mod.rs rename to src/database/key_value.rs From 05487c7c158a7346c631efbb93b88bd3203ef3bc Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:46:57 +0200 Subject: [PATCH 319/445] refactor: restore src/service/rooms/alias/mod.rs --- src/service/rooms/alias/mod.rs | 66 ++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) create mode 100644 src/service/rooms/alias/mod.rs diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs new file mode 100644 index 00000000..393ad671 --- /dev/null +++ b/src/service/rooms/alias/mod.rs @@ -0,0 +1,66 @@ + + #[tracing::instrument(skip(self, globals))] + pub fn set_alias( + &self, + alias: &RoomAliasId, + room_id: Option<&RoomId>, + globals: &super::globals::Globals, + ) -> Result<()> { + if let Some(room_id) = room_id { + // New alias + self.alias_roomid + .insert(alias.alias().as_bytes(), room_id.as_bytes())?; + let mut aliasid = room_id.as_bytes().to_vec(); + aliasid.push(0xff); + aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); + self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; + } else { + // room_id=None means remove alias + if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { + let mut prefix = room_id.to_vec(); + prefix.push(0xff); + + for (key, _) in self.aliasid_alias.scan_prefix(prefix) { + self.aliasid_alias.remove(&key)?; + } + self.alias_roomid.remove(alias.alias().as_bytes())?; + } else { + return Err(Error::BadRequest( + ErrorKind::NotFound, + "Alias does not exist.", + )); + } + } + + Ok(()) + } + + #[tracing::instrument(skip(self))] + pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result>> { + self.alias_roomid + .get(alias.alias().as_bytes())? + .map(|bytes| { + RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Room ID in alias_roomid is invalid unicode.") + })?) 
+ .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) + }) + .transpose() + } + + #[tracing::instrument(skip(self))] + pub fn room_aliases<'a>( + &'a self, + room_id: &RoomId, + ) -> impl Iterator>> + 'a { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { + utils::string_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? + .try_into() + .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) + }) + } + From adafb335ffbfae4097d008685af78c2b15fa0f0d Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:46:57 +0200 Subject: [PATCH 320/445] refactor: restore src/service/rooms/state/mod.rs --- src/service/rooms/state/mod.rs | 296 +++++++++++++++++++++++++++++++++ 1 file changed, 296 insertions(+) create mode 100644 src/service/rooms/state/mod.rs diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs new file mode 100644 index 00000000..da03ad4c --- /dev/null +++ b/src/service/rooms/state/mod.rs @@ -0,0 +1,296 @@ +pub struct Service { + db: D, +} + +impl Service { + /// Set the room to the given statehash and update caches. + #[tracing::instrument(skip(self, new_state_ids_compressed, db))] + pub fn force_state( + &self, + room_id: &RoomId, + shortstatehash: u64, + statediffnew :HashSet, + statediffremoved :HashSet, + db: &Database, + ) -> Result<()> { + + for event_id in statediffnew.into_iter().filter_map(|new| { + self.parse_compressed_state_event(new) + .ok() + .map(|(_, id)| id) + }) { + let pdu = match self.get_pdu_json(&event_id)? { + Some(pdu) => pdu, + None => continue, + }; + + if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { + continue; + } + + let pdu: PduEvent = match serde_json::from_str( + &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), + ) { + Ok(pdu) => pdu, + Err(_) => continue, + }; + + #[derive(Deserialize)] + struct ExtractMembership { + membership: MembershipState, + } + + let membership = match serde_json::from_str::(pdu.content.get()) { + Ok(e) => e.membership, + Err(_) => continue, + }; + + let state_key = match pdu.state_key { + Some(k) => k, + None => continue, + }; + + let user_id = match UserId::parse(state_key) { + Ok(id) => id, + Err(_) => continue, + }; + + self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; + } + + self.update_joined_count(room_id, db)?; + + self.roomid_shortstatehash + .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; + + Ok(()) + } + + /// Returns the leaf pdus of a room. + #[tracing::instrument(skip(self))] + pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + self.roomid_pduleaves + .scan_prefix(prefix) + .map(|(_, bytes)| { + EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") + })?) + .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) + }) + .collect() + } + + /// Replace the leaves of a room. + /// + /// The provided `event_ids` become the new leaves, this allows a room to have multiple + /// `prev_events`. 
+ #[tracing::instrument(skip(self))] + pub fn replace_pdu_leaves<'a>( + &self, + room_id: &RoomId, + event_ids: impl IntoIterator + Debug, + ) -> Result<()> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { + self.roomid_pduleaves.remove(&key)?; + } + + for event_id in event_ids { + let mut key = prefix.to_owned(); + key.extend_from_slice(event_id.as_bytes()); + self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; + } + + Ok(()) + } + + /// Generates a new StateHash and associates it with the incoming event. + /// + /// This adds all current state events (not including the incoming event) + /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. + #[tracing::instrument(skip(self, state_ids_compressed, globals))] + pub fn set_event_state( + &self, + event_id: &EventId, + room_id: &RoomId, + state_ids_compressed: HashSet, + globals: &super::globals::Globals, + ) -> Result<()> { + let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; + + let previous_shortstatehash = self.current_shortstatehash(room_id)?; + + let state_hash = self.calculate_hash( + &state_ids_compressed + .iter() + .map(|s| &s[..]) + .collect::>(), + ); + + let (shortstatehash, already_existed) = + self.get_or_create_shortstatehash(&state_hash, globals)?; + + if !already_existed { + let states_parents = previous_shortstatehash + .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; + + let (statediffnew, statediffremoved) = + if let Some(parent_stateinfo) = states_parents.last() { + let statediffnew: HashSet<_> = state_ids_compressed + .difference(&parent_stateinfo.1) + .copied() + .collect(); + + let statediffremoved: HashSet<_> = parent_stateinfo + .1 + .difference(&state_ids_compressed) + .copied() + .collect(); + + (statediffnew, statediffremoved) + } else { + (state_ids_compressed, HashSet::new()) + }; + self.save_state_from_diff( + shortstatehash, + statediffnew, + statediffremoved, + 1_000_000, // high number because no state will be based on this one + states_parents, + )?; + } + + self.shorteventid_shortstatehash + .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; + + Ok(()) + } + + /// Generates a new StateHash and associates it with the incoming event. + /// + /// This adds all current state events (not including the incoming event) + /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. 
+ #[tracing::instrument(skip(self, new_pdu, globals))] + pub fn append_to_state( + &self, + new_pdu: &PduEvent, + globals: &super::globals::Globals, + ) -> Result { + let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; + + let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; + + if let Some(p) = previous_shortstatehash { + self.shorteventid_shortstatehash + .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; + } + + if let Some(state_key) = &new_pdu.state_key { + let states_parents = previous_shortstatehash + .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; + + let shortstatekey = self.get_or_create_shortstatekey( + &new_pdu.kind.to_string().into(), + state_key, + globals, + )?; + + let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; + + let replaces = states_parents + .last() + .map(|info| { + info.1 + .iter() + .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) + }) + .unwrap_or_default(); + + if Some(&new) == replaces { + return Ok(previous_shortstatehash.expect("must exist")); + } + + // TODO: statehash with deterministic inputs + let shortstatehash = globals.next_count()?; + + let mut statediffnew = HashSet::new(); + statediffnew.insert(new); + + let mut statediffremoved = HashSet::new(); + if let Some(replaces) = replaces { + statediffremoved.insert(*replaces); + } + + self.save_state_from_diff( + shortstatehash, + statediffnew, + statediffremoved, + 2, + states_parents, + )?; + + Ok(shortstatehash) + } else { + Ok(previous_shortstatehash.expect("first event in room must be a state event")) + } + } + + #[tracing::instrument(skip(self, invite_event))] + pub fn calculate_invite_state( + &self, + invite_event: &PduEvent, + ) -> Result>> { + let mut state = Vec::new(); + // Add recommended events + if let Some(e) = + self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? + { + state.push(e.to_stripped_state_event()); + } + if let Some(e) = + self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? + { + state.push(e.to_stripped_state_event()); + } + if let Some(e) = self.room_state_get( + &invite_event.room_id, + &StateEventType::RoomCanonicalAlias, + "", + )? { + state.push(e.to_stripped_state_event()); + } + if let Some(e) = + self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? + { + state.push(e.to_stripped_state_event()); + } + if let Some(e) = + self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? + { + state.push(e.to_stripped_state_event()); + } + if let Some(e) = self.room_state_get( + &invite_event.room_id, + &StateEventType::RoomMember, + invite_event.sender.as_str(), + )? 
{ + state.push(e.to_stripped_state_event()); + } + + state.push(invite_event.to_stripped_state_event()); + Ok(state) + } + + #[tracing::instrument(skip(self))] + pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { + self.roomid_shortstatehash + .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; + + Ok(()) + } +} From 9e1ab74bb438c60a8ccb90af98af36f1da4fb3df Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:46:58 +0200 Subject: [PATCH 321/445] refactor: prepare src/database/key_value.rs from src/service/rooms/directory/mod.rs --- src/{service/rooms/directory/mod.rs => database/key_value.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service/rooms/directory/mod.rs => database/key_value.rs} (100%) diff --git a/src/service/rooms/directory/mod.rs b/src/database/key_value.rs similarity index 100% rename from src/service/rooms/directory/mod.rs rename to src/database/key_value.rs From a563b1ba9a9e60ab0d4c6c7b787b3855e048647e Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:46:58 +0200 Subject: [PATCH 322/445] refactor: prepare src/database/key_value.rs from src/service/rooms/edus/mod.rs --- src/{service/rooms/edus/mod.rs => database/key_value.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service/rooms/edus/mod.rs => database/key_value.rs} (100%) diff --git a/src/service/rooms/edus/mod.rs b/src/database/key_value.rs similarity index 100% rename from src/service/rooms/edus/mod.rs rename to src/database/key_value.rs From 0071a9cbf4bef95b7125d11d7950dfee991cd625 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:46:58 +0200 Subject: [PATCH 323/445] refactor: restore src/service/rooms/directory/mod.rs --- src/service/rooms/directory/mod.rs | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) create mode 100644 src/service/rooms/directory/mod.rs diff --git a/src/service/rooms/directory/mod.rs b/src/service/rooms/directory/mod.rs new file mode 100644 index 00000000..8be7bd57 --- /dev/null +++ b/src/service/rooms/directory/mod.rs @@ -0,0 +1,29 @@ + + #[tracing::instrument(skip(self))] + pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { + if public { + self.publicroomids.insert(room_id.as_bytes(), &[])?; + } else { + self.publicroomids.remove(room_id.as_bytes())?; + } + + Ok(()) + } + + #[tracing::instrument(skip(self))] + pub fn is_public_room(&self, room_id: &RoomId) -> Result { + Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) + } + + #[tracing::instrument(skip(self))] + pub fn public_rooms(&self) -> impl Iterator>> + '_ { + self.publicroomids.iter().map(|(bytes, _)| { + RoomId::parse( + utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Room ID in publicroomids is invalid unicode.") + })?, + ) + .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) + }) + } + From 85e571baddb6e5ac7e3bc81ac94bce48a0d14981 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:46:59 +0200 Subject: [PATCH 324/445] refactor: prepare src/database/key_value.rs from src/service/rooms/lazy_loading/mod.rs --- src/{service/rooms/lazy_loading/mod.rs => database/key_value.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service/rooms/lazy_loading/mod.rs => database/key_value.rs} (100%) diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/database/key_value.rs similarity index 100% rename from 
src/service/rooms/lazy_loading/mod.rs rename to src/database/key_value.rs From 931c8ece4a8c984effc8ff87b9228b237611a7dc Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:46:59 +0200 Subject: [PATCH 325/445] refactor: prepare src/database/key_value.rs from src/service/rooms/metadata/mod.rs --- src/{service/rooms/metadata/mod.rs => database/key_value.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service/rooms/metadata/mod.rs => database/key_value.rs} (100%) diff --git a/src/service/rooms/metadata/mod.rs b/src/database/key_value.rs similarity index 100% rename from src/service/rooms/metadata/mod.rs rename to src/database/key_value.rs From 06bfddf0daed72d1c0a408faa565091ad6208ffb Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:46:59 +0200 Subject: [PATCH 326/445] refactor: restore src/service/rooms/lazy_loading/mod.rs --- src/service/rooms/lazy_loading/mod.rs | 91 +++++++++++++++++++++++++++ 1 file changed, 91 insertions(+) create mode 100644 src/service/rooms/lazy_loading/mod.rs diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs new file mode 100644 index 00000000..a402702a --- /dev/null +++ b/src/service/rooms/lazy_loading/mod.rs @@ -0,0 +1,91 @@ + + #[tracing::instrument(skip(self))] + pub fn lazy_load_was_sent_before( + &self, + user_id: &UserId, + device_id: &DeviceId, + room_id: &RoomId, + ll_user: &UserId, + ) -> Result { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(device_id.as_bytes()); + key.push(0xff); + key.extend_from_slice(room_id.as_bytes()); + key.push(0xff); + key.extend_from_slice(ll_user.as_bytes()); + Ok(self.lazyloadedids.get(&key)?.is_some()) + } + + #[tracing::instrument(skip(self))] + pub fn lazy_load_mark_sent( + &self, + user_id: &UserId, + device_id: &DeviceId, + room_id: &RoomId, + lazy_load: HashSet>, + count: u64, + ) { + self.lazy_load_waiting.lock().unwrap().insert( + ( + user_id.to_owned(), + device_id.to_owned(), + room_id.to_owned(), + count, + ), + lazy_load, + ); + } + + #[tracing::instrument(skip(self))] + pub fn lazy_load_confirm_delivery( + &self, + user_id: &UserId, + device_id: &DeviceId, + room_id: &RoomId, + since: u64, + ) -> Result<()> { + if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( + user_id.to_owned(), + device_id.to_owned(), + room_id.to_owned(), + since, + )) { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xff); + prefix.extend_from_slice(device_id.as_bytes()); + prefix.push(0xff); + prefix.extend_from_slice(room_id.as_bytes()); + prefix.push(0xff); + + for ll_id in user_ids { + let mut key = prefix.clone(); + key.extend_from_slice(ll_id.as_bytes()); + self.lazyloadedids.insert(&key, &[])?; + } + } + + Ok(()) + } + + #[tracing::instrument(skip(self))] + pub fn lazy_load_reset( + &self, + user_id: &UserId, + device_id: &DeviceId, + room_id: &RoomId, + ) -> Result<()> { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xff); + prefix.extend_from_slice(device_id.as_bytes()); + prefix.push(0xff); + prefix.extend_from_slice(room_id.as_bytes()); + prefix.push(0xff); + + for (key, _) in self.lazyloadedids.scan_prefix(prefix) { + self.lazyloadedids.remove(&key)?; + } + + Ok(()) + } + From 42fe118cbe608e0b39a589b9192c47f77b2a313f Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:46:59 +0200 Subject: [PATCH 327/445] refactor: restore src/service/rooms/edus/mod.rs --- src/service/rooms/edus/mod.rs | 
550 ++++++++++++++++++++++++++++++++++ 1 file changed, 550 insertions(+) create mode 100644 src/service/rooms/edus/mod.rs diff --git a/src/service/rooms/edus/mod.rs b/src/service/rooms/edus/mod.rs new file mode 100644 index 00000000..118efd4c --- /dev/null +++ b/src/service/rooms/edus/mod.rs @@ -0,0 +1,550 @@ +use crate::{database::abstraction::Tree, utils, Error, Result}; +use ruma::{ + events::{ + presence::{PresenceEvent, PresenceEventContent}, + receipt::ReceiptEvent, + SyncEphemeralRoomEvent, + }, + presence::PresenceState, + serde::Raw, + signatures::CanonicalJsonObject, + RoomId, UInt, UserId, +}; +use std::{ + collections::{HashMap, HashSet}, + mem, + sync::Arc, +}; + +pub struct RoomEdus { + pub(in super::super) readreceiptid_readreceipt: Arc, // ReadReceiptId = RoomId + Count + UserId + pub(in super::super) roomuserid_privateread: Arc, // RoomUserId = Room + User, PrivateRead = Count + pub(in super::super) roomuserid_lastprivatereadupdate: Arc, // LastPrivateReadUpdate = Count + pub(in super::super) typingid_userid: Arc, // TypingId = RoomId + TimeoutTime + Count + pub(in super::super) roomid_lasttypingupdate: Arc, // LastRoomTypingUpdate = Count + pub(in super::super) presenceid_presence: Arc, // PresenceId = RoomId + Count + UserId + pub(in super::super) userid_lastpresenceupdate: Arc, // LastPresenceUpdate = Count +} + +impl RoomEdus { + /// Adds an event which will be saved until a new event replaces it (e.g. read receipt). + pub fn readreceipt_update( + &self, + user_id: &UserId, + room_id: &RoomId, + event: ReceiptEvent, + globals: &super::super::globals::Globals, + ) -> Result<()> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + let mut last_possible_key = prefix.clone(); + last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); + + // Remove old entry + if let Some((old, _)) = self + .readreceiptid_readreceipt + .iter_from(&last_possible_key, true) + .take_while(|(key, _)| key.starts_with(&prefix)) + .find(|(key, _)| { + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element") + == user_id.as_bytes() + }) + { + // This is the old room_latest + self.readreceiptid_readreceipt.remove(&old)?; + } + + let mut room_latest_id = prefix; + room_latest_id.extend_from_slice(&globals.next_count()?.to_be_bytes()); + room_latest_id.push(0xff); + room_latest_id.extend_from_slice(user_id.as_bytes()); + + self.readreceiptid_readreceipt.insert( + &room_latest_id, + &serde_json::to_vec(&event).expect("EduEvent::to_string always works"), + )?; + + Ok(()) + } + + /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. 
+ #[tracing::instrument(skip(self))] + pub fn readreceipts_since<'a>( + &'a self, + room_id: &RoomId, + since: u64, + ) -> impl Iterator< + Item = Result<( + Box, + u64, + Raw, + )>, + > + 'a { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + let prefix2 = prefix.clone(); + + let mut first_possible_edu = prefix.clone(); + first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since + + self.readreceiptid_readreceipt + .iter_from(&first_possible_edu, false) + .take_while(move |(k, _)| k.starts_with(&prefix2)) + .map(move |(k, v)| { + let count = + utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::()]) + .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; + let user_id = UserId::parse( + utils::string_from_bytes(&k[prefix.len() + mem::size_of::() + 1..]) + .map_err(|_| { + Error::bad_database("Invalid readreceiptid userid bytes in db.") + })?, + ) + .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?; + + let mut json = serde_json::from_slice::(&v).map_err(|_| { + Error::bad_database("Read receipt in roomlatestid_roomlatest is invalid json.") + })?; + json.remove("room_id"); + + Ok(( + user_id, + count, + Raw::from_json( + serde_json::value::to_raw_value(&json).expect("json is valid raw value"), + ), + )) + }) + } + + /// Sets a private read marker at `count`. + #[tracing::instrument(skip(self, globals))] + pub fn private_read_set( + &self, + room_id: &RoomId, + user_id: &UserId, + count: u64, + globals: &super::super::globals::Globals, + ) -> Result<()> { + let mut key = room_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(user_id.as_bytes()); + + self.roomuserid_privateread + .insert(&key, &count.to_be_bytes())?; + + self.roomuserid_lastprivatereadupdate + .insert(&key, &globals.next_count()?.to_be_bytes())?; + + Ok(()) + } + + /// Returns the private read marker. + #[tracing::instrument(skip(self))] + pub fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { + let mut key = room_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(user_id.as_bytes()); + + self.roomuserid_privateread + .get(&key)? + .map_or(Ok(None), |v| { + Ok(Some(utils::u64_from_bytes(&v).map_err(|_| { + Error::bad_database("Invalid private read marker bytes") + })?)) + }) + } + + /// Returns the count of the last typing update in this room. + pub fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let mut key = room_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(user_id.as_bytes()); + + Ok(self + .roomuserid_lastprivatereadupdate + .get(&key)? + .map(|bytes| { + utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.") + }) + }) + .transpose()? + .unwrap_or(0)) + } + + /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is + /// called. 
+ pub fn typing_add( + &self, + user_id: &UserId, + room_id: &RoomId, + timeout: u64, + globals: &super::super::globals::Globals, + ) -> Result<()> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + let count = globals.next_count()?.to_be_bytes(); + + let mut room_typing_id = prefix; + room_typing_id.extend_from_slice(&timeout.to_be_bytes()); + room_typing_id.push(0xff); + room_typing_id.extend_from_slice(&count); + + self.typingid_userid + .insert(&room_typing_id, &*user_id.as_bytes())?; + + self.roomid_lasttypingupdate + .insert(room_id.as_bytes(), &count)?; + + Ok(()) + } + + /// Removes a user from typing before the timeout is reached. + pub fn typing_remove( + &self, + user_id: &UserId, + room_id: &RoomId, + globals: &super::super::globals::Globals, + ) -> Result<()> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + let user_id = user_id.to_string(); + + let mut found_outdated = false; + + // Maybe there are multiple ones from calling roomtyping_add multiple times + for outdated_edu in self + .typingid_userid + .scan_prefix(prefix) + .filter(|(_, v)| &**v == user_id.as_bytes()) + { + self.typingid_userid.remove(&outdated_edu.0)?; + found_outdated = true; + } + + if found_outdated { + self.roomid_lasttypingupdate + .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; + } + + Ok(()) + } + + /// Makes sure that typing events with old timestamps get removed. + fn typings_maintain( + &self, + room_id: &RoomId, + globals: &super::super::globals::Globals, + ) -> Result<()> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + let current_timestamp = utils::millis_since_unix_epoch(); + + let mut found_outdated = false; + + // Find all outdated edus before inserting a new one + for outdated_edu in self + .typingid_userid + .scan_prefix(prefix) + .map(|(key, _)| { + Ok::<_, Error>(( + key.clone(), + utils::u64_from_bytes( + &key.splitn(2, |&b| b == 0xff).nth(1).ok_or_else(|| { + Error::bad_database("RoomTyping has invalid timestamp or delimiters.") + })?[0..mem::size_of::()], + ) + .map_err(|_| Error::bad_database("RoomTyping has invalid timestamp bytes."))?, + )) + }) + .filter_map(|r| r.ok()) + .take_while(|&(_, timestamp)| timestamp < current_timestamp) + { + // This is an outdated edu (time > timestamp) + self.typingid_userid.remove(&outdated_edu.0)?; + found_outdated = true; + } + + if found_outdated { + self.roomid_lasttypingupdate + .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; + } + + Ok(()) + } + + /// Returns the count of the last typing update in this room. + #[tracing::instrument(skip(self, globals))] + pub fn last_typing_update( + &self, + room_id: &RoomId, + globals: &super::super::globals::Globals, + ) -> Result { + self.typings_maintain(room_id, globals)?; + + Ok(self + .roomid_lasttypingupdate + .get(room_id.as_bytes())? + .map(|bytes| { + utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") + }) + }) + .transpose()? + .unwrap_or(0)) + } + + pub fn typings_all( + &self, + room_id: &RoomId, + ) -> Result> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + let mut user_ids = HashSet::new(); + + for (_, user_id) in self.typingid_userid.scan_prefix(prefix) { + let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| { + Error::bad_database("User ID in typingid_userid is invalid unicode.") + })?) 
+ .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?; + + user_ids.insert(user_id); + } + + Ok(SyncEphemeralRoomEvent { + content: ruma::events::typing::TypingEventContent { + user_ids: user_ids.into_iter().collect(), + }, + }) + } + + /// Adds a presence event which will be saved until a new event replaces it. + /// + /// Note: This method takes a RoomId because presence updates are always bound to rooms to + /// make sure users outside these rooms can't see them. + pub fn update_presence( + &self, + user_id: &UserId, + room_id: &RoomId, + presence: PresenceEvent, + globals: &super::super::globals::Globals, + ) -> Result<()> { + // TODO: Remove old entry? Or maybe just wipe completely from time to time? + + let count = globals.next_count()?.to_be_bytes(); + + let mut presence_id = room_id.as_bytes().to_vec(); + presence_id.push(0xff); + presence_id.extend_from_slice(&count); + presence_id.push(0xff); + presence_id.extend_from_slice(presence.sender.as_bytes()); + + self.presenceid_presence.insert( + &presence_id, + &serde_json::to_vec(&presence).expect("PresenceEvent can be serialized"), + )?; + + self.userid_lastpresenceupdate.insert( + user_id.as_bytes(), + &utils::millis_since_unix_epoch().to_be_bytes(), + )?; + + Ok(()) + } + + /// Resets the presence timeout, so the user will stay in their current presence state. + #[tracing::instrument(skip(self))] + pub fn ping_presence(&self, user_id: &UserId) -> Result<()> { + self.userid_lastpresenceupdate.insert( + user_id.as_bytes(), + &utils::millis_since_unix_epoch().to_be_bytes(), + )?; + + Ok(()) + } + + /// Returns the timestamp of the last presence update of this user in millis since the unix epoch. + pub fn last_presence_update(&self, user_id: &UserId) -> Result> { + self.userid_lastpresenceupdate + .get(user_id.as_bytes())? + .map(|bytes| { + utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") + }) + }) + .transpose() + } + + pub fn get_last_presence_event( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result> { + let last_update = match self.last_presence_update(user_id)? { + Some(last) => last, + None => return Ok(None), + }; + + let mut presence_id = room_id.as_bytes().to_vec(); + presence_id.push(0xff); + presence_id.extend_from_slice(&last_update.to_be_bytes()); + presence_id.push(0xff); + presence_id.extend_from_slice(user_id.as_bytes()); + + self.presenceid_presence + .get(&presence_id)? + .map(|value| { + let mut presence: PresenceEvent = serde_json::from_slice(&value) + .map_err(|_| Error::bad_database("Invalid presence event in db."))?; + let current_timestamp: UInt = utils::millis_since_unix_epoch() + .try_into() + .expect("time is valid"); + + if presence.content.presence == PresenceState::Online { + // Don't set last_active_ago when the user is online + presence.content.last_active_ago = None; + } else { + // Convert from timestamp to duration + presence.content.last_active_ago = presence + .content + .last_active_ago + .map(|timestamp| current_timestamp - timestamp); + } + + Ok(presence) + }) + .transpose() + } + + /// Sets all users to offline who have been quiet for too long. 
+ fn _presence_maintain( + &self, + rooms: &super::Rooms, + globals: &super::super::globals::Globals, + ) -> Result<()> { + let current_timestamp = utils::millis_since_unix_epoch(); + + for (user_id_bytes, last_timestamp) in self + .userid_lastpresenceupdate + .iter() + .filter_map(|(k, bytes)| { + Some(( + k, + utils::u64_from_bytes(&bytes) + .map_err(|_| { + Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") + }) + .ok()?, + )) + }) + .take_while(|(_, timestamp)| current_timestamp.saturating_sub(*timestamp) > 5 * 60_000) + // 5 Minutes + { + // Send new presence events to set the user offline + let count = globals.next_count()?.to_be_bytes(); + let user_id: Box<_> = utils::string_from_bytes(&user_id_bytes) + .map_err(|_| { + Error::bad_database("Invalid UserId bytes in userid_lastpresenceupdate.") + })? + .try_into() + .map_err(|_| Error::bad_database("Invalid UserId in userid_lastpresenceupdate."))?; + for room_id in rooms.rooms_joined(&user_id).filter_map(|r| r.ok()) { + let mut presence_id = room_id.as_bytes().to_vec(); + presence_id.push(0xff); + presence_id.extend_from_slice(&count); + presence_id.push(0xff); + presence_id.extend_from_slice(&user_id_bytes); + + self.presenceid_presence.insert( + &presence_id, + &serde_json::to_vec(&PresenceEvent { + content: PresenceEventContent { + avatar_url: None, + currently_active: None, + displayname: None, + last_active_ago: Some( + last_timestamp.try_into().expect("time is valid"), + ), + presence: PresenceState::Offline, + status_msg: None, + }, + sender: user_id.to_owned(), + }) + .expect("PresenceEvent can be serialized"), + )?; + } + + self.userid_lastpresenceupdate.insert( + user_id.as_bytes(), + &utils::millis_since_unix_epoch().to_be_bytes(), + )?; + } + + Ok(()) + } + + /// Returns an iterator over the most recent presence updates that happened after the event with id `since`. 
+ #[tracing::instrument(skip(self, since, _rooms, _globals))] + pub fn presence_since( + &self, + room_id: &RoomId, + since: u64, + _rooms: &super::Rooms, + _globals: &super::super::globals::Globals, + ) -> Result, PresenceEvent>> { + //self.presence_maintain(rooms, globals)?; + + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + let mut first_possible_edu = prefix.clone(); + first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since + let mut hashmap = HashMap::new(); + + for (key, value) in self + .presenceid_presence + .iter_from(&*first_possible_edu, false) + .take_while(|(key, _)| key.starts_with(&prefix)) + { + let user_id = UserId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| Error::bad_database("Invalid UserId bytes in presenceid_presence."))?, + ) + .map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?; + + let mut presence: PresenceEvent = serde_json::from_slice(&value) + .map_err(|_| Error::bad_database("Invalid presence event in db."))?; + + let current_timestamp: UInt = utils::millis_since_unix_epoch() + .try_into() + .expect("time is valid"); + + if presence.content.presence == PresenceState::Online { + // Don't set last_active_ago when the user is online + presence.content.last_active_ago = None; + } else { + // Convert from timestamp to duration + presence.content.last_active_ago = presence + .content + .last_active_ago + .map(|timestamp| current_timestamp - timestamp); + } + + hashmap.insert(user_id, presence); + } + + Ok(hashmap) + } +} From 715b30a2b5ea9f2511e157ac23ced07183f25a85 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:47:00 +0200 Subject: [PATCH 328/445] refactor: prepare src/database/key_value.rs from src/service/rooms/outlier/mod.rs --- src/{service/rooms/outlier/mod.rs => database/key_value.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service/rooms/outlier/mod.rs => database/key_value.rs} (100%) diff --git a/src/service/rooms/outlier/mod.rs b/src/database/key_value.rs similarity index 100% rename from src/service/rooms/outlier/mod.rs rename to src/database/key_value.rs From daa969508fe17a7d87bdfff5c8163aae469545af Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:47:00 +0200 Subject: [PATCH 329/445] refactor: restore src/service/rooms/outlier/mod.rs --- src/service/rooms/outlier/mod.rs | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 src/service/rooms/outlier/mod.rs diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs new file mode 100644 index 00000000..340e93e4 --- /dev/null +++ b/src/service/rooms/outlier/mod.rs @@ -0,0 +1,27 @@ + /// Returns the pdu from the outlier tree. + pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { + self.eventid_outlierpdu + .get(event_id.as_bytes())? + .map_or(Ok(None), |pdu| { + serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) + }) + } + + /// Returns the pdu from the outlier tree. + pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { + self.eventid_outlierpdu + .get(event_id.as_bytes())? + .map_or(Ok(None), |pdu| { + serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) + }) + } + + /// Append the PDU as an outlier. 
+ #[tracing::instrument(skip(self, pdu))] + pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { + self.eventid_outlierpdu.insert( + event_id.as_bytes(), + &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), + ) + } + From 0ce4446b1ab264378fb96029d420391aefdfdb91 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:47:00 +0200 Subject: [PATCH 330/445] refactor: restore src/service/rooms/metadata/mod.rs --- src/service/rooms/metadata/mod.rs | 44 +++++++++++++++++++++++++++++++ 1 file changed, 44 insertions(+) create mode 100644 src/service/rooms/metadata/mod.rs diff --git a/src/service/rooms/metadata/mod.rs b/src/service/rooms/metadata/mod.rs new file mode 100644 index 00000000..5d703451 --- /dev/null +++ b/src/service/rooms/metadata/mod.rs @@ -0,0 +1,44 @@ + /// Checks if a room exists. + #[tracing::instrument(skip(self))] + pub fn exists(&self, room_id: &RoomId) -> Result { + let prefix = match self.get_shortroomid(room_id)? { + Some(b) => b.to_be_bytes().to_vec(), + None => return Ok(false), + }; + + // Look for PDUs in that room. + Ok(self + .pduid_pdu + .iter_from(&prefix, false) + .next() + .filter(|(k, _)| k.starts_with(&prefix)) + .is_some()) + } + + pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { + self.roomid_shortroomid + .get(room_id.as_bytes())? + .map(|bytes| { + utils::u64_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) + }) + .transpose() + } + + pub fn get_or_create_shortroomid( + &self, + room_id: &RoomId, + globals: &super::globals::Globals, + ) -> Result { + Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? { + Some(short) => utils::u64_from_bytes(&short) + .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, + None => { + let short = globals.next_count()?; + self.roomid_shortroomid + .insert(room_id.as_bytes(), &short.to_be_bytes())?; + short + } + }) + } + From 1ccc226c6b195d84c14e1fdf5e0ade5feefb6872 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:47:01 +0200 Subject: [PATCH 331/445] refactor: prepare src/database/key_value.rs from src/service/rooms/pdu_metadata/mod.rs --- src/{service/rooms/pdu_metadata/mod.rs => database/key_value.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service/rooms/pdu_metadata/mod.rs => database/key_value.rs} (100%) diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/database/key_value.rs similarity index 100% rename from src/service/rooms/pdu_metadata/mod.rs rename to src/database/key_value.rs From 81ac01c2f56669521fa55409efbeba6785230239 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 15 Aug 2022 18:47:01 +0200 Subject: [PATCH 332/445] refactor: restore src/service/rooms/pdu_metadata/mod.rs --- src/service/rooms/pdu_metadata/mod.rs | 31 +++++++++++++++++++++++++++ 1 file changed, 31 insertions(+) create mode 100644 src/service/rooms/pdu_metadata/mod.rs diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs new file mode 100644 index 00000000..f8ffcee1 --- /dev/null +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -0,0 +1,31 @@ + + #[tracing::instrument(skip(self, room_id, event_ids))] + pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { + for prev in event_ids { + let mut key = room_id.as_bytes().to_vec(); + key.extend_from_slice(prev.as_bytes()); + self.referencedevents.insert(&key, &[])?; + } + + Ok(()) + } + + 
#[tracing::instrument(skip(self))] + pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { + let mut key = room_id.as_bytes().to_vec(); + key.extend_from_slice(event_id.as_bytes()); + Ok(self.referencedevents.get(&key)?.is_some()) + } + + #[tracing::instrument(skip(self))] + pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { + self.softfailedeventids.insert(event_id.as_bytes(), &[]) + } + + #[tracing::instrument(skip(self))] + pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { + self.softfailedeventids + .get(event_id.as_bytes()) + .map(|o| o.is_some()) + } + From 865e35df17f7e9e3dba21765871285f0d1bdeeeb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 25 Jun 2022 16:12:23 +0200 Subject: [PATCH 333/445] Work on rooms/state, database, alias, directory, edus services, event_handler, lazy_loading, metadata, outlier, and pdu_metadata --- src/database/abstraction.rs | 4 +- src/database/key_value.rs | 722 ++------- src/database/mod.rs | 48 +- src/main.rs | 51 +- src/service/rooms/alias/data.rs | 22 + src/service/rooms/alias/mod.rs | 72 +- src/service/rooms/directory/data.rs | 13 + src/service/rooms/directory/mod.rs | 35 +- src/service/rooms/edus/data.rs | 91 ++ src/service/rooms/edus/mod.rs | 350 +---- src/service/rooms/event_handler/mod.rs | 1988 ++++++++++++------------ src/service/rooms/lazy_loading/data.rs | 24 + src/service/rooms/lazy_loading/mod.rs | 55 +- src/service/rooms/metadata/data.rs | 3 + src/service/rooms/metadata/mod.rs | 52 +- src/service/rooms/outlier/data.rs | 5 + src/service/rooms/outlier/mod.rs | 29 +- src/service/rooms/pdu_metadata/data.rs | 6 + src/service/rooms/pdu_metadata/mod.rs | 29 +- src/service/rooms/short/mod.rs | 27 + src/service/rooms/state/data.rs | 32 +- src/service/rooms/state/mod.rs | 86 +- 22 files changed, 1503 insertions(+), 2241 deletions(-) create mode 100644 src/service/rooms/alias/data.rs create mode 100644 src/service/rooms/directory/data.rs create mode 100644 src/service/rooms/edus/data.rs create mode 100644 src/service/rooms/lazy_loading/data.rs create mode 100644 src/service/rooms/metadata/data.rs create mode 100644 src/service/rooms/outlier/data.rs create mode 100644 src/service/rooms/pdu_metadata/data.rs diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs index 74f3a45a..29325bd6 100644 --- a/src/database/abstraction.rs +++ b/src/database/abstraction.rs @@ -26,7 +26,7 @@ pub mod persy; ))] pub mod watchers; -pub trait DatabaseEngine: Send + Sync { +pub trait KeyValueDatabaseEngine: Send + Sync { fn open(config: &Config) -> Result where Self: Sized; @@ -40,7 +40,7 @@ pub trait DatabaseEngine: Send + Sync { } } -pub trait Tree: Send + Sync { +pub trait KeyValueTree: Send + Sync { fn get(&self, key: &[u8]) -> Result>>; fn insert(&self, key: &[u8], value: &[u8]) -> Result<()>; diff --git a/src/database/key_value.rs b/src/database/key_value.rs index 1a793f39..34916e4b 100644 --- a/src/database/key_value.rs +++ b/src/database/key_value.rs @@ -1,10 +1,7 @@ -pub trait Data { - fn get_room_shortstatehash(room_id: &RoomId); -} +use crate::service; - /// Returns the last state hash key added to the db for the given room. - #[tracing::instrument(skip(self))] - pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { +impl service::room::state::Data for KeyValueDatabase { + fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { self.roomid_shortstatehash .get(room_id.as_bytes())? 
.map_or(Ok(None), |bytes| { @@ -14,77 +11,21 @@ pub trait Data { }) } -pub struct Service { - db: D, -} - -impl Service { - /// Set the room to the given statehash and update caches. - #[tracing::instrument(skip(self, new_state_ids_compressed, db))] - pub fn force_state( - &self, - room_id: &RoomId, - shortstatehash: u64, - statediffnew :HashSet, - statediffremoved :HashSet, - db: &Database, - ) -> Result<()> { - - for event_id in statediffnew.into_iter().filter_map(|new| { - self.parse_compressed_state_event(new) - .ok() - .map(|(_, id)| id) - }) { - let pdu = match self.get_pdu_json(&event_id)? { - Some(pdu) => pdu, - None => continue, - }; - - if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { - continue; - } - - let pdu: PduEvent = match serde_json::from_str( - &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), - ) { - Ok(pdu) => pdu, - Err(_) => continue, - }; - - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let membership = match serde_json::from_str::(pdu.content.get()) { - Ok(e) => e.membership, - Err(_) => continue, - }; - - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, - }; - - let user_id = match UserId::parse(state_key) { - Ok(id) => id, - Err(_) => continue, - }; - - self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; - } - - self.update_joined_count(room_id, db)?; - + fn set_room_state(&self, room_id: &RoomId, new_shortstatehash: u64 + _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex + ) -> Result<()> { self.roomid_shortstatehash .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; + Ok(()) + } + fn set_event_state(&self) -> Result<()> { + db.shorteventid_shortstatehash + .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; Ok(()) } - /// Returns the leaf pdus of a room. - #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { + fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -99,15 +40,11 @@ impl Service { .collect() } - /// Replace the leaves of a room. - /// - /// The provided `event_ids` become the new leaves, this allows a room to have multiple - /// `prev_events`. - #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves<'a>( + fn set_forward_extremities( &self, room_id: &RoomId, event_ids: impl IntoIterator + Debug, + _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -125,230 +62,48 @@ impl Service { Ok(()) } - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. 
- #[tracing::instrument(skip(self, state_ids_compressed, globals))] - pub fn set_event_state( +} + +impl service::room::alias::Data for KeyValueDatabase { + fn set_alias( &self, - event_id: &EventId, - room_id: &RoomId, - state_ids_compressed: HashSet, - globals: &super::globals::Globals, + alias: &RoomAliasId, + room_id: Option<&RoomId> ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &state_ids_compressed - .iter() - .map(|s| &s[..]) - .collect::>(), - ); - - let (shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, globals)?; - - if !already_existed { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (state_ids_compressed, HashSet::new()) - }; - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 1_000_000, // high number because no state will be based on this one - states_parents, - )?; - } - - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - + self.alias_roomid + .insert(alias.alias().as_bytes(), room_id.as_bytes())?; + let mut aliasid = room_id.as_bytes().to_vec(); + aliasid.push(0xff); + aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); + self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; Ok(()) } - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, new_pdu, globals))] - pub fn append_to_state( + fn remove_alias( &self, - new_pdu: &PduEvent, - globals: &super::globals::Globals, - ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; - - if let Some(p) = previous_shortstatehash { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; - } - - if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let shortstatekey = self.get_or_create_shortstatekey( - &new_pdu.kind.to_string().into(), - state_key, - globals, - )?; - - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; - - let replaces = states_parents - .last() - .map(|info| { - info.1 - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); - - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } - - // TODO: statehash with deterministic inputs - let shortstatehash = globals.next_count()?; - - let mut statediffnew = HashSet::new(); - statediffnew.insert(new); + alias: &RoomAliasId, + ) -> Result<()> { + if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? 
{ + let mut prefix = room_id.to_vec(); + prefix.push(0xff); - let mut statediffremoved = HashSet::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); + for (key, _) in self.aliasid_alias.scan_prefix(prefix) { + self.aliasid_alias.remove(&key)?; } - - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 2, - states_parents, - )?; - - Ok(shortstatehash) + self.alias_roomid.remove(alias.alias().as_bytes())?; } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) - } - } - - #[tracing::instrument(skip(self, invite_event))] - pub fn calculate_invite_state( - &self, - invite_event: &PduEvent, - ) -> Result>> { - let mut state = Vec::new(); - // Add recommended events - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? - { - state.push(e.to_stripped_state_event()); + return Err(Error::BadRequest( + ErrorKind::NotFound, + "Alias does not exist.", + )); } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomCanonicalAlias, - "", - )? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &StateEventType::RoomMember, - invite_event.sender.as_str(), - )? { - state.push(e.to_stripped_state_event()); - } - - state.push(invite_event.to_stripped_state_event()); - Ok(state) - } - - #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; - Ok(()) } -} - #[tracing::instrument(skip(self, globals))] - pub fn set_alias( + fn resolve_local_alias( &self, - alias: &RoomAliasId, - room_id: Option<&RoomId>, - globals: &super::globals::Globals, + alias: &RoomAliasId ) -> Result<()> { - if let Some(room_id) = room_id { - // New alias - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - } else { - // room_id=None means remove alias - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result>> { self.alias_roomid .get(alias.alias().as_bytes())? 
.map(|bytes| { @@ -360,11 +115,10 @@ impl Service { .transpose() } - #[tracing::instrument(skip(self))] - pub fn room_aliases<'a>( - &'a self, + fn local_aliases_for_room( + &self, room_id: &RoomId, - ) -> impl Iterator>> + 'a { + ) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -375,26 +129,22 @@ impl Service { .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) }) } +} +impl service::room::directory::Data for KeyValueDatabase { + fn set_public(&self, room_id: &RoomId) -> Result<()> { + self.publicroomids.insert(room_id.as_bytes(), &[])?; + } - #[tracing::instrument(skip(self))] - pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { - if public { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } else { - self.publicroomids.remove(room_id.as_bytes())?; - } - - Ok(()) + fn set_not_public(&self, room_id: &RoomId) -> Result<()> { + self.publicroomids.remove(room_id.as_bytes())?; } - #[tracing::instrument(skip(self))] - pub fn is_public_room(&self, room_id: &RoomId) -> Result { + fn is_public_room(&self, room_id: &RoomId) -> Result { Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) } - #[tracing::instrument(skip(self))] - pub fn public_rooms(&self) -> impl Iterator>> + '_ { + fn public_rooms(&self) -> impl Iterator>> + '_ { self.publicroomids.iter().map(|(bytes, _)| { RoomId::parse( utils::string_from_bytes(&bytes).map_err(|_| { @@ -404,43 +154,14 @@ impl Service { .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) }) } - -use crate::{database::abstraction::Tree, utils, Error, Result}; -use ruma::{ - events::{ - presence::{PresenceEvent, PresenceEventContent}, - receipt::ReceiptEvent, - SyncEphemeralRoomEvent, - }, - presence::PresenceState, - serde::Raw, - signatures::CanonicalJsonObject, - RoomId, UInt, UserId, -}; -use std::{ - collections::{HashMap, HashSet}, - mem, - sync::Arc, -}; - -pub struct RoomEdus { - pub(in super::super) readreceiptid_readreceipt: Arc, // ReadReceiptId = RoomId + Count + UserId - pub(in super::super) roomuserid_privateread: Arc, // RoomUserId = Room + User, PrivateRead = Count - pub(in super::super) roomuserid_lastprivatereadupdate: Arc, // LastPrivateReadUpdate = Count - pub(in super::super) typingid_userid: Arc, // TypingId = RoomId + TimeoutTime + Count - pub(in super::super) roomid_lasttypingupdate: Arc, // LastRoomTypingUpdate = Count - pub(in super::super) presenceid_presence: Arc, // PresenceId = RoomId + Count + UserId - pub(in super::super) userid_lastpresenceupdate: Arc, // LastPresenceUpdate = Count } -impl RoomEdus { - /// Adds an event which will be saved until a new event replaces it (e.g. read receipt). - pub fn readreceipt_update( +impl service::room::edus::Data for KeyValueDatabase { + fn readreceipt_update( &self, user_id: &UserId, room_id: &RoomId, event: ReceiptEvent, - globals: &super::super::globals::Globals, ) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -477,8 +198,6 @@ impl RoomEdus { Ok(()) } - /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. - #[tracing::instrument(skip(self))] pub fn readreceipts_since<'a>( &'a self, room_id: &RoomId, @@ -527,14 +246,11 @@ impl RoomEdus { }) } - /// Sets a private read marker at `count`. 
- #[tracing::instrument(skip(self, globals))] - pub fn private_read_set( + fn private_read_set( &self, room_id: &RoomId, user_id: &UserId, count: u64, - globals: &super::super::globals::Globals, ) -> Result<()> { let mut key = room_id.as_bytes().to_vec(); key.push(0xff); @@ -545,13 +261,9 @@ impl RoomEdus { self.roomuserid_lastprivatereadupdate .insert(&key, &globals.next_count()?.to_be_bytes())?; - - Ok(()) } - /// Returns the private read marker. - #[tracing::instrument(skip(self))] - pub fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { + fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { let mut key = room_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(user_id.as_bytes()); @@ -565,8 +277,7 @@ impl RoomEdus { }) } - /// Returns the count of the last typing update in this room. - pub fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { + fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { let mut key = room_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(user_id.as_bytes()); @@ -583,9 +294,7 @@ impl RoomEdus { .unwrap_or(0)) } - /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is - /// called. - pub fn typing_add( + fn typing_add( &self, user_id: &UserId, room_id: &RoomId, @@ -611,12 +320,10 @@ impl RoomEdus { Ok(()) } - /// Removes a user from typing before the timeout is reached. - pub fn typing_remove( + fn typing_remove( &self, user_id: &UserId, room_id: &RoomId, - globals: &super::super::globals::Globals, ) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -643,59 +350,10 @@ impl RoomEdus { Ok(()) } - /// Makes sure that typing events with old timestamps get removed. - fn typings_maintain( - &self, - room_id: &RoomId, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let current_timestamp = utils::millis_since_unix_epoch(); - - let mut found_outdated = false; - - // Find all outdated edus before inserting a new one - for outdated_edu in self - .typingid_userid - .scan_prefix(prefix) - .map(|(key, _)| { - Ok::<_, Error>(( - key.clone(), - utils::u64_from_bytes( - &key.splitn(2, |&b| b == 0xff).nth(1).ok_or_else(|| { - Error::bad_database("RoomTyping has invalid timestamp or delimiters.") - })?[0..mem::size_of::()], - ) - .map_err(|_| Error::bad_database("RoomTyping has invalid timestamp bytes."))?, - )) - }) - .filter_map(|r| r.ok()) - .take_while(|&(_, timestamp)| timestamp < current_timestamp) - { - // This is an outdated edu (time > timestamp) - self.typingid_userid.remove(&outdated_edu.0)?; - found_outdated = true; - } - - if found_outdated { - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - } - - Ok(()) - } - - /// Returns the count of the last typing update in this room. - #[tracing::instrument(skip(self, globals))] - pub fn last_typing_update( + fn last_typing_update( &self, room_id: &RoomId, - globals: &super::super::globals::Globals, ) -> Result { - self.typings_maintain(room_id, globals)?; - Ok(self .roomid_lasttypingupdate .get(room_id.as_bytes())? 
@@ -708,10 +366,10 @@ impl RoomEdus { .unwrap_or(0)) } - pub fn typings_all( + fn typings_all( &self, room_id: &RoomId, - ) -> Result> { + ) -> Result> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -726,23 +384,14 @@ impl RoomEdus { user_ids.insert(user_id); } - Ok(SyncEphemeralRoomEvent { - content: ruma::events::typing::TypingEventContent { - user_ids: user_ids.into_iter().collect(), - }, - }) + Ok(user_ids) } - /// Adds a presence event which will be saved until a new event replaces it. - /// - /// Note: This method takes a RoomId because presence updates are always bound to rooms to - /// make sure users outside these rooms can't see them. - pub fn update_presence( + fn update_presence( &self, user_id: &UserId, room_id: &RoomId, presence: PresenceEvent, - globals: &super::super::globals::Globals, ) -> Result<()> { // TODO: Remove old entry? Or maybe just wipe completely from time to time? @@ -767,8 +416,6 @@ impl RoomEdus { Ok(()) } - /// Resets the presence timeout, so the user will stay in their current presence state. - #[tracing::instrument(skip(self))] pub fn ping_presence(&self, user_id: &UserId) -> Result<()> { self.userid_lastpresenceupdate.insert( user_id.as_bytes(), @@ -778,8 +425,7 @@ impl RoomEdus { Ok(()) } - /// Returns the timestamp of the last presence update of this user in millis since the unix epoch. - pub fn last_presence_update(&self, user_id: &UserId) -> Result> { + fn last_presence_update(&self, user_id: &UserId) -> Result> { self.userid_lastpresenceupdate .get(user_id.as_bytes())? .map(|bytes| { @@ -790,125 +436,29 @@ impl RoomEdus { .transpose() } - pub fn get_last_presence_event( + fn get_presence_event( &self, user_id: &UserId, room_id: &RoomId, + count: u64, ) -> Result> { - let last_update = match self.last_presence_update(user_id)? { - Some(last) => last, - None => return Ok(None), - }; - let mut presence_id = room_id.as_bytes().to_vec(); presence_id.push(0xff); - presence_id.extend_from_slice(&last_update.to_be_bytes()); + presence_id.extend_from_slice(&count.to_be_bytes()); presence_id.push(0xff); presence_id.extend_from_slice(user_id.as_bytes()); self.presenceid_presence .get(&presence_id)? - .map(|value| { - let mut presence: PresenceEvent = serde_json::from_slice(&value) - .map_err(|_| Error::bad_database("Invalid presence event in db."))?; - let current_timestamp: UInt = utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"); - - if presence.content.presence == PresenceState::Online { - // Don't set last_active_ago when the user is online - presence.content.last_active_ago = None; - } else { - // Convert from timestamp to duration - presence.content.last_active_ago = presence - .content - .last_active_ago - .map(|timestamp| current_timestamp - timestamp); - } - - Ok(presence) - }) + .map(|value| parse_presence_event(&value)) .transpose() } - /// Sets all users to offline who have been quiet for too long. 
- fn _presence_maintain( - &self, - rooms: &super::Rooms, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let current_timestamp = utils::millis_since_unix_epoch(); - - for (user_id_bytes, last_timestamp) in self - .userid_lastpresenceupdate - .iter() - .filter_map(|(k, bytes)| { - Some(( - k, - utils::u64_from_bytes(&bytes) - .map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) - .ok()?, - )) - }) - .take_while(|(_, timestamp)| current_timestamp.saturating_sub(*timestamp) > 5 * 60_000) - // 5 Minutes - { - // Send new presence events to set the user offline - let count = globals.next_count()?.to_be_bytes(); - let user_id: Box<_> = utils::string_from_bytes(&user_id_bytes) - .map_err(|_| { - Error::bad_database("Invalid UserId bytes in userid_lastpresenceupdate.") - })? - .try_into() - .map_err(|_| Error::bad_database("Invalid UserId in userid_lastpresenceupdate."))?; - for room_id in rooms.rooms_joined(&user_id).filter_map(|r| r.ok()) { - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count); - presence_id.push(0xff); - presence_id.extend_from_slice(&user_id_bytes); - - self.presenceid_presence.insert( - &presence_id, - &serde_json::to_vec(&PresenceEvent { - content: PresenceEventContent { - avatar_url: None, - currently_active: None, - displayname: None, - last_active_ago: Some( - last_timestamp.try_into().expect("time is valid"), - ), - presence: PresenceState::Offline, - status_msg: None, - }, - sender: user_id.to_owned(), - }) - .expect("PresenceEvent can be serialized"), - )?; - } - - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - } - - Ok(()) - } - - /// Returns an iterator over the most recent presence updates that happened after the event with id `since`. 
- #[tracing::instrument(skip(self, since, _rooms, _globals))] - pub fn presence_since( + fn presence_since( &self, room_id: &RoomId, since: u64, - _rooms: &super::Rooms, - _globals: &super::super::globals::Globals, ) -> Result, PresenceEvent>> { - //self.presence_maintain(rooms, globals)?; - let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -931,23 +481,7 @@ impl RoomEdus { ) .map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?; - let mut presence: PresenceEvent = serde_json::from_slice(&value) - .map_err(|_| Error::bad_database("Invalid presence event in db."))?; - - let current_timestamp: UInt = utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"); - - if presence.content.presence == PresenceState::Online { - // Don't set last_active_ago when the user is online - presence.content.last_active_ago = None; - } else { - // Convert from timestamp to duration - presence.content.last_active_ago = presence - .content - .last_active_ago - .map(|timestamp| current_timestamp - timestamp); - } + let presence = parse_presence_event(&value)?; hashmap.insert(user_id, presence); } @@ -956,8 +490,28 @@ impl RoomEdus { } } - #[tracing::instrument(skip(self))] - pub fn lazy_load_was_sent_before( +fn parse_presence_event(bytes: &[u8]) -> Result { + let mut presence: PresenceEvent = serde_json::from_slice(bytes) + .map_err(|_| Error::bad_database("Invalid presence event in db."))?; + + let current_timestamp: UInt = utils::millis_since_unix_epoch() + .try_into() + .expect("time is valid"); + + if presence.content.presence == PresenceState::Online { + // Don't set last_active_ago when the user is online + presence.content.last_active_ago = None; + } else { + // Convert from timestamp to duration + presence.content.last_active_ago = presence + .content + .last_active_ago + .map(|timestamp| current_timestamp - timestamp); + } +} + +impl service::room::lazy_load::Data for KeyValueDatabase { + fn lazy_load_was_sent_before( &self, user_id: &UserId, device_id: &DeviceId, @@ -974,28 +528,7 @@ impl RoomEdus { Ok(self.lazyloadedids.get(&key)?.is_some()) } - #[tracing::instrument(skip(self))] - pub fn lazy_load_mark_sent( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - lazy_load: HashSet>, - count: u64, - ) { - self.lazy_load_waiting.lock().unwrap().insert( - ( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - count, - ), - lazy_load, - ); - } - - #[tracing::instrument(skip(self))] - pub fn lazy_load_confirm_delivery( + fn lazy_load_confirm_delivery( &self, user_id: &UserId, device_id: &DeviceId, @@ -1025,8 +558,7 @@ impl RoomEdus { Ok(()) } - #[tracing::instrument(skip(self))] - pub fn lazy_load_reset( + fn lazy_load_reset( &self, user_id: &UserId, device_id: &DeviceId, @@ -1045,10 +577,10 @@ impl RoomEdus { Ok(()) } +} - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn exists(&self, room_id: &RoomId) -> Result { +impl service::room::metadata::Data for KeyValueDatabase { + fn exists(&self, room_id: &RoomId) -> Result { let prefix = match self.get_shortroomid(room_id)? { Some(b) => b.to_be_bytes().to_vec(), None => return Ok(false), @@ -1062,36 +594,10 @@ impl RoomEdus { .filter(|(k, _)| k.starts_with(&prefix)) .is_some()) } +} - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { - self.roomid_shortroomid - .get(room_id.as_bytes())? 
- .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) - }) - .transpose() - } - - pub fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - globals: &super::globals::Globals, - ) -> Result { - Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? { - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, - None => { - let short = globals.next_count()?; - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes())?; - short - } - }) - } - - /// Returns the pdu from the outlier tree. - pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { +impl service::room::outlier::Data for KeyValueDatabase { + fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { self.eventid_outlierpdu .get(event_id.as_bytes())? .map_or(Ok(None), |pdu| { @@ -1099,8 +605,7 @@ impl RoomEdus { }) } - /// Returns the pdu from the outlier tree. - pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { + fn get_outlier_pdu(&self, event_id: &EventId) -> Result> { self.eventid_outlierpdu .get(event_id.as_bytes())? .map_or(Ok(None), |pdu| { @@ -1108,18 +613,16 @@ impl RoomEdus { }) } - /// Append the PDU as an outlier. - #[tracing::instrument(skip(self, pdu))] - pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { + fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { self.eventid_outlierpdu.insert( event_id.as_bytes(), &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), ) } +} - - #[tracing::instrument(skip(self, room_id, event_ids))] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { +impl service::room::pdu_metadata::Data for KeyValueDatabase { + fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { for prev in event_ids { let mut key = room_id.as_bytes().to_vec(); key.extend_from_slice(prev.as_bytes()); @@ -1129,22 +632,19 @@ impl RoomEdus { Ok(()) } - #[tracing::instrument(skip(self))] - pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { + fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { let mut key = room_id.as_bytes().to_vec(); key.extend_from_slice(event_id.as_bytes()); Ok(self.referencedevents.get(&key)?.is_some()) } - #[tracing::instrument(skip(self))] - pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { + fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { self.softfailedeventids.insert(event_id.as_bytes(), &[]) } - #[tracing::instrument(skip(self))] - pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { + fn is_event_soft_failed(&self, event_id: &EventId) -> Result { self.softfailedeventids .get(event_id.as_bytes()) .map(|o| o.is_some()) } - +} diff --git a/src/database/mod.rs b/src/database/mod.rs index a0937c29..a35228aa 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -15,7 +15,7 @@ pub mod users; use self::admin::create_admin_room; use crate::{utils, Config, Error, Result}; -use abstraction::DatabaseEngine; +use abstraction::KeyValueDatabaseEngine; use directories::ProjectDirs; use futures_util::{stream::FuturesUnordered, StreamExt}; use lru_cache::LruCache; @@ -39,8 +39,8 @@ use std::{ use tokio::sync::{mpsc, OwnedRwLockReadGuard, RwLock as TokioRwLock, Semaphore}; use tracing::{debug, error, info, warn}; -pub struct Database { - _db: Arc, +pub struct 
KeyValueDatabase { + _db: Arc, pub globals: globals::Globals, pub users: users::Users, pub uiaa: uiaa::Uiaa, @@ -55,7 +55,7 @@ pub struct Database { pub pusher: pusher::PushData, } -impl Database { +impl KeyValueDatabase { /// Tries to remove the old database but ignores all errors. pub fn try_remove(server_name: &str) -> Result<()> { let mut path = ProjectDirs::from("xyz", "koesters", "conduit") @@ -124,7 +124,7 @@ impl Database { .map_err(|_| Error::BadConfig("Database folder doesn't exists and couldn't be created (e.g. due to missing permissions). Please create the database folder yourself."))?; } - let builder: Arc = match &*config.database_backend { + let builder: Arc = match &*config.database_backend { "sqlite" => { #[cfg(not(feature = "sqlite"))] return Err(Error::BadConfig("Database backend not found.")); @@ -955,7 +955,7 @@ impl Database { } /// Sets the emergency password and push rules for the @conduit account in case emergency password is set -fn set_emergency_access(db: &Database) -> Result { +fn set_emergency_access(db: &KeyValueDatabase) -> Result { let conduit_user = UserId::parse_with_server_name("conduit", db.globals.server_name()) .expect("@conduit:server_name is a valid UserId"); @@ -979,39 +979,3 @@ fn set_emergency_access(db: &Database) -> Result { res } - -pub struct DatabaseGuard(OwnedRwLockReadGuard); - -impl Deref for DatabaseGuard { - type Target = OwnedRwLockReadGuard; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -#[cfg(feature = "conduit_bin")] -#[axum::async_trait] -impl axum::extract::FromRequest for DatabaseGuard -where - B: Send, -{ - type Rejection = axum::extract::rejection::ExtensionRejection; - - async fn from_request( - req: &mut axum::extract::RequestParts, - ) -> Result { - use axum::extract::Extension; - - let Extension(db): Extension>> = - Extension::from_request(req).await?; - - Ok(DatabaseGuard(db.read_owned().await)) - } -} - -impl From> for DatabaseGuard { - fn from(val: OwnedRwLockReadGuard) -> Self { - Self(val) - } -} diff --git a/src/main.rs b/src/main.rs index 9a0928a0..a1af9761 100644 --- a/src/main.rs +++ b/src/main.rs @@ -46,27 +46,26 @@ use tikv_jemallocator::Jemalloc; #[global_allocator] static GLOBAL: Jemalloc = Jemalloc; -#[tokio::main] -async fn main() { - let raw_config = - Figment::new() - .merge( - Toml::file(Env::var("CONDUIT_CONFIG").expect( - "The CONDUIT_CONFIG env var needs to be set. Example: /etc/conduit.toml", - )) - .nested(), - ) - .merge(Env::prefixed("CONDUIT_").global()); - - let config = match raw_config.extract::() { - Ok(s) => s, - Err(e) => { - eprintln!("It looks like your config is invalid. The following error occured while parsing it: {}", e); - std::process::exit(1); - } - }; +lazy_static! { + static ref DB: Database = { + let raw_config = + Figment::new() + .merge( + Toml::file(Env::var("CONDUIT_CONFIG").expect( + "The CONDUIT_CONFIG env var needs to be set. Example: /etc/conduit.toml", + )) + .nested(), + ) + .merge(Env::prefixed("CONDUIT_").global()); + + let config = match raw_config.extract::() { + Ok(s) => s, + Err(e) => { + eprintln!("It looks like your config is invalid. 
The following error occured while parsing it: {}", e); + std::process::exit(1); + } + }; - let start = async { config.warn_deprecated(); let db = match Database::load_or_create(&config).await { @@ -79,8 +78,15 @@ async fn main() { std::process::exit(1); } }; + }; +} - run_server(&config, db).await.unwrap(); +#[tokio::main] +async fn main() { + lazy_static::initialize(&DB); + + let start = async { + run_server(&config).await.unwrap(); }; if config.allow_jaeger { @@ -120,7 +126,8 @@ async fn main() { } } -async fn run_server(config: &Config, db: Arc>) -> io::Result<()> { +async fn run_server() -> io::Result<()> { + let config = DB.globals.config; let addr = SocketAddr::from((config.address, config.port)); let x_requested_with = HeaderName::from_static("x-requested-with"); diff --git a/src/service/rooms/alias/data.rs b/src/service/rooms/alias/data.rs new file mode 100644 index 00000000..9dbfc7b5 --- /dev/null +++ b/src/service/rooms/alias/data.rs @@ -0,0 +1,22 @@ +pub trait Data { + /// Creates or updates the alias to the given room id. + pub fn set_alias( + alias: &RoomAliasId, + room_id: &RoomId + ) -> Result<()>; + + /// Forgets about an alias. Returns an error if the alias did not exist. + pub fn remove_alias( + alias: &RoomAliasId, + ) -> Result<()>; + + /// Looks up the roomid for the given alias. + pub fn resolve_local_alias( + alias: &RoomAliasId, + ) -> Result<()>; + + /// Returns all local aliases that point to the given room + pub fn local_aliases_for_room( + alias: &RoomAliasId, + ) -> Result<()>; +} diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index 393ad671..cfe05396 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -1,66 +1,40 @@ +mod data; +pub use data::Data; +use crate::service::*; + +pub struct Service { + db: D, +} + +impl Service<_> { #[tracing::instrument(skip(self, globals))] pub fn set_alias( &self, alias: &RoomAliasId, - room_id: Option<&RoomId>, - globals: &super::globals::Globals, + room_id: &RoomId, ) -> Result<()> { - if let Some(room_id) = room_id { - // New alias - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - } else { - // room_id=None means remove alias - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - } + self.db.set_alias(alias, room_id) + } - Ok(()) + #[tracing::instrument(skip(self, globals))] + pub fn remove_alias( + &self, + alias: &RoomAliasId, + ) -> Result<()> { + self.db.remove_alias(alias) } #[tracing::instrument(skip(self))] - pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result>> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() + pub fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result>> { + self.db.resolve_local_alias(alias: &RoomAliasId) } #[tracing::instrument(skip(self))] - pub fn room_aliases<'a>( + pub fn local_aliases_for_room<'a>( &'a self, room_id: &RoomId, ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? - .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) + self.db.local_aliases_for_room(room_id) } - +} diff --git a/src/service/rooms/directory/data.rs b/src/service/rooms/directory/data.rs new file mode 100644 index 00000000..83d78853 --- /dev/null +++ b/src/service/rooms/directory/data.rs @@ -0,0 +1,13 @@ +pub trait Data { + /// Adds the room to the public room directory + fn set_public(room_id: &RoomId) -> Result<()>; + + /// Removes the room from the public room directory. + fn set_not_public(room_id: &RoomId) -> Result<()>; + + /// Returns true if the room is in the public room directory. + fn is_public_room(room_id: &RoomId) -> Result; + + /// Returns the unsorted public room directory + fn public_rooms() -> impl Iterator>> + '_; +} diff --git a/src/service/rooms/directory/mod.rs b/src/service/rooms/directory/mod.rs index 8be7bd57..b92933f4 100644 --- a/src/service/rooms/directory/mod.rs +++ b/src/service/rooms/directory/mod.rs @@ -1,29 +1,30 @@ +mod data; +pub use data::Data; +use crate::service::*; + +pub struct Service { + db: D, +} + +impl Service<_> { #[tracing::instrument(skip(self))] - pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { - if public { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } else { - self.publicroomids.remove(room_id.as_bytes())?; - } + pub fn set_public(&self, room_id: &RoomId) -> Result<()> { + self.db.set_public(&self, room_id) + } - Ok(()) + #[tracing::instrument(skip(self))] + pub fn set_not_public(&self, room_id: &RoomId) -> Result<()> { + self.db.set_not_public(&self, room_id) } #[tracing::instrument(skip(self))] pub fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) + self.db.is_public_room(&self, room_id) } #[tracing::instrument(skip(self))] pub fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) + self.db.public_rooms(&self, room_id) } - +} diff --git a/src/service/rooms/edus/data.rs b/src/service/rooms/edus/data.rs new file mode 100644 index 00000000..16c14cf3 --- /dev/null +++ b/src/service/rooms/edus/data.rs @@ -0,0 +1,91 @@ +pub trait Data { + /// Replaces the previous read receipt. + fn readreceipt_update( + &self, + user_id: &UserId, + room_id: &RoomId, + event: ReceiptEvent, + ) -> Result<()>; + + /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. + fn readreceipts_since( + &self, + room_id: &RoomId, + since: u64, + ) -> impl Iterator< + Item = Result<( + Box, + u64, + Raw, + )>, + >; + + /// Sets a private read marker at `count`. 
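+    ///
+    /// Private read markers are only visible to the user who set them; `count` refers to
+    /// Conduit's internal event count rather than a Matrix event ID.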
+ fn private_read_set( + &self, + room_id: &RoomId, + user_id: &UserId, + count: u64, + ) -> Result<()>; + + /// Returns the private read marker. + fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result>; + + /// Returns the count of the last typing update in this room. + fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result; + + /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is + /// called. + fn typing_add( + &self, + user_id: &UserId, + room_id: &RoomId, + timeout: u64, + ) -> Result<()>; + + /// Removes a user from typing before the timeout is reached. + fn typing_remove( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result<()>; + + /// Returns the count of the last typing update in this room. + fn last_typing_update( + &self, + room_id: &RoomId, + ) -> Result; + + /// Returns all user ids currently typing. + fn typings_all( + &self, + room_id: &RoomId, + ) -> Result>; + + /// Adds a presence event which will be saved until a new event replaces it. + /// + /// Note: This method takes a RoomId because presence updates are always bound to rooms to + /// make sure users outside these rooms can't see them. + fn update_presence( + &self, + user_id: &UserId, + room_id: &RoomId, + presence: PresenceEvent, + ) -> Result<()>; + + /// Resets the presence timeout, so the user will stay in their current presence state. + fn ping_presence(&self, user_id: &UserId) -> Result<()>; + + /// Returns the timestamp of the last presence update of this user in millis since the unix epoch. + fn last_presence_update(&self, user_id: &UserId) -> Result>; + + /// Returns the presence event with correct last_active_ago. + fn get_presence_event(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result>; + + /// Returns the most recent presence updates that happened after the event with id `since`. + fn presence_since( + &self, + room_id: &RoomId, + since: u64, + ) -> Result, PresenceEvent>>; +} diff --git a/src/service/rooms/edus/mod.rs b/src/service/rooms/edus/mod.rs index 118efd4c..06adf57e 100644 --- a/src/service/rooms/edus/mod.rs +++ b/src/service/rooms/edus/mod.rs @@ -1,73 +1,21 @@ -use crate::{database::abstraction::Tree, utils, Error, Result}; -use ruma::{ - events::{ - presence::{PresenceEvent, PresenceEventContent}, - receipt::ReceiptEvent, - SyncEphemeralRoomEvent, - }, - presence::PresenceState, - serde::Raw, - signatures::CanonicalJsonObject, - RoomId, UInt, UserId, -}; -use std::{ - collections::{HashMap, HashSet}, - mem, - sync::Arc, -}; +mod data; +pub use data::Data; -pub struct RoomEdus { - pub(in super::super) readreceiptid_readreceipt: Arc, // ReadReceiptId = RoomId + Count + UserId - pub(in super::super) roomuserid_privateread: Arc, // RoomUserId = Room + User, PrivateRead = Count - pub(in super::super) roomuserid_lastprivatereadupdate: Arc, // LastPrivateReadUpdate = Count - pub(in super::super) typingid_userid: Arc, // TypingId = RoomId + TimeoutTime + Count - pub(in super::super) roomid_lasttypingupdate: Arc, // LastRoomTypingUpdate = Count - pub(in super::super) presenceid_presence: Arc, // PresenceId = RoomId + Count + UserId - pub(in super::super) userid_lastpresenceupdate: Arc, // LastPresenceUpdate = Count +use crate::service::*; + +pub struct Service { + db: D, } -impl RoomEdus { - /// Adds an event which will be saved until a new event replaces it (e.g. read receipt). +impl Service<_> { + /// Replaces the previous read receipt. 
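+    ///
+    /// Only the latest public receipt per user and room is kept: any earlier receipt from the
+    /// same user in this room is removed before the new one is stored.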
pub fn readreceipt_update( &self, user_id: &UserId, room_id: &RoomId, event: ReceiptEvent, - globals: &super::super::globals::Globals, ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - // Remove old entry - if let Some((old, _)) = self - .readreceiptid_readreceipt - .iter_from(&last_possible_key, true) - .take_while(|(key, _)| key.starts_with(&prefix)) - .find(|(key, _)| { - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element") - == user_id.as_bytes() - }) - { - // This is the old room_latest - self.readreceiptid_readreceipt.remove(&old)?; - } - - let mut room_latest_id = prefix; - room_latest_id.extend_from_slice(&globals.next_count()?.to_be_bytes()); - room_latest_id.push(0xff); - room_latest_id.extend_from_slice(user_id.as_bytes()); - - self.readreceiptid_readreceipt.insert( - &room_latest_id, - &serde_json::to_vec(&event).expect("EduEvent::to_string always works"), - )?; - - Ok(()) + self.db.readreceipt_update(user_id, room_id, event); } /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. @@ -83,41 +31,7 @@ impl RoomEdus { Raw, )>, > + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - let prefix2 = prefix.clone(); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - - self.readreceiptid_readreceipt - .iter_from(&first_possible_edu, false) - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(move |(k, v)| { - let count = - utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::()]) - .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; - let user_id = UserId::parse( - utils::string_from_bytes(&k[prefix.len() + mem::size_of::() + 1..]) - .map_err(|_| { - Error::bad_database("Invalid readreceiptid userid bytes in db.") - })?, - ) - .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?; - - let mut json = serde_json::from_slice::(&v).map_err(|_| { - Error::bad_database("Read receipt in roomlatestid_roomlatest is invalid json.") - })?; - json.remove("room_id"); - - Ok(( - user_id, - count, - Raw::from_json( - serde_json::value::to_raw_value(&json).expect("json is valid raw value"), - ), - )) - }) + self.db.readreceipts_since(room_id, since) } /// Sets a private read marker at `count`. @@ -127,53 +41,19 @@ impl RoomEdus { room_id: &RoomId, user_id: &UserId, count: u64, - globals: &super::super::globals::Globals, ) -> Result<()> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - .insert(&key, &count.to_be_bytes())?; - - self.roomuserid_lastprivatereadupdate - .insert(&key, &globals.next_count()?.to_be_bytes())?; - - Ok(()) + self.db.private_read_set(room_id, user_id, count) } /// Returns the private read marker. #[tracing::instrument(skip(self))] pub fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - .get(&key)? 
- .map_or(Ok(None), |v| { - Ok(Some(utils::u64_from_bytes(&v).map_err(|_| { - Error::bad_database("Invalid private read marker bytes") - })?)) - }) + self.db.private_read_get(room_id, user_id) } /// Returns the count of the last typing update in this room. pub fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - Ok(self - .roomuserid_lastprivatereadupdate - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) + self.db.last_privateread_update(user_id, room_id) } /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is @@ -183,25 +63,8 @@ impl RoomEdus { user_id: &UserId, room_id: &RoomId, timeout: u64, - globals: &super::super::globals::Globals, ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let count = globals.next_count()?.to_be_bytes(); - - let mut room_typing_id = prefix; - room_typing_id.extend_from_slice(&timeout.to_be_bytes()); - room_typing_id.push(0xff); - room_typing_id.extend_from_slice(&count); - - self.typingid_userid - .insert(&room_typing_id, &*user_id.as_bytes())?; - - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &count)?; - - Ok(()) + self.db.typing_add(user_id, room_id, timeout) } /// Removes a user from typing before the timeout is reached. @@ -209,33 +72,11 @@ impl RoomEdus { &self, user_id: &UserId, room_id: &RoomId, - globals: &super::super::globals::Globals, ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let user_id = user_id.to_string(); - - let mut found_outdated = false; - - // Maybe there are multiple ones from calling roomtyping_add multiple times - for outdated_edu in self - .typingid_userid - .scan_prefix(prefix) - .filter(|(_, v)| &**v == user_id.as_bytes()) - { - self.typingid_userid.remove(&outdated_edu.0)?; - found_outdated = true; - } - - if found_outdated { - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - } - - Ok(()) + self.db.typing_remove(user_id, room_id) } + /* TODO: Do this in background thread? /// Makes sure that typing events with old timestamps get removed. fn typings_maintain( &self, @@ -279,45 +120,23 @@ impl RoomEdus { Ok(()) } + */ /// Returns the count of the last typing update in this room. #[tracing::instrument(skip(self, globals))] pub fn last_typing_update( &self, room_id: &RoomId, - globals: &super::super::globals::Globals, ) -> Result { - self.typings_maintain(room_id, globals)?; - - Ok(self - .roomid_lasttypingupdate - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) + self.db.last_typing_update(room_id) } + /// Returns a new typing EDU. pub fn typings_all( &self, room_id: &RoomId, ) -> Result> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut user_ids = HashSet::new(); - - for (_, user_id) in self.typingid_userid.scan_prefix(prefix) { - let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| { - Error::bad_database("User ID in typingid_userid is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?; - - user_ids.insert(user_id); - } + let user_ids = self.db.typings_all(room_id)?; Ok(SyncEphemeralRoomEvent { content: ruma::events::typing::TypingEventContent { @@ -335,52 +154,13 @@ impl RoomEdus { user_id: &UserId, room_id: &RoomId, presence: PresenceEvent, - globals: &super::super::globals::Globals, ) -> Result<()> { - // TODO: Remove old entry? Or maybe just wipe completely from time to time? - - let count = globals.next_count()?.to_be_bytes(); - - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count); - presence_id.push(0xff); - presence_id.extend_from_slice(presence.sender.as_bytes()); - - self.presenceid_presence.insert( - &presence_id, - &serde_json::to_vec(&presence).expect("PresenceEvent can be serialized"), - )?; - - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) + self.db.update_presence(user_id, room_id, presence) } /// Resets the presence timeout, so the user will stay in their current presence state. - #[tracing::instrument(skip(self))] pub fn ping_presence(&self, user_id: &UserId) -> Result<()> { - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - /// Returns the timestamp of the last presence update of this user in millis since the unix epoch. - pub fn last_presence_update(&self, user_id: &UserId) -> Result> { - self.userid_lastpresenceupdate - .get(user_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) - }) - .transpose() + self.db.ping_presence(user_id) } pub fn get_last_presence_event( @@ -388,42 +168,15 @@ impl RoomEdus { user_id: &UserId, room_id: &RoomId, ) -> Result> { - let last_update = match self.last_presence_update(user_id)? { + let last_update = match self.db.last_presence_update(user_id)? { Some(last) => last, None => return Ok(None), }; - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&last_update.to_be_bytes()); - presence_id.push(0xff); - presence_id.extend_from_slice(user_id.as_bytes()); - - self.presenceid_presence - .get(&presence_id)? - .map(|value| { - let mut presence: PresenceEvent = serde_json::from_slice(&value) - .map_err(|_| Error::bad_database("Invalid presence event in db."))?; - let current_timestamp: UInt = utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"); - - if presence.content.presence == PresenceState::Online { - // Don't set last_active_ago when the user is online - presence.content.last_active_ago = None; - } else { - // Convert from timestamp to duration - presence.content.last_active_ago = presence - .content - .last_active_ago - .map(|timestamp| current_timestamp - timestamp); - } - - Ok(presence) - }) - .transpose() + self.db.get_presence_event(room_id, user_id, last_update) } + /* TODO /// Sets all users to offline who have been quiet for too long. fn _presence_maintain( &self, @@ -489,62 +242,15 @@ impl RoomEdus { } Ok(()) - } + }*/ - /// Returns an iterator over the most recent presence updates that happened after the event with id `since`. + /// Returns the most recent presence updates that happened after the event with id `since`. 
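+    ///
+    /// The result maps each user to their latest `PresenceEvent`; `last_active_ago` is converted
+    /// from an absolute timestamp into a duration relative to now, and is cleared entirely for
+    /// users who are currently online.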
#[tracing::instrument(skip(self, since, _rooms, _globals))] pub fn presence_since( &self, room_id: &RoomId, since: u64, - _rooms: &super::Rooms, - _globals: &super::super::globals::Globals, ) -> Result, PresenceEvent>> { - //self.presence_maintain(rooms, globals)?; - - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - let mut hashmap = HashMap::new(); - - for (key, value) in self - .presenceid_presence - .iter_from(&*first_possible_edu, false) - .take_while(|(key, _)| key.starts_with(&prefix)) - { - let user_id = UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("Invalid UserId bytes in presenceid_presence."))?, - ) - .map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?; - - let mut presence: PresenceEvent = serde_json::from_slice(&value) - .map_err(|_| Error::bad_database("Invalid presence event in db."))?; - - let current_timestamp: UInt = utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"); - - if presence.content.presence == PresenceState::Online { - // Don't set last_active_ago when the user is online - presence.content.last_active_ago = None; - } else { - // Convert from timestamp to duration - presence.content.last_active_ago = presence - .content - .last_active_ago - .map(|timestamp| current_timestamp - timestamp); - } - - hashmap.insert(user_id, presence); - } - - Ok(hashmap) + self.db.presence_since(room_id, since) } } diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index e59219b2..5b77586a 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -2,1151 +2,1157 @@ /// An async function that can recursively call itself. type AsyncRecursiveType<'a, T> = Pin + 'a + Send>>; -/// When receiving an event one needs to: -/// 0. Check the server is in the room -/// 1. Skip the PDU if we already know about it -/// 2. Check signatures, otherwise drop -/// 3. Check content hash, redact if doesn't match -/// 4. Fetch any missing auth events doing all checks listed here starting at 1. These are not -/// timeline events -/// 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are -/// also rejected "due to auth events" -/// 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events -/// 7. Persist this event as an outlier -/// 8. If not timeline event: stop -/// 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline -/// events -/// 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities -/// doing all the checks in this list starting at 1. These are not timeline events -/// 11. Check the auth of the event passes based on the state of the event -/// 12. Ensure that the state is derived from the previous current state (i.e. we calculated by -/// doing state res where one of the inputs was a previously trusted set of state, don't just -/// trust a set of state we got from a remote) -/// 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" -/// it -/// 14. 
Use state resolution to find new room state -// We use some AsyncRecursiveType hacks here so we can call this async funtion recursively -#[tracing::instrument(skip(value, is_timeline_event, db, pub_key_map))] -pub(crate) async fn handle_incoming_pdu<'a>( - origin: &'a ServerName, - event_id: &'a EventId, - room_id: &'a RoomId, - value: BTreeMap, - is_timeline_event: bool, - db: &'a Database, - pub_key_map: &'a RwLock>>, -) -> Result>> { - db.rooms.exists(room_id)?.ok_or(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server"))?; - - db.rooms.is_disabled(room_id)?.ok_or(Error::BadRequest(ErrorKind::Forbidden, "Federation of this room is currently disabled on this server."))?; - - // 1. Skip the PDU if we already have it as a timeline event - if let Some(pdu_id) = db.rooms.get_pdu_id(event_id)? { - return Some(pdu_id.to_vec()); - } - - let create_event = db - .rooms - .room_state_get(room_id, &StateEventType::RoomCreate, "")? - .ok_or_else(|| Error::bad_database("Failed to find create event in db."))?; - - let first_pdu_in_room = db - .rooms - .first_pdu_in_room(room_id)? - .ok_or_else(|| Error::bad_database("Failed to find first pdu in db."))?; - - let (incoming_pdu, val) = handle_outlier_pdu( - origin, - &create_event, - event_id, - room_id, - value, - db, - pub_key_map, - ) - .await?; - - // 8. if not timeline event: stop - if !is_timeline_event { - return Ok(None); - } - - // Skip old events - if incoming_pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { - return Ok(None); - } +use crate::service::*; + +pub struct Service; + +impl Service { + /// When receiving an event one needs to: + /// 0. Check the server is in the room + /// 1. Skip the PDU if we already know about it + /// 2. Check signatures, otherwise drop + /// 3. Check content hash, redact if doesn't match + /// 4. Fetch any missing auth events doing all checks listed here starting at 1. These are not + /// timeline events + /// 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are + /// also rejected "due to auth events" + /// 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events + /// 7. Persist this event as an outlier + /// 8. If not timeline event: stop + /// 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline + /// events + /// 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities + /// doing all the checks in this list starting at 1. These are not timeline events + /// 11. Check the auth of the event passes based on the state of the event + /// 12. Ensure that the state is derived from the previous current state (i.e. we calculated by + /// doing state res where one of the inputs was a previously trusted set of state, don't just + /// trust a set of state we got from a remote) + /// 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" + /// it + /// 14. 
Use state resolution to find new room state + // We use some AsyncRecursiveType hacks here so we can call this async funtion recursively + #[tracing::instrument(skip(value, is_timeline_event, db, pub_key_map))] + pub(crate) async fn handle_incoming_pdu<'a>( + origin: &'a ServerName, + event_id: &'a EventId, + room_id: &'a RoomId, + value: BTreeMap, + is_timeline_event: bool, + db: &'a Database, + pub_key_map: &'a RwLock>>, + ) -> Result>> { + db.rooms.exists(room_id)?.ok_or(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server"))?; + + db.rooms.is_disabled(room_id)?.ok_or(Error::BadRequest(ErrorKind::Forbidden, "Federation of this room is currently disabled on this server."))?; + + // 1. Skip the PDU if we already have it as a timeline event + if let Some(pdu_id) = db.rooms.get_pdu_id(event_id)? { + return Some(pdu_id.to_vec()); + } - // 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline events - let sorted_prev_events = fetch_unknown_prev_events(incoming_pdu.prev_events.clone()); + let create_event = db + .rooms + .room_state_get(room_id, &StateEventType::RoomCreate, "")? + .ok_or_else(|| Error::bad_database("Failed to find create event in db."))?; - let mut errors = 0; - for prev_id in dbg!(sorted) { - // Check for disabled again because it might have changed - db.rooms.is_disabled(room_id)?.ok_or(Error::BadRequest(ErrorKind::Forbidden, "Federation of - this room is currently disabled on this server."))?; + let first_pdu_in_room = db + .rooms + .first_pdu_in_room(room_id)? + .ok_or_else(|| Error::bad_database("Failed to find first pdu in db."))?; - if let Some((time, tries)) = db - .globals - .bad_event_ratelimiter - .read() - .unwrap() - .get(&*prev_id) - { - // Exponential backoff - let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); - if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { - min_elapsed_duration = Duration::from_secs(60 * 60 * 24); - } + let (incoming_pdu, val) = handle_outlier_pdu( + origin, + &create_event, + event_id, + room_id, + value, + db, + pub_key_map, + ) + .await?; - if time.elapsed() < min_elapsed_duration { - info!("Backing off from {}", prev_id); - continue; - } + // 8. if not timeline event: stop + if !is_timeline_event { + return Ok(None); } - if errors >= 5 { - break; + // Skip old events + if incoming_pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { + return Ok(None); } - if let Some((pdu, json)) = eventid_info.remove(&*prev_id) { - // Skip old events - if pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { - continue; - } + // 9. Fetch any missing prev events doing all checks listed here starting at 1. 
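
Both the old and the new version of the prev-event loop that follows rate-limit events which previously failed to process: the required wait grows as 5 minutes times the square of the failure count and is capped at 24 hours, and after five failures in a single pass the loop gives up on the remaining prev events. A standalone sketch of that schedule (the helper name is made up for illustration, not part of the patch):

    use std::time::Duration;

    /// Backoff applied to previously failed events: 5 min * tries^2, capped at 24 h.
    fn min_elapsed(tries: u32) -> Duration {
        let backoff = Duration::from_secs(5 * 60) * tries * tries;
        backoff.min(Duration::from_secs(60 * 60 * 24))
    }

    fn main() {
        assert_eq!(min_elapsed(1), Duration::from_secs(300));     // 5 minutes
        assert_eq!(min_elapsed(2), Duration::from_secs(1_200));   // 20 minutes
        assert_eq!(min_elapsed(3), Duration::from_secs(2_700));   // 45 minutes
        assert_eq!(min_elapsed(17), Duration::from_secs(86_400)); // capped at 24 hours
    }
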
These are timeline events + let sorted_prev_events = fetch_unknown_prev_events(incoming_pdu.prev_events.clone()); - let start_time = Instant::now(); - db.globals - .roomid_federationhandletime - .write() - .unwrap() - .insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time)); + let mut errors = 0; + for prev_id in dbg!(sorted) { + // Check for disabled again because it might have changed + db.rooms.is_disabled(room_id)?.ok_or(Error::BadRequest(ErrorKind::Forbidden, "Federation of + this room is currently disabled on this server."))?; - if let Err(e) = upgrade_outlier_to_timeline_pdu( - pdu, - json, - &create_event, - origin, - db, - room_id, - pub_key_map, - ) - .await + if let Some((time, tries)) = db + .globals + .bad_event_ratelimiter + .read() + .unwrap() + .get(&*prev_id) { - errors += 1; - warn!("Prev event {} failed: {}", prev_id, e); - match db - .globals - .bad_event_ratelimiter - .write() - .unwrap() - .entry((*prev_id).to_owned()) - { - hash_map::Entry::Vacant(e) => { - e.insert((Instant::now(), 1)); - } - hash_map::Entry::Occupied(mut e) => { - *e.get_mut() = (Instant::now(), e.get().1 + 1) - } + // Exponential backoff + let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + info!("Backing off from {}", prev_id); + continue; } } - let elapsed = start_time.elapsed(); - db.globals - .roomid_federationhandletime - .write() - .unwrap() - .remove(&room_id.to_owned()); - warn!( - "Handling prev event {} took {}m{}s", - prev_id, - elapsed.as_secs() / 60, - elapsed.as_secs() % 60 - ); - } - } - // Done with prev events, now handling the incoming event - - let start_time = Instant::now(); - db.globals - .roomid_federationhandletime - .write() - .unwrap() - .insert(room_id.to_owned(), (event_id.to_owned(), start_time)); - let r = upgrade_outlier_to_timeline_pdu( - incoming_pdu, - val, - &create_event, - origin, - db, - room_id, - pub_key_map, - ) - .await; - db.globals - .roomid_federationhandletime - .write() - .unwrap() - .remove(&room_id.to_owned()); - - r -} + if errors >= 5 { + break; + } -#[tracing::instrument(skip(create_event, value, db, pub_key_map))] -fn handle_outlier_pdu<'a>( - origin: &'a ServerName, - create_event: &'a PduEvent, - event_id: &'a EventId, - room_id: &'a RoomId, - value: BTreeMap, - db: &'a Database, - pub_key_map: &'a RwLock>>, -) -> AsyncRecursiveType<'a, Result<(Arc, BTreeMap), String>> { - Box::pin(async move { - // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json - - // We go through all the signatures we see on the value and fetch the corresponding signing - // keys - fetch_required_signing_keys(&value, pub_key_map, db) - .await?; - - // 2. Check signatures, otherwise drop - // 3. 
check content hash, redact if doesn't match - let create_event_content: RoomCreateEventContent = - serde_json::from_str(create_event.content.get()).map_err(|e| { - error!("Invalid create event: {}", e); - Error::BadDatabase("Invalid create event in db") - })?; + if let Some((pdu, json)) = eventid_info.remove(&*prev_id) { + // Skip old events + if pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { + continue; + } - let room_version_id = &create_event_content.room_version; - let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); + let start_time = Instant::now(); + db.globals + .roomid_federationhandletime + .write() + .unwrap() + .insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time)); - let mut val = match ruma::signatures::verify_event( - &*pub_key_map.read().map_err(|_| "RwLock is poisoned.")?, - &value, - room_version_id, - ) { - Err(e) => { - // Drop - warn!("Dropping bad event {}: {}", event_id, e); - return Err("Signature verification failed".to_owned()); - } - Ok(ruma::signatures::Verified::Signatures) => { - // Redact - warn!("Calculated hash does not match: {}", event_id); - match ruma::signatures::redact(&value, room_version_id) { - Ok(obj) => obj, - Err(_) => return Err("Redaction failed".to_owned()), + if let Err(e) = upgrade_outlier_to_timeline_pdu( + pdu, + json, + &create_event, + origin, + db, + room_id, + pub_key_map, + ) + .await + { + errors += 1; + warn!("Prev event {} failed: {}", prev_id, e); + match db + .globals + .bad_event_ratelimiter + .write() + .unwrap() + .entry((*prev_id).to_owned()) + { + hash_map::Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + } + hash_map::Entry::Occupied(mut e) => { + *e.get_mut() = (Instant::now(), e.get().1 + 1) + } + } } + let elapsed = start_time.elapsed(); + db.globals + .roomid_federationhandletime + .write() + .unwrap() + .remove(&room_id.to_owned()); + warn!( + "Handling prev event {} took {}m{}s", + prev_id, + elapsed.as_secs() / 60, + elapsed.as_secs() % 60 + ); } - Ok(ruma::signatures::Verified::All) => value, - }; - - // Now that we have checked the signature and hashes we can add the eventID and convert - // to our PduEvent type - val.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - let incoming_pdu = serde_json::from_value::( - serde_json::to_value(&val).expect("CanonicalJsonObj is a valid JsonValue"), - ) - .map_err(|_| "Event is not a valid PDU.".to_owned())?; + } - // 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events - // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" - // NOTE: Step 5 is not applied anymore because it failed too often - warn!("Fetching auth events for {}", incoming_pdu.event_id); - fetch_and_handle_outliers( - db, + // Done with prev events, now handling the incoming event + + let start_time = Instant::now(); + db.globals + .roomid_federationhandletime + .write() + .unwrap() + .insert(room_id.to_owned(), (event_id.to_owned(), start_time)); + let r = upgrade_outlier_to_timeline_pdu( + incoming_pdu, + val, + &create_event, origin, - &incoming_pdu - .auth_events - .iter() - .map(|x| Arc::from(&**x)) - .collect::>(), - create_event, + db, room_id, pub_key_map, ) .await; + db.globals + .roomid_federationhandletime + .write() + .unwrap() + .remove(&room_id.to_owned()); - // 6. 
Reject "due to auth events" if the event doesn't pass auth based on the auth events - info!( - "Auth check for {} based on auth events", - incoming_pdu.event_id - ); + r + } - // Build map of auth events - let mut auth_events = HashMap::new(); - for id in &incoming_pdu.auth_events { - let auth_event = match db.rooms.get_pdu(id)? { - Some(e) => e, - None => { - warn!("Could not find auth event {}", id); - continue; - } - }; + #[tracing::instrument(skip(create_event, value, db, pub_key_map))] + fn handle_outlier_pdu<'a>( + origin: &'a ServerName, + create_event: &'a PduEvent, + event_id: &'a EventId, + room_id: &'a RoomId, + value: BTreeMap, + db: &'a Database, + pub_key_map: &'a RwLock>>, + ) -> AsyncRecursiveType<'a, Result<(Arc, BTreeMap), String>> { + Box::pin(async move { + // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json + + // We go through all the signatures we see on the value and fetch the corresponding signing + // keys + fetch_required_signing_keys(&value, pub_key_map, db) + .await?; + + // 2. Check signatures, otherwise drop + // 3. check content hash, redact if doesn't match + let create_event_content: RoomCreateEventContent = + serde_json::from_str(create_event.content.get()).map_err(|e| { + error!("Invalid create event: {}", e); + Error::BadDatabase("Invalid create event in db") + })?; - match auth_events.entry(( - auth_event.kind.to_string().into(), - auth_event - .state_key - .clone() - .expect("all auth events have state keys"), - )) { - hash_map::Entry::Vacant(v) => { - v.insert(auth_event); + let room_version_id = &create_event_content.room_version; + let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); + + let mut val = match ruma::signatures::verify_event( + &*pub_key_map.read().map_err(|_| "RwLock is poisoned.")?, + &value, + room_version_id, + ) { + Err(e) => { + // Drop + warn!("Dropping bad event {}: {}", event_id, e); + return Err("Signature verification failed".to_owned()); } - hash_map::Entry::Occupied(_) => { - return Err(Error::BadRequest(ErrorKind::InvalidParam, - "Auth event's type and state_key combination exists multiple times." - )); + Ok(ruma::signatures::Verified::Signatures) => { + // Redact + warn!("Calculated hash does not match: {}", event_id); + match ruma::signatures::redact(&value, room_version_id) { + Ok(obj) => obj, + Err(_) => return Err("Redaction failed".to_owned()), + } } - } - } + Ok(ruma::signatures::Verified::All) => value, + }; - // The original create event must be in the auth events - if auth_events - .get(&(StateEventType::RoomCreate, "".to_owned())) - .map(|a| a.as_ref()) - != Some(create_event) - { - return Err(Error::BadRequest(ErrorKind::InvalidParam("Incoming event refers to wrong create event."))); - } + // Now that we have checked the signature and hashes we can add the eventID and convert + // to our PduEvent type + val.insert( + "event_id".to_owned(), + CanonicalJsonValue::String(event_id.as_str().to_owned()), + ); + let incoming_pdu = serde_json::from_value::( + serde_json::to_value(&val).expect("CanonicalJsonObj is a valid JsonValue"), + ) + .map_err(|_| "Event is not a valid PDU.".to_owned())?; - if !state_res::event_auth::auth_check( - &room_version, - &incoming_pdu, - None::, // TODO: third party invite - |k, s| auth_events.get(&(k.to_string().into(), s.to_owned())), - ) - .map_err(|e| {error!(e); Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed")})? 
- { - return Err(Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed")); - } + // 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events + // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" + // NOTE: Step 5 is not applied anymore because it failed too often + warn!("Fetching auth events for {}", incoming_pdu.event_id); + fetch_and_handle_outliers( + db, + origin, + &incoming_pdu + .auth_events + .iter() + .map(|x| Arc::from(&**x)) + .collect::>(), + create_event, + room_id, + pub_key_map, + ) + .await; - info!("Validation successful."); + // 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events + info!( + "Auth check for {} based on auth events", + incoming_pdu.event_id + ); - // 7. Persist the event as an outlier. - db.rooms - .add_pdu_outlier(&incoming_pdu.event_id, &val)?; + // Build map of auth events + let mut auth_events = HashMap::new(); + for id in &incoming_pdu.auth_events { + let auth_event = match db.rooms.get_pdu(id)? { + Some(e) => e, + None => { + warn!("Could not find auth event {}", id); + continue; + } + }; - info!("Added pdu as outlier."); + match auth_events.entry(( + auth_event.kind.to_string().into(), + auth_event + .state_key + .clone() + .expect("all auth events have state keys"), + )) { + hash_map::Entry::Vacant(v) => { + v.insert(auth_event); + } + hash_map::Entry::Occupied(_) => { + return Err(Error::BadRequest(ErrorKind::InvalidParam, + "Auth event's type and state_key combination exists multiple times." + )); + } + } + } - Ok((Arc::new(incoming_pdu), val)) - }) -} + // The original create event must be in the auth events + if auth_events + .get(&(StateEventType::RoomCreate, "".to_owned())) + .map(|a| a.as_ref()) + != Some(create_event) + { + return Err(Error::BadRequest(ErrorKind::InvalidParam("Incoming event refers to wrong create event."))); + } -#[tracing::instrument(skip(incoming_pdu, val, create_event, db, pub_key_map))] -async fn upgrade_outlier_to_timeline_pdu( - incoming_pdu: Arc, - val: BTreeMap, - create_event: &PduEvent, - origin: &ServerName, - db: &Database, - room_id: &RoomId, - pub_key_map: &RwLock>>, -) -> Result>, String> { - // Skip the PDU if we already have it as a timeline event - if let Ok(Some(pduid)) = db.rooms.get_pdu_id(&incoming_pdu.event_id) { - return Ok(Some(pduid)); - } + if !state_res::event_auth::auth_check( + &room_version, + &incoming_pdu, + None::, // TODO: third party invite + |k, s| auth_events.get(&(k.to_string().into(), s.to_owned())), + ) + .map_err(|e| {error!(e); Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed")})? + { + return Err(Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed")); + } - if db - .rooms - .is_event_soft_failed(&incoming_pdu.event_id) - .map_err(|_| "Failed to ask db for soft fail".to_owned())? - { - return Err("Event has been soft failed".into()); - } + info!("Validation successful."); - info!("Upgrading {} to timeline pdu", incoming_pdu.event_id); + // 7. Persist the event as an outlier. 
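
Note what the auth-event indexing above enforces: the referenced auth events are collected into a map keyed by (event type, state_key), and the PDU is rejected outright if the same combination occurs twice, because the auth rules expect at most one auth event per key (the room's original m.room.create event must also be among them). A reduced sketch of that dedup step with simplified string types (the real map holds Arc<PduEvent> keyed by StateEventType):

    use std::collections::hash_map::Entry;
    use std::collections::HashMap;

    /// Index auth events by (type, state_key); a duplicate combination is rejected.
    fn index_auth_events(
        events: Vec<(String, String, String)>, // (event type, state_key, event id)
    ) -> Result<HashMap<(String, String), String>, String> {
        let mut map = HashMap::new();
        for (kind, state_key, event_id) in events {
            match map.entry((kind, state_key)) {
                Entry::Vacant(v) => {
                    v.insert(event_id);
                }
                Entry::Occupied(_) => {
                    return Err(
                        "Auth event's type and state_key combination exists multiple times."
                            .to_owned(),
                    )
                }
            }
        }
        Ok(map)
    }

    fn main() {
        let ok = index_auth_events(vec![
            ("m.room.create".to_owned(), String::new(), "$create".to_owned()),
            ("m.room.power_levels".to_owned(), String::new(), "$power".to_owned()),
        ]);
        assert!(ok.is_ok());

        let duplicate = index_auth_events(vec![
            ("m.room.create".to_owned(), String::new(), "$create".to_owned()),
            ("m.room.create".to_owned(), String::new(), "$other".to_owned()),
        ]);
        assert!(duplicate.is_err());
    }
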
+ db.rooms + .add_pdu_outlier(&incoming_pdu.event_id, &val)?; - let create_event_content: RoomCreateEventContent = - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::BadDatabase("Invalid create event in db") - })?; + info!("Added pdu as outlier."); - let room_version_id = &create_event_content.room_version; - let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); + Ok((Arc::new(incoming_pdu), val)) + }) + } - // 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities - // doing all the checks in this list starting at 1. These are not timeline events. + #[tracing::instrument(skip(incoming_pdu, val, create_event, db, pub_key_map))] + async fn upgrade_outlier_to_timeline_pdu( + incoming_pdu: Arc, + val: BTreeMap, + create_event: &PduEvent, + origin: &ServerName, + db: &Database, + room_id: &RoomId, + pub_key_map: &RwLock>>, + ) -> Result>, String> { + // Skip the PDU if we already have it as a timeline event + if let Ok(Some(pduid)) = db.rooms.get_pdu_id(&incoming_pdu.event_id) { + return Ok(Some(pduid)); + } - // TODO: if we know the prev_events of the incoming event we can avoid the request and build - // the state from a known point and resolve if > 1 prev_event + if db + .rooms + .is_event_soft_failed(&incoming_pdu.event_id) + .map_err(|_| "Failed to ask db for soft fail".to_owned())? + { + return Err("Event has been soft failed".into()); + } - info!("Requesting state at event"); - let mut state_at_incoming_event = None; + info!("Upgrading {} to timeline pdu", incoming_pdu.event_id); - if incoming_pdu.prev_events.len() == 1 { - let prev_event = &*incoming_pdu.prev_events[0]; - let prev_event_sstatehash = db - .rooms - .pdu_shortstatehash(prev_event) - .map_err(|_| "Failed talking to db".to_owned())?; + let create_event_content: RoomCreateEventContent = + serde_json::from_str(create_event.content.get()).map_err(|e| { + warn!("Invalid create event: {}", e); + Error::BadDatabase("Invalid create event in db") + })?; - let state = if let Some(shortstatehash) = prev_event_sstatehash { - Some(db.rooms.state_full_ids(shortstatehash).await) - } else { - None - }; - - if let Some(Ok(mut state)) = state { - info!("Using cached state"); - let prev_pdu = - db.rooms.get_pdu(prev_event).ok().flatten().ok_or_else(|| { - "Could not find prev event, but we know the state.".to_owned() - })?; + let room_version_id = &create_event_content.room_version; + let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); - if let Some(state_key) = &prev_pdu.state_key { - let shortstatekey = db - .rooms - .get_or_create_shortstatekey( - &prev_pdu.kind.to_string().into(), - state_key, - &db.globals, - ) - .map_err(|_| "Failed to create shortstatekey.".to_owned())?; + // 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities + // doing all the checks in this list starting at 1. These are not timeline events. 
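
upgrade_outlier_to_timeline_pdu, which begins above, is where an event that was only persisted as an outlier (addressable by event id, with no position in the room) gains a pdu id in the room's timeline. A toy model of that two-tier storage, assuming plain string ids and JSON blobs (the real code uses the pduid_pdu tree and a separate outlier tree, as the lookup comments further down describe):

    use std::collections::{BTreeMap, HashMap};

    /// Toy model: outliers are only addressable by event id; timeline events
    /// additionally get a room-ordered pdu id that sync and pagination rely on.
    #[derive(Default)]
    struct Store {
        outliers: HashMap<String, String>,       // event id -> event json
        timeline: BTreeMap<Vec<u8>, String>,     // pdu id (orderable) -> event json
        eventid_pduid: HashMap<String, Vec<u8>>, // event id -> pdu id
    }

    impl Store {
        fn add_outlier(&mut self, event_id: &str, json: &str) {
            self.outliers.insert(event_id.to_owned(), json.to_owned());
        }

        /// The "upgrade": give a known outlier a position in the timeline.
        fn upgrade_to_timeline(&mut self, event_id: &str, pdu_id: Vec<u8>) {
            if let Some(json) = self.outliers.get(event_id).cloned() {
                self.timeline.insert(pdu_id.clone(), json);
                self.eventid_pduid.insert(event_id.to_owned(), pdu_id);
            }
        }

        /// Lookups check the timeline first and fall back to the outlier store.
        fn get_pdu(&self, event_id: &str) -> Option<&String> {
            self.eventid_pduid
                .get(event_id)
                .and_then(|pdu_id| self.timeline.get(pdu_id))
                .or_else(|| self.outliers.get(event_id))
        }
    }

    fn main() {
        let mut store = Store::default();
        store.add_outlier("$event", "{\"type\":\"m.room.message\"}");
        assert!(store.get_pdu("$event").is_some()); // visible as an outlier
        assert!(store.timeline.is_empty());         // but not part of the timeline yet
        store.upgrade_to_timeline("$event", vec![0, 0, 1]);
        assert_eq!(store.timeline.len(), 1);
    }
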
- state.insert(shortstatekey, Arc::from(prev_event)); - // Now it's the state after the pdu - } + // TODO: if we know the prev_events of the incoming event we can avoid the request and build + // the state from a known point and resolve if > 1 prev_event - state_at_incoming_event = Some(state); - } - } else { - info!("Calculating state at event using state res"); - let mut extremity_sstatehashes = HashMap::new(); - - let mut okay = true; - for prev_eventid in &incoming_pdu.prev_events { - let prev_event = if let Ok(Some(pdu)) = db.rooms.get_pdu(prev_eventid) { - pdu - } else { - okay = false; - break; - }; + info!("Requesting state at event"); + let mut state_at_incoming_event = None; + + if incoming_pdu.prev_events.len() == 1 { + let prev_event = &*incoming_pdu.prev_events[0]; + let prev_event_sstatehash = db + .rooms + .pdu_shortstatehash(prev_event) + .map_err(|_| "Failed talking to db".to_owned())?; - let sstatehash = if let Ok(Some(s)) = db.rooms.pdu_shortstatehash(prev_eventid) { - s + let state = if let Some(shortstatehash) = prev_event_sstatehash { + Some(db.rooms.state_full_ids(shortstatehash).await) } else { - okay = false; - break; + None }; - extremity_sstatehashes.insert(sstatehash, prev_event); - } - - if okay { - let mut fork_states = Vec::with_capacity(extremity_sstatehashes.len()); - let mut auth_chain_sets = Vec::with_capacity(extremity_sstatehashes.len()); + if let Some(Ok(mut state)) = state { + info!("Using cached state"); + let prev_pdu = + db.rooms.get_pdu(prev_event).ok().flatten().ok_or_else(|| { + "Could not find prev event, but we know the state.".to_owned() + })?; - for (sstatehash, prev_event) in extremity_sstatehashes { - let mut leaf_state: BTreeMap<_, _> = db - .rooms - .state_full_ids(sstatehash) - .await - .map_err(|_| "Failed to ask db for room state.".to_owned())?; - - if let Some(state_key) = &prev_event.state_key { + if let Some(state_key) = &prev_pdu.state_key { let shortstatekey = db .rooms .get_or_create_shortstatekey( - &prev_event.kind.to_string().into(), + &prev_pdu.kind.to_string().into(), state_key, &db.globals, ) .map_err(|_| "Failed to create shortstatekey.".to_owned())?; - leaf_state.insert(shortstatekey, Arc::from(&*prev_event.event_id)); + + state.insert(shortstatekey, Arc::from(prev_event)); // Now it's the state after the pdu } - let mut state = StateMap::with_capacity(leaf_state.len()); - let mut starting_events = Vec::with_capacity(leaf_state.len()); + state_at_incoming_event = Some(state); + } + } else { + info!("Calculating state at event using state res"); + let mut extremity_sstatehashes = HashMap::new(); - for (k, id) in leaf_state { - if let Ok((ty, st_key)) = db.rooms.get_statekey_from_short(k) { - // FIXME: Undo .to_string().into() when StateMap - // is updated to use StateEventType - state.insert((ty.to_string().into(), st_key), id.clone()); - } else { - warn!("Failed to get_statekey_from_short."); - } - starting_events.push(id); - } + let mut okay = true; + for prev_eventid in &incoming_pdu.prev_events { + let prev_event = if let Ok(Some(pdu)) = db.rooms.get_pdu(prev_eventid) { + pdu + } else { + okay = false; + break; + }; - auth_chain_sets.push( - get_auth_chain(room_id, starting_events, db) - .await - .map_err(|_| "Failed to load auth chain.".to_owned())? 
- .collect(), - ); + let sstatehash = if let Ok(Some(s)) = db.rooms.pdu_shortstatehash(prev_eventid) { + s + } else { + okay = false; + break; + }; - fork_states.push(state); + extremity_sstatehashes.insert(sstatehash, prev_event); } - let lock = db.globals.stateres_mutex.lock(); + if okay { + let mut fork_states = Vec::with_capacity(extremity_sstatehashes.len()); + let mut auth_chain_sets = Vec::with_capacity(extremity_sstatehashes.len()); - let result = state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| { - let res = db.rooms.get_pdu(id); - if let Err(e) = &res { - error!("LOOK AT ME Failed to fetch event: {}", e); + for (sstatehash, prev_event) in extremity_sstatehashes { + let mut leaf_state: BTreeMap<_, _> = db + .rooms + .state_full_ids(sstatehash) + .await + .map_err(|_| "Failed to ask db for room state.".to_owned())?; + + if let Some(state_key) = &prev_event.state_key { + let shortstatekey = db + .rooms + .get_or_create_shortstatekey( + &prev_event.kind.to_string().into(), + state_key, + &db.globals, + ) + .map_err(|_| "Failed to create shortstatekey.".to_owned())?; + leaf_state.insert(shortstatekey, Arc::from(&*prev_event.event_id)); + // Now it's the state after the pdu + } + + let mut state = StateMap::with_capacity(leaf_state.len()); + let mut starting_events = Vec::with_capacity(leaf_state.len()); + + for (k, id) in leaf_state { + if let Ok((ty, st_key)) = db.rooms.get_statekey_from_short(k) { + // FIXME: Undo .to_string().into() when StateMap + // is updated to use StateEventType + state.insert((ty.to_string().into(), st_key), id.clone()); + } else { + warn!("Failed to get_statekey_from_short."); + } + starting_events.push(id); + } + + auth_chain_sets.push( + get_auth_chain(room_id, starting_events, db) + .await + .map_err(|_| "Failed to load auth chain.".to_owned())? + .collect(), + ); + + fork_states.push(state); } - res.ok().flatten() - }); - drop(lock); - - state_at_incoming_event = match result { - Ok(new_state) => Some( - new_state - .into_iter() - .map(|((event_type, state_key), event_id)| { - let shortstatekey = db - .rooms - .get_or_create_shortstatekey( - &event_type.to_string().into(), - &state_key, - &db.globals, - ) - .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; - Ok((shortstatekey, event_id)) - }) - .collect::>()?, - ), - Err(e) => { - warn!("State resolution on prev events failed, either an event could not be found or deserialization: {}", e); - None + + let lock = db.globals.stateres_mutex.lock(); + + let result = state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| { + let res = db.rooms.get_pdu(id); + if let Err(e) = &res { + error!("LOOK AT ME Failed to fetch event: {}", e); + } + res.ok().flatten() + }); + drop(lock); + + state_at_incoming_event = match result { + Ok(new_state) => Some( + new_state + .into_iter() + .map(|((event_type, state_key), event_id)| { + let shortstatekey = db + .rooms + .get_or_create_shortstatekey( + &event_type.to_string().into(), + &state_key, + &db.globals, + ) + .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; + Ok((shortstatekey, event_id)) + }) + .collect::>()?, + ), + Err(e) => { + warn!("State resolution on prev events failed, either an event could not be found or deserialization: {}", e); + None + } } } } - } - if state_at_incoming_event.is_none() { - info!("Calling /state_ids"); - // Call /state_ids to find out what the state at this pdu is. 
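
A detail worth spelling out from the cached-state path above: the snapshot stored for an event describes the room at (i.e. before) that event, so when the single prev event is itself a state event it has to be folded in before the map can serve as the state at the new event. A compact sketch of that adjustment, using string keys in place of shortstatekeys:

    use std::collections::HashMap;

    // (event type, state_key) -> event id
    type StateMap = HashMap<(String, String), String>;

    /// State *after* an event = state *at* the event, plus the event itself
    /// if it carries a state_key.
    fn state_after(
        mut state_at: StateMap,
        kind: &str,
        state_key: Option<&str>,
        event_id: &str,
    ) -> StateMap {
        if let Some(key) = state_key {
            state_at.insert((kind.to_owned(), key.to_owned()), event_id.to_owned());
        }
        state_at
    }

    fn main() {
        let mut at = StateMap::new();
        at.insert(("m.room.create".to_owned(), String::new()), "$create".to_owned());

        // A membership event is part of the state after itself...
        let after = state_after(at.clone(), "m.room.member", Some("@alice:example.org"), "$join");
        assert_eq!(after.len(), 2);

        // ...while a plain message leaves the state untouched.
        let unchanged = state_after(at, "m.room.message", None, "$message");
        assert_eq!(unchanged.len(), 1);
    }
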
We trust the server's - // response to some extend, but we still do a lot of checks on the events - match db - .sending - .send_federation_request( - &db.globals, - origin, - get_room_state_ids::v1::Request { - room_id, - event_id: &incoming_pdu.event_id, - }, - ) - .await - { - Ok(res) => { - info!("Fetching state events at event."); - let state_vec = fetch_and_handle_outliers( - db, + if state_at_incoming_event.is_none() { + info!("Calling /state_ids"); + // Call /state_ids to find out what the state at this pdu is. We trust the server's + // response to some extend, but we still do a lot of checks on the events + match db + .sending + .send_federation_request( + &db.globals, origin, - &res.pdu_ids - .iter() - .map(|x| Arc::from(&**x)) - .collect::>(), - create_event, - room_id, - pub_key_map, + get_room_state_ids::v1::Request { + room_id, + event_id: &incoming_pdu.event_id, + }, ) - .await; - - let mut state: BTreeMap<_, Arc> = BTreeMap::new(); - for (pdu, _) in state_vec { - let state_key = pdu - .state_key - .clone() - .ok_or_else(|| "Found non-state pdu in state events.".to_owned())?; + .await + { + Ok(res) => { + info!("Fetching state events at event."); + let state_vec = fetch_and_handle_outliers( + db, + origin, + &res.pdu_ids + .iter() + .map(|x| Arc::from(&**x)) + .collect::>(), + create_event, + room_id, + pub_key_map, + ) + .await; + + let mut state: BTreeMap<_, Arc> = BTreeMap::new(); + for (pdu, _) in state_vec { + let state_key = pdu + .state_key + .clone() + .ok_or_else(|| "Found non-state pdu in state events.".to_owned())?; + + let shortstatekey = db + .rooms + .get_or_create_shortstatekey( + &pdu.kind.to_string().into(), + &state_key, + &db.globals, + ) + .map_err(|_| "Failed to create shortstatekey.".to_owned())?; + + match state.entry(shortstatekey) { + btree_map::Entry::Vacant(v) => { + v.insert(Arc::from(&*pdu.event_id)); + } + btree_map::Entry::Occupied(_) => return Err( + "State event's type and state_key combination exists multiple times." + .to_owned(), + ), + } + } - let shortstatekey = db + // The original create event must still be in the state + let create_shortstatekey = db .rooms - .get_or_create_shortstatekey( - &pdu.kind.to_string().into(), - &state_key, - &db.globals, - ) - .map_err(|_| "Failed to create shortstatekey.".to_owned())?; - - match state.entry(shortstatekey) { - btree_map::Entry::Vacant(v) => { - v.insert(Arc::from(&*pdu.event_id)); - } - btree_map::Entry::Occupied(_) => return Err( - "State event's type and state_key combination exists multiple times." - .to_owned(), - ), + .get_shortstatekey(&StateEventType::RoomCreate, "") + .map_err(|_| "Failed to talk to db.")? + .expect("Room exists"); + + if state.get(&create_shortstatekey).map(|id| id.as_ref()) + != Some(&create_event.event_id) + { + return Err("Incoming event refers to wrong create event.".to_owned()); } + + state_at_incoming_event = Some(state); + } + Err(e) => { + warn!("Fetching state for event failed: {}", e); + return Err("Fetching state for event failed".into()); } + }; + } - // The original create event must still be in the state - let create_shortstatekey = db - .rooms - .get_shortstatekey(&StateEventType::RoomCreate, "") - .map_err(|_| "Failed to talk to db.")? 
- .expect("Room exists"); + let state_at_incoming_event = + state_at_incoming_event.expect("we always set this to some above"); - if state.get(&create_shortstatekey).map(|id| id.as_ref()) - != Some(&create_event.event_id) - { - return Err("Incoming event refers to wrong create event.".to_owned()); - } + info!("Starting auth check"); + // 11. Check the auth of the event passes based on the state of the event + let check_result = state_res::event_auth::auth_check( + &room_version, + &incoming_pdu, + None::, // TODO: third party invite + |k, s| { + db.rooms + .get_shortstatekey(&k.to_string().into(), s) + .ok() + .flatten() + .and_then(|shortstatekey| state_at_incoming_event.get(&shortstatekey)) + .and_then(|event_id| db.rooms.get_pdu(event_id).ok().flatten()) + }, + ) + .map_err(|_e| "Auth check failed.".to_owned())?; - state_at_incoming_event = Some(state); - } - Err(e) => { - warn!("Fetching state for event failed: {}", e); - return Err("Fetching state for event failed".into()); - } - }; - } + if !check_result { + return Err("Event has failed auth check with state at the event.".into()); + } + info!("Auth check succeeded"); - let state_at_incoming_event = - state_at_incoming_event.expect("we always set this to some above"); + // We start looking at current room state now, so lets lock the room - info!("Starting auth check"); - // 11. Check the auth of the event passes based on the state of the event - let check_result = state_res::event_auth::auth_check( - &room_version, - &incoming_pdu, - None::, // TODO: third party invite - |k, s| { - db.rooms - .get_shortstatekey(&k.to_string().into(), s) - .ok() - .flatten() - .and_then(|shortstatekey| state_at_incoming_event.get(&shortstatekey)) - .and_then(|event_id| db.rooms.get_pdu(event_id).ok().flatten()) - }, - ) - .map_err(|_e| "Auth check failed.".to_owned())?; - - if !check_result { - return Err("Event has failed auth check with state at the event.".into()); - } - info!("Auth check succeeded"); + let mutex_state = Arc::clone( + db.globals + .roomid_mutex_state + .write() + .unwrap() + .entry(room_id.to_owned()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; - // We start looking at current room state now, so lets lock the room + // Now we calculate the set of extremities this room has after the incoming event has been + // applied. We start with the previous extremities (aka leaves) + info!("Calculating extremities"); + let mut extremities = db + .rooms + .get_pdu_leaves(room_id) + .map_err(|_| "Failed to load room leaves".to_owned())?; - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - // Now we calculate the set of extremities this room has after the incoming event has been - // applied. 
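
For orientation: a room's forward extremities (the "leaves" referred to here) are the events that no other accepted event references yet. Each incoming timeline event consumes the leaves it lists in prev_events and, unless it is soft failed, becomes a leaf itself. A small sketch of that bookkeeping, assuming plain string event ids and a caller-supplied referenced-check (not the patch's API):

    use std::collections::HashSet;

    /// Update a room's forward extremities for one accepted timeline event.
    fn apply_event(
        mut leaves: HashSet<String>,
        event_id: &str,
        prev_events: &[&str],
        is_referenced: impl Fn(&str) -> bool,
    ) -> HashSet<String> {
        // Leaves referenced by this event's prev_events stop being leaves.
        for prev in prev_events {
            leaves.remove(*prev);
        }
        // Drop anything some other event has referenced in the meantime.
        leaves.retain(|id| !is_referenced(id.as_str()));
        // The new event is now a leaf of the room's event graph.
        leaves.insert(event_id.to_owned());
        leaves
    }

    fn main() {
        let leaves: HashSet<String> = ["$a".to_owned(), "$b".to_owned()].into();
        let leaves = apply_event(leaves, "$c", &["$a"], |_| false);
        assert!(leaves.contains("$c") && leaves.contains("$b") && !leaves.contains("$a"));
    }

In the patch the final insertion only happens after the soft-fail check further down; soft-failed events are kept as outliers and never become extremities.
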
We start with the previous extremities (aka leaves) - info!("Calculating extremities"); - let mut extremities = db - .rooms - .get_pdu_leaves(room_id) - .map_err(|_| "Failed to load room leaves".to_owned())?; - - // Remove any forward extremities that are referenced by this incoming event's prev_events - for prev_event in &incoming_pdu.prev_events { - if extremities.contains(prev_event) { - extremities.remove(prev_event); + // Remove any forward extremities that are referenced by this incoming event's prev_events + for prev_event in &incoming_pdu.prev_events { + if extremities.contains(prev_event) { + extremities.remove(prev_event); + } } - } - // Only keep those extremities were not referenced yet - extremities.retain(|id| !matches!(db.rooms.is_event_referenced(room_id, id), Ok(true))); + // Only keep those extremities were not referenced yet + extremities.retain(|id| !matches!(db.rooms.is_event_referenced(room_id, id), Ok(true))); - info!("Compressing state at event"); - let state_ids_compressed = state_at_incoming_event - .iter() - .map(|(shortstatekey, id)| { - db.rooms - .compress_state_event(*shortstatekey, id, &db.globals) - .map_err(|_| "Failed to compress_state_event".to_owned()) - }) - .collect::>()?; + info!("Compressing state at event"); + let state_ids_compressed = state_at_incoming_event + .iter() + .map(|(shortstatekey, id)| { + db.rooms + .compress_state_event(*shortstatekey, id, &db.globals) + .map_err(|_| "Failed to compress_state_event".to_owned()) + }) + .collect::>()?; - // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it - info!("Starting soft fail auth check"); + // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it + info!("Starting soft fail auth check"); - let auth_events = db - .rooms - .get_auth_events( - room_id, - &incoming_pdu.kind, - &incoming_pdu.sender, - incoming_pdu.state_key.as_deref(), - &incoming_pdu.content, - ) - .map_err(|_| "Failed to get_auth_events.".to_owned())?; - - let soft_fail = !state_res::event_auth::auth_check( - &room_version, - &incoming_pdu, - None::, - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|_e| "Auth check failed.".to_owned())?; - - if soft_fail { - append_incoming_pdu( - db, + let auth_events = db + .rooms + .get_auth_events( + room_id, + &incoming_pdu.kind, + &incoming_pdu.sender, + incoming_pdu.state_key.as_deref(), + &incoming_pdu.content, + ) + .map_err(|_| "Failed to get_auth_events.".to_owned())?; + + let soft_fail = !state_res::event_auth::auth_check( + &room_version, &incoming_pdu, - val, - extremities.iter().map(Deref::deref), - state_ids_compressed, - soft_fail, - &state_lock, + None::, + |k, s| auth_events.get(&(k.clone(), s.to_owned())), ) - .map_err(|e| { - warn!("Failed to add pdu to db: {}", e); - "Failed to add pdu to db.".to_owned() - })?; + .map_err(|_e| "Auth check failed.".to_owned())?; - // Soft fail, we keep the event as an outlier but don't add it to the timeline - warn!("Event was soft failed: {:?}", incoming_pdu); - db.rooms - .mark_event_soft_failed(&incoming_pdu.event_id) - .map_err(|_| "Failed to set soft failed flag".to_owned())?; - return Err("Event has been soft failed".into()); - } - - if incoming_pdu.state_key.is_some() { - info!("Loading current room state ids"); - let current_sstatehash = db - .rooms - .current_shortstatehash(room_id) - .map_err(|_| "Failed to load current state hash.".to_owned())? 
- .expect("every room has state"); + if soft_fail { + append_incoming_pdu( + db, + &incoming_pdu, + val, + extremities.iter().map(Deref::deref), + state_ids_compressed, + soft_fail, + &state_lock, + ) + .map_err(|e| { + warn!("Failed to add pdu to db: {}", e); + "Failed to add pdu to db.".to_owned() + })?; - let current_state_ids = db - .rooms - .state_full_ids(current_sstatehash) - .await - .map_err(|_| "Failed to load room state.")?; + // Soft fail, we keep the event as an outlier but don't add it to the timeline + warn!("Event was soft failed: {:?}", incoming_pdu); + db.rooms + .mark_event_soft_failed(&incoming_pdu.event_id) + .map_err(|_| "Failed to set soft failed flag".to_owned())?; + return Err("Event has been soft failed".into()); + } - info!("Preparing for stateres to derive new room state"); - let mut extremity_sstatehashes = HashMap::new(); + if incoming_pdu.state_key.is_some() { + info!("Loading current room state ids"); + let current_sstatehash = db + .rooms + .current_shortstatehash(room_id) + .map_err(|_| "Failed to load current state hash.".to_owned())? + .expect("every room has state"); - info!("Loading extremities"); - for id in dbg!(&extremities) { - match db + let current_state_ids = db .rooms - .get_pdu(id) - .map_err(|_| "Failed to ask db for pdu.".to_owned())? - { - Some(leaf_pdu) => { - extremity_sstatehashes.insert( - db.rooms - .pdu_shortstatehash(&leaf_pdu.event_id) - .map_err(|_| "Failed to ask db for pdu state hash.".to_owned())? - .ok_or_else(|| { - error!( - "Found extremity pdu with no statehash in db: {:?}", - leaf_pdu - ); - "Found pdu with no statehash in db.".to_owned() - })?, - leaf_pdu, - ); - } - _ => { - error!("Missing state snapshot for {:?}", id); - return Err("Missing state snapshot.".to_owned()); + .state_full_ids(current_sstatehash) + .await + .map_err(|_| "Failed to load room state.")?; + + info!("Preparing for stateres to derive new room state"); + let mut extremity_sstatehashes = HashMap::new(); + + info!("Loading extremities"); + for id in dbg!(&extremities) { + match db + .rooms + .get_pdu(id) + .map_err(|_| "Failed to ask db for pdu.".to_owned())? + { + Some(leaf_pdu) => { + extremity_sstatehashes.insert( + db.rooms + .pdu_shortstatehash(&leaf_pdu.event_id) + .map_err(|_| "Failed to ask db for pdu state hash.".to_owned())? + .ok_or_else(|| { + error!( + "Found extremity pdu with no statehash in db: {:?}", + leaf_pdu + ); + "Found pdu with no statehash in db.".to_owned() + })?, + leaf_pdu, + ); + } + _ => { + error!("Missing state snapshot for {:?}", id); + return Err("Missing state snapshot.".to_owned()); + } } } - } - let mut fork_states = Vec::new(); + let mut fork_states = Vec::new(); - // 12. Ensure that the state is derived from the previous current state (i.e. we calculated - // by doing state res where one of the inputs was a previously trusted set of state, - // don't just trust a set of state we got from a remote). + // 12. Ensure that the state is derived from the previous current state (i.e. we calculated + // by doing state res where one of the inputs was a previously trusted set of state, + // don't just trust a set of state we got from a remote). 
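
Concretely, the fork states assembled next are the room's current state, the state after the incoming event, and the state at every other forward extremity; full state resolution only runs when those maps actually disagree, otherwise the shared map is taken as-is. A schematic sketch of that decision with toy string-keyed state maps (the real code hands disagreeing forks, together with their auth chains, to ruma's state_res::resolve):

    use std::collections::HashMap;

    // (event type, state_key) -> event id
    type StateMap = HashMap<(String, String), String>;

    enum Outcome {
        /// Every fork agrees, so this is already the new room state.
        Trivial(StateMap),
        /// The forks disagree; state resolution over all forks is required.
        NeedsResolution,
    }

    fn new_room_state(fork_states: &[StateMap]) -> Outcome {
        match fork_states {
            // The patch rejects an empty fork list outright ("State is empty.").
            [] => Outcome::NeedsResolution,
            [first, rest @ ..] if rest.iter().all(|fork| fork == first) => {
                Outcome::Trivial(first.clone())
            }
            _ => Outcome::NeedsResolution,
        }
    }

    fn main() {
        let mut base = StateMap::new();
        base.insert(("m.room.create".to_owned(), String::new()), "$create".to_owned());

        // Identical forks: no resolution needed.
        assert!(matches!(new_room_state(&[base.clone(), base.clone()]), Outcome::Trivial(_)));

        // Diverging forks: fall back to full state resolution.
        let mut renamed = base.clone();
        renamed.insert(("m.room.name".to_owned(), String::new()), "$name".to_owned());
        assert!(matches!(new_room_state(&[base, renamed]), Outcome::NeedsResolution));
    }
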
- // We do this by adding the current state to the list of fork states - extremity_sstatehashes.remove(¤t_sstatehash); - fork_states.push(current_state_ids); - - // We also add state after incoming event to the fork states - let mut state_after = state_at_incoming_event.clone(); - if let Some(state_key) = &incoming_pdu.state_key { - let shortstatekey = db - .rooms - .get_or_create_shortstatekey( - &incoming_pdu.kind.to_string().into(), - state_key, - &db.globals, - ) - .map_err(|_| "Failed to create shortstatekey.".to_owned())?; + // We do this by adding the current state to the list of fork states + extremity_sstatehashes.remove(¤t_sstatehash); + fork_states.push(current_state_ids); - state_after.insert(shortstatekey, Arc::from(&*incoming_pdu.event_id)); - } - fork_states.push(state_after); - - let mut update_state = false; - // 14. Use state resolution to find new room state - let new_room_state = if fork_states.is_empty() { - return Err("State is empty.".to_owned()); - } else if fork_states.iter().skip(1).all(|f| &fork_states[0] == f) { - info!("State resolution trivial"); - // There was only one state, so it has to be the room's current state (because that is - // always included) - fork_states[0] - .iter() - .map(|(k, id)| { - db.rooms - .compress_state_event(*k, id, &db.globals) - .map_err(|_| "Failed to compress_state_event.".to_owned()) - }) - .collect::>()? - } else { - info!("Loading auth chains"); - // We do need to force an update to this room's state - update_state = true; - - let mut auth_chain_sets = Vec::new(); - for state in &fork_states { - auth_chain_sets.push( - get_auth_chain( - room_id, - state.iter().map(|(_, id)| id.clone()).collect(), - db, + // We also add state after incoming event to the fork states + let mut state_after = state_at_incoming_event.clone(); + if let Some(state_key) = &incoming_pdu.state_key { + let shortstatekey = db + .rooms + .get_or_create_shortstatekey( + &incoming_pdu.kind.to_string().into(), + state_key, + &db.globals, ) - .await - .map_err(|_| "Failed to load auth chain.".to_owned())? - .collect(), - ); - } + .map_err(|_| "Failed to create shortstatekey.".to_owned())?; - info!("Loading fork states"); + state_after.insert(shortstatekey, Arc::from(&*incoming_pdu.event_id)); + } + fork_states.push(state_after); + + let mut update_state = false; + // 14. Use state resolution to find new room state + let new_room_state = if fork_states.is_empty() { + return Err("State is empty.".to_owned()); + } else if fork_states.iter().skip(1).all(|f| &fork_states[0] == f) { + info!("State resolution trivial"); + // There was only one state, so it has to be the room's current state (because that is + // always included) + fork_states[0] + .iter() + .map(|(k, id)| { + db.rooms + .compress_state_event(*k, id, &db.globals) + .map_err(|_| "Failed to compress_state_event.".to_owned()) + }) + .collect::>()? + } else { + info!("Loading auth chains"); + // We do need to force an update to this room's state + update_state = true; + + let mut auth_chain_sets = Vec::new(); + for state in &fork_states { + auth_chain_sets.push( + get_auth_chain( + room_id, + state.iter().map(|(_, id)| id.clone()).collect(), + db, + ) + .await + .map_err(|_| "Failed to load auth chain.".to_owned())? 
+ .collect(), + ); + } - let fork_states: Vec<_> = fork_states - .into_iter() - .map(|map| { - map.into_iter() - .filter_map(|(k, id)| { - db.rooms - .get_statekey_from_short(k) - // FIXME: Undo .to_string().into() when StateMap - // is updated to use StateEventType - .map(|(ty, st_key)| ((ty.to_string().into(), st_key), id)) - .map_err(|e| warn!("Failed to get_statekey_from_short: {}", e)) - .ok() - }) - .collect::>() - }) - .collect(); - - info!("Resolving state"); - - let lock = db.globals.stateres_mutex.lock(); - let state = match state_res::resolve( - room_version_id, - &fork_states, - auth_chain_sets, - |id| { - let res = db.rooms.get_pdu(id); - if let Err(e) = &res { - error!("LOOK AT ME Failed to fetch event: {}", e); + info!("Loading fork states"); + + let fork_states: Vec<_> = fork_states + .into_iter() + .map(|map| { + map.into_iter() + .filter_map(|(k, id)| { + db.rooms + .get_statekey_from_short(k) + // FIXME: Undo .to_string().into() when StateMap + // is updated to use StateEventType + .map(|(ty, st_key)| ((ty.to_string().into(), st_key), id)) + .map_err(|e| warn!("Failed to get_statekey_from_short: {}", e)) + .ok() + }) + .collect::>() + }) + .collect(); + + info!("Resolving state"); + + let lock = db.globals.stateres_mutex.lock(); + let state = match state_res::resolve( + room_version_id, + &fork_states, + auth_chain_sets, + |id| { + let res = db.rooms.get_pdu(id); + if let Err(e) = &res { + error!("LOOK AT ME Failed to fetch event: {}", e); + } + res.ok().flatten() + }, + ) { + Ok(new_state) => new_state, + Err(_) => { + return Err("State resolution failed, either an event could not be found or deserialization".into()); } - res.ok().flatten() - }, - ) { - Ok(new_state) => new_state, - Err(_) => { - return Err("State resolution failed, either an event could not be found or deserialization".into()); - } + }; + + drop(lock); + + info!("State resolution done. Compressing state"); + + state + .into_iter() + .map(|((event_type, state_key), event_id)| { + let shortstatekey = db + .rooms + .get_or_create_shortstatekey( + &event_type.to_string().into(), + &state_key, + &db.globals, + ) + .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; + db.rooms + .compress_state_event(shortstatekey, &event_id, &db.globals) + .map_err(|_| "Failed to compress state event".to_owned()) + }) + .collect::>()? }; - drop(lock); - - info!("State resolution done. Compressing state"); - - state - .into_iter() - .map(|((event_type, state_key), event_id)| { - let shortstatekey = db - .rooms - .get_or_create_shortstatekey( - &event_type.to_string().into(), - &state_key, - &db.globals, - ) - .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; - db.rooms - .compress_state_event(shortstatekey, &event_id, &db.globals) - .map_err(|_| "Failed to compress state event".to_owned()) - }) - .collect::>()? - }; - - // Set the new room state to the resolved state - if update_state { - info!("Forcing new room state"); - db.rooms - .force_state(room_id, new_room_state, db) - .map_err(|_| "Failed to set new room state.".to_owned())?; + // Set the new room state to the resolved state + if update_state { + info!("Forcing new room state"); + db.rooms + .force_state(room_id, new_room_state, db) + .map_err(|_| "Failed to set new room state.".to_owned())?; + } } - } - info!("Appending pdu to timeline"); - extremities.insert(incoming_pdu.event_id.clone()); - - // Now that the event has passed all auth it is added into the timeline. 
- // We use the `state_at_event` instead of `state_after` so we accurately - // represent the state for this event. - - let pdu_id = append_incoming_pdu( - db, - &incoming_pdu, - val, - extremities.iter().map(Deref::deref), - state_ids_compressed, - soft_fail, - &state_lock, - ) - .map_err(|e| { - warn!("Failed to add pdu to db: {}", e); - "Failed to add pdu to db.".to_owned() - })?; - - info!("Appended incoming pdu"); - - // Event has passed all auth/stateres checks - drop(state_lock); - Ok(pdu_id) -} + info!("Appending pdu to timeline"); + extremities.insert(incoming_pdu.event_id.clone()); -/// Find the event and auth it. Once the event is validated (steps 1 - 8) -/// it is appended to the outliers Tree. -/// -/// Returns pdu and if we fetched it over federation the raw json. -/// -/// a. Look in the main timeline (pduid_pdu tree) -/// b. Look at outlier pdu tree -/// c. Ask origin server over federation -/// d. TODO: Ask other servers over federation? -#[tracing::instrument(skip_all)] -pub(crate) fn fetch_and_handle_outliers<'a>( - db: &'a Database, - origin: &'a ServerName, - events: &'a [Arc], - create_event: &'a PduEvent, - room_id: &'a RoomId, - pub_key_map: &'a RwLock>>, -) -> AsyncRecursiveType<'a, Vec<(Arc, Option>)>> { - Box::pin(async move { - let back_off = |id| match db.globals.bad_event_ratelimiter.write().unwrap().entry(id) { - hash_map::Entry::Vacant(e) => { - e.insert((Instant::now(), 1)); - } - hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), - }; + // Now that the event has passed all auth it is added into the timeline. + // We use the `state_at_event` instead of `state_after` so we accurately + // represent the state for this event. - let mut pdus = vec![]; - for id in events { - if let Some((time, tries)) = db.globals.bad_event_ratelimiter.read().unwrap().get(&**id) - { - // Exponential backoff - let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); - if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { - min_elapsed_duration = Duration::from_secs(60 * 60 * 24); - } + let pdu_id = append_incoming_pdu( + db, + &incoming_pdu, + val, + extremities.iter().map(Deref::deref), + state_ids_compressed, + soft_fail, + &state_lock, + ) + .map_err(|e| { + warn!("Failed to add pdu to db: {}", e); + "Failed to add pdu to db.".to_owned() + })?; - if time.elapsed() < min_elapsed_duration { - info!("Backing off from {}", id); - continue; - } - } + info!("Appended incoming pdu"); - // a. Look in the main timeline (pduid_pdu tree) - // b. Look at outlier pdu tree - // (get_pdu_json checks both) - if let Ok(Some(local_pdu)) = db.rooms.get_pdu(id) { - trace!("Found {} in db", id); - pdus.push((local_pdu, None)); - continue; - } + // Event has passed all auth/stateres checks + drop(state_lock); + Ok(pdu_id) + } - // c. Ask origin server over federation - // We also handle its auth chain here so we don't get a stack overflow in - // handle_outlier_pdu. - let mut todo_auth_events = vec![Arc::clone(id)]; - let mut events_in_reverse_order = Vec::new(); - let mut events_all = HashSet::new(); - let mut i = 0; - while let Some(next_id) = todo_auth_events.pop() { - if events_all.contains(&next_id) { - continue; + /// Find the event and auth it. Once the event is validated (steps 1 - 8) + /// it is appended to the outliers Tree. + /// + /// Returns pdu and if we fetched it over federation the raw json. + /// + /// a. Look in the main timeline (pduid_pdu tree) + /// b. Look at outlier pdu tree + /// c. 
Ask origin server over federation + /// d. TODO: Ask other servers over federation? + #[tracing::instrument(skip_all)] + pub(crate) fn fetch_and_handle_outliers<'a>( + db: &'a Database, + origin: &'a ServerName, + events: &'a [Arc], + create_event: &'a PduEvent, + room_id: &'a RoomId, + pub_key_map: &'a RwLock>>, + ) -> AsyncRecursiveType<'a, Vec<(Arc, Option>)>> { + Box::pin(async move { + let back_off = |id| match db.globals.bad_event_ratelimiter.write().unwrap().entry(id) { + hash_map::Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); } + hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), + }; - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; + let mut pdus = vec![]; + for id in events { + if let Some((time, tries)) = db.globals.bad_event_ratelimiter.read().unwrap().get(&**id) + { + // Exponential backoff + let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + info!("Backing off from {}", id); + continue; + } } - if let Ok(Some(_)) = db.rooms.get_pdu(&next_id) { + // a. Look in the main timeline (pduid_pdu tree) + // b. Look at outlier pdu tree + // (get_pdu_json checks both) + if let Ok(Some(local_pdu)) = db.rooms.get_pdu(id) { trace!("Found {} in db", id); + pdus.push((local_pdu, None)); continue; } - info!("Fetching {} over federation.", next_id); - match db - .sending - .send_federation_request( - &db.globals, - origin, - get_event::v1::Request { event_id: &next_id }, - ) - .await - { - Ok(res) => { - info!("Got {} over federation", next_id); - let (calculated_event_id, value) = - match crate::pdu::gen_event_id_canonical_json(&res.pdu, &db) { - Ok(t) => t, - Err(_) => { - back_off((*next_id).to_owned()); - continue; - } - }; + // c. Ask origin server over federation + // We also handle its auth chain here so we don't get a stack overflow in + // handle_outlier_pdu. + let mut todo_auth_events = vec![Arc::clone(id)]; + let mut events_in_reverse_order = Vec::new(); + let mut events_all = HashSet::new(); + let mut i = 0; + while let Some(next_id) = todo_auth_events.pop() { + if events_all.contains(&next_id) { + continue; + } - if calculated_event_id != *next_id { - warn!("Server didn't return event id we requested: requested: {}, we got {}. Event: {:?}", - next_id, calculated_event_id, &res.pdu); - } + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + + if let Ok(Some(_)) = db.rooms.get_pdu(&next_id) { + trace!("Found {} in db", id); + continue; + } - if let Some(auth_events) = - value.get("auth_events").and_then(|c| c.as_array()) - { - for auth_event in auth_events { - if let Ok(auth_event) = - serde_json::from_value(auth_event.clone().into()) - { - let a: Arc = auth_event; - todo_auth_events.push(a); - } else { - warn!("Auth event id is not valid"); + info!("Fetching {} over federation.", next_id); + match db + .sending + .send_federation_request( + &db.globals, + origin, + get_event::v1::Request { event_id: &next_id }, + ) + .await + { + Ok(res) => { + info!("Got {} over federation", next_id); + let (calculated_event_id, value) = + match crate::pdu::gen_event_id_canonical_json(&res.pdu, &db) { + Ok(t) => t, + Err(_) => { + back_off((*next_id).to_owned()); + continue; + } + }; + + if calculated_event_id != *next_id { + warn!("Server didn't return event id we requested: requested: {}, we got {}. 
Event: {:?}", + next_id, calculated_event_id, &res.pdu); + } + + if let Some(auth_events) = + value.get("auth_events").and_then(|c| c.as_array()) + { + for auth_event in auth_events { + if let Ok(auth_event) = + serde_json::from_value(auth_event.clone().into()) + { + let a: Arc = auth_event; + todo_auth_events.push(a); + } else { + warn!("Auth event id is not valid"); + } } + } else { + warn!("Auth event list invalid"); } - } else { - warn!("Auth event list invalid"); - } - events_in_reverse_order.push((next_id.clone(), value)); - events_all.insert(next_id); - } - Err(_) => { - warn!("Failed to fetch event: {}", next_id); - back_off((*next_id).to_owned()); + events_in_reverse_order.push((next_id.clone(), value)); + events_all.insert(next_id); + } + Err(_) => { + warn!("Failed to fetch event: {}", next_id); + back_off((*next_id).to_owned()); + } } } - } - for (next_id, value) in events_in_reverse_order.iter().rev() { - match handle_outlier_pdu( - origin, - create_event, - next_id, - room_id, - value.clone(), - db, - pub_key_map, - ) - .await - { - Ok((pdu, json)) => { - if next_id == id { - pdus.push((pdu, Some(json))); + for (next_id, value) in events_in_reverse_order.iter().rev() { + match handle_outlier_pdu( + origin, + create_event, + next_id, + room_id, + value.clone(), + db, + pub_key_map, + ) + .await + { + Ok((pdu, json)) => { + if next_id == id { + pdus.push((pdu, Some(json))); + } + } + Err(e) => { + warn!("Authentication of event {} failed: {:?}", next_id, e); + back_off((**next_id).to_owned()); } - } - Err(e) => { - warn!("Authentication of event {} failed: {:?}", next_id, e); - back_off((**next_id).to_owned()); } } } - } - pdus - }) -} - + pdus + }) + } -fn fetch_unknown_prev_events(initial_set: Vec>) -> Vec> { - let mut graph: HashMap, _> = HashMap::new(); - let mut eventid_info = HashMap::new(); - let mut todo_outlier_stack: Vec> = initial_set; - let mut amount = 0; + fn fetch_unknown_prev_events(initial_set: Vec>) -> Vec> { + let mut graph: HashMap, _> = HashMap::new(); + let mut eventid_info = HashMap::new(); + let mut todo_outlier_stack: Vec> = initial_set; - while let Some(prev_event_id) = todo_outlier_stack.pop() { - if let Some((pdu, json_opt)) = fetch_and_handle_outliers( - db, - origin, - &[prev_event_id.clone()], - &create_event, - room_id, - pub_key_map, - ) - .await - .pop() - { - if amount > 100 { - // Max limit reached - warn!("Max prev event limit reached!"); - graph.insert(prev_event_id.clone(), HashSet::new()); - continue; - } + let mut amount = 0; - if let Some(json) = - json_opt.or_else(|| db.rooms.get_outlier_pdu_json(&prev_event_id).ok().flatten()) + while let Some(prev_event_id) = todo_outlier_stack.pop() { + if let Some((pdu, json_opt)) = fetch_and_handle_outliers( + db, + origin, + &[prev_event_id.clone()], + &create_event, + room_id, + pub_key_map, + ) + .await + .pop() { - if pdu.origin_server_ts > first_pdu_in_room.origin_server_ts { - amount += 1; - for prev_prev in &pdu.prev_events { - if !graph.contains_key(prev_prev) { - todo_outlier_stack.push(dbg!(prev_prev.clone())); + if amount > 100 { + // Max limit reached + warn!("Max prev event limit reached!"); + graph.insert(prev_event_id.clone(), HashSet::new()); + continue; + } + + if let Some(json) = + json_opt.or_else(|| db.rooms.get_outlier_pdu_json(&prev_event_id).ok().flatten()) + { + if pdu.origin_server_ts > first_pdu_in_room.origin_server_ts { + amount += 1; + for prev_prev in &pdu.prev_events { + if !graph.contains_key(prev_prev) { + todo_outlier_stack.push(dbg!(prev_prev.clone())); + } } + + 
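// Illustrative sketch, not from the patch: the `graph` built here maps each event id to its
// prev_events, and the later call to `state_res::lexicographical_topological_sort` orders it so
// every event sorts after its prev_events, breaking ties by a (power level, timestamp, event id)
// key. A minimal std-only version of that ordering, with plain `String` ids and a hypothetical
// `u64` timestamp standing in for MilliSecondsSinceUnixEpoch:
fn lexicographic_topological_sort(
    graph: &std::collections::HashMap<String, std::collections::HashSet<String>>,
    sort_key: impl Fn(&str) -> (i64, u64),
) -> Vec<String> {
    use std::cmp::Reverse;
    use std::collections::{BinaryHeap, HashMap};

    // For every event, count how many of its prev_events are still unsorted, and remember
    // which events reference a given prev_event so we can release them later.
    let mut pending_prevs: HashMap<&str, usize> = HashMap::new();
    let mut referenced_by: HashMap<&str, Vec<&str>> = HashMap::new();
    for (event, prevs) in graph {
        pending_prevs.entry(event.as_str()).or_insert(0);
        for prev in prevs {
            if graph.contains_key(prev.as_str()) {
                *pending_prevs.entry(event.as_str()).or_insert(0) += 1;
                referenced_by.entry(prev.as_str()).or_default().push(event.as_str());
            }
        }
    }

    // Min-heap keyed by (sort key, id) so ties are broken deterministically.
    let mut ready: BinaryHeap<Reverse<((i64, u64), String)>> = pending_prevs
        .iter()
        .filter(|(_, pending)| **pending == 0)
        .map(|(id, _)| Reverse((sort_key(id), (*id).to_owned())))
        .collect();

    let mut sorted = Vec::with_capacity(graph.len());
    while let Some(Reverse((_, id))) = ready.pop() {
        for dependent in referenced_by.get(id.as_str()).into_iter().flatten() {
            let pending = pending_prevs.get_mut(dependent).expect("counted above");
            *pending -= 1;
            if *pending == 0 {
                ready.push(Reverse((sort_key(dependent), (*dependent).to_owned())));
            }
        }
        sorted.push(id);
    }
    sorted
}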
graph.insert( + prev_event_id.clone(), + pdu.prev_events.iter().cloned().collect(), + ); + } else { + // Time based check failed + graph.insert(prev_event_id.clone(), HashSet::new()); } - graph.insert( - prev_event_id.clone(), - pdu.prev_events.iter().cloned().collect(), - ); + eventid_info.insert(prev_event_id.clone(), (pdu, json)); } else { - // Time based check failed + // Get json failed, so this was not fetched over federation graph.insert(prev_event_id.clone(), HashSet::new()); } - - eventid_info.insert(prev_event_id.clone(), (pdu, json)); } else { - // Get json failed, so this was not fetched over federation + // Fetch and handle failed graph.insert(prev_event_id.clone(), HashSet::new()); } - } else { - // Fetch and handle failed - graph.insert(prev_event_id.clone(), HashSet::new()); } - } - let sorted = state_res::lexicographical_topological_sort(dbg!(&graph), |event_id| { - // This return value is the key used for sorting events, - // events are then sorted by power level, time, - // and lexically by event_id. - println!("{}", event_id); - Ok(( - int!(0), - MilliSecondsSinceUnixEpoch( - eventid_info - .get(event_id) - .map_or_else(|| uint!(0), |info| info.0.origin_server_ts), - ), - )) - }) - .map_err(|_| "Error sorting prev events".to_owned())?; - - sorted + let sorted = state_res::lexicographical_topological_sort(dbg!(&graph), |event_id| { + // This return value is the key used for sorting events, + // events are then sorted by power level, time, + // and lexically by event_id. + println!("{}", event_id); + Ok(( + int!(0), + MilliSecondsSinceUnixEpoch( + eventid_info + .get(event_id) + .map_or_else(|| uint!(0), |info| info.0.origin_server_ts), + ), + )) + }) + .map_err(|_| "Error sorting prev events".to_owned())?; + + sorted + } } diff --git a/src/service/rooms/lazy_loading/data.rs b/src/service/rooms/lazy_loading/data.rs new file mode 100644 index 00000000..9cf2d8bc --- /dev/null +++ b/src/service/rooms/lazy_loading/data.rs @@ -0,0 +1,24 @@ +pub trait Data { + fn lazy_load_was_sent_before( + &self, + user_id: &UserId, + device_id: &DeviceId, + room_id: &RoomId, + ll_user: &UserId, + ) -> Result; + + fn lazy_load_confirm_delivery( + &self, + user_id: &UserId, + device_id: &DeviceId, + room_id: &RoomId, + since: u64, + ) -> Result<()>; + + fn lazy_load_reset( + &self, + user_id: &UserId, + device_id: &DeviceId, + room_id: &RoomId, + ) -> Result<()>; +} diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index a402702a..cf00174b 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -1,4 +1,13 @@ +mod data; +pub use data::Data; +use crate::service::*; + +pub struct Service { + db: D, +} + +impl Service<_> { #[tracing::instrument(skip(self))] pub fn lazy_load_was_sent_before( &self, @@ -7,14 +16,7 @@ room_id: &RoomId, ll_user: &UserId, ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) + self.db.lazy_load_was_sent_before(user_id, device_id, room_id, ll_user) } #[tracing::instrument(skip(self))] @@ -45,27 +47,7 @@ room_id: &RoomId, since: u64, ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = 
user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) + self.db.lazy_load_confirm_delivery(user_d, device_id, room_id, since) } #[tracing::instrument(skip(self))] @@ -75,17 +57,6 @@ device_id: &DeviceId, room_id: &RoomId, ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) + self.db.lazy_load_reset(user_id, device_id, room_id); } - +} diff --git a/src/service/rooms/metadata/data.rs b/src/service/rooms/metadata/data.rs new file mode 100644 index 00000000..58bd3510 --- /dev/null +++ b/src/service/rooms/metadata/data.rs @@ -0,0 +1,3 @@ +pub trait Data { + fn exists(&self, room_id: &RoomId) -> Result; +} diff --git a/src/service/rooms/metadata/mod.rs b/src/service/rooms/metadata/mod.rs index 5d703451..644cd18f 100644 --- a/src/service/rooms/metadata/mod.rs +++ b/src/service/rooms/metadata/mod.rs @@ -1,44 +1,16 @@ - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; +mod data; +pub use data::Data; - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } +use crate::service::*; - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { - self.roomid_shortroomid - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) - }) - .transpose() - } +pub struct Service { + db: D, +} - pub fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - globals: &super::globals::Globals, - ) -> Result { - Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? { - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, - None => { - let short = globals.next_count()?; - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes())?; - short - } - }) +impl Service<_> { + /// Checks if a room exists. 
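// Illustrative sketch, not from the patch: this refactor keeps the public `Service` methods but
// routes every storage access through a `Data` trait, so the key-value backend can be swapped
// out. A compiling std-only miniature of that pattern, using `String` room ids and an in-memory
// backend as placeholders for the real types:
trait Data {
    fn exists(&self, room_id: &str) -> bool;
}

struct Service<D: Data> {
    db: D,
}

impl<D: Data> Service<D> {
    /// Checks if a room exists by delegating to whatever backend was plugged in.
    fn exists(&self, room_id: &str) -> bool {
        self.db.exists(room_id)
    }
}

// One possible backend: an in-memory set of known rooms.
struct MemoryBackend {
    rooms: std::collections::HashSet<String>,
}

impl Data for MemoryBackend {
    fn exists(&self, room_id: &str) -> bool {
        self.rooms.contains(room_id)
    }
}

fn demo() {
    let service = Service {
        db: MemoryBackend {
            rooms: ["!abc:example.org".to_owned()].into_iter().collect(),
        },
    };
    assert!(service.exists("!abc:example.org"));
    assert!(!service.exists("!missing:example.org"));
}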
+ #[tracing::instrument(skip(self))] + pub fn exists(&self, room_id: &RoomId) -> Result { + self.db.exists(room_id) } - +} diff --git a/src/service/rooms/outlier/data.rs b/src/service/rooms/outlier/data.rs new file mode 100644 index 00000000..6b534b95 --- /dev/null +++ b/src/service/rooms/outlier/data.rs @@ -0,0 +1,5 @@ +pub trait Data { + fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result>; + fn get_outlier_pdu(&self, event_id: &EventId) -> Result>; + fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()>; +} diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs index 340e93e4..c82cb628 100644 --- a/src/service/rooms/outlier/mod.rs +++ b/src/service/rooms/outlier/mod.rs @@ -1,27 +1,26 @@ +mod data; +pub use data::Data; + +use crate::service::*; + +pub struct Service { + db: D, +} + +impl Service<_> { /// Returns the pdu from the outlier tree. pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) + self.db.get_outlier_pdu_json(event_id) } /// Returns the pdu from the outlier tree. pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) + self.db.get_outlier_pdu(event_id) } /// Append the PDU as an outlier. #[tracing::instrument(skip(self, pdu))] pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) + self.db.add_pdu_outlier(event_id, pdu) } - +} diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs new file mode 100644 index 00000000..67787958 --- /dev/null +++ b/src/service/rooms/pdu_metadata/data.rs @@ -0,0 +1,6 @@ +pub trait Data { + fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()>; + fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result; + fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()>; + fn is_event_soft_failed(&self, event_id: &EventId) -> Result; +} diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs index f8ffcee1..6d6df223 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -1,31 +1,30 @@ +mod data; +pub use data::Data; +use crate::service::*; + +pub struct Service { + db: D, +} + +impl Service<_> { #[tracing::instrument(skip(self, room_id, event_ids))] pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) + self.db.mark_as_referenced(room_id, event_ids) } #[tracing::instrument(skip(self))] pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) + self.db.is_event_referenced(room_id, event_id) } #[tracing::instrument(skip(self))] pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - 
self.softfailedeventids.insert(event_id.as_bytes(), &[]) + self.db.mark_event_soft_failed(event_id) } #[tracing::instrument(skip(self))] pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) + self.db.is_event_soft_failed(event_id) } - +} diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index 63e8b713..c44d357c 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -196,3 +196,30 @@ }) } + pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { + self.roomid_shortroomid + .get(room_id.as_bytes())? + .map(|bytes| { + utils::u64_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) + }) + .transpose() + } + + pub fn get_or_create_shortroomid( + &self, + room_id: &RoomId, + globals: &super::globals::Globals, + ) -> Result { + Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? { + Some(short) => utils::u64_from_bytes(&short) + .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, + None => { + let short = globals.next_count()?; + self.roomid_shortroomid + .insert(room_id.as_bytes(), &short.to_be_bytes())?; + short + } + }) + } + diff --git a/src/service/rooms/state/data.rs b/src/service/rooms/state/data.rs index 4b42ca8e..8aa76380 100644 --- a/src/service/rooms/state/data.rs +++ b/src/service/rooms/state/data.rs @@ -1,16 +1,24 @@ pub trait Data { + /// Returns the last state hash key added to the db for the given room. fn get_room_shortstatehash(room_id: &RoomId); -} - /// Returns the last state hash key added to the db for the given room. - #[tracing::instrument(skip(self))] - pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } + /// Update the current state of the room. + fn set_room_state(room_id: &RoomId, new_shortstatehash: u64 + _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex + ); + + /// Associates a state with an event. + fn set_event_state(shorteventid: u64, shortstatehash: u64) -> Result<()> { + + /// Returns all events we would send as the prev_events of the next event. + fn get_forward_extremities(room_id: &RoomId) -> Result>>; + + /// Replace the forward extremities of the room. + fn set_forward_extremities( + room_id: &RoomId, + event_ids: impl IntoIterator + Debug, + _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex + ) -> Result<()> { +} +pub struct StateLock; diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index da03ad4c..b513ab53 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -1,25 +1,30 @@ +mod data; +pub use data::Data; + +use crate::service::*; + pub struct Service { db: D, } -impl Service { +impl Service<_> { /// Set the room to the given statehash and update caches. 
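// Illustrative sketch, not from the patch: `get_or_create_shortroomid` above swaps long room ids
// for small u64 "short ids" drawn from a global counter and stored as big-endian bytes. A
// std-only illustration of the allocate-or-reuse pattern and of the byte round trip; names here
// are placeholders, not the real API:
use std::collections::HashMap;

struct ShortIdAllocator {
    next: u64,
    by_room: HashMap<String, u64>,
}

impl ShortIdAllocator {
    fn get_or_create(&mut self, room_id: &str) -> u64 {
        if let Some(short) = self.by_room.get(room_id) {
            return *short;
        }
        let short = self.next;
        self.next += 1;
        self.by_room.insert(room_id.to_owned(), short);
        short
    }
}

// Big-endian bytes compare the same way the numbers do, which is presumably why the real code
// stores `short.to_be_bytes()` as the key material.
fn encode(short: u64) -> [u8; 8] {
    short.to_be_bytes()
}

fn decode(bytes: &[u8]) -> Option<u64> {
    Some(u64::from_be_bytes(bytes.try_into().ok()?))
}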
#[tracing::instrument(skip(self, new_state_ids_compressed, db))] pub fn force_state( &self, room_id: &RoomId, shortstatehash: u64, - statediffnew :HashSet, - statediffremoved :HashSet, + statediffnew: HashSet, + statediffremoved: HashSet, db: &Database, ) -> Result<()> { for event_id in statediffnew.into_iter().filter_map(|new| { - self.parse_compressed_state_event(new) + state_compressor::parse_compressed_state_event(new) .ok() .map(|(_, id)| id) }) { - let pdu = match self.get_pdu_json(&event_id)? { + let pdu = match timeline::get_pdu_json(&event_id)? { Some(pdu) => pdu, None => continue, }; @@ -55,56 +60,12 @@ impl Service { Err(_) => continue, }; - self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; + room::state_cache::update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; } - self.update_joined_count(room_id, db)?; + room::state_cache::update_joined_count(room_id, db)?; - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Returns the leaf pdus of a room. - #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - /// Replace the leaves of a room. - /// - /// The provided `event_ids` become the new leaves, this allows a room to have multiple - /// `prev_events`. - #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves<'a>( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } + db.set_room_state(room_id, new_shortstatehash); Ok(()) } @@ -121,11 +82,11 @@ impl Service { state_ids_compressed: HashSet, globals: &super::globals::Globals, ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; + let shorteventid = short::get_or_create_shorteventid(event_id, globals)?; - let previous_shortstatehash = self.current_shortstatehash(room_id)?; + let previous_shortstatehash = db.get_room_shortstatehash(room_id)?; - let state_hash = self.calculate_hash( + let state_hash = super::calculate_hash( &state_ids_compressed .iter() .map(|s| &s[..]) @@ -133,11 +94,11 @@ impl Service { ); let (shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, globals)?; + short::get_or_create_shortstatehash(&state_hash, globals)?; if !already_existed { let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; + .map_or_else(|| Ok(Vec::new()), |p| room::state_compressor.load_shortstatehash_info(p))?; let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() { @@ -156,7 +117,7 @@ impl Service { } else { (state_ids_compressed, HashSet::new()) }; - self.save_state_from_diff( + state_compressor::save_state_from_diff( 
shortstatehash, statediffnew, statediffremoved, @@ -165,8 +126,7 @@ impl Service { )?; } - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; + db.set_event_state(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; Ok(()) } @@ -183,7 +143,7 @@ impl Service { ) -> Result { let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; + let previous_shortstatehash = self.get_room_shortstatehash(&new_pdu.room_id)?; if let Some(p) = previous_shortstatehash { self.shorteventid_shortstatehash @@ -293,4 +253,8 @@ impl Service { Ok(()) } + + pub fn db(&self) -> D { + &self.db + } } From c21820083bf3285634e9f7098c19fd9527233029 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 9 Oct 2022 17:34:24 +0200 Subject: [PATCH 334/445] refactor: prepare src/service/rooms/edus/read_receipt/mod.rs from src/service/rooms/edus/mod.rs --- src/service/rooms/edus/{ => read_receipt}/mod.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/rooms/edus/{ => read_receipt}/mod.rs (100%) diff --git a/src/service/rooms/edus/mod.rs b/src/service/rooms/edus/read_receipt/mod.rs similarity index 100% rename from src/service/rooms/edus/mod.rs rename to src/service/rooms/edus/read_receipt/mod.rs From bfccd4f136117c4177e34aba10862a7d228fa556 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 9 Oct 2022 17:35:14 +0200 Subject: [PATCH 335/445] refactor: prepare src/service/rooms/edus/presence/mod.rs from src/service/rooms/edus/mod.rs --- src/service/rooms/edus/{ => presence}/mod.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/rooms/edus/{ => presence}/mod.rs (100%) diff --git a/src/service/rooms/edus/mod.rs b/src/service/rooms/edus/presence/mod.rs similarity index 100% rename from src/service/rooms/edus/mod.rs rename to src/service/rooms/edus/presence/mod.rs From d410f086424481123ca7893579e1d95bc289e3d0 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 9 Oct 2022 17:36:08 +0200 Subject: [PATCH 336/445] refactor: prepare src/service/rooms/edus/typing/mod.rs from src/service/rooms/edus/mod.rs --- src/service/rooms/edus/{ => typing}/mod.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/rooms/edus/{ => typing}/mod.rs (100%) diff --git a/src/service/rooms/edus/mod.rs b/src/service/rooms/edus/typing/mod.rs similarity index 100% rename from src/service/rooms/edus/mod.rs rename to src/service/rooms/edus/typing/mod.rs From 73217f238c61792967b72bc8016dbe4bc3efd38e Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 9 Oct 2022 17:37:57 +0200 Subject: [PATCH 337/445] refactor: prepare service/rooms/edus/presence/data.rs from service/rooms/edus/data.rs --- src/service/rooms/edus/{ => presence}/data.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/rooms/edus/{ => presence}/data.rs (100%) diff --git a/src/service/rooms/edus/data.rs b/src/service/rooms/edus/presence/data.rs similarity index 100% rename from src/service/rooms/edus/data.rs rename to src/service/rooms/edus/presence/data.rs From ac4724e82c9e5719287d9a2e30da037f5eb66f8c Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 9 Oct 2022 17:38:23 +0200 Subject: [PATCH 338/445] refactor: prepare service/rooms/edus/read_receipt/data.rs from service/rooms/edus/data.rs --- src/service/rooms/edus/{ => read_receipt}/data.rs | 0 1 file changed, 0 insertions(+), 0 
deletions(-) rename src/service/rooms/edus/{ => read_receipt}/data.rs (100%) diff --git a/src/service/rooms/edus/data.rs b/src/service/rooms/edus/read_receipt/data.rs similarity index 100% rename from src/service/rooms/edus/data.rs rename to src/service/rooms/edus/read_receipt/data.rs From c7e601eb0bb13ca374f657bb55934397174cd92d Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 9 Oct 2022 17:38:46 +0200 Subject: [PATCH 339/445] refactor: prepare service/rooms/edus/typing/data.rs from service/rooms/edus/data.rs --- src/service/rooms/edus/{ => typing}/data.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/rooms/edus/{ => typing}/data.rs (100%) diff --git a/src/service/rooms/edus/data.rs b/src/service/rooms/edus/typing/data.rs similarity index 100% rename from src/service/rooms/edus/data.rs rename to src/service/rooms/edus/typing/data.rs From 1869a38b8517b772456886627b9bbfa89224cab5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Sun, 10 Jul 2022 14:37:34 +0200 Subject: [PATCH 340/445] refactor(edus): split edus into separate modules --- src/database/key_value.rs | 14 +- src/service/rooms/edus/mod.rs | 259 +------------------- src/service/rooms/edus/presence/data.rs | 70 +----- src/service/rooms/edus/presence/mod.rs | 137 ----------- src/service/rooms/edus/read_receipt/data.rs | 62 +---- src/service/rooms/edus/read_receipt/mod.rs | 205 +--------------- src/service/rooms/edus/typing/data.rs | 85 +------ src/service/rooms/edus/typing/mod.rs | 175 +------------ 8 files changed, 27 insertions(+), 980 deletions(-) diff --git a/src/database/key_value.rs b/src/database/key_value.rs index 34916e4b..0be13115 100644 --- a/src/database/key_value.rs +++ b/src/database/key_value.rs @@ -156,7 +156,7 @@ impl service::room::directory::Data for KeyValueDatabase { } } -impl service::room::edus::Data for KeyValueDatabase { +impl service::room::edus::read_receipt::Data for KeyValueDatabase { fn readreceipt_update( &self, user_id: &UserId, @@ -203,7 +203,7 @@ impl service::room::edus::Data for KeyValueDatabase { room_id: &RoomId, since: u64, ) -> impl Iterator< - Item = Result<( + Item=Result<( Box, u64, Raw, @@ -229,7 +229,7 @@ impl service::room::edus::Data for KeyValueDatabase { Error::bad_database("Invalid readreceiptid userid bytes in db.") })?, ) - .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?; + .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?; let mut json = serde_json::from_slice::(&v).map_err(|_| { Error::bad_database("Read receipt in roomlatestid_roomlatest is invalid json.") @@ -293,7 +293,9 @@ impl service::room::edus::Data for KeyValueDatabase { .transpose()? .unwrap_or(0)) } +} +impl service::room::edus::typing::Data for KeyValueDatabase { fn typing_add( &self, user_id: &UserId, @@ -379,14 +381,16 @@ impl service::room::edus::Data for KeyValueDatabase { let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| { Error::bad_database("User ID in typingid_userid is invalid unicode.") })?) 
- .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?; + .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?; user_ids.insert(user_id); } Ok(user_ids) } +} +impl service::room::edus::presence::Data for KeyValueDatabase { fn update_presence( &self, user_id: &UserId, @@ -416,7 +420,7 @@ impl service::room::edus::Data for KeyValueDatabase { Ok(()) } - pub fn ping_presence(&self, user_id: &UserId) -> Result<()> { + fn ping_presence(&self, user_id: &UserId) -> Result<()> { self.userid_lastpresenceupdate.insert( user_id.as_bytes(), &utils::millis_since_unix_epoch().to_be_bytes(), diff --git a/src/service/rooms/edus/mod.rs b/src/service/rooms/edus/mod.rs index 06adf57e..d8ce5300 100644 --- a/src/service/rooms/edus/mod.rs +++ b/src/service/rooms/edus/mod.rs @@ -1,256 +1,3 @@ -mod data; -pub use data::Data; - -use crate::service::*; - -pub struct Service { - db: D, -} - -impl Service<_> { - /// Replaces the previous read receipt. - pub fn readreceipt_update( - &self, - user_id: &UserId, - room_id: &RoomId, - event: ReceiptEvent, - ) -> Result<()> { - self.db.readreceipt_update(user_id, room_id, event); - } - - /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. - #[tracing::instrument(skip(self))] - pub fn readreceipts_since<'a>( - &'a self, - room_id: &RoomId, - since: u64, - ) -> impl Iterator< - Item = Result<( - Box, - u64, - Raw, - )>, - > + 'a { - self.db.readreceipts_since(room_id, since) - } - - /// Sets a private read marker at `count`. - #[tracing::instrument(skip(self, globals))] - pub fn private_read_set( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result<()> { - self.db.private_read_set(room_id, user_id, count) - } - - /// Returns the private read marker. - #[tracing::instrument(skip(self))] - pub fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - self.db.private_read_get(room_id, user_id) - } - - /// Returns the count of the last typing update in this room. - pub fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { - self.db.last_privateread_update(user_id, room_id) - } - - /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is - /// called. - pub fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - ) -> Result<()> { - self.db.typing_add(user_id, room_id, timeout) - } - - /// Removes a user from typing before the timeout is reached. - pub fn typing_remove( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result<()> { - self.db.typing_remove(user_id, room_id) - } - - /* TODO: Do this in background thread? - /// Makes sure that typing events with old timestamps get removed. 
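// Illustrative sketch, not from the patch: the commented-out `typings_maintain` below drops
// typing entries whose timeout timestamp lies in the past. The same bookkeeping with plain std
// types, using milliseconds-since-epoch `u64` values as stand-ins for the real keys:
use std::collections::HashMap;

#[derive(Default)]
struct TypingTracker {
    // room id -> (user id -> timeout in ms since the unix epoch)
    typing: HashMap<String, HashMap<String, u64>>,
}

impl TypingTracker {
    fn typing_add(&mut self, room_id: &str, user_id: &str, timeout: u64) {
        self.typing
            .entry(room_id.to_owned())
            .or_default()
            .insert(user_id.to_owned(), timeout);
    }

    fn typing_remove(&mut self, room_id: &str, user_id: &str) {
        if let Some(room) = self.typing.get_mut(room_id) {
            room.remove(user_id);
        }
    }

    /// Drops all entries whose timeout already passed; returns true if anything was removed,
    /// mirroring the `found_outdated` flag in the maintenance code below.
    fn typings_maintain(&mut self, room_id: &str, now: u64) -> bool {
        if let Some(room) = self.typing.get_mut(room_id) {
            let before = room.len();
            room.retain(|_, timeout| *timeout >= now);
            room.len() != before
        } else {
            false
        }
    }

    /// Returns everyone still marked as typing in the room.
    fn typings_all(&self, room_id: &str) -> Vec<String> {
        self.typing
            .get(room_id)
            .map(|room| room.keys().cloned().collect())
            .unwrap_or_default()
    }
}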
- fn typings_maintain( - &self, - room_id: &RoomId, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let current_timestamp = utils::millis_since_unix_epoch(); - - let mut found_outdated = false; - - // Find all outdated edus before inserting a new one - for outdated_edu in self - .typingid_userid - .scan_prefix(prefix) - .map(|(key, _)| { - Ok::<_, Error>(( - key.clone(), - utils::u64_from_bytes( - &key.splitn(2, |&b| b == 0xff).nth(1).ok_or_else(|| { - Error::bad_database("RoomTyping has invalid timestamp or delimiters.") - })?[0..mem::size_of::()], - ) - .map_err(|_| Error::bad_database("RoomTyping has invalid timestamp bytes."))?, - )) - }) - .filter_map(|r| r.ok()) - .take_while(|&(_, timestamp)| timestamp < current_timestamp) - { - // This is an outdated edu (time > timestamp) - self.typingid_userid.remove(&outdated_edu.0)?; - found_outdated = true; - } - - if found_outdated { - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - } - - Ok(()) - } - */ - - /// Returns the count of the last typing update in this room. - #[tracing::instrument(skip(self, globals))] - pub fn last_typing_update( - &self, - room_id: &RoomId, - ) -> Result { - self.db.last_typing_update(room_id) - } - - /// Returns a new typing EDU. - pub fn typings_all( - &self, - room_id: &RoomId, - ) -> Result> { - let user_ids = self.db.typings_all(room_id)?; - - Ok(SyncEphemeralRoomEvent { - content: ruma::events::typing::TypingEventContent { - user_ids: user_ids.into_iter().collect(), - }, - }) - } - - /// Adds a presence event which will be saved until a new event replaces it. - /// - /// Note: This method takes a RoomId because presence updates are always bound to rooms to - /// make sure users outside these rooms can't see them. - pub fn update_presence( - &self, - user_id: &UserId, - room_id: &RoomId, - presence: PresenceEvent, - ) -> Result<()> { - self.db.update_presence(user_id, room_id, presence) - } - - /// Resets the presence timeout, so the user will stay in their current presence state. - pub fn ping_presence(&self, user_id: &UserId) -> Result<()> { - self.db.ping_presence(user_id) - } - - pub fn get_last_presence_event( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result> { - let last_update = match self.db.last_presence_update(user_id)? { - Some(last) => last, - None => return Ok(None), - }; - - self.db.get_presence_event(room_id, user_id, last_update) - } - - /* TODO - /// Sets all users to offline who have been quiet for too long. - fn _presence_maintain( - &self, - rooms: &super::Rooms, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let current_timestamp = utils::millis_since_unix_epoch(); - - for (user_id_bytes, last_timestamp) in self - .userid_lastpresenceupdate - .iter() - .filter_map(|(k, bytes)| { - Some(( - k, - utils::u64_from_bytes(&bytes) - .map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) - .ok()?, - )) - }) - .take_while(|(_, timestamp)| current_timestamp.saturating_sub(*timestamp) > 5 * 60_000) - // 5 Minutes - { - // Send new presence events to set the user offline - let count = globals.next_count()?.to_be_bytes(); - let user_id: Box<_> = utils::string_from_bytes(&user_id_bytes) - .map_err(|_| { - Error::bad_database("Invalid UserId bytes in userid_lastpresenceupdate.") - })? 
- .try_into() - .map_err(|_| Error::bad_database("Invalid UserId in userid_lastpresenceupdate."))?; - for room_id in rooms.rooms_joined(&user_id).filter_map(|r| r.ok()) { - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count); - presence_id.push(0xff); - presence_id.extend_from_slice(&user_id_bytes); - - self.presenceid_presence.insert( - &presence_id, - &serde_json::to_vec(&PresenceEvent { - content: PresenceEventContent { - avatar_url: None, - currently_active: None, - displayname: None, - last_active_ago: Some( - last_timestamp.try_into().expect("time is valid"), - ), - presence: PresenceState::Offline, - status_msg: None, - }, - sender: user_id.to_owned(), - }) - .expect("PresenceEvent can be serialized"), - )?; - } - - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - } - - Ok(()) - }*/ - - /// Returns the most recent presence updates that happened after the event with id `since`. - #[tracing::instrument(skip(self, since, _rooms, _globals))] - pub fn presence_since( - &self, - room_id: &RoomId, - since: u64, - ) -> Result, PresenceEvent>> { - self.db.presence_since(room_id, since) - } -} +pub mod presence; +pub mod read_receipt; +pub mod typing; diff --git a/src/service/rooms/edus/presence/data.rs b/src/service/rooms/edus/presence/data.rs index 16c14cf3..de72e219 100644 --- a/src/service/rooms/edus/presence/data.rs +++ b/src/service/rooms/edus/presence/data.rs @@ -1,67 +1,4 @@ pub trait Data { - /// Replaces the previous read receipt. - fn readreceipt_update( - &self, - user_id: &UserId, - room_id: &RoomId, - event: ReceiptEvent, - ) -> Result<()>; - - /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. - fn readreceipts_since( - &self, - room_id: &RoomId, - since: u64, - ) -> impl Iterator< - Item = Result<( - Box, - u64, - Raw, - )>, - >; - - /// Sets a private read marker at `count`. - fn private_read_set( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result<()>; - - /// Returns the private read marker. - fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result>; - - /// Returns the count of the last typing update in this room. - fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result; - - /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is - /// called. - fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - ) -> Result<()>; - - /// Removes a user from typing before the timeout is reached. - fn typing_remove( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result<()>; - - /// Returns the count of the last typing update in this room. - fn last_typing_update( - &self, - room_id: &RoomId, - ) -> Result; - - /// Returns all user ids currently typing. - fn typings_all( - &self, - room_id: &RoomId, - ) -> Result>; - /// Adds a presence event which will be saved until a new event replaces it. /// /// Note: This method takes a RoomId because presence updates are always bound to rooms to @@ -80,7 +17,12 @@ pub trait Data { fn last_presence_update(&self, user_id: &UserId) -> Result>; /// Returns the presence event with correct last_active_ago. 
- fn get_presence_event(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result>; + fn get_presence_event( + &self, + room_id: &RoomId, + user_id: &UserId, + count: u64, + ) -> Result>; /// Returns the most recent presence updates that happened after the event with id `since`. fn presence_since( diff --git a/src/service/rooms/edus/presence/mod.rs b/src/service/rooms/edus/presence/mod.rs index 06adf57e..5793a799 100644 --- a/src/service/rooms/edus/presence/mod.rs +++ b/src/service/rooms/edus/presence/mod.rs @@ -8,143 +8,6 @@ pub struct Service { } impl Service<_> { - /// Replaces the previous read receipt. - pub fn readreceipt_update( - &self, - user_id: &UserId, - room_id: &RoomId, - event: ReceiptEvent, - ) -> Result<()> { - self.db.readreceipt_update(user_id, room_id, event); - } - - /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. - #[tracing::instrument(skip(self))] - pub fn readreceipts_since<'a>( - &'a self, - room_id: &RoomId, - since: u64, - ) -> impl Iterator< - Item = Result<( - Box, - u64, - Raw, - )>, - > + 'a { - self.db.readreceipts_since(room_id, since) - } - - /// Sets a private read marker at `count`. - #[tracing::instrument(skip(self, globals))] - pub fn private_read_set( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result<()> { - self.db.private_read_set(room_id, user_id, count) - } - - /// Returns the private read marker. - #[tracing::instrument(skip(self))] - pub fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - self.db.private_read_get(room_id, user_id) - } - - /// Returns the count of the last typing update in this room. - pub fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { - self.db.last_privateread_update(user_id, room_id) - } - - /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is - /// called. - pub fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - ) -> Result<()> { - self.db.typing_add(user_id, room_id, timeout) - } - - /// Removes a user from typing before the timeout is reached. - pub fn typing_remove( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result<()> { - self.db.typing_remove(user_id, room_id) - } - - /* TODO: Do this in background thread? - /// Makes sure that typing events with old timestamps get removed. 
- fn typings_maintain( - &self, - room_id: &RoomId, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let current_timestamp = utils::millis_since_unix_epoch(); - - let mut found_outdated = false; - - // Find all outdated edus before inserting a new one - for outdated_edu in self - .typingid_userid - .scan_prefix(prefix) - .map(|(key, _)| { - Ok::<_, Error>(( - key.clone(), - utils::u64_from_bytes( - &key.splitn(2, |&b| b == 0xff).nth(1).ok_or_else(|| { - Error::bad_database("RoomTyping has invalid timestamp or delimiters.") - })?[0..mem::size_of::()], - ) - .map_err(|_| Error::bad_database("RoomTyping has invalid timestamp bytes."))?, - )) - }) - .filter_map(|r| r.ok()) - .take_while(|&(_, timestamp)| timestamp < current_timestamp) - { - // This is an outdated edu (time > timestamp) - self.typingid_userid.remove(&outdated_edu.0)?; - found_outdated = true; - } - - if found_outdated { - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - } - - Ok(()) - } - */ - - /// Returns the count of the last typing update in this room. - #[tracing::instrument(skip(self, globals))] - pub fn last_typing_update( - &self, - room_id: &RoomId, - ) -> Result { - self.db.last_typing_update(room_id) - } - - /// Returns a new typing EDU. - pub fn typings_all( - &self, - room_id: &RoomId, - ) -> Result> { - let user_ids = self.db.typings_all(room_id)?; - - Ok(SyncEphemeralRoomEvent { - content: ruma::events::typing::TypingEventContent { - user_ids: user_ids.into_iter().collect(), - }, - }) - } - /// Adds a presence event which will be saved until a new event replaces it. /// /// Note: This method takes a RoomId because presence updates are always bound to rooms to diff --git a/src/service/rooms/edus/read_receipt/data.rs b/src/service/rooms/edus/read_receipt/data.rs index 16c14cf3..4befcf2c 100644 --- a/src/service/rooms/edus/read_receipt/data.rs +++ b/src/service/rooms/edus/read_receipt/data.rs @@ -21,71 +21,11 @@ pub trait Data { >; /// Sets a private read marker at `count`. - fn private_read_set( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result<()>; + fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()>; /// Returns the private read marker. fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result>; /// Returns the count of the last typing update in this room. fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result; - - /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is - /// called. - fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - ) -> Result<()>; - - /// Removes a user from typing before the timeout is reached. - fn typing_remove( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result<()>; - - /// Returns the count of the last typing update in this room. - fn last_typing_update( - &self, - room_id: &RoomId, - ) -> Result; - - /// Returns all user ids currently typing. - fn typings_all( - &self, - room_id: &RoomId, - ) -> Result>; - - /// Adds a presence event which will be saved until a new event replaces it. - /// - /// Note: This method takes a RoomId because presence updates are always bound to rooms to - /// make sure users outside these rooms can't see them. 
- fn update_presence( - &self, - user_id: &UserId, - room_id: &RoomId, - presence: PresenceEvent, - ) -> Result<()>; - - /// Resets the presence timeout, so the user will stay in their current presence state. - fn ping_presence(&self, user_id: &UserId) -> Result<()>; - - /// Returns the timestamp of the last presence update of this user in millis since the unix epoch. - fn last_presence_update(&self, user_id: &UserId) -> Result>; - - /// Returns the presence event with correct last_active_ago. - fn get_presence_event(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result>; - - /// Returns the most recent presence updates that happened after the event with id `since`. - fn presence_since( - &self, - room_id: &RoomId, - since: u64, - ) -> Result, PresenceEvent>>; } diff --git a/src/service/rooms/edus/read_receipt/mod.rs b/src/service/rooms/edus/read_receipt/mod.rs index 06adf57e..9cd474fb 100644 --- a/src/service/rooms/edus/read_receipt/mod.rs +++ b/src/service/rooms/edus/read_receipt/mod.rs @@ -36,12 +36,7 @@ impl Service<_> { /// Sets a private read marker at `count`. #[tracing::instrument(skip(self, globals))] - pub fn private_read_set( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result<()> { + pub fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()> { self.db.private_read_set(room_id, user_id, count) } @@ -55,202 +50,4 @@ impl Service<_> { pub fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { self.db.last_privateread_update(user_id, room_id) } - - /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is - /// called. - pub fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - ) -> Result<()> { - self.db.typing_add(user_id, room_id, timeout) - } - - /// Removes a user from typing before the timeout is reached. - pub fn typing_remove( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result<()> { - self.db.typing_remove(user_id, room_id) - } - - /* TODO: Do this in background thread? - /// Makes sure that typing events with old timestamps get removed. - fn typings_maintain( - &self, - room_id: &RoomId, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let current_timestamp = utils::millis_since_unix_epoch(); - - let mut found_outdated = false; - - // Find all outdated edus before inserting a new one - for outdated_edu in self - .typingid_userid - .scan_prefix(prefix) - .map(|(key, _)| { - Ok::<_, Error>(( - key.clone(), - utils::u64_from_bytes( - &key.splitn(2, |&b| b == 0xff).nth(1).ok_or_else(|| { - Error::bad_database("RoomTyping has invalid timestamp or delimiters.") - })?[0..mem::size_of::()], - ) - .map_err(|_| Error::bad_database("RoomTyping has invalid timestamp bytes."))?, - )) - }) - .filter_map(|r| r.ok()) - .take_while(|&(_, timestamp)| timestamp < current_timestamp) - { - // This is an outdated edu (time > timestamp) - self.typingid_userid.remove(&outdated_edu.0)?; - found_outdated = true; - } - - if found_outdated { - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - } - - Ok(()) - } - */ - - /// Returns the count of the last typing update in this room. - #[tracing::instrument(skip(self, globals))] - pub fn last_typing_update( - &self, - room_id: &RoomId, - ) -> Result { - self.db.last_typing_update(room_id) - } - - /// Returns a new typing EDU. 
- pub fn typings_all( - &self, - room_id: &RoomId, - ) -> Result> { - let user_ids = self.db.typings_all(room_id)?; - - Ok(SyncEphemeralRoomEvent { - content: ruma::events::typing::TypingEventContent { - user_ids: user_ids.into_iter().collect(), - }, - }) - } - - /// Adds a presence event which will be saved until a new event replaces it. - /// - /// Note: This method takes a RoomId because presence updates are always bound to rooms to - /// make sure users outside these rooms can't see them. - pub fn update_presence( - &self, - user_id: &UserId, - room_id: &RoomId, - presence: PresenceEvent, - ) -> Result<()> { - self.db.update_presence(user_id, room_id, presence) - } - - /// Resets the presence timeout, so the user will stay in their current presence state. - pub fn ping_presence(&self, user_id: &UserId) -> Result<()> { - self.db.ping_presence(user_id) - } - - pub fn get_last_presence_event( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result> { - let last_update = match self.db.last_presence_update(user_id)? { - Some(last) => last, - None => return Ok(None), - }; - - self.db.get_presence_event(room_id, user_id, last_update) - } - - /* TODO - /// Sets all users to offline who have been quiet for too long. - fn _presence_maintain( - &self, - rooms: &super::Rooms, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let current_timestamp = utils::millis_since_unix_epoch(); - - for (user_id_bytes, last_timestamp) in self - .userid_lastpresenceupdate - .iter() - .filter_map(|(k, bytes)| { - Some(( - k, - utils::u64_from_bytes(&bytes) - .map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) - .ok()?, - )) - }) - .take_while(|(_, timestamp)| current_timestamp.saturating_sub(*timestamp) > 5 * 60_000) - // 5 Minutes - { - // Send new presence events to set the user offline - let count = globals.next_count()?.to_be_bytes(); - let user_id: Box<_> = utils::string_from_bytes(&user_id_bytes) - .map_err(|_| { - Error::bad_database("Invalid UserId bytes in userid_lastpresenceupdate.") - })? - .try_into() - .map_err(|_| Error::bad_database("Invalid UserId in userid_lastpresenceupdate."))?; - for room_id in rooms.rooms_joined(&user_id).filter_map(|r| r.ok()) { - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count); - presence_id.push(0xff); - presence_id.extend_from_slice(&user_id_bytes); - - self.presenceid_presence.insert( - &presence_id, - &serde_json::to_vec(&PresenceEvent { - content: PresenceEventContent { - avatar_url: None, - currently_active: None, - displayname: None, - last_active_ago: Some( - last_timestamp.try_into().expect("time is valid"), - ), - presence: PresenceState::Offline, - status_msg: None, - }, - sender: user_id.to_owned(), - }) - .expect("PresenceEvent can be serialized"), - )?; - } - - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - } - - Ok(()) - }*/ - - /// Returns the most recent presence updates that happened after the event with id `since`. 
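// Illustrative sketch, not from the patch: the `_presence_maintain` TODO above wants to flip
// users to offline once their last presence update is more than five minutes old. The core
// check, with std types and a caller-supplied "now" in milliseconds:
use std::collections::HashMap;

const PRESENCE_IDLE_TIMEOUT_MS: u64 = 5 * 60_000;

/// Returns the users whose last update is older than the timeout and should be set offline.
fn users_to_set_offline(
    last_presence_update: &HashMap<String, u64>,
    now_ms: u64,
) -> Vec<String> {
    last_presence_update
        .iter()
        .filter(|(_, last)| now_ms.saturating_sub(**last) > PRESENCE_IDLE_TIMEOUT_MS)
        .map(|(user_id, _)| user_id.clone())
        .collect()
}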
- #[tracing::instrument(skip(self, since, _rooms, _globals))] - pub fn presence_since( - &self, - room_id: &RoomId, - since: u64, - ) -> Result, PresenceEvent>> { - self.db.presence_since(room_id, since) - } } diff --git a/src/service/rooms/edus/typing/data.rs b/src/service/rooms/edus/typing/data.rs index 16c14cf3..83ff90ea 100644 --- a/src/service/rooms/edus/typing/data.rs +++ b/src/service/rooms/edus/typing/data.rs @@ -1,91 +1,14 @@ pub trait Data { - /// Replaces the previous read receipt. - fn readreceipt_update( - &self, - user_id: &UserId, - room_id: &RoomId, - event: ReceiptEvent, - ) -> Result<()>; - - /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. - fn readreceipts_since( - &self, - room_id: &RoomId, - since: u64, - ) -> impl Iterator< - Item = Result<( - Box, - u64, - Raw, - )>, - >; - - /// Sets a private read marker at `count`. - fn private_read_set( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result<()>; - - /// Returns the private read marker. - fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result>; - - /// Returns the count of the last typing update in this room. - fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result; - /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is /// called. - fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - ) -> Result<()>; + fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()>; /// Removes a user from typing before the timeout is reached. - fn typing_remove( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result<()>; + fn typing_remove(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; /// Returns the count of the last typing update in this room. - fn last_typing_update( - &self, - room_id: &RoomId, - ) -> Result; + fn last_typing_update(&self, room_id: &RoomId) -> Result; /// Returns all user ids currently typing. - fn typings_all( - &self, - room_id: &RoomId, - ) -> Result>; - - /// Adds a presence event which will be saved until a new event replaces it. - /// - /// Note: This method takes a RoomId because presence updates are always bound to rooms to - /// make sure users outside these rooms can't see them. - fn update_presence( - &self, - user_id: &UserId, - room_id: &RoomId, - presence: PresenceEvent, - ) -> Result<()>; - - /// Resets the presence timeout, so the user will stay in their current presence state. - fn ping_presence(&self, user_id: &UserId) -> Result<()>; - - /// Returns the timestamp of the last presence update of this user in millis since the unix epoch. - fn last_presence_update(&self, user_id: &UserId) -> Result>; - - /// Returns the presence event with correct last_active_ago. - fn get_presence_event(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result>; - - /// Returns the most recent presence updates that happened after the event with id `since`. - fn presence_since( - &self, - room_id: &RoomId, - since: u64, - ) -> Result, PresenceEvent>>; + fn typings_all(&self, room_id: &RoomId) -> Result>; } diff --git a/src/service/rooms/edus/typing/mod.rs b/src/service/rooms/edus/typing/mod.rs index 06adf57e..b29c7888 100644 --- a/src/service/rooms/edus/typing/mod.rs +++ b/src/service/rooms/edus/typing/mod.rs @@ -8,71 +8,14 @@ pub struct Service { } impl Service<_> { - /// Replaces the previous read receipt. 
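// Illustrative sketch, not from the patch: a read receipt replaces the previous one for the same
// (room, user) pair, and each write gets a fresh monotonically increasing count so
// `readreceipts_since` can hand back only receipts newer than a sync token. The same idea with
// std types, using a `String` payload where the real code stores the receipt event JSON:
use std::collections::HashMap;

#[derive(Default)]
struct ReadReceipts {
    next_count: u64,
    // (room id, user id) -> (count, receipt payload)
    latest: HashMap<(String, String), (u64, String)>,
}

impl ReadReceipts {
    fn readreceipt_update(&mut self, room_id: &str, user_id: &str, receipt: String) {
        self.next_count += 1;
        self.latest.insert(
            (room_id.to_owned(), user_id.to_owned()),
            (self.next_count, receipt),
        );
    }

    /// Returns (user id, count, receipt) for every receipt in the room newer than `since`.
    fn readreceipts_since(&self, room_id: &str, since: u64) -> Vec<(&str, u64, &str)> {
        self.latest
            .iter()
            .filter(|((room, _), (count, _))| room == room_id && *count > since)
            .map(|((_, user), (count, receipt))| (user.as_str(), *count, receipt.as_str()))
            .collect()
    }
}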
- pub fn readreceipt_update( - &self, - user_id: &UserId, - room_id: &RoomId, - event: ReceiptEvent, - ) -> Result<()> { - self.db.readreceipt_update(user_id, room_id, event); - } - - /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. - #[tracing::instrument(skip(self))] - pub fn readreceipts_since<'a>( - &'a self, - room_id: &RoomId, - since: u64, - ) -> impl Iterator< - Item = Result<( - Box, - u64, - Raw, - )>, - > + 'a { - self.db.readreceipts_since(room_id, since) - } - - /// Sets a private read marker at `count`. - #[tracing::instrument(skip(self, globals))] - pub fn private_read_set( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result<()> { - self.db.private_read_set(room_id, user_id, count) - } - - /// Returns the private read marker. - #[tracing::instrument(skip(self))] - pub fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - self.db.private_read_get(room_id, user_id) - } - - /// Returns the count of the last typing update in this room. - pub fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { - self.db.last_privateread_update(user_id, room_id) - } - /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is /// called. - pub fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - ) -> Result<()> { + pub fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()> { self.db.typing_add(user_id, room_id, timeout) } /// Removes a user from typing before the timeout is reached. - pub fn typing_remove( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result<()> { + pub fn typing_remove(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { self.db.typing_remove(user_id, room_id) } @@ -124,10 +67,7 @@ impl Service<_> { /// Returns the count of the last typing update in this room. #[tracing::instrument(skip(self, globals))] - pub fn last_typing_update( - &self, - room_id: &RoomId, - ) -> Result { + pub fn last_typing_update(&self, room_id: &RoomId) -> Result { self.db.last_typing_update(room_id) } @@ -144,113 +84,4 @@ impl Service<_> { }, }) } - - /// Adds a presence event which will be saved until a new event replaces it. - /// - /// Note: This method takes a RoomId because presence updates are always bound to rooms to - /// make sure users outside these rooms can't see them. - pub fn update_presence( - &self, - user_id: &UserId, - room_id: &RoomId, - presence: PresenceEvent, - ) -> Result<()> { - self.db.update_presence(user_id, room_id, presence) - } - - /// Resets the presence timeout, so the user will stay in their current presence state. - pub fn ping_presence(&self, user_id: &UserId) -> Result<()> { - self.db.ping_presence(user_id) - } - - pub fn get_last_presence_event( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result> { - let last_update = match self.db.last_presence_update(user_id)? { - Some(last) => last, - None => return Ok(None), - }; - - self.db.get_presence_event(room_id, user_id, last_update) - } - - /* TODO - /// Sets all users to offline who have been quiet for too long. 
- fn _presence_maintain( - &self, - rooms: &super::Rooms, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let current_timestamp = utils::millis_since_unix_epoch(); - - for (user_id_bytes, last_timestamp) in self - .userid_lastpresenceupdate - .iter() - .filter_map(|(k, bytes)| { - Some(( - k, - utils::u64_from_bytes(&bytes) - .map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) - .ok()?, - )) - }) - .take_while(|(_, timestamp)| current_timestamp.saturating_sub(*timestamp) > 5 * 60_000) - // 5 Minutes - { - // Send new presence events to set the user offline - let count = globals.next_count()?.to_be_bytes(); - let user_id: Box<_> = utils::string_from_bytes(&user_id_bytes) - .map_err(|_| { - Error::bad_database("Invalid UserId bytes in userid_lastpresenceupdate.") - })? - .try_into() - .map_err(|_| Error::bad_database("Invalid UserId in userid_lastpresenceupdate."))?; - for room_id in rooms.rooms_joined(&user_id).filter_map(|r| r.ok()) { - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count); - presence_id.push(0xff); - presence_id.extend_from_slice(&user_id_bytes); - - self.presenceid_presence.insert( - &presence_id, - &serde_json::to_vec(&PresenceEvent { - content: PresenceEventContent { - avatar_url: None, - currently_active: None, - displayname: None, - last_active_ago: Some( - last_timestamp.try_into().expect("time is valid"), - ), - presence: PresenceState::Offline, - status_msg: None, - }, - sender: user_id.to_owned(), - }) - .expect("PresenceEvent can be serialized"), - )?; - } - - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - } - - Ok(()) - }*/ - - /// Returns the most recent presence updates that happened after the event with id `since`. 
-    #[tracing::instrument(skip(self, since, _rooms, _globals))]
-    pub fn presence_since(
-        &self,
-        room_id: &RoomId,
-        since: u64,
-    ) -> Result, PresenceEvent>> {
-        self.db.presence_since(room_id, since)
-    }
 }

From 84630f90b77d9ff41551c1d4d1eda1f33419b039 Mon Sep 17 00:00:00 2001
From: Nyaaori <+@nyaaori.cat>
Date: Sun, 9 Oct 2022 18:10:37 +0200
Subject: [PATCH 341/445] refactor: prepare src/database/key_value/room/edus/read_receipt.rs from src/database/key_value.rs

---
 .../{key_value.rs => key_value/room/edus/read_receipt.rs}  | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename src/database/{key_value.rs => key_value/room/edus/read_receipt.rs} (100%)

diff --git a/src/database/key_value.rs b/src/database/key_value/room/edus/read_receipt.rs
similarity index 100%
rename from src/database/key_value.rs
rename to src/database/key_value/room/edus/read_receipt.rs

From 8fa990330f0ef518a352e3d8a5977a11f49590a6 Mon Sep 17 00:00:00 2001
From: Nyaaori <+@nyaaori.cat>
Date: Sun, 9 Oct 2022 18:10:37 +0200
Subject: [PATCH 342/445] refactor: prepare src/database/key_value/room/edus/presence.rs from src/database/key_value.rs

---
 src/database/{key_value.rs => key_value/room/edus/presence.rs} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename src/database/{key_value.rs => key_value/room/edus/presence.rs} (100%)

diff --git a/src/database/key_value.rs b/src/database/key_value/room/edus/presence.rs
similarity index 100%
rename from src/database/key_value.rs
rename to src/database/key_value/room/edus/presence.rs

From 0f77ae14e4a3498eede69055a0b2dc0459a5ad11 Mon Sep 17 00:00:00 2001
From: Nyaaori <+@nyaaori.cat>
Date: Sun, 9 Oct 2022 18:10:36 +0200
Subject: [PATCH 343/445] refactor: prepare src/database/key_value/room/directory.rs from src/database/key_value.rs

---
 src/database/{key_value.rs => key_value/room/directory.rs} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename src/database/{key_value.rs => key_value/room/directory.rs} (100%)

diff --git a/src/database/key_value.rs b/src/database/key_value/room/directory.rs
similarity index 100%
rename from src/database/key_value.rs
rename to src/database/key_value/room/directory.rs

From 56cacf6f1ce2c0ee34fb760c5a00450bd17b31b8 Mon Sep 17 00:00:00 2001
From: Nyaaori <+@nyaaori.cat>
Date: Sun, 9 Oct 2022 18:10:36 +0200
Subject: [PATCH 344/445] refactor: prepare src/database/key_value/room/alias.rs from src/database/key_value.rs

---
 src/database/{key_value.rs => key_value/room/alias.rs} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename src/database/{key_value.rs => key_value/room/alias.rs} (100%)

diff --git a/src/database/key_value.rs b/src/database/key_value/room/alias.rs
similarity index 100%
rename from src/database/key_value.rs
rename to src/database/key_value/room/alias.rs

From 2950349adf5d64e874fe8b0856cc17038c16b78e Mon Sep 17 00:00:00 2001
From: Nyaaori <+@nyaaori.cat>
Date: Sun, 9 Oct 2022 18:10:38 +0200
Subject: [PATCH 345/445] refactor: prepare src/database/key_value/room/metadata.rs from src/database/key_value.rs

---
 src/database/{key_value.rs => key_value/room/metadata.rs} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename src/database/{key_value.rs => key_value/room/metadata.rs} (100%)

diff --git a/src/database/key_value.rs b/src/database/key_value/room/metadata.rs
similarity index 100%
rename from src/database/key_value.rs
rename to src/database/key_value/room/metadata.rs

From cd3a16381629e20b072aadf090d0ca9c1d42b5f9 Mon Sep 17 00:00:00 2001
From: Nyaaori <+@nyaaori.cat>
Date: Sun, 9 Oct 2022 18:10:38 +0200
Subject: [PATCH 346/445] refactor: prepare src/database/key_value/room/lazy_load.rs from src/database/key_value.rs

---
 src/database/{key_value.rs => key_value/room/lazy_load.rs} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename src/database/{key_value.rs => key_value/room/lazy_load.rs} (100%)

diff --git a/src/database/key_value.rs b/src/database/key_value/room/lazy_load.rs
similarity index 100%
rename from src/database/key_value.rs
rename to src/database/key_value/room/lazy_load.rs

From 0213a32e6a61685319322e06b2d86c09a81ff225 Mon Sep 17 00:00:00 2001
From: Nyaaori <+@nyaaori.cat>
Date: Sun, 9 Oct 2022 18:10:37 +0200
Subject: [PATCH 347/445] refactor: prepare src/database/key_value/room/edus/typing.rs from src/database/key_value.rs

---
 src/database/{key_value.rs => key_value/room/edus/typing.rs} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename src/database/{key_value.rs => key_value/room/edus/typing.rs} (100%)

diff --git a/src/database/key_value.rs b/src/database/key_value/room/edus/typing.rs
similarity index 100%
rename from src/database/key_value.rs
rename to src/database/key_value/room/edus/typing.rs

From 332e7c9dba5827a70959d3ad42e0e0a731474a83 Mon Sep 17 00:00:00 2001
From: Nyaaori <+@nyaaori.cat>
Date: Sun, 9 Oct 2022 18:10:39 +0200
Subject: [PATCH 348/445] refactor: prepare src/database/key_value/room/state.rs from src/database/key_value.rs

---
 src/database/{key_value.rs => key_value/room/state.rs} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename src/database/{key_value.rs => key_value/room/state.rs} (100%)

diff --git a/src/database/key_value.rs b/src/database/key_value/room/state.rs
similarity index 100%
rename from src/database/key_value.rs
rename to src/database/key_value/room/state.rs

From ea2dcf4ff09108d9191ffe76ec52cf47a30bfd55 Mon Sep 17 00:00:00 2001
From: Nyaaori <+@nyaaori.cat>
Date: Sun, 9 Oct 2022 18:10:38 +0200
Subject: [PATCH 349/445] refactor: prepare src/database/key_value/room/pdu_metadata.rs from src/database/key_value.rs

---
 src/database/{key_value.rs => key_value/room/pdu_metadata.rs} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename src/database/{key_value.rs => key_value/room/pdu_metadata.rs} (100%)

diff --git a/src/database/key_value.rs b/src/database/key_value/room/pdu_metadata.rs
similarity index 100%
rename from src/database/key_value.rs
rename to src/database/key_value/room/pdu_metadata.rs

From 158de9ca08937781d2feee89ff460e59204dee4f Mon Sep 17 00:00:00 2001
From: Nyaaori <+@nyaaori.cat>
Date: Sun, 9 Oct 2022 18:10:38 +0200
Subject: [PATCH 350/445] refactor: prepare src/database/key_value/room/outlier.rs from src/database/key_value.rs

---
 src/database/{key_value.rs => key_value/room/outlier.rs} | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 rename src/database/{key_value.rs => key_value/room/outlier.rs} (100%)

diff --git a/src/database/key_value.rs b/src/database/key_value/room/outlier.rs
similarity index 100%
rename from src/database/key_value.rs
rename to src/database/key_value/room/outlier.rs

From 03e6e43ecd00e739d85f99ebd1bfe289e2bbecb3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Timo=20K=C3=B6sters?=
Date: Sun, 10 Jul 2022 16:03:40 +0200
Subject: [PATCH 351/445] refactor: split up database/key_value.rs

---
 src/database/key_value/mod.rs                 |  13 +
 src/database/key_value/room.rs                |   1 +
 src/database/key_value/room/alias.rs          | 588 ----------------
 src/database/key_value/room/directory.rs      | 630 -----------------
 src/database/key_value/room/edus/presence.rs  | 530 ---------------
 .../key_value/room/edus/read_receipt.rs       | 516
-------------- src/database/key_value/room/edus/typing.rs | 560 --------------- src/database/key_value/room/lazy_load.rs | 586 ---------------- src/database/key_value/room/metadata.rs | 638 ------------------ src/database/key_value/room/mod.rs | 17 + src/database/key_value/room/outlier.rs | 630 ----------------- src/database/key_value/room/pdu_metadata.rs | 627 ----------------- src/database/key_value/room/state.rs | 592 ---------------- 13 files changed, 31 insertions(+), 5897 deletions(-) create mode 100644 src/database/key_value/mod.rs create mode 100644 src/database/key_value/room.rs create mode 100644 src/database/key_value/room/mod.rs diff --git a/src/database/key_value/mod.rs b/src/database/key_value/mod.rs new file mode 100644 index 00000000..0c09c17e --- /dev/null +++ b/src/database/key_value/mod.rs @@ -0,0 +1,13 @@ +mod account_data; +mod admin; +mod appservice; +mod globals; +mod key_backups; +mod media; +mod pdu; +mod pusher; +mod rooms; +mod sending; +mod transaction_ids; +mod uiaa; +mod users; diff --git a/src/database/key_value/room.rs b/src/database/key_value/room.rs new file mode 100644 index 00000000..8bd6648e --- /dev/null +++ b/src/database/key_value/room.rs @@ -0,0 +1 @@ +asdf diff --git a/src/database/key_value/room/alias.rs b/src/database/key_value/room/alias.rs index 0be13115..b00eb3b1 100644 --- a/src/database/key_value/room/alias.rs +++ b/src/database/key_value/room/alias.rs @@ -1,69 +1,3 @@ -use crate::service; - -impl service::room::state::Data for KeyValueDatabase { - fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - fn set_room_state(&self, room_id: &RoomId, new_shortstatehash: u64 - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - Ok(()) - } - - fn set_event_state(&self) -> Result<()> { - db.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - Ok(()) - } - - fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - fn set_forward_extremities( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - -} - impl service::room::alias::Data for KeyValueDatabase { fn set_alias( &self, @@ -130,525 +64,3 @@ impl service::room::alias::Data for KeyValueDatabase { }) } } - -impl service::room::directory::Data for KeyValueDatabase { - fn set_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } - - fn set_not_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.remove(room_id.as_bytes())?; - } - - fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } -} - -impl service::room::edus::read_receipt::Data for KeyValueDatabase { - fn readreceipt_update( - &self, - user_id: &UserId, - room_id: &RoomId, - event: ReceiptEvent, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - // Remove old entry - if let Some((old, _)) = self - .readreceiptid_readreceipt - .iter_from(&last_possible_key, true) - .take_while(|(key, _)| key.starts_with(&prefix)) - .find(|(key, _)| { - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element") - == user_id.as_bytes() - }) - { - // This is the old room_latest - self.readreceiptid_readreceipt.remove(&old)?; - } - - let mut room_latest_id = prefix; - room_latest_id.extend_from_slice(&globals.next_count()?.to_be_bytes()); - room_latest_id.push(0xff); - room_latest_id.extend_from_slice(user_id.as_bytes()); - - self.readreceiptid_readreceipt.insert( - &room_latest_id, - &serde_json::to_vec(&event).expect("EduEvent::to_string always works"), - )?; - - Ok(()) - } - - pub fn readreceipts_since<'a>( - &'a self, - room_id: &RoomId, - since: u64, - ) -> impl Iterator< - Item=Result<( - Box, - u64, - Raw, - )>, - > + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - let prefix2 = prefix.clone(); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - - self.readreceiptid_readreceipt - .iter_from(&first_possible_edu, false) - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(move |(k, v)| { - let count = - utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::()]) - .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; - let user_id = UserId::parse( - utils::string_from_bytes(&k[prefix.len() + 
mem::size_of::() + 1..]) - .map_err(|_| { - Error::bad_database("Invalid readreceiptid userid bytes in db.") - })?, - ) - .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?; - - let mut json = serde_json::from_slice::(&v).map_err(|_| { - Error::bad_database("Read receipt in roomlatestid_roomlatest is invalid json.") - })?; - json.remove("room_id"); - - Ok(( - user_id, - count, - Raw::from_json( - serde_json::value::to_raw_value(&json).expect("json is valid raw value"), - ), - )) - }) - } - - fn private_read_set( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result<()> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - .insert(&key, &count.to_be_bytes())?; - - self.roomuserid_lastprivatereadupdate - .insert(&key, &globals.next_count()?.to_be_bytes())?; - } - - fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - .get(&key)? - .map_or(Ok(None), |v| { - Ok(Some(utils::u64_from_bytes(&v).map_err(|_| { - Error::bad_database("Invalid private read marker bytes") - })?)) - }) - } - - fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - Ok(self - .roomuserid_lastprivatereadupdate - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } -} - -impl service::room::edus::typing::Data for KeyValueDatabase { - fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let count = globals.next_count()?.to_be_bytes(); - - let mut room_typing_id = prefix; - room_typing_id.extend_from_slice(&timeout.to_be_bytes()); - room_typing_id.push(0xff); - room_typing_id.extend_from_slice(&count); - - self.typingid_userid - .insert(&room_typing_id, &*user_id.as_bytes())?; - - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &count)?; - - Ok(()) - } - - fn typing_remove( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let user_id = user_id.to_string(); - - let mut found_outdated = false; - - // Maybe there are multiple ones from calling roomtyping_add multiple times - for outdated_edu in self - .typingid_userid - .scan_prefix(prefix) - .filter(|(_, v)| &**v == user_id.as_bytes()) - { - self.typingid_userid.remove(&outdated_edu.0)?; - found_outdated = true; - } - - if found_outdated { - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - } - - Ok(()) - } - - fn last_typing_update( - &self, - room_id: &RoomId, - ) -> Result { - Ok(self - .roomid_lasttypingupdate - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") - }) - }) - .transpose()? 
- .unwrap_or(0)) - } - - fn typings_all( - &self, - room_id: &RoomId, - ) -> Result> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut user_ids = HashSet::new(); - - for (_, user_id) in self.typingid_userid.scan_prefix(prefix) { - let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| { - Error::bad_database("User ID in typingid_userid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?; - - user_ids.insert(user_id); - } - - Ok(user_ids) - } -} - -impl service::room::edus::presence::Data for KeyValueDatabase { - fn update_presence( - &self, - user_id: &UserId, - room_id: &RoomId, - presence: PresenceEvent, - ) -> Result<()> { - // TODO: Remove old entry? Or maybe just wipe completely from time to time? - - let count = globals.next_count()?.to_be_bytes(); - - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count); - presence_id.push(0xff); - presence_id.extend_from_slice(presence.sender.as_bytes()); - - self.presenceid_presence.insert( - &presence_id, - &serde_json::to_vec(&presence).expect("PresenceEvent can be serialized"), - )?; - - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn ping_presence(&self, user_id: &UserId) -> Result<()> { - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn last_presence_update(&self, user_id: &UserId) -> Result> { - self.userid_lastpresenceupdate - .get(user_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) - }) - .transpose() - } - - fn get_presence_event( - &self, - user_id: &UserId, - room_id: &RoomId, - count: u64, - ) -> Result> { - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count.to_be_bytes()); - presence_id.push(0xff); - presence_id.extend_from_slice(user_id.as_bytes()); - - self.presenceid_presence - .get(&presence_id)? 
- .map(|value| parse_presence_event(&value)) - .transpose() - } - - fn presence_since( - &self, - room_id: &RoomId, - since: u64, - ) -> Result, PresenceEvent>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - let mut hashmap = HashMap::new(); - - for (key, value) in self - .presenceid_presence - .iter_from(&*first_possible_edu, false) - .take_while(|(key, _)| key.starts_with(&prefix)) - { - let user_id = UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("Invalid UserId bytes in presenceid_presence."))?, - ) - .map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?; - - let presence = parse_presence_event(&value)?; - - hashmap.insert(user_id, presence); - } - - Ok(hashmap) - } -} - -fn parse_presence_event(bytes: &[u8]) -> Result { - let mut presence: PresenceEvent = serde_json::from_slice(bytes) - .map_err(|_| Error::bad_database("Invalid presence event in db."))?; - - let current_timestamp: UInt = utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"); - - if presence.content.presence == PresenceState::Online { - // Don't set last_active_ago when the user is online - presence.content.last_active_ago = None; - } else { - // Convert from timestamp to duration - presence.content.last_active_ago = presence - .content - .last_active_ago - .map(|timestamp| current_timestamp - timestamp); - } -} - -impl service::room::lazy_load::Data for KeyValueDatabase { - fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } -} - -impl service::room::metadata::Data for KeyValueDatabase { - fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. 
- Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } -} - -impl service::room::outlier::Data for KeyValueDatabase { - fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn get_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } -} - -impl service::room::pdu_metadata::Data for KeyValueDatabase { - fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } -} diff --git a/src/database/key_value/room/directory.rs b/src/database/key_value/room/directory.rs index 0be13115..f42de45e 100644 --- a/src/database/key_value/room/directory.rs +++ b/src/database/key_value/room/directory.rs @@ -1,136 +1,3 @@ -use crate::service; - -impl service::room::state::Data for KeyValueDatabase { - fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - fn set_room_state(&self, room_id: &RoomId, new_shortstatehash: u64 - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - Ok(()) - } - - fn set_event_state(&self) -> Result<()> { - db.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - Ok(()) - } - - fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - fn set_forward_extremities( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - -} - -impl service::room::alias::Data for KeyValueDatabase { - fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId> - ) -> Result<()> { - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - Ok(()) - } - - fn remove_alias( - &self, - alias: &RoomAliasId, - ) -> Result<()> { - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - Ok(()) - } - - fn resolve_local_alias( - &self, - alias: &RoomAliasId - ) -> Result<()> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - fn local_aliases_for_room( - &self, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? 
- .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } -} - impl service::room::directory::Data for KeyValueDatabase { fn set_public(&self, room_id: &RoomId) -> Result<()> { self.publicroomids.insert(room_id.as_bytes(), &[])?; @@ -155,500 +22,3 @@ impl service::room::directory::Data for KeyValueDatabase { }) } } - -impl service::room::edus::read_receipt::Data for KeyValueDatabase { - fn readreceipt_update( - &self, - user_id: &UserId, - room_id: &RoomId, - event: ReceiptEvent, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - // Remove old entry - if let Some((old, _)) = self - .readreceiptid_readreceipt - .iter_from(&last_possible_key, true) - .take_while(|(key, _)| key.starts_with(&prefix)) - .find(|(key, _)| { - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element") - == user_id.as_bytes() - }) - { - // This is the old room_latest - self.readreceiptid_readreceipt.remove(&old)?; - } - - let mut room_latest_id = prefix; - room_latest_id.extend_from_slice(&globals.next_count()?.to_be_bytes()); - room_latest_id.push(0xff); - room_latest_id.extend_from_slice(user_id.as_bytes()); - - self.readreceiptid_readreceipt.insert( - &room_latest_id, - &serde_json::to_vec(&event).expect("EduEvent::to_string always works"), - )?; - - Ok(()) - } - - pub fn readreceipts_since<'a>( - &'a self, - room_id: &RoomId, - since: u64, - ) -> impl Iterator< - Item=Result<( - Box, - u64, - Raw, - )>, - > + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - let prefix2 = prefix.clone(); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - - self.readreceiptid_readreceipt - .iter_from(&first_possible_edu, false) - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(move |(k, v)| { - let count = - utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::()]) - .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; - let user_id = UserId::parse( - utils::string_from_bytes(&k[prefix.len() + mem::size_of::() + 1..]) - .map_err(|_| { - Error::bad_database("Invalid readreceiptid userid bytes in db.") - })?, - ) - .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?; - - let mut json = serde_json::from_slice::(&v).map_err(|_| { - Error::bad_database("Read receipt in roomlatestid_roomlatest is invalid json.") - })?; - json.remove("room_id"); - - Ok(( - user_id, - count, - Raw::from_json( - serde_json::value::to_raw_value(&json).expect("json is valid raw value"), - ), - )) - }) - } - - fn private_read_set( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result<()> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - .insert(&key, &count.to_be_bytes())?; - - self.roomuserid_lastprivatereadupdate - .insert(&key, &globals.next_count()?.to_be_bytes())?; - } - - fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - .get(&key)? 
- .map_or(Ok(None), |v| { - Ok(Some(utils::u64_from_bytes(&v).map_err(|_| { - Error::bad_database("Invalid private read marker bytes") - })?)) - }) - } - - fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - Ok(self - .roomuserid_lastprivatereadupdate - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } -} - -impl service::room::edus::typing::Data for KeyValueDatabase { - fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let count = globals.next_count()?.to_be_bytes(); - - let mut room_typing_id = prefix; - room_typing_id.extend_from_slice(&timeout.to_be_bytes()); - room_typing_id.push(0xff); - room_typing_id.extend_from_slice(&count); - - self.typingid_userid - .insert(&room_typing_id, &*user_id.as_bytes())?; - - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &count)?; - - Ok(()) - } - - fn typing_remove( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let user_id = user_id.to_string(); - - let mut found_outdated = false; - - // Maybe there are multiple ones from calling roomtyping_add multiple times - for outdated_edu in self - .typingid_userid - .scan_prefix(prefix) - .filter(|(_, v)| &**v == user_id.as_bytes()) - { - self.typingid_userid.remove(&outdated_edu.0)?; - found_outdated = true; - } - - if found_outdated { - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - } - - Ok(()) - } - - fn last_typing_update( - &self, - room_id: &RoomId, - ) -> Result { - Ok(self - .roomid_lasttypingupdate - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } - - fn typings_all( - &self, - room_id: &RoomId, - ) -> Result> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut user_ids = HashSet::new(); - - for (_, user_id) in self.typingid_userid.scan_prefix(prefix) { - let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| { - Error::bad_database("User ID in typingid_userid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?; - - user_ids.insert(user_id); - } - - Ok(user_ids) - } -} - -impl service::room::edus::presence::Data for KeyValueDatabase { - fn update_presence( - &self, - user_id: &UserId, - room_id: &RoomId, - presence: PresenceEvent, - ) -> Result<()> { - // TODO: Remove old entry? Or maybe just wipe completely from time to time? 
- - let count = globals.next_count()?.to_be_bytes(); - - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count); - presence_id.push(0xff); - presence_id.extend_from_slice(presence.sender.as_bytes()); - - self.presenceid_presence.insert( - &presence_id, - &serde_json::to_vec(&presence).expect("PresenceEvent can be serialized"), - )?; - - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn ping_presence(&self, user_id: &UserId) -> Result<()> { - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn last_presence_update(&self, user_id: &UserId) -> Result> { - self.userid_lastpresenceupdate - .get(user_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) - }) - .transpose() - } - - fn get_presence_event( - &self, - user_id: &UserId, - room_id: &RoomId, - count: u64, - ) -> Result> { - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count.to_be_bytes()); - presence_id.push(0xff); - presence_id.extend_from_slice(user_id.as_bytes()); - - self.presenceid_presence - .get(&presence_id)? - .map(|value| parse_presence_event(&value)) - .transpose() - } - - fn presence_since( - &self, - room_id: &RoomId, - since: u64, - ) -> Result, PresenceEvent>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - let mut hashmap = HashMap::new(); - - for (key, value) in self - .presenceid_presence - .iter_from(&*first_possible_edu, false) - .take_while(|(key, _)| key.starts_with(&prefix)) - { - let user_id = UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("Invalid UserId bytes in presenceid_presence."))?, - ) - .map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?; - - let presence = parse_presence_event(&value)?; - - hashmap.insert(user_id, presence); - } - - Ok(hashmap) - } -} - -fn parse_presence_event(bytes: &[u8]) -> Result { - let mut presence: PresenceEvent = serde_json::from_slice(bytes) - .map_err(|_| Error::bad_database("Invalid presence event in db."))?; - - let current_timestamp: UInt = utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"); - - if presence.content.presence == PresenceState::Online { - // Don't set last_active_ago when the user is online - presence.content.last_active_ago = None; - } else { - // Convert from timestamp to duration - presence.content.last_active_ago = presence - .content - .last_active_ago - .map(|timestamp| current_timestamp - timestamp); - } -} - -impl service::room::lazy_load::Data for KeyValueDatabase { - fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - fn 
lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } -} - -impl service::room::metadata::Data for KeyValueDatabase { - fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } -} - -impl service::room::outlier::Data for KeyValueDatabase { - fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn get_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? 
- .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } -} - -impl service::room::pdu_metadata::Data for KeyValueDatabase { - fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } -} diff --git a/src/database/key_value/room/edus/presence.rs b/src/database/key_value/room/edus/presence.rs index 0be13115..61bd9d60 100644 --- a/src/database/key_value/room/edus/presence.rs +++ b/src/database/key_value/room/edus/presence.rs @@ -1,395 +1,3 @@ -use crate::service; - -impl service::room::state::Data for KeyValueDatabase { - fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - fn set_room_state(&self, room_id: &RoomId, new_shortstatehash: u64 - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - Ok(()) - } - - fn set_event_state(&self) -> Result<()> { - db.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - Ok(()) - } - - fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - fn set_forward_extremities( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - -} - -impl service::room::alias::Data for KeyValueDatabase { - fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId> - ) -> Result<()> { - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - Ok(()) - } - - fn remove_alias( - &self, - alias: &RoomAliasId, - ) -> Result<()> { - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - Ok(()) - } - - fn resolve_local_alias( - &self, - alias: &RoomAliasId - ) -> Result<()> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - fn local_aliases_for_room( - &self, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? 
- .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } -} - -impl service::room::directory::Data for KeyValueDatabase { - fn set_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } - - fn set_not_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.remove(room_id.as_bytes())?; - } - - fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } -} - -impl service::room::edus::read_receipt::Data for KeyValueDatabase { - fn readreceipt_update( - &self, - user_id: &UserId, - room_id: &RoomId, - event: ReceiptEvent, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - // Remove old entry - if let Some((old, _)) = self - .readreceiptid_readreceipt - .iter_from(&last_possible_key, true) - .take_while(|(key, _)| key.starts_with(&prefix)) - .find(|(key, _)| { - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element") - == user_id.as_bytes() - }) - { - // This is the old room_latest - self.readreceiptid_readreceipt.remove(&old)?; - } - - let mut room_latest_id = prefix; - room_latest_id.extend_from_slice(&globals.next_count()?.to_be_bytes()); - room_latest_id.push(0xff); - room_latest_id.extend_from_slice(user_id.as_bytes()); - - self.readreceiptid_readreceipt.insert( - &room_latest_id, - &serde_json::to_vec(&event).expect("EduEvent::to_string always works"), - )?; - - Ok(()) - } - - pub fn readreceipts_since<'a>( - &'a self, - room_id: &RoomId, - since: u64, - ) -> impl Iterator< - Item=Result<( - Box, - u64, - Raw, - )>, - > + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - let prefix2 = prefix.clone(); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - - self.readreceiptid_readreceipt - .iter_from(&first_possible_edu, false) - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(move |(k, v)| { - let count = - utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::()]) - .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; - let user_id = UserId::parse( - utils::string_from_bytes(&k[prefix.len() + mem::size_of::() + 1..]) - .map_err(|_| { - Error::bad_database("Invalid readreceiptid userid bytes in db.") - })?, - ) - .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?; - - let mut json = serde_json::from_slice::(&v).map_err(|_| { - Error::bad_database("Read receipt in roomlatestid_roomlatest is invalid json.") - })?; - json.remove("room_id"); - - Ok(( - user_id, - count, - Raw::from_json( - serde_json::value::to_raw_value(&json).expect("json is valid raw value"), - ), - )) - }) - } - - fn private_read_set( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result<()> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - 
.insert(&key, &count.to_be_bytes())?; - - self.roomuserid_lastprivatereadupdate - .insert(&key, &globals.next_count()?.to_be_bytes())?; - } - - fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - .get(&key)? - .map_or(Ok(None), |v| { - Ok(Some(utils::u64_from_bytes(&v).map_err(|_| { - Error::bad_database("Invalid private read marker bytes") - })?)) - }) - } - - fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - Ok(self - .roomuserid_lastprivatereadupdate - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } -} - -impl service::room::edus::typing::Data for KeyValueDatabase { - fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let count = globals.next_count()?.to_be_bytes(); - - let mut room_typing_id = prefix; - room_typing_id.extend_from_slice(&timeout.to_be_bytes()); - room_typing_id.push(0xff); - room_typing_id.extend_from_slice(&count); - - self.typingid_userid - .insert(&room_typing_id, &*user_id.as_bytes())?; - - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &count)?; - - Ok(()) - } - - fn typing_remove( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let user_id = user_id.to_string(); - - let mut found_outdated = false; - - // Maybe there are multiple ones from calling roomtyping_add multiple times - for outdated_edu in self - .typingid_userid - .scan_prefix(prefix) - .filter(|(_, v)| &**v == user_id.as_bytes()) - { - self.typingid_userid.remove(&outdated_edu.0)?; - found_outdated = true; - } - - if found_outdated { - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - } - - Ok(()) - } - - fn last_typing_update( - &self, - room_id: &RoomId, - ) -> Result { - Ok(self - .roomid_lasttypingupdate - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } - - fn typings_all( - &self, - room_id: &RoomId, - ) -> Result> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut user_ids = HashSet::new(); - - for (_, user_id) in self.typingid_userid.scan_prefix(prefix) { - let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| { - Error::bad_database("User ID in typingid_userid is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?; - - user_ids.insert(user_id); - } - - Ok(user_ids) - } -} - impl service::room::edus::presence::Data for KeyValueDatabase { fn update_presence( &self, @@ -514,141 +122,3 @@ fn parse_presence_event(bytes: &[u8]) -> Result { } } -impl service::room::lazy_load::Data for KeyValueDatabase { - fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } -} - -impl service::room::metadata::Data for KeyValueDatabase { - fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } -} - -impl service::room::outlier::Data for KeyValueDatabase { - fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn get_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? 
- .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } -} - -impl service::room::pdu_metadata::Data for KeyValueDatabase { - fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } -} diff --git a/src/database/key_value/room/edus/read_receipt.rs b/src/database/key_value/room/edus/read_receipt.rs index 0be13115..556e697f 100644 --- a/src/database/key_value/room/edus/read_receipt.rs +++ b/src/database/key_value/room/edus/read_receipt.rs @@ -1,161 +1,3 @@ -use crate::service; - -impl service::room::state::Data for KeyValueDatabase { - fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - fn set_room_state(&self, room_id: &RoomId, new_shortstatehash: u64 - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - Ok(()) - } - - fn set_event_state(&self) -> Result<()> { - db.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - Ok(()) - } - - fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - fn set_forward_extremities( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - -} - -impl service::room::alias::Data for KeyValueDatabase { - fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId> - ) -> Result<()> { - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - Ok(()) - } - - fn remove_alias( - &self, - alias: &RoomAliasId, - ) -> Result<()> { - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - Ok(()) - } - - fn resolve_local_alias( - &self, - alias: &RoomAliasId - ) -> Result<()> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - fn local_aliases_for_room( - &self, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? 
- .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } -} - -impl service::room::directory::Data for KeyValueDatabase { - fn set_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } - - fn set_not_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.remove(room_id.as_bytes())?; - } - - fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } -} - impl service::room::edus::read_receipt::Data for KeyValueDatabase { fn readreceipt_update( &self, @@ -294,361 +136,3 @@ impl service::room::edus::read_receipt::Data for KeyValueDatabase { .unwrap_or(0)) } } - -impl service::room::edus::typing::Data for KeyValueDatabase { - fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let count = globals.next_count()?.to_be_bytes(); - - let mut room_typing_id = prefix; - room_typing_id.extend_from_slice(&timeout.to_be_bytes()); - room_typing_id.push(0xff); - room_typing_id.extend_from_slice(&count); - - self.typingid_userid - .insert(&room_typing_id, &*user_id.as_bytes())?; - - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &count)?; - - Ok(()) - } - - fn typing_remove( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let user_id = user_id.to_string(); - - let mut found_outdated = false; - - // Maybe there are multiple ones from calling roomtyping_add multiple times - for outdated_edu in self - .typingid_userid - .scan_prefix(prefix) - .filter(|(_, v)| &**v == user_id.as_bytes()) - { - self.typingid_userid.remove(&outdated_edu.0)?; - found_outdated = true; - } - - if found_outdated { - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - } - - Ok(()) - } - - fn last_typing_update( - &self, - room_id: &RoomId, - ) -> Result { - Ok(self - .roomid_lasttypingupdate - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } - - fn typings_all( - &self, - room_id: &RoomId, - ) -> Result> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut user_ids = HashSet::new(); - - for (_, user_id) in self.typingid_userid.scan_prefix(prefix) { - let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| { - Error::bad_database("User ID in typingid_userid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?; - - user_ids.insert(user_id); - } - - Ok(user_ids) - } -} - -impl service::room::edus::presence::Data for KeyValueDatabase { - fn update_presence( - &self, - user_id: &UserId, - room_id: &RoomId, - presence: PresenceEvent, - ) -> Result<()> { - // TODO: Remove old entry? Or maybe just wipe completely from time to time? 
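The trees touched by these hunks (presenceid_presence, readreceiptid_readreceipt, typingid_userid, and similar) all key their entries as 0xff-delimited byte strings with a big-endian counter, so that byte-wise key order matches insertion order. A minimal sketch of that layout follows, with hypothetical helper names and no dependency on the crate; it is not code from this patch.

use std::convert::TryInto;
use std::mem::size_of;

// Sketch only: composite key = room_id ++ 0xff ++ big-endian count ++ 0xff ++ user_id.
// `build_key` / `parse_key` are hypothetical names, not part of this patch.
fn build_key(room_id: &str, count: u64, user_id: &str) -> Vec<u8> {
    let mut key = room_id.as_bytes().to_vec();
    key.push(0xff); // 0xff never occurs in valid UTF-8, so it is a safe separator
    key.extend_from_slice(&count.to_be_bytes()); // big-endian: byte order == numeric order
    key.push(0xff);
    key.extend_from_slice(user_id.as_bytes());
    key
}

// The count is read back by fixed offset rather than by splitting on 0xff,
// because a big-endian u64 may itself contain 0xff bytes.
fn parse_key<'a>(key: &'a [u8], room_id: &str) -> Option<(u64, &'a [u8])> {
    let count_start = room_id.len() + 1; // skip room_id ++ 0xff
    let count_end = count_start + size_of::<u64>();
    let count = u64::from_be_bytes(key.get(count_start..count_end)?.try_into().ok()?);
    let user_id = key.get(count_end + 1..)?; // skip the second 0xff
    Some((count, user_id))
}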
- - let count = globals.next_count()?.to_be_bytes(); - - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count); - presence_id.push(0xff); - presence_id.extend_from_slice(presence.sender.as_bytes()); - - self.presenceid_presence.insert( - &presence_id, - &serde_json::to_vec(&presence).expect("PresenceEvent can be serialized"), - )?; - - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn ping_presence(&self, user_id: &UserId) -> Result<()> { - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn last_presence_update(&self, user_id: &UserId) -> Result> { - self.userid_lastpresenceupdate - .get(user_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) - }) - .transpose() - } - - fn get_presence_event( - &self, - user_id: &UserId, - room_id: &RoomId, - count: u64, - ) -> Result> { - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count.to_be_bytes()); - presence_id.push(0xff); - presence_id.extend_from_slice(user_id.as_bytes()); - - self.presenceid_presence - .get(&presence_id)? - .map(|value| parse_presence_event(&value)) - .transpose() - } - - fn presence_since( - &self, - room_id: &RoomId, - since: u64, - ) -> Result, PresenceEvent>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - let mut hashmap = HashMap::new(); - - for (key, value) in self - .presenceid_presence - .iter_from(&*first_possible_edu, false) - .take_while(|(key, _)| key.starts_with(&prefix)) - { - let user_id = UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("Invalid UserId bytes in presenceid_presence."))?, - ) - .map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?; - - let presence = parse_presence_event(&value)?; - - hashmap.insert(user_id, presence); - } - - Ok(hashmap) - } -} - -fn parse_presence_event(bytes: &[u8]) -> Result { - let mut presence: PresenceEvent = serde_json::from_slice(bytes) - .map_err(|_| Error::bad_database("Invalid presence event in db."))?; - - let current_timestamp: UInt = utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"); - - if presence.content.presence == PresenceState::Online { - // Don't set last_active_ago when the user is online - presence.content.last_active_ago = None; - } else { - // Convert from timestamp to duration - presence.content.last_active_ago = presence - .content - .last_active_ago - .map(|timestamp| current_timestamp - timestamp); - } -} - -impl service::room::lazy_load::Data for KeyValueDatabase { - fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - fn 
lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } -} - -impl service::room::metadata::Data for KeyValueDatabase { - fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } -} - -impl service::room::outlier::Data for KeyValueDatabase { - fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn get_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? 
- .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } -} - -impl service::room::pdu_metadata::Data for KeyValueDatabase { - fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } -} diff --git a/src/database/key_value/room/edus/typing.rs b/src/database/key_value/room/edus/typing.rs index 0be13115..8cfb432d 100644 --- a/src/database/key_value/room/edus/typing.rs +++ b/src/database/key_value/room/edus/typing.rs @@ -1,300 +1,3 @@ -use crate::service; - -impl service::room::state::Data for KeyValueDatabase { - fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - fn set_room_state(&self, room_id: &RoomId, new_shortstatehash: u64 - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - Ok(()) - } - - fn set_event_state(&self) -> Result<()> { - db.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - Ok(()) - } - - fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - fn set_forward_extremities( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - -} - -impl service::room::alias::Data for KeyValueDatabase { - fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId> - ) -> Result<()> { - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - Ok(()) - } - - fn remove_alias( - &self, - alias: &RoomAliasId, - ) -> Result<()> { - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - Ok(()) - } - - fn resolve_local_alias( - &self, - alias: &RoomAliasId - ) -> Result<()> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - fn local_aliases_for_room( - &self, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? 
- .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } -} - -impl service::room::directory::Data for KeyValueDatabase { - fn set_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } - - fn set_not_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.remove(room_id.as_bytes())?; - } - - fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } -} - -impl service::room::edus::read_receipt::Data for KeyValueDatabase { - fn readreceipt_update( - &self, - user_id: &UserId, - room_id: &RoomId, - event: ReceiptEvent, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - // Remove old entry - if let Some((old, _)) = self - .readreceiptid_readreceipt - .iter_from(&last_possible_key, true) - .take_while(|(key, _)| key.starts_with(&prefix)) - .find(|(key, _)| { - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element") - == user_id.as_bytes() - }) - { - // This is the old room_latest - self.readreceiptid_readreceipt.remove(&old)?; - } - - let mut room_latest_id = prefix; - room_latest_id.extend_from_slice(&globals.next_count()?.to_be_bytes()); - room_latest_id.push(0xff); - room_latest_id.extend_from_slice(user_id.as_bytes()); - - self.readreceiptid_readreceipt.insert( - &room_latest_id, - &serde_json::to_vec(&event).expect("EduEvent::to_string always works"), - )?; - - Ok(()) - } - - pub fn readreceipts_since<'a>( - &'a self, - room_id: &RoomId, - since: u64, - ) -> impl Iterator< - Item=Result<( - Box, - u64, - Raw, - )>, - > + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - let prefix2 = prefix.clone(); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - - self.readreceiptid_readreceipt - .iter_from(&first_possible_edu, false) - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(move |(k, v)| { - let count = - utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::()]) - .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; - let user_id = UserId::parse( - utils::string_from_bytes(&k[prefix.len() + mem::size_of::() + 1..]) - .map_err(|_| { - Error::bad_database("Invalid readreceiptid userid bytes in db.") - })?, - ) - .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?; - - let mut json = serde_json::from_slice::(&v).map_err(|_| { - Error::bad_database("Read receipt in roomlatestid_roomlatest is invalid json.") - })?; - json.remove("room_id"); - - Ok(( - user_id, - count, - Raw::from_json( - serde_json::value::to_raw_value(&json).expect("json is valid raw value"), - ), - )) - }) - } - - fn private_read_set( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result<()> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - 
.insert(&key, &count.to_be_bytes())?; - - self.roomuserid_lastprivatereadupdate - .insert(&key, &globals.next_count()?.to_be_bytes())?; - } - - fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - .get(&key)? - .map_or(Ok(None), |v| { - Ok(Some(utils::u64_from_bytes(&v).map_err(|_| { - Error::bad_database("Invalid private read marker bytes") - })?)) - }) - } - - fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - Ok(self - .roomuserid_lastprivatereadupdate - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } -} - impl service::room::edus::typing::Data for KeyValueDatabase { fn typing_add( &self, @@ -389,266 +92,3 @@ impl service::room::edus::typing::Data for KeyValueDatabase { Ok(user_ids) } } - -impl service::room::edus::presence::Data for KeyValueDatabase { - fn update_presence( - &self, - user_id: &UserId, - room_id: &RoomId, - presence: PresenceEvent, - ) -> Result<()> { - // TODO: Remove old entry? Or maybe just wipe completely from time to time? - - let count = globals.next_count()?.to_be_bytes(); - - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count); - presence_id.push(0xff); - presence_id.extend_from_slice(presence.sender.as_bytes()); - - self.presenceid_presence.insert( - &presence_id, - &serde_json::to_vec(&presence).expect("PresenceEvent can be serialized"), - )?; - - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn ping_presence(&self, user_id: &UserId) -> Result<()> { - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn last_presence_update(&self, user_id: &UserId) -> Result> { - self.userid_lastpresenceupdate - .get(user_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) - }) - .transpose() - } - - fn get_presence_event( - &self, - user_id: &UserId, - room_id: &RoomId, - count: u64, - ) -> Result> { - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count.to_be_bytes()); - presence_id.push(0xff); - presence_id.extend_from_slice(user_id.as_bytes()); - - self.presenceid_presence - .get(&presence_id)? 
- .map(|value| parse_presence_event(&value)) - .transpose() - } - - fn presence_since( - &self, - room_id: &RoomId, - since: u64, - ) -> Result, PresenceEvent>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - let mut hashmap = HashMap::new(); - - for (key, value) in self - .presenceid_presence - .iter_from(&*first_possible_edu, false) - .take_while(|(key, _)| key.starts_with(&prefix)) - { - let user_id = UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("Invalid UserId bytes in presenceid_presence."))?, - ) - .map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?; - - let presence = parse_presence_event(&value)?; - - hashmap.insert(user_id, presence); - } - - Ok(hashmap) - } -} - -fn parse_presence_event(bytes: &[u8]) -> Result { - let mut presence: PresenceEvent = serde_json::from_slice(bytes) - .map_err(|_| Error::bad_database("Invalid presence event in db."))?; - - let current_timestamp: UInt = utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"); - - if presence.content.presence == PresenceState::Online { - // Don't set last_active_ago when the user is online - presence.content.last_active_ago = None; - } else { - // Convert from timestamp to duration - presence.content.last_active_ago = presence - .content - .last_active_ago - .map(|timestamp| current_timestamp - timestamp); - } -} - -impl service::room::lazy_load::Data for KeyValueDatabase { - fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } -} - -impl service::room::metadata::Data for KeyValueDatabase { - fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. 
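The parse_presence_event helper that recurs in these removal hunks stores presence with an absolute last-active timestamp and converts it into a relative last_active_ago on read, clearing the field entirely while the user is online (note that, as written above, the helper never actually returns the adjusted event; a compiling version needs a trailing Ok(presence)). A dependency-free sketch of just that conversion, with hypothetical names:

// Sketch only: the last_active_ago adjustment done by parse_presence_event.
// `now_ms` stands in for utils::millis_since_unix_epoch(); names are hypothetical.
fn last_active_ago(stored_last_active_ms: Option<u64>, online: bool, now_ms: u64) -> Option<u64> {
    if online {
        // Never report a "last active" age for a user who is online right now.
        None
    } else {
        // The database holds an absolute timestamp; clients expect an age,
        // so subtract it from the current time.
        stored_last_active_ms.map(|ts| now_ms.saturating_sub(ts))
    }
}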
- Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } -} - -impl service::room::outlier::Data for KeyValueDatabase { - fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn get_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } -} - -impl service::room::pdu_metadata::Data for KeyValueDatabase { - fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } -} diff --git a/src/database/key_value/room/lazy_load.rs b/src/database/key_value/room/lazy_load.rs index 0be13115..8abdce49 100644 --- a/src/database/key_value/room/lazy_load.rs +++ b/src/database/key_value/room/lazy_load.rs @@ -1,519 +1,3 @@ -use crate::service; - -impl service::room::state::Data for KeyValueDatabase { - fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - fn set_room_state(&self, room_id: &RoomId, new_shortstatehash: u64 - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - Ok(()) - } - - fn set_event_state(&self) -> Result<()> { - db.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - Ok(()) - } - - fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - fn set_forward_extremities( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - -} - -impl service::room::alias::Data for KeyValueDatabase { - fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId> - ) -> Result<()> { - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - Ok(()) - } - - fn remove_alias( - &self, - alias: &RoomAliasId, - ) -> Result<()> { - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - Ok(()) - } - - fn resolve_local_alias( - &self, - alias: &RoomAliasId - ) -> Result<()> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - fn local_aliases_for_room( - &self, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? 
- .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } -} - -impl service::room::directory::Data for KeyValueDatabase { - fn set_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } - - fn set_not_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.remove(room_id.as_bytes())?; - } - - fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } -} - -impl service::room::edus::read_receipt::Data for KeyValueDatabase { - fn readreceipt_update( - &self, - user_id: &UserId, - room_id: &RoomId, - event: ReceiptEvent, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - // Remove old entry - if let Some((old, _)) = self - .readreceiptid_readreceipt - .iter_from(&last_possible_key, true) - .take_while(|(key, _)| key.starts_with(&prefix)) - .find(|(key, _)| { - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element") - == user_id.as_bytes() - }) - { - // This is the old room_latest - self.readreceiptid_readreceipt.remove(&old)?; - } - - let mut room_latest_id = prefix; - room_latest_id.extend_from_slice(&globals.next_count()?.to_be_bytes()); - room_latest_id.push(0xff); - room_latest_id.extend_from_slice(user_id.as_bytes()); - - self.readreceiptid_readreceipt.insert( - &room_latest_id, - &serde_json::to_vec(&event).expect("EduEvent::to_string always works"), - )?; - - Ok(()) - } - - pub fn readreceipts_since<'a>( - &'a self, - room_id: &RoomId, - since: u64, - ) -> impl Iterator< - Item=Result<( - Box, - u64, - Raw, - )>, - > + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - let prefix2 = prefix.clone(); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - - self.readreceiptid_readreceipt - .iter_from(&first_possible_edu, false) - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(move |(k, v)| { - let count = - utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::()]) - .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; - let user_id = UserId::parse( - utils::string_from_bytes(&k[prefix.len() + mem::size_of::() + 1..]) - .map_err(|_| { - Error::bad_database("Invalid readreceiptid userid bytes in db.") - })?, - ) - .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?; - - let mut json = serde_json::from_slice::(&v).map_err(|_| { - Error::bad_database("Read receipt in roomlatestid_roomlatest is invalid json.") - })?; - json.remove("room_id"); - - Ok(( - user_id, - count, - Raw::from_json( - serde_json::value::to_raw_value(&json).expect("json is valid raw value"), - ), - )) - }) - } - - fn private_read_set( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result<()> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - 
.insert(&key, &count.to_be_bytes())?; - - self.roomuserid_lastprivatereadupdate - .insert(&key, &globals.next_count()?.to_be_bytes())?; - } - - fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - .get(&key)? - .map_or(Ok(None), |v| { - Ok(Some(utils::u64_from_bytes(&v).map_err(|_| { - Error::bad_database("Invalid private read marker bytes") - })?)) - }) - } - - fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - Ok(self - .roomuserid_lastprivatereadupdate - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } -} - -impl service::room::edus::typing::Data for KeyValueDatabase { - fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let count = globals.next_count()?.to_be_bytes(); - - let mut room_typing_id = prefix; - room_typing_id.extend_from_slice(&timeout.to_be_bytes()); - room_typing_id.push(0xff); - room_typing_id.extend_from_slice(&count); - - self.typingid_userid - .insert(&room_typing_id, &*user_id.as_bytes())?; - - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &count)?; - - Ok(()) - } - - fn typing_remove( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let user_id = user_id.to_string(); - - let mut found_outdated = false; - - // Maybe there are multiple ones from calling roomtyping_add multiple times - for outdated_edu in self - .typingid_userid - .scan_prefix(prefix) - .filter(|(_, v)| &**v == user_id.as_bytes()) - { - self.typingid_userid.remove(&outdated_edu.0)?; - found_outdated = true; - } - - if found_outdated { - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - } - - Ok(()) - } - - fn last_typing_update( - &self, - room_id: &RoomId, - ) -> Result { - Ok(self - .roomid_lasttypingupdate - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } - - fn typings_all( - &self, - room_id: &RoomId, - ) -> Result> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut user_ids = HashSet::new(); - - for (_, user_id) in self.typingid_userid.scan_prefix(prefix) { - let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| { - Error::bad_database("User ID in typingid_userid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?; - - user_ids.insert(user_id); - } - - Ok(user_ids) - } -} - -impl service::room::edus::presence::Data for KeyValueDatabase { - fn update_presence( - &self, - user_id: &UserId, - room_id: &RoomId, - presence: PresenceEvent, - ) -> Result<()> { - // TODO: Remove old entry? Or maybe just wipe completely from time to time? 
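The typing implementation being dropped from this file (and kept in edus/typing.rs) pairs every write to typingid_userid with a bump of roomid_lasttypingupdate, so a reader only has to compare one per-room counter against its since token before scanning the EDU entries themselves. A rough in-memory sketch of that pattern, using hypothetical names and a HashMap in place of the key-value tree:

use std::collections::HashMap;

// Sketch only: the "last update counter" pattern behind roomid_lasttypingupdate.
// A HashMap stands in for the persistent tree; all names are hypothetical.
#[derive(Default)]
struct TypingIndex {
    last_update: HashMap<String, u64>, // room_id -> global count of the last change
}

impl TypingIndex {
    // Called whenever a typing entry is added or removed for the room.
    fn bump(&mut self, room_id: &str, next_count: u64) {
        self.last_update.insert(room_id.to_owned(), next_count);
    }

    // Cheap pre-check: only scan the typing entries if something changed after `since`.
    fn changed_since(&self, room_id: &str, since: u64) -> bool {
        self.last_update.get(room_id).copied().unwrap_or(0) > since
    }
}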
- - let count = globals.next_count()?.to_be_bytes(); - - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count); - presence_id.push(0xff); - presence_id.extend_from_slice(presence.sender.as_bytes()); - - self.presenceid_presence.insert( - &presence_id, - &serde_json::to_vec(&presence).expect("PresenceEvent can be serialized"), - )?; - - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn ping_presence(&self, user_id: &UserId) -> Result<()> { - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn last_presence_update(&self, user_id: &UserId) -> Result> { - self.userid_lastpresenceupdate - .get(user_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) - }) - .transpose() - } - - fn get_presence_event( - &self, - user_id: &UserId, - room_id: &RoomId, - count: u64, - ) -> Result> { - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count.to_be_bytes()); - presence_id.push(0xff); - presence_id.extend_from_slice(user_id.as_bytes()); - - self.presenceid_presence - .get(&presence_id)? - .map(|value| parse_presence_event(&value)) - .transpose() - } - - fn presence_since( - &self, - room_id: &RoomId, - since: u64, - ) -> Result, PresenceEvent>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - let mut hashmap = HashMap::new(); - - for (key, value) in self - .presenceid_presence - .iter_from(&*first_possible_edu, false) - .take_while(|(key, _)| key.starts_with(&prefix)) - { - let user_id = UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("Invalid UserId bytes in presenceid_presence."))?, - ) - .map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?; - - let presence = parse_presence_event(&value)?; - - hashmap.insert(user_id, presence); - } - - Ok(hashmap) - } -} - -fn parse_presence_event(bytes: &[u8]) -> Result { - let mut presence: PresenceEvent = serde_json::from_slice(bytes) - .map_err(|_| Error::bad_database("Invalid presence event in db."))?; - - let current_timestamp: UInt = utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"); - - if presence.content.presence == PresenceState::Online { - // Don't set last_active_ago when the user is online - presence.content.last_active_ago = None; - } else { - // Convert from timestamp to duration - presence.content.last_active_ago = presence - .content - .last_active_ago - .map(|timestamp| current_timestamp - timestamp); - } -} - impl service::room::lazy_load::Data for KeyValueDatabase { fn lazy_load_was_sent_before( &self, @@ -582,73 +66,3 @@ impl service::room::lazy_load::Data for KeyValueDatabase { Ok(()) } } - -impl service::room::metadata::Data for KeyValueDatabase { - fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. 
- Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } -} - -impl service::room::outlier::Data for KeyValueDatabase { - fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn get_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } -} - -impl service::room::pdu_metadata::Data for KeyValueDatabase { - fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } -} diff --git a/src/database/key_value/room/metadata.rs b/src/database/key_value/room/metadata.rs index 0be13115..37dd7173 100644 --- a/src/database/key_value/room/metadata.rs +++ b/src/database/key_value/room/metadata.rs @@ -1,588 +1,3 @@ -use crate::service; - -impl service::room::state::Data for KeyValueDatabase { - fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - fn set_room_state(&self, room_id: &RoomId, new_shortstatehash: u64 - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - Ok(()) - } - - fn set_event_state(&self) -> Result<()> { - db.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - Ok(()) - } - - fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - fn set_forward_extremities( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - -} - -impl service::room::alias::Data for KeyValueDatabase { - fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId> - ) -> Result<()> { - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - Ok(()) - } - - fn remove_alias( - &self, - alias: &RoomAliasId, - ) -> Result<()> { - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - Ok(()) - } - - fn resolve_local_alias( - &self, - alias: &RoomAliasId - ) -> Result<()> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - fn local_aliases_for_room( - &self, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? 
- .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } -} - -impl service::room::directory::Data for KeyValueDatabase { - fn set_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } - - fn set_not_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.remove(room_id.as_bytes())?; - } - - fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } -} - -impl service::room::edus::read_receipt::Data for KeyValueDatabase { - fn readreceipt_update( - &self, - user_id: &UserId, - room_id: &RoomId, - event: ReceiptEvent, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - // Remove old entry - if let Some((old, _)) = self - .readreceiptid_readreceipt - .iter_from(&last_possible_key, true) - .take_while(|(key, _)| key.starts_with(&prefix)) - .find(|(key, _)| { - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element") - == user_id.as_bytes() - }) - { - // This is the old room_latest - self.readreceiptid_readreceipt.remove(&old)?; - } - - let mut room_latest_id = prefix; - room_latest_id.extend_from_slice(&globals.next_count()?.to_be_bytes()); - room_latest_id.push(0xff); - room_latest_id.extend_from_slice(user_id.as_bytes()); - - self.readreceiptid_readreceipt.insert( - &room_latest_id, - &serde_json::to_vec(&event).expect("EduEvent::to_string always works"), - )?; - - Ok(()) - } - - pub fn readreceipts_since<'a>( - &'a self, - room_id: &RoomId, - since: u64, - ) -> impl Iterator< - Item=Result<( - Box, - u64, - Raw, - )>, - > + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - let prefix2 = prefix.clone(); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - - self.readreceiptid_readreceipt - .iter_from(&first_possible_edu, false) - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(move |(k, v)| { - let count = - utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::()]) - .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; - let user_id = UserId::parse( - utils::string_from_bytes(&k[prefix.len() + mem::size_of::() + 1..]) - .map_err(|_| { - Error::bad_database("Invalid readreceiptid userid bytes in db.") - })?, - ) - .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?; - - let mut json = serde_json::from_slice::(&v).map_err(|_| { - Error::bad_database("Read receipt in roomlatestid_roomlatest is invalid json.") - })?; - json.remove("room_id"); - - Ok(( - user_id, - count, - Raw::from_json( - serde_json::value::to_raw_value(&json).expect("json is valid raw value"), - ), - )) - }) - } - - fn private_read_set( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result<()> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - 
.insert(&key, &count.to_be_bytes())?; - - self.roomuserid_lastprivatereadupdate - .insert(&key, &globals.next_count()?.to_be_bytes())?; - } - - fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - .get(&key)? - .map_or(Ok(None), |v| { - Ok(Some(utils::u64_from_bytes(&v).map_err(|_| { - Error::bad_database("Invalid private read marker bytes") - })?)) - }) - } - - fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - Ok(self - .roomuserid_lastprivatereadupdate - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } -} - -impl service::room::edus::typing::Data for KeyValueDatabase { - fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let count = globals.next_count()?.to_be_bytes(); - - let mut room_typing_id = prefix; - room_typing_id.extend_from_slice(&timeout.to_be_bytes()); - room_typing_id.push(0xff); - room_typing_id.extend_from_slice(&count); - - self.typingid_userid - .insert(&room_typing_id, &*user_id.as_bytes())?; - - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &count)?; - - Ok(()) - } - - fn typing_remove( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let user_id = user_id.to_string(); - - let mut found_outdated = false; - - // Maybe there are multiple ones from calling roomtyping_add multiple times - for outdated_edu in self - .typingid_userid - .scan_prefix(prefix) - .filter(|(_, v)| &**v == user_id.as_bytes()) - { - self.typingid_userid.remove(&outdated_edu.0)?; - found_outdated = true; - } - - if found_outdated { - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - } - - Ok(()) - } - - fn last_typing_update( - &self, - room_id: &RoomId, - ) -> Result { - Ok(self - .roomid_lasttypingupdate - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } - - fn typings_all( - &self, - room_id: &RoomId, - ) -> Result> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut user_ids = HashSet::new(); - - for (_, user_id) in self.typingid_userid.scan_prefix(prefix) { - let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| { - Error::bad_database("User ID in typingid_userid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?; - - user_ids.insert(user_id); - } - - Ok(user_ids) - } -} - -impl service::room::edus::presence::Data for KeyValueDatabase { - fn update_presence( - &self, - user_id: &UserId, - room_id: &RoomId, - presence: PresenceEvent, - ) -> Result<()> { - // TODO: Remove old entry? Or maybe just wipe completely from time to time? 
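Both readreceipts_since and presence_since in these hunks use the same range-scan idiom: seek to prefix ++ (since + 1) encoded big-endian, then stop as soon as a key no longer starts with the prefix, which yields exactly the entries newer than since, in order. A small sketch of the idiom over an ordered in-memory map (hypothetical names, not the real tree type):

use std::collections::BTreeMap;

// Sketch only: seek-then-take_while over big-endian counter keys, as used by
// readreceipts_since / presence_since. BTreeMap stands in for the real tree.
fn entries_since<'a>(
    tree: &'a BTreeMap<Vec<u8>, Vec<u8>>,
    prefix: &'a [u8],
    since: u64,
) -> impl Iterator<Item = (&'a Vec<u8>, &'a Vec<u8>)> {
    let mut first_possible = prefix.to_vec();
    // +1 so the entry at exactly `since` is not returned again.
    first_possible.extend_from_slice(&(since + 1).to_be_bytes());
    tree.range(first_possible..)
        .take_while(move |(k, _)| k.starts_with(prefix))
}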
- - let count = globals.next_count()?.to_be_bytes(); - - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count); - presence_id.push(0xff); - presence_id.extend_from_slice(presence.sender.as_bytes()); - - self.presenceid_presence.insert( - &presence_id, - &serde_json::to_vec(&presence).expect("PresenceEvent can be serialized"), - )?; - - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn ping_presence(&self, user_id: &UserId) -> Result<()> { - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn last_presence_update(&self, user_id: &UserId) -> Result> { - self.userid_lastpresenceupdate - .get(user_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) - }) - .transpose() - } - - fn get_presence_event( - &self, - user_id: &UserId, - room_id: &RoomId, - count: u64, - ) -> Result> { - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count.to_be_bytes()); - presence_id.push(0xff); - presence_id.extend_from_slice(user_id.as_bytes()); - - self.presenceid_presence - .get(&presence_id)? - .map(|value| parse_presence_event(&value)) - .transpose() - } - - fn presence_since( - &self, - room_id: &RoomId, - since: u64, - ) -> Result, PresenceEvent>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - let mut hashmap = HashMap::new(); - - for (key, value) in self - .presenceid_presence - .iter_from(&*first_possible_edu, false) - .take_while(|(key, _)| key.starts_with(&prefix)) - { - let user_id = UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("Invalid UserId bytes in presenceid_presence."))?, - ) - .map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?; - - let presence = parse_presence_event(&value)?; - - hashmap.insert(user_id, presence); - } - - Ok(hashmap) - } -} - -fn parse_presence_event(bytes: &[u8]) -> Result { - let mut presence: PresenceEvent = serde_json::from_slice(bytes) - .map_err(|_| Error::bad_database("Invalid presence event in db."))?; - - let current_timestamp: UInt = utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"); - - if presence.content.presence == PresenceState::Online { - // Don't set last_active_ago when the user is online - presence.content.last_active_ago = None; - } else { - // Convert from timestamp to duration - presence.content.last_active_ago = presence - .content - .last_active_ago - .map(|timestamp| current_timestamp - timestamp); - } -} - -impl service::room::lazy_load::Data for KeyValueDatabase { - fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - fn 
lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } -} - impl service::room::metadata::Data for KeyValueDatabase { fn exists(&self, room_id: &RoomId) -> Result { let prefix = match self.get_shortroomid(room_id)? { @@ -599,56 +14,3 @@ impl service::room::metadata::Data for KeyValueDatabase { .is_some()) } } - -impl service::room::outlier::Data for KeyValueDatabase { - fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn get_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? 
- .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } -} - -impl service::room::pdu_metadata::Data for KeyValueDatabase { - fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } -} diff --git a/src/database/key_value/room/mod.rs b/src/database/key_value/room/mod.rs new file mode 100644 index 00000000..2a3f81d8 --- /dev/null +++ b/src/database/key_value/room/mod.rs @@ -0,0 +1,17 @@ +mod state; +mod alias; +mod directory; +mod edus; +mod event_handler; +mod lazy_loading; +mod metadata; +mod outlier; +mod pdu_metadata; +mod search; +mod short; +mod state; +mod state_accessor; +mod state_cache; +mod state_compressor; +mod timeline; +mod user; diff --git a/src/database/key_value/room/outlier.rs b/src/database/key_value/room/outlier.rs index 0be13115..c979d253 100644 --- a/src/database/key_value/room/outlier.rs +++ b/src/database/key_value/room/outlier.rs @@ -1,605 +1,3 @@ -use crate::service; - -impl service::room::state::Data for KeyValueDatabase { - fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - fn set_room_state(&self, room_id: &RoomId, new_shortstatehash: u64 - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - Ok(()) - } - - fn set_event_state(&self) -> Result<()> { - db.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - Ok(()) - } - - fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - fn set_forward_extremities( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - -} - -impl service::room::alias::Data for KeyValueDatabase { - fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId> - ) -> Result<()> { - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - Ok(()) - } - - fn remove_alias( - &self, - alias: &RoomAliasId, - ) -> Result<()> { - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - Ok(()) - } - - fn resolve_local_alias( - &self, - alias: &RoomAliasId - ) -> Result<()> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - fn local_aliases_for_room( - &self, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? 
- .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } -} - -impl service::room::directory::Data for KeyValueDatabase { - fn set_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } - - fn set_not_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.remove(room_id.as_bytes())?; - } - - fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } -} - -impl service::room::edus::read_receipt::Data for KeyValueDatabase { - fn readreceipt_update( - &self, - user_id: &UserId, - room_id: &RoomId, - event: ReceiptEvent, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - // Remove old entry - if let Some((old, _)) = self - .readreceiptid_readreceipt - .iter_from(&last_possible_key, true) - .take_while(|(key, _)| key.starts_with(&prefix)) - .find(|(key, _)| { - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element") - == user_id.as_bytes() - }) - { - // This is the old room_latest - self.readreceiptid_readreceipt.remove(&old)?; - } - - let mut room_latest_id = prefix; - room_latest_id.extend_from_slice(&globals.next_count()?.to_be_bytes()); - room_latest_id.push(0xff); - room_latest_id.extend_from_slice(user_id.as_bytes()); - - self.readreceiptid_readreceipt.insert( - &room_latest_id, - &serde_json::to_vec(&event).expect("EduEvent::to_string always works"), - )?; - - Ok(()) - } - - pub fn readreceipts_since<'a>( - &'a self, - room_id: &RoomId, - since: u64, - ) -> impl Iterator< - Item=Result<( - Box, - u64, - Raw, - )>, - > + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - let prefix2 = prefix.clone(); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - - self.readreceiptid_readreceipt - .iter_from(&first_possible_edu, false) - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(move |(k, v)| { - let count = - utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::()]) - .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; - let user_id = UserId::parse( - utils::string_from_bytes(&k[prefix.len() + mem::size_of::() + 1..]) - .map_err(|_| { - Error::bad_database("Invalid readreceiptid userid bytes in db.") - })?, - ) - .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?; - - let mut json = serde_json::from_slice::(&v).map_err(|_| { - Error::bad_database("Read receipt in roomlatestid_roomlatest is invalid json.") - })?; - json.remove("room_id"); - - Ok(( - user_id, - count, - Raw::from_json( - serde_json::value::to_raw_value(&json).expect("json is valid raw value"), - ), - )) - }) - } - - fn private_read_set( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result<()> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - 
.insert(&key, &count.to_be_bytes())?; - - self.roomuserid_lastprivatereadupdate - .insert(&key, &globals.next_count()?.to_be_bytes())?; - } - - fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - .get(&key)? - .map_or(Ok(None), |v| { - Ok(Some(utils::u64_from_bytes(&v).map_err(|_| { - Error::bad_database("Invalid private read marker bytes") - })?)) - }) - } - - fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - Ok(self - .roomuserid_lastprivatereadupdate - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } -} - -impl service::room::edus::typing::Data for KeyValueDatabase { - fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let count = globals.next_count()?.to_be_bytes(); - - let mut room_typing_id = prefix; - room_typing_id.extend_from_slice(&timeout.to_be_bytes()); - room_typing_id.push(0xff); - room_typing_id.extend_from_slice(&count); - - self.typingid_userid - .insert(&room_typing_id, &*user_id.as_bytes())?; - - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &count)?; - - Ok(()) - } - - fn typing_remove( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let user_id = user_id.to_string(); - - let mut found_outdated = false; - - // Maybe there are multiple ones from calling roomtyping_add multiple times - for outdated_edu in self - .typingid_userid - .scan_prefix(prefix) - .filter(|(_, v)| &**v == user_id.as_bytes()) - { - self.typingid_userid.remove(&outdated_edu.0)?; - found_outdated = true; - } - - if found_outdated { - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - } - - Ok(()) - } - - fn last_typing_update( - &self, - room_id: &RoomId, - ) -> Result { - Ok(self - .roomid_lasttypingupdate - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } - - fn typings_all( - &self, - room_id: &RoomId, - ) -> Result> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut user_ids = HashSet::new(); - - for (_, user_id) in self.typingid_userid.scan_prefix(prefix) { - let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| { - Error::bad_database("User ID in typingid_userid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?; - - user_ids.insert(user_id); - } - - Ok(user_ids) - } -} - -impl service::room::edus::presence::Data for KeyValueDatabase { - fn update_presence( - &self, - user_id: &UserId, - room_id: &RoomId, - presence: PresenceEvent, - ) -> Result<()> { - // TODO: Remove old entry? Or maybe just wipe completely from time to time? 
- - let count = globals.next_count()?.to_be_bytes(); - - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count); - presence_id.push(0xff); - presence_id.extend_from_slice(presence.sender.as_bytes()); - - self.presenceid_presence.insert( - &presence_id, - &serde_json::to_vec(&presence).expect("PresenceEvent can be serialized"), - )?; - - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn ping_presence(&self, user_id: &UserId) -> Result<()> { - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn last_presence_update(&self, user_id: &UserId) -> Result> { - self.userid_lastpresenceupdate - .get(user_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) - }) - .transpose() - } - - fn get_presence_event( - &self, - user_id: &UserId, - room_id: &RoomId, - count: u64, - ) -> Result> { - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count.to_be_bytes()); - presence_id.push(0xff); - presence_id.extend_from_slice(user_id.as_bytes()); - - self.presenceid_presence - .get(&presence_id)? - .map(|value| parse_presence_event(&value)) - .transpose() - } - - fn presence_since( - &self, - room_id: &RoomId, - since: u64, - ) -> Result, PresenceEvent>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - let mut hashmap = HashMap::new(); - - for (key, value) in self - .presenceid_presence - .iter_from(&*first_possible_edu, false) - .take_while(|(key, _)| key.starts_with(&prefix)) - { - let user_id = UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("Invalid UserId bytes in presenceid_presence."))?, - ) - .map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?; - - let presence = parse_presence_event(&value)?; - - hashmap.insert(user_id, presence); - } - - Ok(hashmap) - } -} - -fn parse_presence_event(bytes: &[u8]) -> Result { - let mut presence: PresenceEvent = serde_json::from_slice(bytes) - .map_err(|_| Error::bad_database("Invalid presence event in db."))?; - - let current_timestamp: UInt = utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"); - - if presence.content.presence == PresenceState::Online { - // Don't set last_active_ago when the user is online - presence.content.last_active_ago = None; - } else { - // Convert from timestamp to duration - presence.content.last_active_ago = presence - .content - .last_active_ago - .map(|timestamp| current_timestamp - timestamp); - } -} - -impl service::room::lazy_load::Data for KeyValueDatabase { - fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - fn 
lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } -} - -impl service::room::metadata::Data for KeyValueDatabase { - fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } -} - impl service::room::outlier::Data for KeyValueDatabase { fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { self.eventid_outlierpdu @@ -624,31 +22,3 @@ impl service::room::outlier::Data for KeyValueDatabase { ) } } - -impl service::room::pdu_metadata::Data for KeyValueDatabase { - fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } -} diff --git a/src/database/key_value/room/pdu_metadata.rs b/src/database/key_value/room/pdu_metadata.rs index 0be13115..6b2171ca 100644 --- a/src/database/key_value/room/pdu_metadata.rs +++ b/src/database/key_value/room/pdu_metadata.rs @@ -1,630 +1,3 @@ -use crate::service; - -impl service::room::state::Data for KeyValueDatabase { - fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? 
- .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - fn set_room_state(&self, room_id: &RoomId, new_shortstatehash: u64 - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - Ok(()) - } - - fn set_event_state(&self) -> Result<()> { - db.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - Ok(()) - } - - fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - fn set_forward_extremities( - &self, - room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - -} - -impl service::room::alias::Data for KeyValueDatabase { - fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId> - ) -> Result<()> { - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - Ok(()) - } - - fn remove_alias( - &self, - alias: &RoomAliasId, - ) -> Result<()> { - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - Ok(()) - } - - fn resolve_local_alias( - &self, - alias: &RoomAliasId - ) -> Result<()> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - fn local_aliases_for_room( - &self, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? 
- .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } -} - -impl service::room::directory::Data for KeyValueDatabase { - fn set_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } - - fn set_not_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.remove(room_id.as_bytes())?; - } - - fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } -} - -impl service::room::edus::read_receipt::Data for KeyValueDatabase { - fn readreceipt_update( - &self, - user_id: &UserId, - room_id: &RoomId, - event: ReceiptEvent, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - // Remove old entry - if let Some((old, _)) = self - .readreceiptid_readreceipt - .iter_from(&last_possible_key, true) - .take_while(|(key, _)| key.starts_with(&prefix)) - .find(|(key, _)| { - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element") - == user_id.as_bytes() - }) - { - // This is the old room_latest - self.readreceiptid_readreceipt.remove(&old)?; - } - - let mut room_latest_id = prefix; - room_latest_id.extend_from_slice(&globals.next_count()?.to_be_bytes()); - room_latest_id.push(0xff); - room_latest_id.extend_from_slice(user_id.as_bytes()); - - self.readreceiptid_readreceipt.insert( - &room_latest_id, - &serde_json::to_vec(&event).expect("EduEvent::to_string always works"), - )?; - - Ok(()) - } - - pub fn readreceipts_since<'a>( - &'a self, - room_id: &RoomId, - since: u64, - ) -> impl Iterator< - Item=Result<( - Box, - u64, - Raw, - )>, - > + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - let prefix2 = prefix.clone(); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - - self.readreceiptid_readreceipt - .iter_from(&first_possible_edu, false) - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(move |(k, v)| { - let count = - utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::()]) - .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; - let user_id = UserId::parse( - utils::string_from_bytes(&k[prefix.len() + mem::size_of::() + 1..]) - .map_err(|_| { - Error::bad_database("Invalid readreceiptid userid bytes in db.") - })?, - ) - .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?; - - let mut json = serde_json::from_slice::(&v).map_err(|_| { - Error::bad_database("Read receipt in roomlatestid_roomlatest is invalid json.") - })?; - json.remove("room_id"); - - Ok(( - user_id, - count, - Raw::from_json( - serde_json::value::to_raw_value(&json).expect("json is valid raw value"), - ), - )) - }) - } - - fn private_read_set( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result<()> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - 
.insert(&key, &count.to_be_bytes())?; - - self.roomuserid_lastprivatereadupdate - .insert(&key, &globals.next_count()?.to_be_bytes())?; - } - - fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - .get(&key)? - .map_or(Ok(None), |v| { - Ok(Some(utils::u64_from_bytes(&v).map_err(|_| { - Error::bad_database("Invalid private read marker bytes") - })?)) - }) - } - - fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - Ok(self - .roomuserid_lastprivatereadupdate - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } -} - -impl service::room::edus::typing::Data for KeyValueDatabase { - fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let count = globals.next_count()?.to_be_bytes(); - - let mut room_typing_id = prefix; - room_typing_id.extend_from_slice(&timeout.to_be_bytes()); - room_typing_id.push(0xff); - room_typing_id.extend_from_slice(&count); - - self.typingid_userid - .insert(&room_typing_id, &*user_id.as_bytes())?; - - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &count)?; - - Ok(()) - } - - fn typing_remove( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let user_id = user_id.to_string(); - - let mut found_outdated = false; - - // Maybe there are multiple ones from calling roomtyping_add multiple times - for outdated_edu in self - .typingid_userid - .scan_prefix(prefix) - .filter(|(_, v)| &**v == user_id.as_bytes()) - { - self.typingid_userid.remove(&outdated_edu.0)?; - found_outdated = true; - } - - if found_outdated { - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - } - - Ok(()) - } - - fn last_typing_update( - &self, - room_id: &RoomId, - ) -> Result { - Ok(self - .roomid_lasttypingupdate - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } - - fn typings_all( - &self, - room_id: &RoomId, - ) -> Result> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut user_ids = HashSet::new(); - - for (_, user_id) in self.typingid_userid.scan_prefix(prefix) { - let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| { - Error::bad_database("User ID in typingid_userid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?; - - user_ids.insert(user_id); - } - - Ok(user_ids) - } -} - -impl service::room::edus::presence::Data for KeyValueDatabase { - fn update_presence( - &self, - user_id: &UserId, - room_id: &RoomId, - presence: PresenceEvent, - ) -> Result<()> { - // TODO: Remove old entry? Or maybe just wipe completely from time to time? 
- - let count = globals.next_count()?.to_be_bytes(); - - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count); - presence_id.push(0xff); - presence_id.extend_from_slice(presence.sender.as_bytes()); - - self.presenceid_presence.insert( - &presence_id, - &serde_json::to_vec(&presence).expect("PresenceEvent can be serialized"), - )?; - - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn ping_presence(&self, user_id: &UserId) -> Result<()> { - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn last_presence_update(&self, user_id: &UserId) -> Result> { - self.userid_lastpresenceupdate - .get(user_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) - }) - .transpose() - } - - fn get_presence_event( - &self, - user_id: &UserId, - room_id: &RoomId, - count: u64, - ) -> Result> { - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count.to_be_bytes()); - presence_id.push(0xff); - presence_id.extend_from_slice(user_id.as_bytes()); - - self.presenceid_presence - .get(&presence_id)? - .map(|value| parse_presence_event(&value)) - .transpose() - } - - fn presence_since( - &self, - room_id: &RoomId, - since: u64, - ) -> Result, PresenceEvent>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - let mut hashmap = HashMap::new(); - - for (key, value) in self - .presenceid_presence - .iter_from(&*first_possible_edu, false) - .take_while(|(key, _)| key.starts_with(&prefix)) - { - let user_id = UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("Invalid UserId bytes in presenceid_presence."))?, - ) - .map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?; - - let presence = parse_presence_event(&value)?; - - hashmap.insert(user_id, presence); - } - - Ok(hashmap) - } -} - -fn parse_presence_event(bytes: &[u8]) -> Result { - let mut presence: PresenceEvent = serde_json::from_slice(bytes) - .map_err(|_| Error::bad_database("Invalid presence event in db."))?; - - let current_timestamp: UInt = utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"); - - if presence.content.presence == PresenceState::Online { - // Don't set last_active_ago when the user is online - presence.content.last_active_ago = None; - } else { - // Convert from timestamp to duration - presence.content.last_active_ago = presence - .content - .last_active_ago - .map(|timestamp| current_timestamp - timestamp); - } -} - -impl service::room::lazy_load::Data for KeyValueDatabase { - fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - fn 
lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } -} - -impl service::room::metadata::Data for KeyValueDatabase { - fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } -} - -impl service::room::outlier::Data for KeyValueDatabase { - fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn get_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } -} - impl service::room::pdu_metadata::Data for KeyValueDatabase { fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { for prev in event_ids { diff --git a/src/database/key_value/room/state.rs b/src/database/key_value/room/state.rs index 0be13115..5daf6c6a 100644 --- a/src/database/key_value/room/state.rs +++ b/src/database/key_value/room/state.rs @@ -1,5 +1,3 @@ -use crate::service; - impl service::room::state::Data for KeyValueDatabase { fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { self.roomid_shortstatehash @@ -61,594 +59,4 @@ impl service::room::state::Data for KeyValueDatabase { Ok(()) } - -} - -impl service::room::alias::Data for KeyValueDatabase { - fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId> - ) -> Result<()> { - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - Ok(()) - } - - fn remove_alias( - &self, - alias: &RoomAliasId, - ) -> Result<()> { - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? 
{ - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - Ok(()) - } - - fn resolve_local_alias( - &self, - alias: &RoomAliasId - ) -> Result<()> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - fn local_aliases_for_room( - &self, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? - .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } -} - -impl service::room::directory::Data for KeyValueDatabase { - fn set_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } - - fn set_not_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.remove(room_id.as_bytes())?; - } - - fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - fn public_rooms(&self) -> impl Iterator>> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } -} - -impl service::room::edus::read_receipt::Data for KeyValueDatabase { - fn readreceipt_update( - &self, - user_id: &UserId, - room_id: &RoomId, - event: ReceiptEvent, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - // Remove old entry - if let Some((old, _)) = self - .readreceiptid_readreceipt - .iter_from(&last_possible_key, true) - .take_while(|(key, _)| key.starts_with(&prefix)) - .find(|(key, _)| { - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element") - == user_id.as_bytes() - }) - { - // This is the old room_latest - self.readreceiptid_readreceipt.remove(&old)?; - } - - let mut room_latest_id = prefix; - room_latest_id.extend_from_slice(&globals.next_count()?.to_be_bytes()); - room_latest_id.push(0xff); - room_latest_id.extend_from_slice(user_id.as_bytes()); - - self.readreceiptid_readreceipt.insert( - &room_latest_id, - &serde_json::to_vec(&event).expect("EduEvent::to_string always works"), - )?; - - Ok(()) - } - - pub fn readreceipts_since<'a>( - &'a self, - room_id: &RoomId, - since: u64, - ) -> impl Iterator< - Item=Result<( - Box, - u64, - Raw, - )>, - > + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - let prefix2 = prefix.clone(); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - - self.readreceiptid_readreceipt - .iter_from(&first_possible_edu, false) - .take_while(move |(k, _)| 
k.starts_with(&prefix2)) - .map(move |(k, v)| { - let count = - utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::()]) - .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; - let user_id = UserId::parse( - utils::string_from_bytes(&k[prefix.len() + mem::size_of::() + 1..]) - .map_err(|_| { - Error::bad_database("Invalid readreceiptid userid bytes in db.") - })?, - ) - .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?; - - let mut json = serde_json::from_slice::(&v).map_err(|_| { - Error::bad_database("Read receipt in roomlatestid_roomlatest is invalid json.") - })?; - json.remove("room_id"); - - Ok(( - user_id, - count, - Raw::from_json( - serde_json::value::to_raw_value(&json).expect("json is valid raw value"), - ), - )) - }) - } - - fn private_read_set( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result<()> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - .insert(&key, &count.to_be_bytes())?; - - self.roomuserid_lastprivatereadupdate - .insert(&key, &globals.next_count()?.to_be_bytes())?; - } - - fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - .get(&key)? - .map_or(Ok(None), |v| { - Ok(Some(utils::u64_from_bytes(&v).map_err(|_| { - Error::bad_database("Invalid private read marker bytes") - })?)) - }) - } - - fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - Ok(self - .roomuserid_lastprivatereadupdate - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } -} - -impl service::room::edus::typing::Data for KeyValueDatabase { - fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let count = globals.next_count()?.to_be_bytes(); - - let mut room_typing_id = prefix; - room_typing_id.extend_from_slice(&timeout.to_be_bytes()); - room_typing_id.push(0xff); - room_typing_id.extend_from_slice(&count); - - self.typingid_userid - .insert(&room_typing_id, &*user_id.as_bytes())?; - - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &count)?; - - Ok(()) - } - - fn typing_remove( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let user_id = user_id.to_string(); - - let mut found_outdated = false; - - // Maybe there are multiple ones from calling roomtyping_add multiple times - for outdated_edu in self - .typingid_userid - .scan_prefix(prefix) - .filter(|(_, v)| &**v == user_id.as_bytes()) - { - self.typingid_userid.remove(&outdated_edu.0)?; - found_outdated = true; - } - - if found_outdated { - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - } - - Ok(()) - } - - fn last_typing_update( - &self, - room_id: &RoomId, - ) -> Result { - Ok(self - .roomid_lasttypingupdate - .get(room_id.as_bytes())? 
- .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } - - fn typings_all( - &self, - room_id: &RoomId, - ) -> Result> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut user_ids = HashSet::new(); - - for (_, user_id) in self.typingid_userid.scan_prefix(prefix) { - let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| { - Error::bad_database("User ID in typingid_userid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?; - - user_ids.insert(user_id); - } - - Ok(user_ids) - } -} - -impl service::room::edus::presence::Data for KeyValueDatabase { - fn update_presence( - &self, - user_id: &UserId, - room_id: &RoomId, - presence: PresenceEvent, - ) -> Result<()> { - // TODO: Remove old entry? Or maybe just wipe completely from time to time? - - let count = globals.next_count()?.to_be_bytes(); - - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count); - presence_id.push(0xff); - presence_id.extend_from_slice(presence.sender.as_bytes()); - - self.presenceid_presence.insert( - &presence_id, - &serde_json::to_vec(&presence).expect("PresenceEvent can be serialized"), - )?; - - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn ping_presence(&self, user_id: &UserId) -> Result<()> { - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - fn last_presence_update(&self, user_id: &UserId) -> Result> { - self.userid_lastpresenceupdate - .get(user_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) - }) - .transpose() - } - - fn get_presence_event( - &self, - user_id: &UserId, - room_id: &RoomId, - count: u64, - ) -> Result> { - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count.to_be_bytes()); - presence_id.push(0xff); - presence_id.extend_from_slice(user_id.as_bytes()); - - self.presenceid_presence - .get(&presence_id)? 
- .map(|value| parse_presence_event(&value)) - .transpose() - } - - fn presence_since( - &self, - room_id: &RoomId, - since: u64, - ) -> Result, PresenceEvent>> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - let mut hashmap = HashMap::new(); - - for (key, value) in self - .presenceid_presence - .iter_from(&*first_possible_edu, false) - .take_while(|(key, _)| key.starts_with(&prefix)) - { - let user_id = UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("Invalid UserId bytes in presenceid_presence."))?, - ) - .map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?; - - let presence = parse_presence_event(&value)?; - - hashmap.insert(user_id, presence); - } - - Ok(hashmap) - } -} - -fn parse_presence_event(bytes: &[u8]) -> Result { - let mut presence: PresenceEvent = serde_json::from_slice(bytes) - .map_err(|_| Error::bad_database("Invalid presence event in db."))?; - - let current_timestamp: UInt = utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"); - - if presence.content.presence == PresenceState::Online { - // Don't set last_active_ago when the user is online - presence.content.last_active_ago = None; - } else { - // Convert from timestamp to duration - presence.content.last_active_ago = presence - .content - .last_active_ago - .map(|timestamp| current_timestamp - timestamp); - } -} - -impl service::room::lazy_load::Data for KeyValueDatabase { - fn lazy_load_was_sent_before( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ll_user: &UserId, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(ll_user.as_bytes()); - Ok(self.lazyloadedids.get(&key)?.is_some()) - } - - fn lazy_load_confirm_delivery( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - since: u64, - ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } - } - - Ok(()) - } - - fn lazy_load_reset( - &self, - user_id: &UserId, - device_id: &DeviceId, - room_id: &RoomId, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.lazyloadedids.scan_prefix(prefix) { - self.lazyloadedids.remove(&key)?; - } - - Ok(()) - } -} - -impl service::room::metadata::Data for KeyValueDatabase { - fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. 
- Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } -} - -impl service::room::outlier::Data for KeyValueDatabase { - fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn get_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } -} - -impl service::room::pdu_metadata::Data for KeyValueDatabase { - fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } } From 877ee484803e0a3b3b36aa292bc08189ae078275 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 9 Oct 2022 18:52:58 +0200 Subject: [PATCH 352/445] refactor: prepare database/key_value/rooms/search.rs from service/rooms/search/mod.rs --- .../rooms/search/mod.rs => database/key_value/rooms/search.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service/rooms/search/mod.rs => database/key_value/rooms/search.rs} (100%) diff --git a/src/service/rooms/search/mod.rs b/src/database/key_value/rooms/search.rs similarity index 100% rename from src/service/rooms/search/mod.rs rename to src/database/key_value/rooms/search.rs From f6040ef2d7f766eb20dc1c0268d4e7b79f4da44d Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 9 Oct 2022 18:52:58 +0200 Subject: [PATCH 353/445] refactor: prepare database/key_value/rooms/search.rs from service/rooms/timeline/mod.rs --- .../rooms/timeline/mod.rs => database/key_value/rooms/search.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service/rooms/timeline/mod.rs => database/key_value/rooms/search.rs} (100%) diff --git a/src/service/rooms/timeline/mod.rs b/src/database/key_value/rooms/search.rs similarity index 100% rename from src/service/rooms/timeline/mod.rs rename to src/database/key_value/rooms/search.rs From b0029c49b917ccecc06c475db709aeef4671256c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 10 Jul 2022 16:28:43 +0200 Subject: [PATCH 354/445] refactor: work on search --- src/database/key_value/room.rs | 1 - .../key_value/{room => rooms}/alias.rs | 0 .../key_value/{room => rooms}/directory.rs | 0 .../{room => rooms}/edus/presence.rs | 0 .../{room => rooms}/edus/read_receipt.rs | 0 .../key_value/{room => rooms}/edus/typing.rs | 0 .../key_value/{room => 
rooms}/lazy_load.rs | 0 .../key_value/{room => rooms}/metadata.rs | 0 src/database/key_value/{room => rooms}/mod.rs | 0 .../key_value/{room => rooms}/outlier.rs | 0 .../key_value/{room => rooms}/pdu_metadata.rs | 0 src/database/key_value/rooms/search.rs | 964 +----------------- .../key_value/{room => rooms}/state.rs | 0 src/service/rooms/search/data.rs | 9 + src/service/rooms/search/mod.rs | 53 +- src/service/rooms/timeline/mod.rs | 15 +- 16 files changed, 36 insertions(+), 1006 deletions(-) delete mode 100644 src/database/key_value/room.rs rename src/database/key_value/{room => rooms}/alias.rs (100%) rename src/database/key_value/{room => rooms}/directory.rs (100%) rename src/database/key_value/{room => rooms}/edus/presence.rs (100%) rename src/database/key_value/{room => rooms}/edus/read_receipt.rs (100%) rename src/database/key_value/{room => rooms}/edus/typing.rs (100%) rename src/database/key_value/{room => rooms}/lazy_load.rs (100%) rename src/database/key_value/{room => rooms}/metadata.rs (100%) rename src/database/key_value/{room => rooms}/mod.rs (100%) rename src/database/key_value/{room => rooms}/outlier.rs (100%) rename src/database/key_value/{room => rooms}/pdu_metadata.rs (100%) rename src/database/key_value/{room => rooms}/state.rs (100%) create mode 100644 src/service/rooms/search/data.rs diff --git a/src/database/key_value/room.rs b/src/database/key_value/room.rs deleted file mode 100644 index 8bd6648e..00000000 --- a/src/database/key_value/room.rs +++ /dev/null @@ -1 +0,0 @@ -asdf diff --git a/src/database/key_value/room/alias.rs b/src/database/key_value/rooms/alias.rs similarity index 100% rename from src/database/key_value/room/alias.rs rename to src/database/key_value/rooms/alias.rs diff --git a/src/database/key_value/room/directory.rs b/src/database/key_value/rooms/directory.rs similarity index 100% rename from src/database/key_value/room/directory.rs rename to src/database/key_value/rooms/directory.rs diff --git a/src/database/key_value/room/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs similarity index 100% rename from src/database/key_value/room/edus/presence.rs rename to src/database/key_value/rooms/edus/presence.rs diff --git a/src/database/key_value/room/edus/read_receipt.rs b/src/database/key_value/rooms/edus/read_receipt.rs similarity index 100% rename from src/database/key_value/room/edus/read_receipt.rs rename to src/database/key_value/rooms/edus/read_receipt.rs diff --git a/src/database/key_value/room/edus/typing.rs b/src/database/key_value/rooms/edus/typing.rs similarity index 100% rename from src/database/key_value/room/edus/typing.rs rename to src/database/key_value/rooms/edus/typing.rs diff --git a/src/database/key_value/room/lazy_load.rs b/src/database/key_value/rooms/lazy_load.rs similarity index 100% rename from src/database/key_value/room/lazy_load.rs rename to src/database/key_value/rooms/lazy_load.rs diff --git a/src/database/key_value/room/metadata.rs b/src/database/key_value/rooms/metadata.rs similarity index 100% rename from src/database/key_value/room/metadata.rs rename to src/database/key_value/rooms/metadata.rs diff --git a/src/database/key_value/room/mod.rs b/src/database/key_value/rooms/mod.rs similarity index 100% rename from src/database/key_value/room/mod.rs rename to src/database/key_value/rooms/mod.rs diff --git a/src/database/key_value/room/outlier.rs b/src/database/key_value/rooms/outlier.rs similarity index 100% rename from src/database/key_value/room/outlier.rs rename to src/database/key_value/rooms/outlier.rs 
diff --git a/src/database/key_value/room/pdu_metadata.rs b/src/database/key_value/rooms/pdu_metadata.rs similarity index 100% rename from src/database/key_value/room/pdu_metadata.rs rename to src/database/key_value/rooms/pdu_metadata.rs diff --git a/src/database/key_value/rooms/search.rs b/src/database/key_value/rooms/search.rs index 6a32e8b4..1ffffe56 100644 --- a/src/database/key_value/rooms/search.rs +++ b/src/database/key_value/rooms/search.rs @@ -1,956 +1,23 @@ +impl service::room::search::Data for KeyValueDatabase { - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. - self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - match self - .lasttimelinecount_cache - .lock() - .unwrap() - .entry(room_id.to_owned()) - { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - .pdus_until(&sender_user, &room_id, u64::MAX)? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .map(|(pduid, _)| self.pdu_count(&pduid)) - .next() - { - Ok(*v.insert(last_count?)) - } else { - Ok(0) - } - } - hash_map::Entry::Occupied(o) => Ok(*o.get()), - } - } - - // TODO Is this the same as the function above? - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - - - - /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - pub fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu's id. 
- pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { - return Ok(Some(Arc::clone(p))); - } - - if let Some(pdu) = self - .eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - .map(Arc::new) - }) - .transpose()? - { - self.pdu_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), Arc::clone(&pdu)); - Ok(Some(pdu)) - } else { - Ok(None) - } - } - - /// Returns the pdu. - /// - /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Returns the pdu as a `BTreeMap`. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Returns the `count` of this pdu's id. - pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) - } - - /// Removes a pdu and creates a new one with the same id. - #[tracing::instrument(skip(self))] - fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - if self.pduid_pdu.get(pdu_id)?.is_some() { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), - )?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "PDU does not exist.", - )) - } - } - - /// Creates a new persisted data unit and adds it to a room. - /// - /// By this point the incoming event should be fully authenticated, no auth happens - /// in `append_pdu`. - /// - /// Returns pdu id - #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] - pub fn append_pdu<'a>( - &self, - pdu: &PduEvent, - mut pdu_json: CanonicalJsonObject, - leaves: impl IntoIterator + Debug, - db: &Database, - ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); - - // Make unsigned fields correct. 
This is not properly documented in the spec, but state - // events need to have previous content in the unsigned field, so clients can easily - // interpret things like membership changes - if let Some(state_key) = &pdu.state_key { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) - { - if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) - .unwrap() - { - unsigned.insert( - "prev_content".to_owned(), - CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()) - .expect("event is valid, we just created it"), - ), - ); - } - } - } else { - error!("Invalid unsigned type in pdu."); - } - } - - // We must keep track of all events that have been referenced. - self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - self.replace_pdu_leaves(&pdu.room_id, leaves)?; - - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(pdu.room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - - let count1 = db.globals.next_count()?; - // Mark as read first so the sending client doesn't get a notification even if appending - // fails - self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; - self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; - - let count2 = db.globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&count2.to_be_bytes()); - - // There's a brief moment of time here where the count is updated but the pdu does not - // exist. This could theoretically lead to dropped pdus, but it's extremely rare - // - // Update: We fixed this using insert_lock - - self.pduid_pdu.insert( - &pdu_id, - &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - self.lasttimelinecount_cache - .lock() - .unwrap() - .insert(pdu.room_id.clone(), count2); - - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; - - drop(insert_lock); - - // See if the event matches any known pushers - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - let sync_pdu = pdu.to_sync_room_event(); - - let mut notifies = Vec::new(); - let mut highlights = Vec::new(); - - for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { - // Don't notify the user of their own events - if user == &pdu.sender { - continue; - } - - let rules_for_user = db - .account_data - .get( - None, - user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? - .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); - - let mut highlight = false; - let mut notify = false; - - for action in pusher::get_actions( - user, - &rules_for_user, - &power_levels, - &sync_pdu, - &pdu.room_id, - db, - )? 
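// A minimal sketch (not part of the patch) of the pdu_id layout built above:
// eight big-endian bytes of the room's short id followed by eight big-endian
// bytes of the monotonically increasing global count, so all pdu ids of one
// room share a prefix and sort in insertion order. `shortroomid` and `count`
// stand in for the values already obtained from get_shortroomid and
// globals.next_count.
fn make_pdu_id(shortroomid: u64, count: u64) -> Vec<u8> {
    let mut pdu_id = shortroomid.to_be_bytes().to_vec();
    pdu_id.extend_from_slice(&count.to_be_bytes());
    pdu_id // 16 bytes: prefix-searchable by room, ordered by count
}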
{ - match action { - Action::DontNotify => notify = false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => notify = true, - Action::SetTweak(Tweak::Highlight(true)) => { - highlight = true; - } - _ => {} - }; - } - - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(pdu.room_id.as_bytes()); - - if notify { - notifies.push(userroom_id.clone()); - } - - if highlight { - highlights.push(userroom_id); - } - - for senderkey in db.pusher.get_pusher_senderkeys(user) { - db.sending.send_push_pdu(&*pdu_id, senderkey)?; - } - } - - self.userroomid_notificationcount - .increment_batch(&mut notifies.into_iter())?; - self.userroomid_highlightcount - .increment_batch(&mut highlights.into_iter())?; - - match pdu.kind { - RoomEventType::RoomRedaction => { - if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(redact_id, pdu)?; - } - } - RoomEventType::RoomMember => { - if let Some(state_key) = &pdu.state_key { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - // if the state_key fails - let target_user_id = UserId::parse(state_key.clone()) - .expect("This state_key was previously validated"); - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - let invite_state = match content.membership { - MembershipState::Invite => { - let state = self.calculate_invite_state(pdu)?; - Some(state) - } - _ => None, - }; - - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( - &pdu.room_id, - &target_user_id, - content.membership, - &pdu.sender, - invite_state, - db, - true, - )?; - } - } - RoomEventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody<'a> { - #[serde(borrow)] - body: Option>, - } - - let content = serde_json::from_str::>(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - let mut batch = body - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= 50) - .map(str::to_lowercase) - .map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xff); - key.extend_from_slice(&pdu_id); - (key, Vec::new()) - }); - - self.tokenids.insert_batch(&mut batch)?; - - let admin_room = self.id_from_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )?; - let server_user = format!("@conduit:{}", db.globals.server_name()); - - let to_conduit = body.starts_with(&format!("{}: ", server_user)); - - // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let from_conduit = - pdu.sender == server_user && db.globals.emergency_password().is_none(); - - if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - db.admin.process_message(body.to_string()); - } - } - } - _ => {} - } - - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. 
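// Rough sketch of how the tokenids batch above is formed, assuming the same
// key layout: shortroomid (8 bytes, big endian) ++ lowercased word ++ 0xff ++
// pdu_id, stored with an empty value. Words longer than 50 bytes are skipped.
fn token_keys(shortroomid: u64, pdu_id: &[u8], body: &str) -> Vec<Vec<u8>> {
    body.split_terminator(|c: char| !c.is_alphanumeric())
        .filter(|s| !s.is_empty())
        .filter(|word| word.len() <= 50)
        .map(str::to_lowercase)
        .map(|word| {
            let mut key = shortroomid.to_be_bytes().to_vec();
            key.extend_from_slice(word.as_bytes());
            key.push(0xff);
            key.extend_from_slice(pdu_id);
            key
        })
        .collect()
}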
- if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - if let Some(appservice_uid) = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }) - { - if state_key_uid == &appservice_uid { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - } - } - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - - Ok(pdu_id) - } - - pub fn create_hash_and_sign_event( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex - ) -> (PduEvent, CanonicalJsonObj) { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - } = pdu_builder; - - let prev_events: Vec<_> = db - .rooms - .get_pdu_leaves(room_id)? 
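// Hedged sketch of the appservice namespace check above, with the
// registration already reduced to compiled regexes and an explicit room list
// (these names are placeholders, not Conduit APIs): a PDU is forwarded when
// its sender or, for m.room.member events, its state_key matches a user
// namespace, when any room alias matches an alias namespace, or when the
// room id is listed explicitly.
use regex::Regex;

fn appservice_interested(
    user_namespaces: &[Regex],
    alias_namespaces: &[Regex],
    explicit_rooms: &[String],
    sender: &str,
    member_state_key: Option<&str>,
    room_aliases: &[String],
    room_id: &str,
) -> bool {
    let users = user_namespaces.iter().any(|r| {
        r.is_match(sender) || member_state_key.map_or(false, |k| r.is_match(k))
    });
    let aliases = alias_namespaces
        .iter()
        .any(|r| room_aliases.iter().any(|a| r.is_match(a)));
    users || aliases || explicit_rooms.iter().any(|r| r == room_id)
}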
- .into_iter() - .take(20) - .collect(); - - let create_event = db - .rooms - .room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version + fn index_pdu<'a>(&self, room_id: &RoomId, pdu_id: u64, message_body: String) -> Result<()> { + let mut batch = body + .split_terminator(|c: char| !c.is_alphanumeric()) + .filter(|s| !s.is_empty()) + .filter(|word| word.len() <= 50) + .map(str::to_lowercase) + .map(|word| { + let mut key = shortroomid.to_be_bytes().to_vec(); + key.extend_from_slice(word.as_bytes()); + key.push(0xff); + key.extend_from_slice(&pdu_id); + (key, Vec::new()) }); - let room_version = - RoomVersion::new(&room_version_id).expect("room version is supported"); - - let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(db.rooms.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = unsigned.unwrap_or_default(); - if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - self.room_state_get(room_id, &event_type.to_string().into(), state_key)? - { - unsigned.insert( - "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), - ); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), - ); - } - } - - let pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender_user.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key, - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - to_canonical_value(db.globals.server_name()) - .expect("server name is a valid CanonicalJsonValue"), - ); - - match ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) { - Ok(_) => {} - Err(e) 
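// Tiny illustration of the depth rule used above: a freshly created event
// sits one level deeper than the deepest of its prev_events, and the first
// event of a room (no prev_events at all) ends up with depth 1.
fn next_depth(prev_event_depths: &[u64]) -> u64 {
    prev_event_depths.iter().copied().max().unwrap_or(0) + 1
}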
=> { - return match e { - ruma::signatures::Error::PduSize => Err(Error::BadRequest( - ErrorKind::TooLarge, - "Message is too long", - )), - _ => Err(Error::BadRequest( - ErrorKind::Unknown, - "Signing event failed", - )), - } - } - } - - // Generate event id - pdu.event_id = EventId::parse_arc(format!( - "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - pdu_json.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), - ); - - // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; - } - - /// Creates a new persisted data unit and adds it to a room. This function takes a - /// roomid_mutex_state, meaning that only this function is able to mutate the room state. - #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex - ) -> Result> { - - let (pdu, pdu_json) = create_hash_and_sign_event()?; - - - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. - let statehashid = self.append_to_state(&pdu, &db.globals)?; - - let pdu_id = self.append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - iter::once(&*pdu.event_id), - db, - )?; - - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; - - let mut servers: HashSet> = - self.room_servers(room_id).filter_map(|r| r.ok()).collect(); - - // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - servers.insert(Box::from(state_key_uid.server_name())); - } - } - - // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(db.globals.server_name()); - - db.sending.send_pdu(servers.into_iter(), &pdu_id)?; - - Ok(pdu.event_id) - } - - /// Append the incoming event setting the state snapshot to the state from the - /// server that sent the event. - #[tracing::instrument(skip_all)] - fn append_incoming_pdu<'a>( - db: &Database, - pdu: &PduEvent, - pdu_json: CanonicalJsonObject, - new_room_leaves: impl IntoIterator + Clone + Debug, - state_ids_compressed: HashSet, - soft_fail: bool, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex - ) -> Result>> { - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. 
- db.rooms.set_event_state( - &pdu.event_id, - &pdu.room_id, - state_ids_compressed, - &db.globals, - )?; - - if soft_fail { - db.rooms - .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - db.rooms.replace_pdu_leaves(&pdu.room_id, new_room_leaves)?; - return Ok(None); - } - - let pdu_id = db.rooms.append_pdu(pdu, pdu_json, new_room_leaves, db)?; - - Ok(Some(pdu_id)) - } - - /// Returns an iterator over all PDUs in a room. - #[tracing::instrument(skip(self))] - pub fn all_pdus<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result, PduEvent)>> + 'a> { - self.pdus_since(user_id, room_id, 0) + self.tokenids.insert_batch(&mut batch)?; } - /// Returns an iterator over all events in a room that happened after the event with id `since` - /// in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_since<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - since: u64, - ) -> Result, PduEvent)>> + 'a> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Skip the first pdu if it's exactly at since, because we sent that last time - let mut first_pdu_id = prefix.clone(); - first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(&first_pdu_id, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their tokens in a room that happened before the - /// event with id `until` in reverse-chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_until<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - until: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their token in a room that happened after the event - /// with id `from` in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_after<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - from: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? 
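// Sketch of the timeline range scan used by pdus_since/pdus_until above,
// assuming pdu ids are shortroomid ++ count (both 8-byte big endian): the
// scan starts at prefix ++ (since + 1) and keeps going while keys still
// carry the room prefix, so "everything after `since`" is one ordered
// prefix walk. `sorted_keys` is only a stand-in for the pduid_pdu tree.
fn pdu_ids_after(sorted_keys: &[Vec<u8>], shortroomid: u64, since: u64) -> Vec<Vec<u8>> {
    let prefix = shortroomid.to_be_bytes().to_vec();
    let mut start = prefix.clone();
    start.extend_from_slice(&(since + 1).to_be_bytes());

    sorted_keys
        .iter()
        .filter(|k| **k >= start)               // corresponds to iter_from(&start, false)
        .take_while(|k| k.starts_with(&prefix)) // stay inside this room
        .cloned()
        .collect()
}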
- .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Replace a PDU with the redacted form. - #[tracing::instrument(skip(self, reason))] - pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { - if let Some(pdu_id) = self.get_pdu_id(event_id)? { - let mut pdu = self - .get_pdu_from_id(&pdu_id)? - .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact(reason)?; - self.replace_pdu(&pdu_id, &pdu)?; - } - // If event does not exist, just noop - Ok(()) - } - - - #[tracing::instrument(skip(self))] - pub fn search_pdus<'a>( + fn search_pdus<'a>( &'a self, room_id: &RoomId, search_string: &str, @@ -997,4 +64,3 @@ ) })) } - diff --git a/src/database/key_value/room/state.rs b/src/database/key_value/rooms/state.rs similarity index 100% rename from src/database/key_value/room/state.rs rename to src/database/key_value/rooms/state.rs diff --git a/src/service/rooms/search/data.rs b/src/service/rooms/search/data.rs new file mode 100644 index 00000000..1601e0de --- /dev/null +++ b/src/service/rooms/search/data.rs @@ -0,0 +1,9 @@ +pub trait Data { + pub fn index_pdu<'a>(&self, room_id: &RoomId, pdu_id: u64, message_body: String) -> Result<()>; + + pub fn search_pdus<'a>( + &'a self, + room_id: &RoomId, + search_string: &str, + ) -> Result> + 'a, Vec)>>; +} diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index ce055058..5478273c 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -1,50 +1,19 @@ +mod data; +pub use data::Data; +use crate::service::*; + +pub struct Service { + db: D, +} + +impl Service<_> { #[tracing::instrument(skip(self))] pub fn search_pdus<'a>( &'a self, room_id: &RoomId, search_string: &str, ) -> Result> + 'a, Vec)>> { - let prefix = self - .get_shortroomid(room_id)? 
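// Rough sketch of the search strategy that is being moved behind the Data
// trait here: every query word gets its own newest-first stream of pdu
// counts (taken from the tokenids prefix shortroomid ++ word ++ 0xff), and
// only the counts present in every stream survive. `streams` stands in for
// those per-word iterators.
fn intersect_newest_first(streams: &[Vec<u64>]) -> Vec<u64> {
    let (first, rest) = match streams.split_first() {
        Some(split) => split,
        None => return Vec::new(),
    };
    first
        .iter()
        .copied()
        .filter(|id| rest.iter().all(|s| s.contains(id)))
        .collect()
}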
- .expect("room exists") - .to_be_bytes() - .to_vec(); - let prefix_clone = prefix.clone(); - - let words: Vec<_> = search_string - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .map(str::to_lowercase) - .collect(); - - let iterators = words.clone().into_iter().map(move |word| { - let mut prefix2 = prefix.clone(); - prefix2.extend_from_slice(word.as_bytes()); - prefix2.push(0xff); - - let mut last_possible_id = prefix2.clone(); - last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.tokenids - .iter_from(&last_possible_id, true) // Newest pdus first - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) - }); - - Ok(utils::common_elements(iterators, |a, b| { - // We compare b with a because we reversed the iterator earlier - b.cmp(a) - }) - .map(|iter| { - ( - iter.map(move |id| { - let mut pduid = prefix_clone.clone(); - pduid.extend_from_slice(&id); - pduid - }), - words, - ) - })) + self.db.search_pdus(room_id, search_string) } - +} diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 6299b16c..5b423d2d 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -439,20 +439,7 @@ .map_err(|_| Error::bad_database("Invalid content in pdu."))?; if let Some(body) = content.body { - let mut batch = body - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= 50) - .map(str::to_lowercase) - .map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xff); - key.extend_from_slice(&pdu_id); - (key, Vec::new()) - }); - - self.tokenids.insert_batch(&mut batch)?; + DB.rooms.search.index_pdu(room_id, pdu_id, body)?; let admin_room = self.id_from_alias( <&RoomAliasId>::try_from( From c8f64844ab81ede5b52409a1c640692fea43e5c1 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 11:17:34 +0200 Subject: [PATCH 355/445] refactor: prepare service/rooms/auth_chain/mod.rs from service/rooms/state_compressor/mod.rs --- src/service/rooms/{state_compressor => auth_chain}/mod.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/rooms/{state_compressor => auth_chain}/mod.rs (100%) diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/auth_chain/mod.rs similarity index 100% rename from src/service/rooms/state_compressor/mod.rs rename to src/service/rooms/auth_chain/mod.rs From 691e69847fff26cc83869c103c125d6cefbb0b99 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 11:17:34 +0200 Subject: [PATCH 356/445] refactor: prepare database/key_value/rooms/auth_chain.rs from service/rooms/state_compressor/mod.rs --- .../mod.rs => database/key_value/rooms/auth_chain.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service/rooms/state_compressor/mod.rs => database/key_value/rooms/auth_chain.rs} (100%) diff --git a/src/service/rooms/state_compressor/mod.rs b/src/database/key_value/rooms/auth_chain.rs similarity index 100% rename from src/service/rooms/state_compressor/mod.rs rename to src/database/key_value/rooms/auth_chain.rs From 8d0ed3ec51e077ad710dd435be69c17b8a505e5f Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 11:17:34 +0200 Subject: [PATCH 357/445] refactor: prepare database/key_value/rooms/state_compressor.rs from service/rooms/state_compressor/mod.rs --- .../mod.rs => 
database/key_value/rooms/state_compressor.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service/rooms/state_compressor/mod.rs => database/key_value/rooms/state_compressor.rs} (100%) diff --git a/src/service/rooms/state_compressor/mod.rs b/src/database/key_value/rooms/state_compressor.rs similarity index 100% rename from src/service/rooms/state_compressor/mod.rs rename to src/database/key_value/rooms/state_compressor.rs From e045abe96182cf61b59853b056007927049fca93 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 10 Jul 2022 17:23:26 +0200 Subject: [PATCH 358/445] refactor: work on auth chain and state compressor --- src/database/key_value/rooms/auth_chain.rs | 374 +----------------- .../key_value/rooms/state_compressor.rs | 338 +--------------- src/service/rooms/auth_chain/data.rs | 4 + src/service/rooms/auth_chain/mod.rs | 341 +--------------- src/service/rooms/state_compressor/data.rs | 10 + src/service/rooms/state_compressor/mod.rs | 121 +----- 6 files changed, 79 insertions(+), 1109 deletions(-) create mode 100644 src/service/rooms/auth_chain/data.rs create mode 100644 src/service/rooms/state_compressor/data.rs diff --git a/src/database/key_value/rooms/auth_chain.rs b/src/database/key_value/rooms/auth_chain.rs index 197ce844..57dbb147 100644 --- a/src/database/key_value/rooms/auth_chain.rs +++ b/src/database/key_value/rooms/auth_chain.rs @@ -1,358 +1,24 @@ - - /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. - #[tracing::instrument(skip(self))] - pub fn load_shortstatehash_info( - &self, - shortstatehash: u64, - ) -> Result< - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - > { - if let Some(r) = self - .stateinfo_cache - .lock() - .unwrap() - .get_mut(&shortstatehash) - { - return Ok(r.clone()); - } - - let value = self - .shortstatehash_statediff - .get(&shortstatehash.to_be_bytes())? - .ok_or_else(|| Error::bad_database("State hash does not exist"))?; - let parent = - utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - - let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); - - let mut i = size_of::(); - while let Some(v) = value.get(i..i + 2 * size_of::()) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i += size_of::(); - continue; - } - if add_mode { - added.insert(v.try_into().expect("we checked the size above")); - } else { - removed.insert(v.try_into().expect("we checked the size above")); - } - i += 2 * size_of::(); - } - - if parent != 0_u64 { - let mut response = self.load_shortstatehash_info(parent)?; - let mut state = response.last().unwrap().1.clone(); - state.extend(added.iter().copied()); - for r in &removed { - state.remove(r); - } - - response.push((shortstatehash, state, added, removed)); - - Ok(response) - } else { - let response = vec![(shortstatehash, added.clone(), added, removed)]; - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); - Ok(response) - } - } - - pub fn compress_state_event( - &self, - shortstatekey: u64, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - let mut v = shortstatekey.to_be_bytes().to_vec(); - v.extend_from_slice( - &self - .get_or_create_shorteventid(event_id, globals)? 
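// Sketch of the CompressedStateEvent layout this whole patch revolves
// around: 8 big-endian bytes of shortstatekey followed by 8 big-endian bytes
// of shorteventid, so a room state snapshot is just a set of 16-byte keys.
// The real parse_compressed_state_event additionally resolves the
// shorteventid back to its EventId through the short-id tables.
fn compress(shortstatekey: u64, shorteventid: u64) -> [u8; 16] {
    let mut v = [0u8; 16];
    v[..8].copy_from_slice(&shortstatekey.to_be_bytes());
    v[8..].copy_from_slice(&shorteventid.to_be_bytes());
    v
}

fn parse(compressed: &[u8; 16]) -> (u64, u64) {
    let mut statekey = [0u8; 8];
    let mut eventid = [0u8; 8];
    statekey.copy_from_slice(&compressed[..8]);
    eventid.copy_from_slice(&compressed[8..]);
    (u64::from_be_bytes(statekey), u64::from_be_bytes(eventid))
}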
- .to_be_bytes(), - ); - Ok(v.try_into().expect("we checked the size above")) - } - - /// Returns shortstatekey, event id - pub fn parse_compressed_state_event( - &self, - compressed_event: CompressedStateEvent, - ) -> Result<(u64, Arc)> { - Ok(( - utils::u64_from_bytes(&compressed_event[0..size_of::()]) - .expect("bytes have right length"), - self.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]) - .expect("bytes have right length"), - )?, - )) - } - - /// Creates a new shortstatehash that often is just a diff to an already existing - /// shortstatehash and therefore very efficient. - /// - /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer - /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 - /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's - /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. - /// - /// * `shortstatehash` - Shortstatehash of this state - /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid - /// * `statediffremoved` - Removed from base. Each vec is shortstatekey+shorteventid - /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer - /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer - #[tracing::instrument(skip( - self, - statediffnew, - statediffremoved, - diff_to_sibling, - parent_states - ))] - pub fn save_state_from_diff( - &self, - shortstatehash: u64, - statediffnew: HashSet, - statediffremoved: HashSet, - diff_to_sibling: usize, - mut parent_states: Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - ) -> Result<()> { - let diffsum = statediffnew.len() + statediffremoved.len(); - - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().unwrap(); - - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - - return Ok(()); - } - - if parent_states.is_empty() { - // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - - return Ok(()); - }; - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. 
We replace a layer above - - let parent = parent_states.pop().unwrap(); - let parent_diff = parent.2.len() + parent.3.len(); - - if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { - // Diff too big, we replace above layer(s) - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.to_be_bytes().to_vec(); - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed[..]); - } - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - } - - Ok(()) - } - - /// Returns the new shortstatehash - pub fn save_state( - room_id: &RoomId, - new_state_ids_compressed: HashSet, - ) -> Result<(u64, - HashSet, // added - HashSet)> // removed - { - let previous_shortstatehash = self.d.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - Ok((new_shortstatehash, statediffnew, statediffremoved)) - } - - #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( - &'a self, - key: &[u64], - ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); - } - - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? - .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() +impl service::room::auth_chain::Data for KeyValueDatabase { + fn get_cached_eventid_authchain<'a>() -> Result> { + self.shorteventid_authchain + .get(&shorteventid.to_be_bytes())? 
+ .map(|chain| { + chain + .chunks_exact(size_of::()) + .map(|chunk| { + utils::u64_from_bytes(chunk).expect("byte length is correct") }) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } - } - - Ok(None) + .collect() + }) } - #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db - if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - )?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); - - Ok(()) + fn cache_eventid_authchain<'a>(shorteventid: u64, auth_chain: &HashSet) -> Result<()> { + shorteventid_authchain.insert( + &shorteventid.to_be_bytes(), + &auth_chain + .iter() + .flat_map(|s| s.to_be_bytes().to_vec()) + .collect::>(), + ) } +} diff --git a/src/database/key_value/rooms/state_compressor.rs b/src/database/key_value/rooms/state_compressor.rs index 197ce844..71a2f3a0 100644 --- a/src/database/key_value/rooms/state_compressor.rs +++ b/src/database/key_value/rooms/state_compressor.rs @@ -1,26 +1,5 @@ - - /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. - #[tracing::instrument(skip(self))] - pub fn load_shortstatehash_info( - &self, - shortstatehash: u64, - ) -> Result< - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - > { - if let Some(r) = self - .stateinfo_cache - .lock() - .unwrap() - .get_mut(&shortstatehash) - { - return Ok(r.clone()); - } - +impl service::room::state_compressor::Data for KeyValueDatabase { + fn get_statediff(shortstatehash: u64) -> Result { let value = self .shortstatehash_statediff .get(&shortstatehash.to_be_bytes())? @@ -47,312 +26,23 @@ i += 2 * size_of::(); } - if parent != 0_u64 { - let mut response = self.load_shortstatehash_info(parent)?; - let mut state = response.last().unwrap().1.clone(); - state.extend(added.iter().copied()); - for r in &removed { - state.remove(r); - } - - response.push((shortstatehash, state, added, removed)); - - Ok(response) - } else { - let response = vec![(shortstatehash, added.clone(), added, removed)]; - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); - Ok(response) - } - } - - pub fn compress_state_event( - &self, - shortstatekey: u64, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - let mut v = shortstatekey.to_be_bytes().to_vec(); - v.extend_from_slice( - &self - .get_or_create_shorteventid(event_id, globals)? - .to_be_bytes(), - ); - Ok(v.try_into().expect("we checked the size above")) - } - - /// Returns shortstatekey, event id - pub fn parse_compressed_state_event( - &self, - compressed_event: CompressedStateEvent, - ) -> Result<(u64, Arc)> { - Ok(( - utils::u64_from_bytes(&compressed_event[0..size_of::()]) - .expect("bytes have right length"), - self.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]) - .expect("bytes have right length"), - )?, - )) + StateDiff { parent, added, removed } } - /// Creates a new shortstatehash that often is just a diff to an already existing - /// shortstatehash and therefore very efficient. - /// - /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. 
Layer - /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 - /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's - /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. - /// - /// * `shortstatehash` - Shortstatehash of this state - /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid - /// * `statediffremoved` - Removed from base. Each vec is shortstatekey+shorteventid - /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer - /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer - #[tracing::instrument(skip( - self, - statediffnew, - statediffremoved, - diff_to_sibling, - parent_states - ))] - pub fn save_state_from_diff( - &self, - shortstatehash: u64, - statediffnew: HashSet, - statediffremoved: HashSet, - diff_to_sibling: usize, - mut parent_states: Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - ) -> Result<()> { - let diffsum = statediffnew.len() + statediffremoved.len(); - - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().unwrap(); - - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - - return Ok(()); + fn save_statediff(shortstatehash: u64, diff: StateDiff) -> Result<()> { + let mut value = diff.parent.to_be_bytes().to_vec(); + for new in &diff.new { + value.extend_from_slice(&new[..]); } - if parent_states.is_empty() { - // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); + if !diff.removed.is_empty() { + value.extend_from_slice(&0_u64.to_be_bytes()); + for removed in &diff.removed { + value.extend_from_slice(&removed[..]); } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - - return Ok(()); - }; - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. We replace a layer above - - let parent = parent_states.pop().unwrap(); - let parent_diff = parent.2.len() + parent.3.len(); - - if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { - // Diff too big, we replace above layer(s) - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. 
We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.to_be_bytes().to_vec(); - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed[..]); - } - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - } - - Ok(()) - } - - /// Returns the new shortstatehash - pub fn save_state( - room_id: &RoomId, - new_state_ids_compressed: HashSet, - ) -> Result<(u64, - HashSet, // added - HashSet)> // removed - { - let previous_shortstatehash = self.d.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - Ok((new_shortstatehash, statediffnew, statediffremoved)) - } - - #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( - &'a self, - key: &[u64], - ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); } - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? 
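// Sketch of the statediff value layout that the new get_statediff and
// save_statediff agree on, assuming compressed state events are fixed
// 16-byte keys: the parent shortstatehash (8 bytes big endian, 0 meaning
// "no parent"), then every added entry, then, only if something was removed,
// an all-zero 8-byte separator followed by the removed entries.
fn encode_statediff(parent: u64, added: &[[u8; 16]], removed: &[[u8; 16]]) -> Vec<u8> {
    let mut value = parent.to_be_bytes().to_vec();
    for new in added {
        value.extend_from_slice(new);
    }
    if !removed.is_empty() {
        value.extend_from_slice(&0_u64.to_be_bytes());
        for gone in removed {
            value.extend_from_slice(gone);
        }
    }
    value
}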
- .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - }) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } - } - - Ok(None) - } - - #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db - if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - )?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); - - Ok(()) + self.shortstatehash_statediff + .insert(&shortstatehash.to_be_bytes(), &value)?; } +} diff --git a/src/service/rooms/auth_chain/data.rs b/src/service/rooms/auth_chain/data.rs new file mode 100644 index 00000000..d8fde958 --- /dev/null +++ b/src/service/rooms/auth_chain/data.rs @@ -0,0 +1,4 @@ +pub trait Data { + fn get_cached_eventid_authchain<'a>() -> Result>; + fn cache_eventid_authchain<'a>(shorteventid: u64, auth_chain: &HashSet) -> Result>; +} diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index 197ce844..dfc289f3 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -1,327 +1,27 @@ +mod data; +pub use data::Data; - /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. - #[tracing::instrument(skip(self))] - pub fn load_shortstatehash_info( - &self, - shortstatehash: u64, - ) -> Result< - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - > { - if let Some(r) = self - .stateinfo_cache - .lock() - .unwrap() - .get_mut(&shortstatehash) - { - return Ok(r.clone()); - } - - let value = self - .shortstatehash_statediff - .get(&shortstatehash.to_be_bytes())? - .ok_or_else(|| Error::bad_database("State hash does not exist"))?; - let parent = - utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - - let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); - - let mut i = size_of::(); - while let Some(v) = value.get(i..i + 2 * size_of::()) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i += size_of::(); - continue; - } - if add_mode { - added.insert(v.try_into().expect("we checked the size above")); - } else { - removed.insert(v.try_into().expect("we checked the size above")); - } - i += 2 * size_of::(); - } - - if parent != 0_u64 { - let mut response = self.load_shortstatehash_info(parent)?; - let mut state = response.last().unwrap().1.clone(); - state.extend(added.iter().copied()); - for r in &removed { - state.remove(r); - } - - response.push((shortstatehash, state, added, removed)); - - Ok(response) - } else { - let response = vec![(shortstatehash, added.clone(), added, removed)]; - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); - Ok(response) - } - } - - pub fn compress_state_event( - &self, - shortstatekey: u64, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - let mut v = shortstatekey.to_be_bytes().to_vec(); - v.extend_from_slice( - &self - .get_or_create_shorteventid(event_id, globals)? 
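// Hedged sketch of the service/data split this patch series is introducing
// (type names simplified here; the real code wires this up through
// KeyValueDatabase): each rooms service owns a `db` field whose type only
// has to implement the matching Data trait, so the algorithm lives in
// service/rooms/* while the key-value layout lives in
// database/key_value/rooms/*.
trait AuthChainData {
    fn get_cached_eventid_authchain(&self, shorteventid: u64) -> Option<Vec<u64>>;
    fn cache_eventid_authchain(&self, shorteventid: u64, auth_chain: &[u64]);
}

struct AuthChainService<D: AuthChainData> {
    db: D,
}

impl<D: AuthChainData> AuthChainService<D> {
    fn cached_chain(&self, shorteventid: u64) -> Option<Vec<u64>> {
        // the service decides when to consult the cache;
        // the Data impl decides how the bytes are laid out
        self.db.get_cached_eventid_authchain(shorteventid)
    }
}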
- .to_be_bytes(), - ); - Ok(v.try_into().expect("we checked the size above")) - } - - /// Returns shortstatekey, event id - pub fn parse_compressed_state_event( - &self, - compressed_event: CompressedStateEvent, - ) -> Result<(u64, Arc)> { - Ok(( - utils::u64_from_bytes(&compressed_event[0..size_of::()]) - .expect("bytes have right length"), - self.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]) - .expect("bytes have right length"), - )?, - )) - } - - /// Creates a new shortstatehash that often is just a diff to an already existing - /// shortstatehash and therefore very efficient. - /// - /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer - /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 - /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's - /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. - /// - /// * `shortstatehash` - Shortstatehash of this state - /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid - /// * `statediffremoved` - Removed from base. Each vec is shortstatekey+shorteventid - /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer - /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer - #[tracing::instrument(skip( - self, - statediffnew, - statediffremoved, - diff_to_sibling, - parent_states - ))] - pub fn save_state_from_diff( - &self, - shortstatehash: u64, - statediffnew: HashSet, - statediffremoved: HashSet, - diff_to_sibling: usize, - mut parent_states: Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - ) -> Result<()> { - let diffsum = statediffnew.len() + statediffremoved.len(); - - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().unwrap(); - - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - - return Ok(()); - } - - if parent_states.is_empty() { - // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - - return Ok(()); - }; - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. 
We replace a layer above - - let parent = parent_states.pop().unwrap(); - let parent_diff = parent.2.len() + parent.3.len(); - - if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { - // Diff too big, we replace above layer(s) - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } +use crate::service::*; - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.to_be_bytes().to_vec(); - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed[..]); - } - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - } - - Ok(()) - } - - /// Returns the new shortstatehash - pub fn save_state( - room_id: &RoomId, - new_state_ids_compressed: HashSet, - ) -> Result<(u64, - HashSet, // added - HashSet)> // removed - { - let previous_shortstatehash = self.d.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - Ok((new_shortstatehash, statediffnew, statediffremoved)) - } +pub struct Service { + db: D, +} +impl Service<_> { #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( + pub fn get_cached_eventid_authchain<'a>( &'a self, key: &[u64], ) -> Result>>> { // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { + if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key.to_be_bytes()) { return Ok(Some(Arc::clone(result))); } - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? 
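// Sketch of how save_state above derives the diff against the previous
// state: whatever the new snapshot contains that the parent's full state
// lacks is "added", whatever the parent had that the new snapshot lacks is
// "removed". In the real code the set elements are the 16-byte compressed
// state events described earlier.
use std::collections::HashSet;

fn diff_against_parent<T: Clone + Eq + std::hash::Hash>(
    new_state: &HashSet<T>,
    parent_state: &HashSet<T>,
) -> (HashSet<T>, HashSet<T>) {
    let added = new_state.difference(parent_state).cloned().collect();
    let removed = parent_state.difference(new_state).cloned().collect();
    (added, removed)
}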
- .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - }) + // We only save auth chains for single events in the db + if key.len == 1 { + // Check DB cache + if let Some(chain) = self.db.get_cached_eventid_authchain(key[0]) { let chain = Arc::new(chain); @@ -339,20 +39,15 @@ } #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db + pub fn cache_auth_chain(&self, key: Vec, auth_chain: Arc>) -> Result<()> { + // Only persist single events in db if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - )?; + self.db.cache_auth_chain(key[0], auth_chain)?; } // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); + self.auth_chain_cache.lock().unwrap().insert(key, auth_chain); Ok(()) } +} diff --git a/src/service/rooms/state_compressor/data.rs b/src/service/rooms/state_compressor/data.rs new file mode 100644 index 00000000..8b855cd2 --- /dev/null +++ b/src/service/rooms/state_compressor/data.rs @@ -0,0 +1,10 @@ +struct StateDiff { + parent: Option, + added: Vec, + removed: Vec, +} + +pub trait Data { + fn get_statediff(shortstatehash: u64) -> Result; + fn save_statediff(shortstatehash: u64, diff: StateDiff) -> Result<()>; +} diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index 197ce844..d6d88e25 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -1,4 +1,13 @@ +mod data; +pub use data::Data; +use crate::service::*; + +pub struct Service { + db: D, +} + +impl Service<_> { /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. #[tracing::instrument(skip(self))] pub fn load_shortstatehash_info( @@ -21,31 +30,7 @@ return Ok(r.clone()); } - let value = self - .shortstatehash_statediff - .get(&shortstatehash.to_be_bytes())? 
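// A sketch of how a key-value backend might implement the new state_compressor
// Data trait, reconstructed from the shortstatehash_statediff format being removed
// in this hunk: parent shortstatehash as a big-endian u64 (0 meaning "no parent"),
// then the added compressed events, then a 0_u64 separator, then the removed ones.
// Field names follow the new data.rs; the exact types (CompressedStateEvent entries,
// Option<u64> parent) and error handling are assumptions, not part of this patch.
use std::mem::size_of;

impl service::rooms::state_compressor::Data for KeyValueDatabase {
    fn get_statediff(&self, shortstatehash: u64) -> Result<StateDiff> {
        let value = self
            .shortstatehash_statediff
            .get(&shortstatehash.to_be_bytes())?
            .ok_or_else(|| Error::bad_database("State hash does not exist"))?;
        let parent = utils::u64_from_bytes(&value[0..size_of::<u64>()])
            .expect("bytes have right length");
        let parent = if parent != 0 { Some(parent) } else { None };

        let mut add_mode = true;
        let mut added = Vec::new();
        let mut removed = Vec::new();

        let mut i = size_of::<u64>();
        while let Some(v) = value.get(i..i + 2 * size_of::<u64>()) {
            if add_mode && v.starts_with(&0_u64.to_be_bytes()) {
                // The 0_u64 marker separates the added entries from the removed ones
                add_mode = false;
                i += size_of::<u64>();
                continue;
            }
            if add_mode {
                added.push(v.try_into().expect("we checked the size above"));
            } else {
                removed.push(v.try_into().expect("we checked the size above"));
            }
            i += 2 * size_of::<u64>();
        }

        Ok(StateDiff { parent, added, removed })
    }

    fn save_statediff(&self, shortstatehash: u64, diff: StateDiff) -> Result<()> {
        // Serialize back into the same layout: parent, added entries, 0_u64, removed entries
        let mut value = diff.parent.unwrap_or(0).to_be_bytes().to_vec();
        for new in &diff.added {
            value.extend_from_slice(&new[..]);
        }
        if !diff.removed.is_empty() {
            value.extend_from_slice(&0_u64.to_be_bytes());
            for removed in &diff.removed {
                value.extend_from_slice(&removed[..]);
            }
        }
        self.shortstatehash_statediff
            .insert(&shortstatehash.to_be_bytes(), &value)
    }
}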
- .ok_or_else(|| Error::bad_database("State hash does not exist"))?; - let parent = - utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - - let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); - - let mut i = size_of::(); - while let Some(v) = value.get(i..i + 2 * size_of::()) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i += size_of::(); - continue; - } - if add_mode { - added.insert(v.try_into().expect("we checked the size above")); - } else { - removed.insert(v.try_into().expect("we checked the size above")); - } - i += 2 * size_of::(); - } + self.db.get_statediff(shortstatehash)?; if parent != 0_u64 { let mut response = self.load_shortstatehash_info(parent)?; @@ -170,17 +155,7 @@ if parent_states.is_empty() { // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; + self.db.save_statediff(shortstatehash, StateDiff { parent: 0, new: statediffnew, removed: statediffremoved })?; return Ok(()); }; @@ -222,20 +197,7 @@ )?; } else { // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.to_be_bytes().to_vec(); - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed[..]); - } - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; + self.db.save_statediff(shortstatehash, StateDiff { parent: parent.0, new: statediffnew, removed: statediffremoved })?; } Ok(()) @@ -298,61 +260,4 @@ Ok((new_shortstatehash, statediffnew, statediffremoved)) } - - #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( - &'a self, - key: &[u64], - ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); - } - - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? 
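// A worked example, with made-up numbers, of the layer-replacement check that
// save_state_from_diff keeps using across this refactor
// (diffsum * diffsum >= 2 * diff_to_sibling * parent_diff): with a new diff of
// 40 entries, sibling diffs averaging 10 entries and a parent diff of 50 entries,
// 40 * 40 = 1600 >= 2 * 10 * 50 = 1000, so the diff is folded into the parent
// layer; with a new diff of only 20 entries, 20 * 20 = 400 < 1000, so it is
// stored as a new layer on top of the parent instead.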
- .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - }) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } - } - - Ok(None) - } - - #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db - if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - )?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); - - Ok(()) - } +} From 306ff5ee4e95e71284a2c3c2da17fe4ca0bc8162 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 12:56:11 +0200 Subject: [PATCH 359/445] refactor: prepare database/key_value/users.rs from service/users.rs --- src/{service => database/key_value}/users.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service => database/key_value}/users.rs (100%) diff --git a/src/service/users.rs b/src/database/key_value/users.rs similarity index 100% rename from src/service/users.rs rename to src/database/key_value/users.rs From e62b0904ea78d740a1c1f9bc7af264f17c458797 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 12:56:11 +0200 Subject: [PATCH 360/445] refactor: prepare database/key_value/pusher.rs from service/pusher.rs --- src/{service => database/key_value}/pusher.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service => database/key_value}/pusher.rs (100%) diff --git a/src/service/pusher.rs b/src/database/key_value/pusher.rs similarity index 100% rename from src/service/pusher.rs rename to src/database/key_value/pusher.rs From cb9458122cb49a7dc2e6994e587f0142e8ef3105 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 12:56:12 +0200 Subject: [PATCH 361/445] refactor: prepare service/pusher/data.rs from service/pusher.rs --- src/service/{pusher.rs => pusher/data.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/{pusher.rs => pusher/data.rs} (100%) diff --git a/src/service/pusher.rs b/src/service/pusher/data.rs similarity index 100% rename from src/service/pusher.rs rename to src/service/pusher/data.rs From 70863260f65f2cffb24ce0ffdd686bce957a77f1 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 12:56:12 +0200 Subject: [PATCH 362/445] refactor: prepare service/pusher/mod.rs from service/pusher.rs --- src/service/{pusher.rs => pusher/mod.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/{pusher.rs => pusher/mod.rs} (100%) diff --git a/src/service/pusher.rs b/src/service/pusher/mod.rs similarity index 100% rename from src/service/pusher.rs rename to src/service/pusher/mod.rs From 94ce06bb76df4b163041fe9811b7439f443fbad8 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 12:56:12 +0200 Subject: [PATCH 363/445] refactor: prepare service/users/data.rs from service/users.rs --- src/service/{users.rs => users/data.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/{users.rs => users/data.rs} (100%) diff --git a/src/service/users.rs b/src/service/users/data.rs similarity index 100% rename from src/service/users.rs rename to src/service/users/data.rs From dc7670f3a85210bd62c8f9138be1f1ec05524525 Mon Sep 17 00:00:00 2001 
From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 12:56:12 +0200 Subject: [PATCH 364/445] refactor: prepare service/users/mod.rs from service/users.rs --- src/service/{users.rs => users/mod.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/{users.rs => users/mod.rs} (100%) diff --git a/src/service/users.rs b/src/service/users/mod.rs similarity index 100% rename from src/service/users.rs rename to src/service/users/mod.rs From e8b33e8c5a16abb8763f0d6e2ff6fcb3ff956865 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 12:56:13 +0200 Subject: [PATCH 365/445] refactor: prepare service/rooms/timeline/data.rs from service/rooms/timeline/mod.rs --- src/service/rooms/timeline/{mod.rs => data.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/rooms/timeline/{mod.rs => data.rs} (100%) diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/data.rs similarity index 100% rename from src/service/rooms/timeline/mod.rs rename to src/service/rooms/timeline/data.rs From bea5d1e0d8dc5c4395dcbae5c251915a91f6079d Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 12:56:13 +0200 Subject: [PATCH 366/445] refactor: prepare database/key_value/rooms/timeline.rs from service/rooms/timeline/mod.rs --- .../timeline/mod.rs => database/key_value/rooms/timeline.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service/rooms/timeline/mod.rs => database/key_value/rooms/timeline.rs} (100%) diff --git a/src/service/rooms/timeline/mod.rs b/src/database/key_value/rooms/timeline.rs similarity index 100% rename from src/service/rooms/timeline/mod.rs rename to src/database/key_value/rooms/timeline.rs From f56424bc8d8d582c52be91116ceb29d69791c563 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 7 Aug 2022 19:42:22 +0200 Subject: [PATCH 367/445] Refactor appservices, pusher, timeline, transactionids, users --- .../key_value}/appservice.rs | 24 +- src/database/key_value/pusher.rs | 302 +----- src/database/key_value/rooms/timeline.rs | 663 +----------- .../key_value}/transaction_ids.rs | 13 +- src/database/key_value/users.rs | 148 +-- src/service/appservice/data.rs | 17 + src/service/appservice/mod.rs | 36 + src/service/globals.rs | 14 +- src/service/pusher.rs | 348 ------- src/service/pusher/data.rs | 346 +------ src/service/pusher/mod.rs | 543 +++++----- src/service/rooms/short/mod.rs | 11 +- src/service/rooms/timeline/data.rs | 901 +--------------- src/service/rooms/timeline/mod.rs | 232 +---- src/service/transaction_ids/data.rs | 16 + src/service/transaction_ids/mod.rs | 44 + src/service/users/data.rs | 961 +----------------- src/service/users/mod.rs | 845 +-------------- 18 files changed, 530 insertions(+), 4934 deletions(-) rename src/{service => database/key_value}/appservice.rs (77%) rename src/{service => database/key_value}/transaction_ids.rs (77%) create mode 100644 src/service/appservice/data.rs create mode 100644 src/service/appservice/mod.rs delete mode 100644 src/service/pusher.rs create mode 100644 src/service/transaction_ids/data.rs create mode 100644 src/service/transaction_ids/mod.rs diff --git a/src/service/appservice.rs b/src/database/key_value/appservice.rs similarity index 77% rename from src/service/appservice.rs rename to src/database/key_value/appservice.rs index edd5009b..66a2a5c8 100644 --- a/src/service/appservice.rs +++ b/src/database/key_value/appservice.rs @@ -1,19 +1,5 @@ -use crate::{utils, Error, Result}; -use std::{ - 
collections::HashMap, - sync::{Arc, RwLock}, -}; - -use super::abstraction::Tree; - -pub struct Appservice { - pub(super) cached_registrations: Arc>>, - pub(super) id_appserviceregistrations: Arc, -} - -impl Appservice { +impl service::appservice::Data for KeyValueDatabase { /// Registers an appservice and returns the ID to the caller - /// pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result { // TODO: Rumaify let id = yaml.get("id").unwrap().as_str().unwrap(); @@ -34,7 +20,7 @@ impl Appservice { /// # Arguments /// /// * `service_name` - the name you send to register the service previously - pub fn unregister_appservice(&self, service_name: &str) -> Result<()> { + fn unregister_appservice(&self, service_name: &str) -> Result<()> { self.id_appserviceregistrations .remove(service_name.as_bytes())?; self.cached_registrations @@ -44,7 +30,7 @@ impl Appservice { Ok(()) } - pub fn get_registration(&self, id: &str) -> Result> { + fn get_registration(&self, id: &str) -> Result> { self.cached_registrations .read() .unwrap() @@ -66,14 +52,14 @@ impl Appservice { ) } - pub fn iter_ids(&self) -> Result> + '_> { + fn iter_ids(&self) -> Result> + '_> { Ok(self.id_appserviceregistrations.iter().map(|(id, _)| { utils::string_from_bytes(&id) .map_err(|_| Error::bad_database("Invalid id bytes in id_appserviceregistrations.")) })) } - pub fn all(&self) -> Result> { + fn all(&self) -> Result> { self.iter_ids()? .filter_map(|id| id.ok()) .map(move |id| { diff --git a/src/database/key_value/pusher.rs b/src/database/key_value/pusher.rs index 6b906c24..94374ab2 100644 --- a/src/database/key_value/pusher.rs +++ b/src/database/key_value/pusher.rs @@ -1,36 +1,5 @@ -use crate::{Database, Error, PduEvent, Result}; -use bytes::BytesMut; -use ruma::{ - api::{ - client::push::{get_pushers, set_pusher, PusherKind}, - push_gateway::send_event_notification::{ - self, - v1::{Device, Notification, NotificationCounts, NotificationPriority}, - }, - IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, - }, - events::{ - room::{name::RoomNameEventContent, power_levels::RoomPowerLevelsEventContent}, - AnySyncRoomEvent, RoomEventType, StateEventType, - }, - push::{Action, PushConditionRoomCtx, PushFormat, Ruleset, Tweak}, - serde::Raw, - uint, RoomId, UInt, UserId, -}; -use tracing::{error, info, warn}; - -use std::{fmt::Debug, mem, sync::Arc}; - -use super::abstraction::Tree; - -pub struct PushData { - /// UserId + pushkey -> Pusher - pub(super) senderkey_pusher: Arc, -} - -impl PushData { - #[tracing::instrument(skip(self, sender, pusher))] - pub fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> { +impl service::pusher::Data for KeyValueDatabase { + fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> { let mut key = sender.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(pusher.pushkey.as_bytes()); @@ -52,8 +21,7 @@ impl PushData { Ok(()) } - #[tracing::instrument(skip(self, senderkey))] - pub fn get_pusher(&self, senderkey: &[u8]) -> Result> { + fn get_pusher(&self, senderkey: &[u8]) -> Result> { self.senderkey_pusher .get(senderkey)? 
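// A sketch of the service::pusher::Data trait that the
// `impl service::pusher::Data for KeyValueDatabase` block in this hunk appears to
// implement. The signatures are inferred from that block; the boxed iterator and
// the exact ruma response types are assumptions, not the trait as it lands later.
use ruma::{
    api::client::push::{get_pushers, set_pusher},
    UserId,
};

pub trait Data {
    fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()>;

    fn get_pusher(&self, senderkey: &[u8]) -> Result<Option<get_pushers::v3::Pusher>>;

    fn get_pushers(&self, sender: &UserId) -> Result<Vec<get_pushers::v3::Pusher>>;

    // Boxed because plain `impl Trait` return types are not allowed in trait methods here
    fn get_pusher_senderkeys<'a>(&'a self, sender: &UserId)
        -> Box<dyn Iterator<Item = Vec<u8>> + 'a>;
}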
.map(|push| { @@ -63,8 +31,7 @@ impl PushData { .transpose() } - #[tracing::instrument(skip(self, sender))] - pub fn get_pushers(&self, sender: &UserId) -> Result> { + fn get_pushers(&self, sender: &UserId) -> Result> { let mut prefix = sender.as_bytes().to_vec(); prefix.push(0xff); @@ -77,8 +44,7 @@ impl PushData { .collect() } - #[tracing::instrument(skip(self, sender))] - pub fn get_pusher_senderkeys<'a>( + fn get_pusher_senderkeys<'a>( &'a self, sender: &UserId, ) -> impl Iterator> + 'a { @@ -88,261 +54,3 @@ impl PushData { self.senderkey_pusher.scan_prefix(prefix).map(|(k, _)| k) } } - -#[tracing::instrument(skip(globals, destination, request))] -pub async fn send_request( - globals: &crate::database::globals::Globals, - destination: &str, - request: T, -) -> Result -where - T: Debug, -{ - let destination = destination.replace("/_matrix/push/v1/notify", ""); - - let http_request = request - .try_into_http_request::( - &destination, - SendAccessToken::IfRequired(""), - &[MatrixVersion::V1_0], - ) - .map_err(|e| { - warn!("Failed to find destination {}: {}", destination, e); - Error::BadServerResponse("Invalid destination") - })? - .map(|body| body.freeze()); - - let reqwest_request = reqwest::Request::try_from(http_request) - .expect("all http requests are valid reqwest requests"); - - // TODO: we could keep this very short and let expo backoff do it's thing... - //*reqwest_request.timeout_mut() = Some(Duration::from_secs(5)); - - let url = reqwest_request.url().clone(); - let response = globals.default_client().execute(reqwest_request).await; - - match response { - Ok(mut response) => { - // reqwest::Response -> http::Response conversion - let status = response.status(); - let mut http_response_builder = http::Response::builder() - .status(status) - .version(response.version()); - mem::swap( - response.headers_mut(), - http_response_builder - .headers_mut() - .expect("http::response::Builder is usable"), - ); - - let body = response.bytes().await.unwrap_or_else(|e| { - warn!("server error {}", e); - Vec::new().into() - }); // TODO: handle timeout - - if status != 200 { - info!( - "Push gateway returned bad response {} {}\n{}\n{:?}", - destination, - status, - url, - crate::utils::string_from_bytes(&body) - ); - } - - let response = T::IncomingResponse::try_from_http_response( - http_response_builder - .body(body) - .expect("reqwest body is valid http body"), - ); - response.map_err(|_| { - info!( - "Push gateway returned invalid response bytes {}\n{}", - destination, url - ); - Error::BadServerResponse("Push gateway returned bad response.") - }) - } - Err(e) => Err(e.into()), - } -} - -#[tracing::instrument(skip(user, unread, pusher, ruleset, pdu, db))] -pub async fn send_push_notice( - user: &UserId, - unread: UInt, - pusher: &get_pushers::v3::Pusher, - ruleset: Ruleset, - pdu: &PduEvent, - db: &Database, -) -> Result<()> { - let mut notify = None; - let mut tweaks = Vec::new(); - - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - for action in get_actions( - user, - &ruleset, - &power_levels, - &pdu.to_sync_room_event(), - &pdu.room_id, - db, - )? 
{ - let n = match action { - Action::DontNotify => false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => true, - Action::SetTweak(tweak) => { - tweaks.push(tweak.clone()); - continue; - } - }; - - if notify.is_some() { - return Err(Error::bad_database( - r#"Malformed pushrule contains more than one of these actions: ["dont_notify", "notify", "coalesce"]"#, - )); - } - - notify = Some(n); - } - - if notify == Some(true) { - send_notice(unread, pusher, tweaks, pdu, db).await?; - } - // Else the event triggered no actions - - Ok(()) -} - -#[tracing::instrument(skip(user, ruleset, pdu, db))] -pub fn get_actions<'a>( - user: &UserId, - ruleset: &'a Ruleset, - power_levels: &RoomPowerLevelsEventContent, - pdu: &Raw, - room_id: &RoomId, - db: &Database, -) -> Result<&'a [Action]> { - let ctx = PushConditionRoomCtx { - room_id: room_id.to_owned(), - member_count: 10_u32.into(), // TODO: get member count efficiently - user_display_name: db - .users - .displayname(user)? - .unwrap_or_else(|| user.localpart().to_owned()), - users_power_levels: power_levels.users.clone(), - default_power_level: power_levels.users_default, - notification_power_levels: power_levels.notifications.clone(), - }; - - Ok(ruleset.get_actions(pdu, &ctx)) -} - -#[tracing::instrument(skip(unread, pusher, tweaks, event, db))] -async fn send_notice( - unread: UInt, - pusher: &get_pushers::v3::Pusher, - tweaks: Vec, - event: &PduEvent, - db: &Database, -) -> Result<()> { - // TODO: email - if pusher.kind == PusherKind::Email { - return Ok(()); - } - - // TODO: - // Two problems with this - // 1. if "event_id_only" is the only format kind it seems we should never add more info - // 2. can pusher/devices have conflicting formats - let event_id_only = pusher.data.format == Some(PushFormat::EventIdOnly); - let url = if let Some(url) = &pusher.data.url { - url - } else { - error!("Http Pusher must have URL specified."); - return Ok(()); - }; - - let mut device = Device::new(pusher.app_id.clone(), pusher.pushkey.clone()); - let mut data_minus_url = pusher.data.clone(); - // The url must be stripped off according to spec - data_minus_url.url = None; - device.data = data_minus_url; - - // Tweaks are only added if the format is NOT event_id_only - if !event_id_only { - device.tweaks = tweaks.clone(); - } - - let d = &[device]; - let mut notifi = Notification::new(d); - - notifi.prio = NotificationPriority::Low; - notifi.event_id = Some(&event.event_id); - notifi.room_id = Some(&event.room_id); - // TODO: missed calls - notifi.counts = NotificationCounts::new(unread, uint!(0)); - - if event.kind == RoomEventType::RoomEncrypted - || tweaks - .iter() - .any(|t| matches!(t, Tweak::Highlight(true) | Tweak::Sound(_))) - { - notifi.prio = NotificationPriority::High - } - - if event_id_only { - send_request( - &db.globals, - url, - send_event_notification::v1::Request::new(notifi), - ) - .await?; - } else { - notifi.sender = Some(&event.sender); - notifi.event_type = Some(&event.kind); - let content = serde_json::value::to_raw_value(&event.content).ok(); - notifi.content = content.as_deref(); - - if event.kind == RoomEventType::RoomMember { - notifi.user_is_target = event.state_key.as_deref() == Some(event.sender.as_str()); - } - - let user_name = db.users.displayname(&event.sender)?; - notifi.sender_display_name = user_name.as_deref(); - - let room_name = if let Some(room_name_pdu) = - db.rooms - .room_state_get(&event.room_id, &StateEventType::RoomName, "")? 
- { - serde_json::from_str::(room_name_pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid room name event in database."))? - .name - } else { - None - }; - - notifi.room_name = room_name.as_deref(); - - send_request( - &db.globals, - url, - send_event_notification::v1::Request::new(notifi), - ) - .await?; - } - - // TODO: email - - Ok(()) -} diff --git a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs index 5b423d2d..58884ec3 100644 --- a/src/database/key_value/rooms/timeline.rs +++ b/src/database/key_value/rooms/timeline.rs @@ -1,28 +1,5 @@ - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. - self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { +impl service::room::timeline::Data for KeyValueDatabase { + fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { match self .lasttimelinecount_cache .lock() @@ -51,31 +28,8 @@ } } - // TODO Is this the same as the function above? - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - - - /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { + fn get_pdu_count(&self, event_id: &EventId) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? .map(|pdu_id| self.pdu_count(&pdu_id)) @@ -207,7 +161,6 @@ } /// Removes a pdu and creates a new one with the same id. - #[tracing::instrument(skip(self))] fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { if self.pduid_pdu.get(pdu_id)?.is_some() { self.pduid_pdu.insert( @@ -223,598 +176,8 @@ } } - /// Creates a new persisted data unit and adds it to a room. - /// - /// By this point the incoming event should be fully authenticated, no auth happens - /// in `append_pdu`. - /// - /// Returns pdu id - #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] - pub fn append_pdu<'a>( - &self, - pdu: &PduEvent, - mut pdu_json: CanonicalJsonObject, - leaves: impl IntoIterator + Debug, - db: &Database, - ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); - - // Make unsigned fields correct. 
This is not properly documented in the spec, but state - // events need to have previous content in the unsigned field, so clients can easily - // interpret things like membership changes - if let Some(state_key) = &pdu.state_key { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) - { - if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) - .unwrap() - { - unsigned.insert( - "prev_content".to_owned(), - CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()) - .expect("event is valid, we just created it"), - ), - ); - } - } - } else { - error!("Invalid unsigned type in pdu."); - } - } - - // We must keep track of all events that have been referenced. - self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - self.replace_pdu_leaves(&pdu.room_id, leaves)?; - - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(pdu.room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - - let count1 = db.globals.next_count()?; - // Mark as read first so the sending client doesn't get a notification even if appending - // fails - self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; - self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; - - let count2 = db.globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&count2.to_be_bytes()); - - // There's a brief moment of time here where the count is updated but the pdu does not - // exist. This could theoretically lead to dropped pdus, but it's extremely rare - // - // Update: We fixed this using insert_lock - - self.pduid_pdu.insert( - &pdu_id, - &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - self.lasttimelinecount_cache - .lock() - .unwrap() - .insert(pdu.room_id.clone(), count2); - - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; - - drop(insert_lock); - - // See if the event matches any known pushers - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - let sync_pdu = pdu.to_sync_room_event(); - - let mut notifies = Vec::new(); - let mut highlights = Vec::new(); - - for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { - // Don't notify the user of their own events - if user == &pdu.sender { - continue; - } - - let rules_for_user = db - .account_data - .get( - None, - user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? - .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); - - let mut highlight = false; - let mut notify = false; - - for action in pusher::get_actions( - user, - &rules_for_user, - &power_levels, - &sync_pdu, - &pdu.room_id, - db, - )? 
{ - match action { - Action::DontNotify => notify = false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => notify = true, - Action::SetTweak(Tweak::Highlight(true)) => { - highlight = true; - } - _ => {} - }; - } - - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(pdu.room_id.as_bytes()); - - if notify { - notifies.push(userroom_id.clone()); - } - - if highlight { - highlights.push(userroom_id); - } - - for senderkey in db.pusher.get_pusher_senderkeys(user) { - db.sending.send_push_pdu(&*pdu_id, senderkey)?; - } - } - - self.userroomid_notificationcount - .increment_batch(&mut notifies.into_iter())?; - self.userroomid_highlightcount - .increment_batch(&mut highlights.into_iter())?; - - match pdu.kind { - RoomEventType::RoomRedaction => { - if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(redact_id, pdu)?; - } - } - RoomEventType::RoomMember => { - if let Some(state_key) = &pdu.state_key { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - // if the state_key fails - let target_user_id = UserId::parse(state_key.clone()) - .expect("This state_key was previously validated"); - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - let invite_state = match content.membership { - MembershipState::Invite => { - let state = self.calculate_invite_state(pdu)?; - Some(state) - } - _ => None, - }; - - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( - &pdu.room_id, - &target_user_id, - content.membership, - &pdu.sender, - invite_state, - db, - true, - )?; - } - } - RoomEventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody<'a> { - #[serde(borrow)] - body: Option>, - } - - let content = serde_json::from_str::>(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - DB.rooms.search.index_pdu(room_id, pdu_id, body)?; - - let admin_room = self.id_from_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )?; - let server_user = format!("@conduit:{}", db.globals.server_name()); - - let to_conduit = body.starts_with(&format!("{}: ", server_user)); - - // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let from_conduit = - pdu.sender == server_user && db.globals.emergency_password().is_none(); - - if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - db.admin.process_message(body.to_string()); - } - } - } - _ => {} - } - - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. 
- if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - if let Some(appservice_uid) = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }) - { - if state_key_uid == &appservice_uid { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - } - } - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - - Ok(pdu_id) - } - - pub fn create_hash_and_sign_event( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex - ) -> (PduEvent, CanonicalJsonObj) { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - } = pdu_builder; - - let prev_events: Vec<_> = db - .rooms - .get_pdu_leaves(room_id)? - .into_iter() - .take(20) - .collect(); - - let create_event = db - .rooms - .room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = - RoomVersion::new(&room_version_id).expect("room version is supported"); - - let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(db.rooms.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = unsigned.unwrap_or_default(); - - if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - self.room_state_get(room_id, &event_type.to_string().into(), state_key)? 
- { - unsigned.insert( - "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), - ); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), - ); - } - } - - let pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender_user.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key, - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - to_canonical_value(db.globals.server_name()) - .expect("server name is a valid CanonicalJsonValue"), - ); - - match ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) { - Ok(_) => {} - Err(e) => { - return match e { - ruma::signatures::Error::PduSize => Err(Error::BadRequest( - ErrorKind::TooLarge, - "Message is too long", - )), - _ => Err(Error::BadRequest( - ErrorKind::Unknown, - "Signing event failed", - )), - } - } - } - - // Generate event id - pdu.event_id = EventId::parse_arc(format!( - "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - pdu_json.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), - ); - - // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; - } - - /// Creates a new persisted data unit and adds it to a room. This function takes a - /// roomid_mutex_state, meaning that only this function is able to mutate the room state. - #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex - ) -> Result> { - - let (pdu, pdu_json) = create_hash_and_sign_event()?; - - - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. 
- let statehashid = self.append_to_state(&pdu, &db.globals)?; - - let pdu_id = self.append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - iter::once(&*pdu.event_id), - db, - )?; - - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; - - let mut servers: HashSet> = - self.room_servers(room_id).filter_map(|r| r.ok()).collect(); - - // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - servers.insert(Box::from(state_key_uid.server_name())); - } - } - - // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(db.globals.server_name()); - - db.sending.send_pdu(servers.into_iter(), &pdu_id)?; - - Ok(pdu.event_id) - } - - /// Append the incoming event setting the state snapshot to the state from the - /// server that sent the event. - #[tracing::instrument(skip_all)] - fn append_incoming_pdu<'a>( - db: &Database, - pdu: &PduEvent, - pdu_json: CanonicalJsonObject, - new_room_leaves: impl IntoIterator + Clone + Debug, - state_ids_compressed: HashSet, - soft_fail: bool, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex - ) -> Result>> { - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. - db.rooms.set_event_state( - &pdu.event_id, - &pdu.room_id, - state_ids_compressed, - &db.globals, - )?; - - if soft_fail { - db.rooms - .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - db.rooms.replace_pdu_leaves(&pdu.room_id, new_room_leaves)?; - return Ok(None); - } - - let pdu_id = db.rooms.append_pdu(pdu, pdu_json, new_room_leaves, db)?; - - Ok(Some(pdu_id)) - } - - /// Returns an iterator over all PDUs in a room. - #[tracing::instrument(skip(self))] - pub fn all_pdus<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result, PduEvent)>> + 'a> { - self.pdus_since(user_id, room_id, 0) - } - /// Returns an iterator over all events in a room that happened after the event with id `since` /// in chronological order. - #[tracing::instrument(skip(self))] pub fn pdus_since<'a>( &'a self, user_id: &UserId, @@ -849,7 +212,6 @@ /// Returns an iterator over all events and their tokens in a room that happened before the /// event with id `until` in reverse-chronological order. - #[tracing::instrument(skip(self))] pub fn pdus_until<'a>( &'a self, user_id: &UserId, @@ -884,9 +246,6 @@ })) } - /// Returns an iterator over all events and their token in a room that happened after the event - /// with id `from` in chronological order. - #[tracing::instrument(skip(self))] pub fn pdus_after<'a>( &'a self, user_id: &UserId, @@ -920,18 +279,4 @@ Ok((pdu_id, pdu)) })) } - - /// Replace a PDU with the redacted form. - #[tracing::instrument(skip(self, reason))] - pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { - if let Some(pdu_id) = self.get_pdu_id(event_id)? { - let mut pdu = self - .get_pdu_from_id(&pdu_id)? 
- .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact(reason)?; - self.replace_pdu(&pdu_id, &pdu)?; - } - // If event does not exist, just noop - Ok(()) - } - +} diff --git a/src/service/transaction_ids.rs b/src/database/key_value/transaction_ids.rs similarity index 77% rename from src/service/transaction_ids.rs rename to src/database/key_value/transaction_ids.rs index ed0970d1..81c1197d 100644 --- a/src/service/transaction_ids.rs +++ b/src/database/key_value/transaction_ids.rs @@ -1,15 +1,4 @@ -use std::sync::Arc; - -use crate::Result; -use ruma::{DeviceId, TransactionId, UserId}; - -use super::abstraction::Tree; - -pub struct TransactionIds { - pub(super) userdevicetxnid_response: Arc, // Response can be empty (/sendToDevice) or the event id (/send) -} - -impl TransactionIds { +impl service::pusher::Data for KeyValueDatabase { pub fn add_txnid( &self, user_id: &UserId, diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs index 7c15f1d8..5ef058f3 100644 --- a/src/database/key_value/users.rs +++ b/src/database/key_value/users.rs @@ -1,49 +1,10 @@ -use crate::{utils, Error, Result}; -use ruma::{ - api::client::{device::Device, error::ErrorKind, filter::IncomingFilterDefinition}, - encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, - events::{AnyToDeviceEvent, StateEventType}, - serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, MxcUri, RoomAliasId, - UInt, UserId, -}; -use std::{collections::BTreeMap, mem, sync::Arc}; -use tracing::warn; - -use super::abstraction::Tree; - -pub struct Users { - pub(super) userid_password: Arc, - pub(super) userid_displayname: Arc, - pub(super) userid_avatarurl: Arc, - pub(super) userid_blurhash: Arc, - pub(super) userdeviceid_token: Arc, - pub(super) userdeviceid_metadata: Arc, // This is also used to check if a device exists - pub(super) userid_devicelistversion: Arc, // DevicelistVersion = u64 - pub(super) token_userdeviceid: Arc, - - pub(super) onetimekeyid_onetimekeys: Arc, // OneTimeKeyId = UserId + DeviceKeyId - pub(super) userid_lastonetimekeyupdate: Arc, // LastOneTimeKeyUpdate = Count - pub(super) keychangeid_userid: Arc, // KeyChangeId = UserId/RoomId + Count - pub(super) keyid_key: Arc, // KeyId = UserId + KeyId (depends on key type) - pub(super) userid_masterkeyid: Arc, - pub(super) userid_selfsigningkeyid: Arc, - pub(super) userid_usersigningkeyid: Arc, - - pub(super) userfilterid_filter: Arc, // UserFilterId = UserId + FilterId - - pub(super) todeviceid_events: Arc, // ToDeviceId = UserId + DeviceId + Count -} - -impl Users { +impl service::users::Data for KeyValueDatabase { /// Check if a user has an account on this homeserver. - #[tracing::instrument(skip(self, user_id))] pub fn exists(&self, user_id: &UserId) -> Result { Ok(self.userid_password.get(user_id.as_bytes())?.is_some()) } /// Check if account is deactivated - #[tracing::instrument(skip(self, user_id))] pub fn is_deactivated(&self, user_id: &UserId) -> Result { Ok(self .userid_password @@ -56,7 +17,6 @@ impl Users { } /// Check if a user is an admin - #[tracing::instrument(skip(self, user_id, rooms, globals))] pub fn is_admin( &self, user_id: &UserId, @@ -71,20 +31,17 @@ impl Users { } /// Create a new user account on this homeserver. - #[tracing::instrument(skip(self, user_id, password))] pub fn create(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { self.set_password(user_id, password)?; Ok(()) } /// Returns the number of users registered on this server. 
- #[tracing::instrument(skip(self))] pub fn count(&self) -> Result { Ok(self.userid_password.iter().count()) } /// Find out which user an access token belongs to. - #[tracing::instrument(skip(self, token))] pub fn find_from_token(&self, token: &str) -> Result, String)>> { self.token_userdeviceid .get(token.as_bytes())? @@ -112,7 +69,6 @@ impl Users { } /// Returns an iterator over all users on this homeserver. - #[tracing::instrument(skip(self))] pub fn iter(&self) -> impl Iterator>> + '_ { self.userid_password.iter().map(|(bytes, _)| { UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { @@ -125,7 +81,6 @@ impl Users { /// Returns a list of local users as list of usernames. /// /// A user account is considered `local` if the length of it's password is greater then zero. - #[tracing::instrument(skip(self))] pub fn list_local_users(&self) -> Result> { let users: Vec = self .userid_password @@ -139,7 +94,6 @@ impl Users { /// username could be successfully parsed. /// If utils::string_from_bytes(...) returns an error that username will be skipped /// and the error will be logged. - #[tracing::instrument(skip(self))] fn get_username_with_valid_password(&self, username: &[u8], password: &[u8]) -> Option { // A valid password is not empty if password.is_empty() { @@ -159,7 +113,6 @@ impl Users { } /// Returns the password hash for the given user. - #[tracing::instrument(skip(self, user_id))] pub fn password_hash(&self, user_id: &UserId) -> Result> { self.userid_password .get(user_id.as_bytes())? @@ -171,7 +124,6 @@ impl Users { } /// Hash and set the user's password to the Argon2 hash - #[tracing::instrument(skip(self, user_id, password))] pub fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { if let Some(password) = password { if let Ok(hash) = utils::calculate_hash(password) { @@ -191,7 +143,6 @@ impl Users { } /// Returns the displayname of a user on this homeserver. - #[tracing::instrument(skip(self, user_id))] pub fn displayname(&self, user_id: &UserId) -> Result> { self.userid_displayname .get(user_id.as_bytes())? @@ -203,7 +154,6 @@ impl Users { } /// Sets a new displayname or removes it if displayname is None. You still need to nofify all rooms of this change. - #[tracing::instrument(skip(self, user_id, displayname))] pub fn set_displayname(&self, user_id: &UserId, displayname: Option) -> Result<()> { if let Some(displayname) = displayname { self.userid_displayname @@ -216,7 +166,6 @@ impl Users { } /// Get the avatar_url of a user. - #[tracing::instrument(skip(self, user_id))] pub fn avatar_url(&self, user_id: &UserId) -> Result>> { self.userid_avatarurl .get(user_id.as_bytes())? @@ -230,7 +179,6 @@ impl Users { } /// Sets a new avatar_url or removes it if avatar_url is None. - #[tracing::instrument(skip(self, user_id, avatar_url))] pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option>) -> Result<()> { if let Some(avatar_url) = avatar_url { self.userid_avatarurl @@ -243,7 +191,6 @@ impl Users { } /// Get the blurhash of a user. - #[tracing::instrument(skip(self, user_id))] pub fn blurhash(&self, user_id: &UserId) -> Result> { self.userid_blurhash .get(user_id.as_bytes())? @@ -257,7 +204,6 @@ impl Users { } /// Sets a new avatar_url or removes it if avatar_url is None. - #[tracing::instrument(skip(self, user_id, blurhash))] pub fn set_blurhash(&self, user_id: &UserId, blurhash: Option) -> Result<()> { if let Some(blurhash) = blurhash { self.userid_blurhash @@ -270,7 +216,6 @@ impl Users { } /// Adds a new device to a user. 
- #[tracing::instrument(skip(self, user_id, device_id, token, initial_device_display_name))] pub fn create_device( &self, user_id: &UserId, @@ -305,7 +250,6 @@ impl Users { } /// Removes a device from a user. - #[tracing::instrument(skip(self, user_id, device_id))] pub fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { let mut userdeviceid = user_id.as_bytes().to_vec(); userdeviceid.push(0xff); @@ -336,7 +280,6 @@ impl Users { } /// Returns an iterator over all device ids of this user. - #[tracing::instrument(skip(self, user_id))] pub fn all_device_ids<'a>( &'a self, user_id: &UserId, @@ -359,7 +302,6 @@ impl Users { } /// Replaces the access token of one device. - #[tracing::instrument(skip(self, user_id, device_id, token))] pub fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> { let mut userdeviceid = user_id.as_bytes().to_vec(); userdeviceid.push(0xff); @@ -383,14 +325,6 @@ impl Users { Ok(()) } - #[tracing::instrument(skip( - self, - user_id, - device_id, - one_time_key_key, - one_time_key_value, - globals - ))] pub fn add_one_time_key( &self, user_id: &UserId, @@ -427,7 +361,6 @@ impl Users { Ok(()) } - #[tracing::instrument(skip(self, user_id))] pub fn last_one_time_keys_update(&self, user_id: &UserId) -> Result { self.userid_lastonetimekeyupdate .get(user_id.as_bytes())? @@ -439,7 +372,6 @@ impl Users { .unwrap_or(Ok(0)) } - #[tracing::instrument(skip(self, user_id, device_id, key_algorithm, globals))] pub fn take_one_time_key( &self, user_id: &UserId, @@ -479,7 +411,6 @@ impl Users { .transpose() } - #[tracing::instrument(skip(self, user_id, device_id))] pub fn count_one_time_keys( &self, user_id: &UserId, @@ -512,7 +443,6 @@ impl Users { Ok(counts) } - #[tracing::instrument(skip(self, user_id, device_id, device_keys, rooms, globals))] pub fn add_device_keys( &self, user_id: &UserId, @@ -535,14 +465,6 @@ impl Users { Ok(()) } - #[tracing::instrument(skip( - self, - master_key, - self_signing_key, - user_signing_key, - rooms, - globals - ))] pub fn add_cross_signing_keys( &self, user_id: &UserId, @@ -658,7 +580,6 @@ impl Users { Ok(()) } - #[tracing::instrument(skip(self, target_id, key_id, signature, sender_id, rooms, globals))] pub fn sign_key( &self, target_id: &UserId, @@ -703,7 +624,6 @@ impl Users { Ok(()) } - #[tracing::instrument(skip(self, user_or_room_id, from, to))] pub fn keys_changed<'a>( &'a self, user_or_room_id: &str, @@ -742,7 +662,6 @@ impl Users { }) } - #[tracing::instrument(skip(self, user_id, rooms, globals))] pub fn mark_device_key_update( &self, user_id: &UserId, @@ -774,7 +693,6 @@ impl Users { Ok(()) } - #[tracing::instrument(skip(self, user_id, device_id))] pub fn get_device_keys( &self, user_id: &UserId, @@ -791,7 +709,6 @@ impl Users { }) } - #[tracing::instrument(skip(self, user_id, allowed_signatures))] pub fn get_master_key bool>( &self, user_id: &UserId, @@ -813,7 +730,6 @@ impl Users { }) } - #[tracing::instrument(skip(self, user_id, allowed_signatures))] pub fn get_self_signing_key bool>( &self, user_id: &UserId, @@ -835,7 +751,6 @@ impl Users { }) } - #[tracing::instrument(skip(self, user_id))] pub fn get_user_signing_key(&self, user_id: &UserId) -> Result>> { self.userid_usersigningkeyid .get(user_id.as_bytes())? 
@@ -848,15 +763,6 @@ impl Users { }) } - #[tracing::instrument(skip( - self, - sender, - target_user_id, - target_device_id, - event_type, - content, - globals - ))] pub fn add_to_device_event( &self, sender: &UserId, @@ -884,7 +790,6 @@ impl Users { Ok(()) } - #[tracing::instrument(skip(self, user_id, device_id))] pub fn get_to_device_events( &self, user_id: &UserId, @@ -907,7 +812,6 @@ impl Users { Ok(events) } - #[tracing::instrument(skip(self, user_id, device_id, until))] pub fn remove_to_device_events( &self, user_id: &UserId, @@ -942,7 +846,6 @@ impl Users { Ok(()) } - #[tracing::instrument(skip(self, user_id, device_id, device))] pub fn update_device_metadata( &self, user_id: &UserId, @@ -968,7 +871,6 @@ impl Users { } /// Get device metadata. - #[tracing::instrument(skip(self, user_id, device_id))] pub fn get_device_metadata( &self, user_id: &UserId, @@ -987,7 +889,6 @@ impl Users { }) } - #[tracing::instrument(skip(self, user_id))] pub fn get_devicelist_version(&self, user_id: &UserId) -> Result> { self.userid_devicelistversion .get(user_id.as_bytes())? @@ -998,7 +899,6 @@ impl Users { }) } - #[tracing::instrument(skip(self, user_id))] pub fn all_devices_metadata<'a>( &'a self, user_id: &UserId, @@ -1014,25 +914,7 @@ impl Users { }) } - /// Deactivate account - #[tracing::instrument(skip(self, user_id))] - pub fn deactivate_account(&self, user_id: &UserId) -> Result<()> { - // Remove all associated devices - for device_id in self.all_device_ids(user_id) { - self.remove_device(user_id, &device_id?)?; - } - - // Set the password to "" to indicate a deactivated account. Hashes will never result in an - // empty string, so the user will not be able to log in again. Systems like changing the - // password without logging in should check if the account is deactivated. - self.userid_password.insert(user_id.as_bytes(), &[])?; - - // TODO: Unhook 3PID - Ok(()) - } - /// Creates a new sync filter. Returns the filter id. 
- #[tracing::instrument(skip(self))] pub fn create_filter( &self, user_id: &UserId, @@ -1052,7 +934,6 @@ impl Users { Ok(filter_id) } - #[tracing::instrument(skip(self))] pub fn get_filter( &self, user_id: &UserId, @@ -1072,30 +953,3 @@ impl Users { } } } - -/// Ensure that a user only sees signatures from themselves and the target user -fn clean_signatures bool>( - cross_signing_key: &mut serde_json::Value, - user_id: &UserId, - allowed_signatures: F, -) -> Result<(), Error> { - if let Some(signatures) = cross_signing_key - .get_mut("signatures") - .and_then(|v| v.as_object_mut()) - { - // Don't allocate for the full size of the current signatures, but require - // at most one resize if nothing is dropped - let new_capacity = signatures.len() / 2; - for (user, signature) in - mem::replace(signatures, serde_json::Map::with_capacity(new_capacity)) - { - let id = <&UserId>::try_from(user.as_str()) - .map_err(|_| Error::bad_database("Invalid user ID in database."))?; - if id == user_id || allowed_signatures(id) { - signatures.insert(user, signature); - } - } - } - - Ok(()) -} diff --git a/src/service/appservice/data.rs b/src/service/appservice/data.rs new file mode 100644 index 00000000..fe57451f --- /dev/null +++ b/src/service/appservice/data.rs @@ -0,0 +1,17 @@ +pub trait Data { + /// Registers an appservice and returns the ID to the caller + pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result; + + /// Remove an appservice registration + /// + /// # Arguments + /// + /// * `service_name` - the name you send to register the service previously + pub fn unregister_appservice(&self, service_name: &str) -> Result<()>; + + pub fn get_registration(&self, id: &str) -> Result>; + + pub fn iter_ids(&self) -> Result> + '_>; + + pub fn all(&self) -> Result>; +} diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs new file mode 100644 index 00000000..ec4ffc56 --- /dev/null +++ b/src/service/appservice/mod.rs @@ -0,0 +1,36 @@ +mod data; +pub use data::Data; + +use crate::service::*; + +pub struct Service { + db: D, +} + +impl Service<_> { + /// Registers an appservice and returns the ID to the caller + pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result { + self.db.register_appservice(yaml) + } + + /// Remove an appservice registration + /// + /// # Arguments + /// + /// * `service_name` - the name you send to register the service previously + pub fn unregister_appservice(&self, service_name: &str) -> Result<()> { + self.db.unregister_appservice(service_name) + } + + pub fn get_registration(&self, id: &str) -> Result> { + self.db.get_registration(id) + } + + pub fn iter_ids(&self) -> Result> + '_> { + self.db.iter_ids() + } + + pub fn all(&self) -> Result> { + self.db.all() + } +} diff --git a/src/service/globals.rs b/src/service/globals.rs index 7e09128e..2b47e5b1 100644 --- a/src/service/globals.rs +++ b/src/service/globals.rs @@ -1,3 +1,8 @@ +mod data; +pub use data::Data; + +use crate::service::*; + use crate::{database::Config, server_server::FedDest, utils, Error, Result}; use ruma::{ api::{ @@ -32,10 +37,11 @@ type SyncHandle = ( Receiver>>, // rx ); -pub struct Globals { +pub struct Service { + db: D, + pub actual_destination_cache: Arc>, // actual_destination, host pub tls_name_override: Arc>, - pub(super) globals: Arc, pub config: Config, keypair: Arc, dns_resolver: TokioAsyncResolver, @@ -44,7 +50,6 @@ pub struct Globals { default_client: reqwest::Client, pub stable_room_versions: Vec, pub unstable_room_versions: Vec, - pub(super) 
server_signingkeys: Arc, pub bad_event_ratelimiter: Arc, RateLimitState>>>, pub bad_signature_ratelimiter: Arc, RateLimitState>>>, pub servername_ratelimiter: Arc, Arc>>>, @@ -87,7 +92,8 @@ impl Default for RotationHandler { } } -impl Globals { + +impl Service<_> { pub fn load( globals: Arc, server_signingkeys: Arc, diff --git a/src/service/pusher.rs b/src/service/pusher.rs deleted file mode 100644 index 6b906c24..00000000 --- a/src/service/pusher.rs +++ /dev/null @@ -1,348 +0,0 @@ -use crate::{Database, Error, PduEvent, Result}; -use bytes::BytesMut; -use ruma::{ - api::{ - client::push::{get_pushers, set_pusher, PusherKind}, - push_gateway::send_event_notification::{ - self, - v1::{Device, Notification, NotificationCounts, NotificationPriority}, - }, - IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, - }, - events::{ - room::{name::RoomNameEventContent, power_levels::RoomPowerLevelsEventContent}, - AnySyncRoomEvent, RoomEventType, StateEventType, - }, - push::{Action, PushConditionRoomCtx, PushFormat, Ruleset, Tweak}, - serde::Raw, - uint, RoomId, UInt, UserId, -}; -use tracing::{error, info, warn}; - -use std::{fmt::Debug, mem, sync::Arc}; - -use super::abstraction::Tree; - -pub struct PushData { - /// UserId + pushkey -> Pusher - pub(super) senderkey_pusher: Arc, -} - -impl PushData { - #[tracing::instrument(skip(self, sender, pusher))] - pub fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> { - let mut key = sender.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(pusher.pushkey.as_bytes()); - - // There are 2 kinds of pushers but the spec says: null deletes the pusher. - if pusher.kind.is_none() { - return self - .senderkey_pusher - .remove(&key) - .map(|_| ()) - .map_err(Into::into); - } - - self.senderkey_pusher.insert( - &key, - &serde_json::to_vec(&pusher).expect("Pusher is valid JSON value"), - )?; - - Ok(()) - } - - #[tracing::instrument(skip(self, senderkey))] - pub fn get_pusher(&self, senderkey: &[u8]) -> Result> { - self.senderkey_pusher - .get(senderkey)? - .map(|push| { - serde_json::from_slice(&*push) - .map_err(|_| Error::bad_database("Invalid Pusher in db.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self, sender))] - pub fn get_pushers(&self, sender: &UserId) -> Result> { - let mut prefix = sender.as_bytes().to_vec(); - prefix.push(0xff); - - self.senderkey_pusher - .scan_prefix(prefix) - .map(|(_, push)| { - serde_json::from_slice(&*push) - .map_err(|_| Error::bad_database("Invalid Pusher in db.")) - }) - .collect() - } - - #[tracing::instrument(skip(self, sender))] - pub fn get_pusher_senderkeys<'a>( - &'a self, - sender: &UserId, - ) -> impl Iterator> + 'a { - let mut prefix = sender.as_bytes().to_vec(); - prefix.push(0xff); - - self.senderkey_pusher.scan_prefix(prefix).map(|(k, _)| k) - } -} - -#[tracing::instrument(skip(globals, destination, request))] -pub async fn send_request( - globals: &crate::database::globals::Globals, - destination: &str, - request: T, -) -> Result -where - T: Debug, -{ - let destination = destination.replace("/_matrix/push/v1/notify", ""); - - let http_request = request - .try_into_http_request::( - &destination, - SendAccessToken::IfRequired(""), - &[MatrixVersion::V1_0], - ) - .map_err(|e| { - warn!("Failed to find destination {}: {}", destination, e); - Error::BadServerResponse("Invalid destination") - })? 
- .map(|body| body.freeze()); - - let reqwest_request = reqwest::Request::try_from(http_request) - .expect("all http requests are valid reqwest requests"); - - // TODO: we could keep this very short and let expo backoff do it's thing... - //*reqwest_request.timeout_mut() = Some(Duration::from_secs(5)); - - let url = reqwest_request.url().clone(); - let response = globals.default_client().execute(reqwest_request).await; - - match response { - Ok(mut response) => { - // reqwest::Response -> http::Response conversion - let status = response.status(); - let mut http_response_builder = http::Response::builder() - .status(status) - .version(response.version()); - mem::swap( - response.headers_mut(), - http_response_builder - .headers_mut() - .expect("http::response::Builder is usable"), - ); - - let body = response.bytes().await.unwrap_or_else(|e| { - warn!("server error {}", e); - Vec::new().into() - }); // TODO: handle timeout - - if status != 200 { - info!( - "Push gateway returned bad response {} {}\n{}\n{:?}", - destination, - status, - url, - crate::utils::string_from_bytes(&body) - ); - } - - let response = T::IncomingResponse::try_from_http_response( - http_response_builder - .body(body) - .expect("reqwest body is valid http body"), - ); - response.map_err(|_| { - info!( - "Push gateway returned invalid response bytes {}\n{}", - destination, url - ); - Error::BadServerResponse("Push gateway returned bad response.") - }) - } - Err(e) => Err(e.into()), - } -} - -#[tracing::instrument(skip(user, unread, pusher, ruleset, pdu, db))] -pub async fn send_push_notice( - user: &UserId, - unread: UInt, - pusher: &get_pushers::v3::Pusher, - ruleset: Ruleset, - pdu: &PduEvent, - db: &Database, -) -> Result<()> { - let mut notify = None; - let mut tweaks = Vec::new(); - - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - for action in get_actions( - user, - &ruleset, - &power_levels, - &pdu.to_sync_room_event(), - &pdu.room_id, - db, - )? { - let n = match action { - Action::DontNotify => false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => true, - Action::SetTweak(tweak) => { - tweaks.push(tweak.clone()); - continue; - } - }; - - if notify.is_some() { - return Err(Error::bad_database( - r#"Malformed pushrule contains more than one of these actions: ["dont_notify", "notify", "coalesce"]"#, - )); - } - - notify = Some(n); - } - - if notify == Some(true) { - send_notice(unread, pusher, tweaks, pdu, db).await?; - } - // Else the event triggered no actions - - Ok(()) -} - -#[tracing::instrument(skip(user, ruleset, pdu, db))] -pub fn get_actions<'a>( - user: &UserId, - ruleset: &'a Ruleset, - power_levels: &RoomPowerLevelsEventContent, - pdu: &Raw, - room_id: &RoomId, - db: &Database, -) -> Result<&'a [Action]> { - let ctx = PushConditionRoomCtx { - room_id: room_id.to_owned(), - member_count: 10_u32.into(), // TODO: get member count efficiently - user_display_name: db - .users - .displayname(user)? 
- .unwrap_or_else(|| user.localpart().to_owned()), - users_power_levels: power_levels.users.clone(), - default_power_level: power_levels.users_default, - notification_power_levels: power_levels.notifications.clone(), - }; - - Ok(ruleset.get_actions(pdu, &ctx)) -} - -#[tracing::instrument(skip(unread, pusher, tweaks, event, db))] -async fn send_notice( - unread: UInt, - pusher: &get_pushers::v3::Pusher, - tweaks: Vec, - event: &PduEvent, - db: &Database, -) -> Result<()> { - // TODO: email - if pusher.kind == PusherKind::Email { - return Ok(()); - } - - // TODO: - // Two problems with this - // 1. if "event_id_only" is the only format kind it seems we should never add more info - // 2. can pusher/devices have conflicting formats - let event_id_only = pusher.data.format == Some(PushFormat::EventIdOnly); - let url = if let Some(url) = &pusher.data.url { - url - } else { - error!("Http Pusher must have URL specified."); - return Ok(()); - }; - - let mut device = Device::new(pusher.app_id.clone(), pusher.pushkey.clone()); - let mut data_minus_url = pusher.data.clone(); - // The url must be stripped off according to spec - data_minus_url.url = None; - device.data = data_minus_url; - - // Tweaks are only added if the format is NOT event_id_only - if !event_id_only { - device.tweaks = tweaks.clone(); - } - - let d = &[device]; - let mut notifi = Notification::new(d); - - notifi.prio = NotificationPriority::Low; - notifi.event_id = Some(&event.event_id); - notifi.room_id = Some(&event.room_id); - // TODO: missed calls - notifi.counts = NotificationCounts::new(unread, uint!(0)); - - if event.kind == RoomEventType::RoomEncrypted - || tweaks - .iter() - .any(|t| matches!(t, Tweak::Highlight(true) | Tweak::Sound(_))) - { - notifi.prio = NotificationPriority::High - } - - if event_id_only { - send_request( - &db.globals, - url, - send_event_notification::v1::Request::new(notifi), - ) - .await?; - } else { - notifi.sender = Some(&event.sender); - notifi.event_type = Some(&event.kind); - let content = serde_json::value::to_raw_value(&event.content).ok(); - notifi.content = content.as_deref(); - - if event.kind == RoomEventType::RoomMember { - notifi.user_is_target = event.state_key.as_deref() == Some(event.sender.as_str()); - } - - let user_name = db.users.displayname(&event.sender)?; - notifi.sender_display_name = user_name.as_deref(); - - let room_name = if let Some(room_name_pdu) = - db.rooms - .room_state_get(&event.room_id, &StateEventType::RoomName, "")? - { - serde_json::from_str::(room_name_pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid room name event in database."))? 
- .name - } else { - None - }; - - notifi.room_name = room_name.as_deref(); - - send_request( - &db.globals, - url, - send_event_notification::v1::Request::new(notifi), - ) - .await?; - } - - // TODO: email - - Ok(()) -} diff --git a/src/service/pusher/data.rs b/src/service/pusher/data.rs index 6b906c24..468ad8b4 100644 --- a/src/service/pusher/data.rs +++ b/src/service/pusher/data.rs @@ -1,348 +1,12 @@ -use crate::{Database, Error, PduEvent, Result}; -use bytes::BytesMut; -use ruma::{ - api::{ - client::push::{get_pushers, set_pusher, PusherKind}, - push_gateway::send_event_notification::{ - self, - v1::{Device, Notification, NotificationCounts, NotificationPriority}, - }, - IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, - }, - events::{ - room::{name::RoomNameEventContent, power_levels::RoomPowerLevelsEventContent}, - AnySyncRoomEvent, RoomEventType, StateEventType, - }, - push::{Action, PushConditionRoomCtx, PushFormat, Ruleset, Tweak}, - serde::Raw, - uint, RoomId, UInt, UserId, -}; -use tracing::{error, info, warn}; +pub trait Data { + fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()>; -use std::{fmt::Debug, mem, sync::Arc}; + pub fn get_pusher(&self, senderkey: &[u8]) -> Result>; -use super::abstraction::Tree; + pub fn get_pushers(&self, sender: &UserId) -> Result>; -pub struct PushData { - /// UserId + pushkey -> Pusher - pub(super) senderkey_pusher: Arc, -} - -impl PushData { - #[tracing::instrument(skip(self, sender, pusher))] - pub fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> { - let mut key = sender.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(pusher.pushkey.as_bytes()); - - // There are 2 kinds of pushers but the spec says: null deletes the pusher. - if pusher.kind.is_none() { - return self - .senderkey_pusher - .remove(&key) - .map(|_| ()) - .map_err(Into::into); - } - - self.senderkey_pusher.insert( - &key, - &serde_json::to_vec(&pusher).expect("Pusher is valid JSON value"), - )?; - - Ok(()) - } - - #[tracing::instrument(skip(self, senderkey))] - pub fn get_pusher(&self, senderkey: &[u8]) -> Result> { - self.senderkey_pusher - .get(senderkey)? - .map(|push| { - serde_json::from_slice(&*push) - .map_err(|_| Error::bad_database("Invalid Pusher in db.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self, sender))] - pub fn get_pushers(&self, sender: &UserId) -> Result> { - let mut prefix = sender.as_bytes().to_vec(); - prefix.push(0xff); - - self.senderkey_pusher - .scan_prefix(prefix) - .map(|(_, push)| { - serde_json::from_slice(&*push) - .map_err(|_| Error::bad_database("Invalid Pusher in db.")) - }) - .collect() - } - - #[tracing::instrument(skip(self, sender))] pub fn get_pusher_senderkeys<'a>( &'a self, sender: &UserId, - ) -> impl Iterator> + 'a { - let mut prefix = sender.as_bytes().to_vec(); - prefix.push(0xff); - - self.senderkey_pusher.scan_prefix(prefix).map(|(k, _)| k) - } -} - -#[tracing::instrument(skip(globals, destination, request))] -pub async fn send_request( - globals: &crate::database::globals::Globals, - destination: &str, - request: T, -) -> Result -where - T: Debug, -{ - let destination = destination.replace("/_matrix/push/v1/notify", ""); - - let http_request = request - .try_into_http_request::( - &destination, - SendAccessToken::IfRequired(""), - &[MatrixVersion::V1_0], - ) - .map_err(|e| { - warn!("Failed to find destination {}: {}", destination, e); - Error::BadServerResponse("Invalid destination") - })? 
- .map(|body| body.freeze()); - - let reqwest_request = reqwest::Request::try_from(http_request) - .expect("all http requests are valid reqwest requests"); - - // TODO: we could keep this very short and let expo backoff do it's thing... - //*reqwest_request.timeout_mut() = Some(Duration::from_secs(5)); - - let url = reqwest_request.url().clone(); - let response = globals.default_client().execute(reqwest_request).await; - - match response { - Ok(mut response) => { - // reqwest::Response -> http::Response conversion - let status = response.status(); - let mut http_response_builder = http::Response::builder() - .status(status) - .version(response.version()); - mem::swap( - response.headers_mut(), - http_response_builder - .headers_mut() - .expect("http::response::Builder is usable"), - ); - - let body = response.bytes().await.unwrap_or_else(|e| { - warn!("server error {}", e); - Vec::new().into() - }); // TODO: handle timeout - - if status != 200 { - info!( - "Push gateway returned bad response {} {}\n{}\n{:?}", - destination, - status, - url, - crate::utils::string_from_bytes(&body) - ); - } - - let response = T::IncomingResponse::try_from_http_response( - http_response_builder - .body(body) - .expect("reqwest body is valid http body"), - ); - response.map_err(|_| { - info!( - "Push gateway returned invalid response bytes {}\n{}", - destination, url - ); - Error::BadServerResponse("Push gateway returned bad response.") - }) - } - Err(e) => Err(e.into()), - } -} - -#[tracing::instrument(skip(user, unread, pusher, ruleset, pdu, db))] -pub async fn send_push_notice( - user: &UserId, - unread: UInt, - pusher: &get_pushers::v3::Pusher, - ruleset: Ruleset, - pdu: &PduEvent, - db: &Database, -) -> Result<()> { - let mut notify = None; - let mut tweaks = Vec::new(); - - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - for action in get_actions( - user, - &ruleset, - &power_levels, - &pdu.to_sync_room_event(), - &pdu.room_id, - db, - )? { - let n = match action { - Action::DontNotify => false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => true, - Action::SetTweak(tweak) => { - tweaks.push(tweak.clone()); - continue; - } - }; - - if notify.is_some() { - return Err(Error::bad_database( - r#"Malformed pushrule contains more than one of these actions: ["dont_notify", "notify", "coalesce"]"#, - )); - } - - notify = Some(n); - } - - if notify == Some(true) { - send_notice(unread, pusher, tweaks, pdu, db).await?; - } - // Else the event triggered no actions - - Ok(()) -} - -#[tracing::instrument(skip(user, ruleset, pdu, db))] -pub fn get_actions<'a>( - user: &UserId, - ruleset: &'a Ruleset, - power_levels: &RoomPowerLevelsEventContent, - pdu: &Raw, - room_id: &RoomId, - db: &Database, -) -> Result<&'a [Action]> { - let ctx = PushConditionRoomCtx { - room_id: room_id.to_owned(), - member_count: 10_u32.into(), // TODO: get member count efficiently - user_display_name: db - .users - .displayname(user)? 
- .unwrap_or_else(|| user.localpart().to_owned()), - users_power_levels: power_levels.users.clone(), - default_power_level: power_levels.users_default, - notification_power_levels: power_levels.notifications.clone(), - }; - - Ok(ruleset.get_actions(pdu, &ctx)) -} - -#[tracing::instrument(skip(unread, pusher, tweaks, event, db))] -async fn send_notice( - unread: UInt, - pusher: &get_pushers::v3::Pusher, - tweaks: Vec, - event: &PduEvent, - db: &Database, -) -> Result<()> { - // TODO: email - if pusher.kind == PusherKind::Email { - return Ok(()); - } - - // TODO: - // Two problems with this - // 1. if "event_id_only" is the only format kind it seems we should never add more info - // 2. can pusher/devices have conflicting formats - let event_id_only = pusher.data.format == Some(PushFormat::EventIdOnly); - let url = if let Some(url) = &pusher.data.url { - url - } else { - error!("Http Pusher must have URL specified."); - return Ok(()); - }; - - let mut device = Device::new(pusher.app_id.clone(), pusher.pushkey.clone()); - let mut data_minus_url = pusher.data.clone(); - // The url must be stripped off according to spec - data_minus_url.url = None; - device.data = data_minus_url; - - // Tweaks are only added if the format is NOT event_id_only - if !event_id_only { - device.tweaks = tweaks.clone(); - } - - let d = &[device]; - let mut notifi = Notification::new(d); - - notifi.prio = NotificationPriority::Low; - notifi.event_id = Some(&event.event_id); - notifi.room_id = Some(&event.room_id); - // TODO: missed calls - notifi.counts = NotificationCounts::new(unread, uint!(0)); - - if event.kind == RoomEventType::RoomEncrypted - || tweaks - .iter() - .any(|t| matches!(t, Tweak::Highlight(true) | Tweak::Sound(_))) - { - notifi.prio = NotificationPriority::High - } - - if event_id_only { - send_request( - &db.globals, - url, - send_event_notification::v1::Request::new(notifi), - ) - .await?; - } else { - notifi.sender = Some(&event.sender); - notifi.event_type = Some(&event.kind); - let content = serde_json::value::to_raw_value(&event.content).ok(); - notifi.content = content.as_deref(); - - if event.kind == RoomEventType::RoomMember { - notifi.user_is_target = event.state_key.as_deref() == Some(event.sender.as_str()); - } - - let user_name = db.users.displayname(&event.sender)?; - notifi.sender_display_name = user_name.as_deref(); - - let room_name = if let Some(room_name_pdu) = - db.rooms - .room_state_get(&event.room_id, &StateEventType::RoomName, "")? - { - serde_json::from_str::(room_name_pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid room name event in database."))? 
- .name - } else { - None - }; - - notifi.room_name = room_name.as_deref(); - - send_request( - &db.globals, - url, - send_event_notification::v1::Request::new(notifi), - ) - .await?; - } - - // TODO: email - - Ok(()) + ) -> impl Iterator> + 'a; } diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 6b906c24..342763e8 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -1,348 +1,287 @@ -use crate::{Database, Error, PduEvent, Result}; -use bytes::BytesMut; -use ruma::{ - api::{ - client::push::{get_pushers, set_pusher, PusherKind}, - push_gateway::send_event_notification::{ - self, - v1::{Device, Notification, NotificationCounts, NotificationPriority}, - }, - IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, - }, - events::{ - room::{name::RoomNameEventContent, power_levels::RoomPowerLevelsEventContent}, - AnySyncRoomEvent, RoomEventType, StateEventType, - }, - push::{Action, PushConditionRoomCtx, PushFormat, Ruleset, Tweak}, - serde::Raw, - uint, RoomId, UInt, UserId, -}; -use tracing::{error, info, warn}; - -use std::{fmt::Debug, mem, sync::Arc}; - -use super::abstraction::Tree; - -pub struct PushData { - /// UserId + pushkey -> Pusher - pub(super) senderkey_pusher: Arc, -} +mod data; +pub use data::Data; -impl PushData { - #[tracing::instrument(skip(self, sender, pusher))] - pub fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> { - let mut key = sender.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(pusher.pushkey.as_bytes()); - - // There are 2 kinds of pushers but the spec says: null deletes the pusher. - if pusher.kind.is_none() { - return self - .senderkey_pusher - .remove(&key) - .map(|_| ()) - .map_err(Into::into); - } +use crate::service::*; - self.senderkey_pusher.insert( - &key, - &serde_json::to_vec(&pusher).expect("Pusher is valid JSON value"), - )?; +pub struct Service { + db: D, +} - Ok(()) +impl Service<_> { + pub fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> { + self.db.set_pusher(sender, pusher) } - #[tracing::instrument(skip(self, senderkey))] pub fn get_pusher(&self, senderkey: &[u8]) -> Result> { - self.senderkey_pusher - .get(senderkey)? 
- .map(|push| { - serde_json::from_slice(&*push) - .map_err(|_| Error::bad_database("Invalid Pusher in db.")) - }) - .transpose() + self.db.get_pusher(senderkey) } - #[tracing::instrument(skip(self, sender))] pub fn get_pushers(&self, sender: &UserId) -> Result> { - let mut prefix = sender.as_bytes().to_vec(); - prefix.push(0xff); - - self.senderkey_pusher - .scan_prefix(prefix) - .map(|(_, push)| { - serde_json::from_slice(&*push) - .map_err(|_| Error::bad_database("Invalid Pusher in db.")) - }) - .collect() + self.db.get_pushers(sender) } - #[tracing::instrument(skip(self, sender))] pub fn get_pusher_senderkeys<'a>( &'a self, sender: &UserId, ) -> impl Iterator> + 'a { - let mut prefix = sender.as_bytes().to_vec(); - prefix.push(0xff); - - self.senderkey_pusher.scan_prefix(prefix).map(|(k, _)| k) + self.db.get_pusher_senderkeys(sender) } -} -#[tracing::instrument(skip(globals, destination, request))] -pub async fn send_request( - globals: &crate::database::globals::Globals, - destination: &str, - request: T, -) -> Result -where - T: Debug, -{ - let destination = destination.replace("/_matrix/push/v1/notify", ""); - - let http_request = request - .try_into_http_request::( - &destination, - SendAccessToken::IfRequired(""), - &[MatrixVersion::V1_0], - ) - .map_err(|e| { - warn!("Failed to find destination {}: {}", destination, e); - Error::BadServerResponse("Invalid destination") - })? - .map(|body| body.freeze()); - - let reqwest_request = reqwest::Request::try_from(http_request) - .expect("all http requests are valid reqwest requests"); - - // TODO: we could keep this very short and let expo backoff do it's thing... - //*reqwest_request.timeout_mut() = Some(Duration::from_secs(5)); - - let url = reqwest_request.url().clone(); - let response = globals.default_client().execute(reqwest_request).await; - - match response { - Ok(mut response) => { - // reqwest::Response -> http::Response conversion - let status = response.status(); - let mut http_response_builder = http::Response::builder() - .status(status) - .version(response.version()); - mem::swap( - response.headers_mut(), - http_response_builder - .headers_mut() - .expect("http::response::Builder is usable"), - ); - - let body = response.bytes().await.unwrap_or_else(|e| { - warn!("server error {}", e); - Vec::new().into() - }); // TODO: handle timeout - - if status != 200 { - info!( - "Push gateway returned bad response {} {}\n{}\n{:?}", - destination, - status, - url, - crate::utils::string_from_bytes(&body) + #[tracing::instrument(skip(globals, destination, request))] + pub async fn send_request( + globals: &crate::database::globals::Globals, + destination: &str, + request: T, + ) -> Result + where + T: Debug, + { + let destination = destination.replace("/_matrix/push/v1/notify", ""); + + let http_request = request + .try_into_http_request::( + &destination, + SendAccessToken::IfRequired(""), + &[MatrixVersion::V1_0], + ) + .map_err(|e| { + warn!("Failed to find destination {}: {}", destination, e); + Error::BadServerResponse("Invalid destination") + })? + .map(|body| body.freeze()); + + let reqwest_request = reqwest::Request::try_from(http_request) + .expect("all http requests are valid reqwest requests"); + + // TODO: we could keep this very short and let expo backoff do it's thing... 
+ //*reqwest_request.timeout_mut() = Some(Duration::from_secs(5)); + + let url = reqwest_request.url().clone(); + let response = globals.default_client().execute(reqwest_request).await; + + match response { + Ok(mut response) => { + // reqwest::Response -> http::Response conversion + let status = response.status(); + let mut http_response_builder = http::Response::builder() + .status(status) + .version(response.version()); + mem::swap( + response.headers_mut(), + http_response_builder + .headers_mut() + .expect("http::response::Builder is usable"), ); - } - let response = T::IncomingResponse::try_from_http_response( - http_response_builder - .body(body) - .expect("reqwest body is valid http body"), - ); - response.map_err(|_| { - info!( - "Push gateway returned invalid response bytes {}\n{}", - destination, url + let body = response.bytes().await.unwrap_or_else(|e| { + warn!("server error {}", e); + Vec::new().into() + }); // TODO: handle timeout + + if status != 200 { + info!( + "Push gateway returned bad response {} {}\n{}\n{:?}", + destination, + status, + url, + crate::utils::string_from_bytes(&body) + ); + } + + let response = T::IncomingResponse::try_from_http_response( + http_response_builder + .body(body) + .expect("reqwest body is valid http body"), ); - Error::BadServerResponse("Push gateway returned bad response.") - }) + response.map_err(|_| { + info!( + "Push gateway returned invalid response bytes {}\n{}", + destination, url + ); + Error::BadServerResponse("Push gateway returned bad response.") + }) + } + Err(e) => Err(e.into()), } - Err(e) => Err(e.into()), } -} -#[tracing::instrument(skip(user, unread, pusher, ruleset, pdu, db))] -pub async fn send_push_notice( - user: &UserId, - unread: UInt, - pusher: &get_pushers::v3::Pusher, - ruleset: Ruleset, - pdu: &PduEvent, - db: &Database, -) -> Result<()> { - let mut notify = None; - let mut tweaks = Vec::new(); - - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - for action in get_actions( - user, - &ruleset, - &power_levels, - &pdu.to_sync_room_event(), - &pdu.room_id, - db, - )? { - let n = match action { - Action::DontNotify => false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => true, - Action::SetTweak(tweak) => { - tweaks.push(tweak.clone()); - continue; + #[tracing::instrument(skip(user, unread, pusher, ruleset, pdu, db))] + pub async fn send_push_notice( + user: &UserId, + unread: UInt, + pusher: &get_pushers::v3::Pusher, + ruleset: Ruleset, + pdu: &PduEvent, + db: &Database, + ) -> Result<()> { + let mut notify = None; + let mut tweaks = Vec::new(); + + let power_levels: RoomPowerLevelsEventContent = db + .rooms + .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? + .map(|ev| { + serde_json::from_str(ev.content.get()) + .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) + }) + .transpose()? + .unwrap_or_default(); + + for action in get_actions( + user, + &ruleset, + &power_levels, + &pdu.to_sync_room_event(), + &pdu.room_id, + db, + )? 
{ + let n = match action { + Action::DontNotify => false, + // TODO: Implement proper support for coalesce + Action::Notify | Action::Coalesce => true, + Action::SetTweak(tweak) => { + tweaks.push(tweak.clone()); + continue; + } + }; + + if notify.is_some() { + return Err(Error::bad_database( + r#"Malformed pushrule contains more than one of these actions: ["dont_notify", "notify", "coalesce"]"#, + )); } - }; - if notify.is_some() { - return Err(Error::bad_database( - r#"Malformed pushrule contains more than one of these actions: ["dont_notify", "notify", "coalesce"]"#, - )); + notify = Some(n); } - notify = Some(n); - } + if notify == Some(true) { + send_notice(unread, pusher, tweaks, pdu, db).await?; + } + // Else the event triggered no actions - if notify == Some(true) { - send_notice(unread, pusher, tweaks, pdu, db).await?; + Ok(()) } - // Else the event triggered no actions - Ok(()) -} - -#[tracing::instrument(skip(user, ruleset, pdu, db))] -pub fn get_actions<'a>( - user: &UserId, - ruleset: &'a Ruleset, - power_levels: &RoomPowerLevelsEventContent, - pdu: &Raw, - room_id: &RoomId, - db: &Database, -) -> Result<&'a [Action]> { - let ctx = PushConditionRoomCtx { - room_id: room_id.to_owned(), - member_count: 10_u32.into(), // TODO: get member count efficiently - user_display_name: db - .users - .displayname(user)? - .unwrap_or_else(|| user.localpart().to_owned()), - users_power_levels: power_levels.users.clone(), - default_power_level: power_levels.users_default, - notification_power_levels: power_levels.notifications.clone(), - }; - - Ok(ruleset.get_actions(pdu, &ctx)) -} - -#[tracing::instrument(skip(unread, pusher, tweaks, event, db))] -async fn send_notice( - unread: UInt, - pusher: &get_pushers::v3::Pusher, - tweaks: Vec, - event: &PduEvent, - db: &Database, -) -> Result<()> { - // TODO: email - if pusher.kind == PusherKind::Email { - return Ok(()); - } + #[tracing::instrument(skip(user, ruleset, pdu, db))] + pub fn get_actions<'a>( + user: &UserId, + ruleset: &'a Ruleset, + power_levels: &RoomPowerLevelsEventContent, + pdu: &Raw, + room_id: &RoomId, + db: &Database, + ) -> Result<&'a [Action]> { + let ctx = PushConditionRoomCtx { + room_id: room_id.to_owned(), + member_count: 10_u32.into(), // TODO: get member count efficiently + user_display_name: db + .users + .displayname(user)? + .unwrap_or_else(|| user.localpart().to_owned()), + users_power_levels: power_levels.users.clone(), + default_power_level: power_levels.users_default, + notification_power_levels: power_levels.notifications.clone(), + }; - // TODO: - // Two problems with this - // 1. if "event_id_only" is the only format kind it seems we should never add more info - // 2. 
can pusher/devices have conflicting formats - let event_id_only = pusher.data.format == Some(PushFormat::EventIdOnly); - let url = if let Some(url) = &pusher.data.url { - url - } else { - error!("Http Pusher must have URL specified."); - return Ok(()); - }; - - let mut device = Device::new(pusher.app_id.clone(), pusher.pushkey.clone()); - let mut data_minus_url = pusher.data.clone(); - // The url must be stripped off according to spec - data_minus_url.url = None; - device.data = data_minus_url; - - // Tweaks are only added if the format is NOT event_id_only - if !event_id_only { - device.tweaks = tweaks.clone(); + Ok(ruleset.get_actions(pdu, &ctx)) } - let d = &[device]; - let mut notifi = Notification::new(d); + #[tracing::instrument(skip(unread, pusher, tweaks, event, db))] + async fn send_notice( + unread: UInt, + pusher: &get_pushers::v3::Pusher, + tweaks: Vec, + event: &PduEvent, + db: &Database, + ) -> Result<()> { + // TODO: email + if pusher.kind == PusherKind::Email { + return Ok(()); + } - notifi.prio = NotificationPriority::Low; - notifi.event_id = Some(&event.event_id); - notifi.room_id = Some(&event.room_id); - // TODO: missed calls - notifi.counts = NotificationCounts::new(unread, uint!(0)); + // TODO: + // Two problems with this + // 1. if "event_id_only" is the only format kind it seems we should never add more info + // 2. can pusher/devices have conflicting formats + let event_id_only = pusher.data.format == Some(PushFormat::EventIdOnly); + let url = if let Some(url) = &pusher.data.url { + url + } else { + error!("Http Pusher must have URL specified."); + return Ok(()); + }; - if event.kind == RoomEventType::RoomEncrypted - || tweaks - .iter() - .any(|t| matches!(t, Tweak::Highlight(true) | Tweak::Sound(_))) - { - notifi.prio = NotificationPriority::High - } + let mut device = Device::new(pusher.app_id.clone(), pusher.pushkey.clone()); + let mut data_minus_url = pusher.data.clone(); + // The url must be stripped off according to spec + data_minus_url.url = None; + device.data = data_minus_url; - if event_id_only { - send_request( - &db.globals, - url, - send_event_notification::v1::Request::new(notifi), - ) - .await?; - } else { - notifi.sender = Some(&event.sender); - notifi.event_type = Some(&event.kind); - let content = serde_json::value::to_raw_value(&event.content).ok(); - notifi.content = content.as_deref(); - - if event.kind == RoomEventType::RoomMember { - notifi.user_is_target = event.state_key.as_deref() == Some(event.sender.as_str()); + // Tweaks are only added if the format is NOT event_id_only + if !event_id_only { + device.tweaks = tweaks.clone(); } - let user_name = db.users.displayname(&event.sender)?; - notifi.sender_display_name = user_name.as_deref(); + let d = &[device]; + let mut notifi = Notification::new(d); + + notifi.prio = NotificationPriority::Low; + notifi.event_id = Some(&event.event_id); + notifi.room_id = Some(&event.room_id); + // TODO: missed calls + notifi.counts = NotificationCounts::new(unread, uint!(0)); - let room_name = if let Some(room_name_pdu) = - db.rooms - .room_state_get(&event.room_id, &StateEventType::RoomName, "")? + if event.kind == RoomEventType::RoomEncrypted + || tweaks + .iter() + .any(|t| matches!(t, Tweak::Highlight(true) | Tweak::Sound(_))) { - serde_json::from_str::(room_name_pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid room name event in database."))? 
- .name + notifi.prio = NotificationPriority::High + } + + if event_id_only { + send_request( + &db.globals, + url, + send_event_notification::v1::Request::new(notifi), + ) + .await?; } else { - None - }; + notifi.sender = Some(&event.sender); + notifi.event_type = Some(&event.kind); + let content = serde_json::value::to_raw_value(&event.content).ok(); + notifi.content = content.as_deref(); - notifi.room_name = room_name.as_deref(); + if event.kind == RoomEventType::RoomMember { + notifi.user_is_target = event.state_key.as_deref() == Some(event.sender.as_str()); + } - send_request( - &db.globals, - url, - send_event_notification::v1::Request::new(notifi), - ) - .await?; - } + let user_name = db.users.displayname(&event.sender)?; + notifi.sender_display_name = user_name.as_deref(); + + let room_name = if let Some(room_name_pdu) = + db.rooms + .room_state_get(&event.room_id, &StateEventType::RoomName, "")? + { + serde_json::from_str::(room_name_pdu.content.get()) + .map_err(|_| Error::bad_database("Invalid room name event in database."))? + .name + } else { + None + }; + + notifi.room_name = room_name.as_deref(); + + send_request( + &db.globals, + url, + send_event_notification::v1::Request::new(notifi), + ) + .await?; + } - // TODO: email + // TODO: email - Ok(()) + Ok(()) + } } diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index c44d357c..a8e87b91 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -1,4 +1,13 @@ +mod data; +pub use data::Data; +use crate::service::*; + +pub struct Service { + db: D, +} + +impl Service<_> { pub fn get_or_create_shorteventid( &self, event_id: &EventId, @@ -222,4 +231,4 @@ } }) } - +} diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index 5b423d2d..4e5c3796 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -1,816 +1,41 @@ - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. - self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - match self - .lasttimelinecount_cache - .lock() - .unwrap() - .entry(room_id.to_owned()) - { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - .pdus_until(&sender_user, &room_id, u64::MAX)? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .map(|(pduid, _)| self.pdu_count(&pduid)) - .next() - { - Ok(*v.insert(last_count?)) - } else { - Ok(0) - } - } - hash_map::Entry::Occupied(o) => Ok(*o.get()), - } - } - - // TODO Is this the same as the function above? - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? 
- .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - - +pub trait Data { + fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result; /// Returns the `count` of this pdu's id. - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() - } + fn get_pdu_count(&self, event_id: &EventId) -> Result>; /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } + pub fn get_pdu_json(&self, event_id: &EventId) -> Result>; /// Returns the json of a pdu. pub fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } /// Returns the pdu's id. - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) - } + pub fn get_pdu_id(&self, event_id: &EventId) -> Result>>; /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } + pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result>; /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { - return Ok(Some(Arc::clone(p))); - } - - if let Some(pdu) = self - .eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - .map(Arc::new) - }) - .transpose()? - { - self.pdu_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), Arc::clone(&pdu)); - Ok(Some(pdu)) - } else { - Ok(None) - } - } + pub fn get_pdu(&self, event_id: &EventId) -> Result>>; /// Returns the pdu. /// /// This does __NOT__ check the outliers `Tree`. 
- pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } + pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result>; /// Returns the pdu as a `BTreeMap`. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } + pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result>; /// Returns the `count` of this pdu's id. - pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) - } + pub fn pdu_count(&self, pdu_id: &[u8]) -> Result; /// Removes a pdu and creates a new one with the same id. - #[tracing::instrument(skip(self))] - fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - if self.pduid_pdu.get(pdu_id)?.is_some() { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), - )?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "PDU does not exist.", - )) - } - } - - /// Creates a new persisted data unit and adds it to a room. - /// - /// By this point the incoming event should be fully authenticated, no auth happens - /// in `append_pdu`. - /// - /// Returns pdu id - #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] - pub fn append_pdu<'a>( - &self, - pdu: &PduEvent, - mut pdu_json: CanonicalJsonObject, - leaves: impl IntoIterator + Debug, - db: &Database, - ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); - - // Make unsigned fields correct. This is not properly documented in the spec, but state - // events need to have previous content in the unsigned field, so clients can easily - // interpret things like membership changes - if let Some(state_key) = &pdu.state_key { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) - { - if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) - .unwrap() - { - unsigned.insert( - "prev_content".to_owned(), - CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()) - .expect("event is valid, we just created it"), - ), - ); - } - } - } else { - error!("Invalid unsigned type in pdu."); - } - } - - // We must keep track of all events that have been referenced. 
- self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - self.replace_pdu_leaves(&pdu.room_id, leaves)?; - - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(pdu.room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - - let count1 = db.globals.next_count()?; - // Mark as read first so the sending client doesn't get a notification even if appending - // fails - self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; - self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; - - let count2 = db.globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&count2.to_be_bytes()); - - // There's a brief moment of time here where the count is updated but the pdu does not - // exist. This could theoretically lead to dropped pdus, but it's extremely rare - // - // Update: We fixed this using insert_lock - - self.pduid_pdu.insert( - &pdu_id, - &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - self.lasttimelinecount_cache - .lock() - .unwrap() - .insert(pdu.room_id.clone(), count2); - - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; - - drop(insert_lock); - - // See if the event matches any known pushers - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - let sync_pdu = pdu.to_sync_room_event(); - - let mut notifies = Vec::new(); - let mut highlights = Vec::new(); - - for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { - // Don't notify the user of their own events - if user == &pdu.sender { - continue; - } - - let rules_for_user = db - .account_data - .get( - None, - user, - GlobalAccountDataEventType::PushRules.to_string().into(), - )? - .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); - - let mut highlight = false; - let mut notify = false; - - for action in pusher::get_actions( - user, - &rules_for_user, - &power_levels, - &sync_pdu, - &pdu.room_id, - db, - )? 
{ - match action { - Action::DontNotify => notify = false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => notify = true, - Action::SetTweak(Tweak::Highlight(true)) => { - highlight = true; - } - _ => {} - }; - } - - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(pdu.room_id.as_bytes()); - - if notify { - notifies.push(userroom_id.clone()); - } - - if highlight { - highlights.push(userroom_id); - } - - for senderkey in db.pusher.get_pusher_senderkeys(user) { - db.sending.send_push_pdu(&*pdu_id, senderkey)?; - } - } - - self.userroomid_notificationcount - .increment_batch(&mut notifies.into_iter())?; - self.userroomid_highlightcount - .increment_batch(&mut highlights.into_iter())?; - - match pdu.kind { - RoomEventType::RoomRedaction => { - if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(redact_id, pdu)?; - } - } - RoomEventType::RoomMember => { - if let Some(state_key) = &pdu.state_key { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - // if the state_key fails - let target_user_id = UserId::parse(state_key.clone()) - .expect("This state_key was previously validated"); - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - let invite_state = match content.membership { - MembershipState::Invite => { - let state = self.calculate_invite_state(pdu)?; - Some(state) - } - _ => None, - }; - - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( - &pdu.room_id, - &target_user_id, - content.membership, - &pdu.sender, - invite_state, - db, - true, - )?; - } - } - RoomEventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody<'a> { - #[serde(borrow)] - body: Option>, - } - - let content = serde_json::from_str::>(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - DB.rooms.search.index_pdu(room_id, pdu_id, body)?; - - let admin_room = self.id_from_alias( - <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), - ) - .expect("#admins:server_name is a valid room alias"), - )?; - let server_user = format!("@conduit:{}", db.globals.server_name()); - - let to_conduit = body.starts_with(&format!("{}: ", server_user)); - - // This will evaluate to false if the emergency password is set up so that - // the administrator can execute commands as conduit - let from_conduit = - pdu.sender == server_user && db.globals.emergency_password().is_none(); - - if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - db.admin.process_message(body.to_string()); - } - } - } - _ => {} - } - - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - // If the RoomMember event has a non-empty state_key, it is targeted at someone. - // If it is our appservice user, we send this PDU to it. 
- if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - if let Some(appservice_uid) = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }) - { - if state_key_uid == &appservice_uid { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - } - } - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == RoomEventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - - Ok(pdu_id) - } - - pub fn create_hash_and_sign_event( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex - ) -> (PduEvent, CanonicalJsonObj) { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - } = pdu_builder; - - let prev_events: Vec<_> = db - .rooms - .get_pdu_leaves(room_id)? - .into_iter() - .take(20) - .collect(); - - let create_event = db - .rooms - .room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - // If there was no create event yet, assume we are creating a room with the default - // version right now - let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { - create_event.room_version - }); - let room_version = - RoomVersion::new(&room_version_id).expect("room version is supported"); - - let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(db.rooms.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = unsigned.unwrap_or_default(); - - if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - self.room_state_get(room_id, &event_type.to_string().into(), state_key)? 
- { - unsigned.insert( - "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), - ); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), - ); - } - } - - let pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater").into(), - room_id: room_id.to_owned(), - sender: sender_user.to_owned(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key, - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - to_canonical_value(db.globals.server_name()) - .expect("server name is a valid CanonicalJsonValue"), - ); - - match ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) { - Ok(_) => {} - Err(e) => { - return match e { - ruma::signatures::Error::PduSize => Err(Error::BadRequest( - ErrorKind::TooLarge, - "Message is too long", - )), - _ => Err(Error::BadRequest( - ErrorKind::Unknown, - "Signing event failed", - )), - } - } - } - - // Generate event id - pdu.event_id = EventId::parse_arc(format!( - "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - pdu_json.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), - ); - - // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; - } - - /// Creates a new persisted data unit and adds it to a room. This function takes a - /// roomid_mutex_state, meaning that only this function is able to mutate the room state. - #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex - ) -> Result> { - - let (pdu, pdu_json) = create_hash_and_sign_event()?; - - - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. 
- let statehashid = self.append_to_state(&pdu, &db.globals)?; - - let pdu_id = self.append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - iter::once(&*pdu.event_id), - db, - )?; - - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; - - let mut servers: HashSet> = - self.room_servers(room_id).filter_map(|r| r.ok()).collect(); - - // In case we are kicking or banning a user, we need to inform their server of the change - if pdu.kind == RoomEventType::RoomMember { - if let Some(state_key_uid) = &pdu - .state_key - .as_ref() - .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) - { - servers.insert(Box::from(state_key_uid.server_name())); - } - } - - // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(db.globals.server_name()); - - db.sending.send_pdu(servers.into_iter(), &pdu_id)?; - - Ok(pdu.event_id) - } - - /// Append the incoming event setting the state snapshot to the state from the - /// server that sent the event. - #[tracing::instrument(skip_all)] - fn append_incoming_pdu<'a>( - db: &Database, - pdu: &PduEvent, - pdu_json: CanonicalJsonObject, - new_room_leaves: impl IntoIterator + Clone + Debug, - state_ids_compressed: HashSet, - soft_fail: bool, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex - ) -> Result>> { - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. - db.rooms.set_event_state( - &pdu.event_id, - &pdu.room_id, - state_ids_compressed, - &db.globals, - )?; - - if soft_fail { - db.rooms - .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - db.rooms.replace_pdu_leaves(&pdu.room_id, new_room_leaves)?; - return Ok(None); - } - - let pdu_id = db.rooms.append_pdu(pdu, pdu_json, new_room_leaves, db)?; - - Ok(Some(pdu_id)) - } - - /// Returns an iterator over all PDUs in a room. - #[tracing::instrument(skip(self))] - pub fn all_pdus<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result, PduEvent)>> + 'a> { - self.pdus_since(user_id, room_id, 0) - } + fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()>; /// Returns an iterator over all events in a room that happened after the event with id `since` /// in chronological order. @@ -820,32 +45,7 @@ user_id: &UserId, room_id: &RoomId, since: u64, - ) -> Result, PduEvent)>> + 'a> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Skip the first pdu if it's exactly at since, because we sent that last time - let mut first_pdu_id = prefix.clone(); - first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(&first_pdu_id, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } + ) -> Result, PduEvent)>> + 'a>; /// Returns an iterator over all events and their tokens in a room that happened before the /// event with id `until` in reverse-chronological order. 
@@ -855,83 +55,12 @@ user_id: &UserId, room_id: &RoomId, until: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` - - let current: &[u8] = ¤t; + ) -> Result, PduEvent)>> + 'a>; - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their token in a room that happened after the event - /// with id `from` in chronological order. - #[tracing::instrument(skip(self))] pub fn pdus_after<'a>( &'a self, user_id: &UserId, room_id: &RoomId, from: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Replace a PDU with the redacted form. - #[tracing::instrument(skip(self, reason))] - pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { - if let Some(pdu_id) = self.get_pdu_id(event_id)? { - let mut pdu = self - .get_pdu_from_id(&pdu_id)? - .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact(reason)?; - self.replace_pdu(&pdu_id, &pdu)?; - } - // If event does not exist, just noop - Ok(()) - } - + ) -> Result, PduEvent)>> + 'a>; +} diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 5b423d2d..c6393c68 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1,4 +1,14 @@ +mod data; +pub use data::Data; +use crate::service::*; + +pub struct Service { + db: D, +} + +impl Service<_> { + /* /// Checks if a room exists. #[tracing::instrument(skip(self))] pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { @@ -20,38 +30,15 @@ .next() .transpose() } + */ #[tracing::instrument(skip(self))] pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - match self - .lasttimelinecount_cache - .lock() - .unwrap() - .entry(room_id.to_owned()) - { - hash_map::Entry::Vacant(v) => { - if let Some(last_count) = self - .pdus_until(&sender_user, &room_id, u64::MAX)? 
- .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .map(|(pduid, _)| self.pdu_count(&pduid)) - .next() - { - Ok(*v.insert(last_count?)) - } else { - Ok(0) - } - } - hash_map::Entry::Occupied(o) => Ok(*o.get()), - } + self.db.last_timeline_count(sender_user: &UserId, room_id: &RoomId) } // TODO Is this the same as the function above? + /* #[tracing::instrument(skip(self))] pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { let prefix = self @@ -71,33 +58,16 @@ .transpose() .map(|op| op.unwrap_or_default()) } - - + */ /// Returns the `count` of this pdu's id. pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() + self.db.get_pdu_count(event_id) } /// Returns the json of a pdu. pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() + self.db.get_pdu_json(event_id) } /// Returns the json of a pdu. @@ -105,122 +75,49 @@ &self, event_id: &EventId, ) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() + self.db.get_non_outlier_pdu(event_id) } /// Returns the pdu's id. pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) + self.db.get_pdu_id(event_id) } /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() + self.db.get_non_outlier_pdu(event_id) } /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { - return Ok(Some(Arc::clone(p))); - } - - if let Some(pdu) = self - .eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - .map(Arc::new) - }) - .transpose()? - { - self.pdu_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), Arc::clone(&pdu)); - Ok(Some(pdu)) - } else { - Ok(None) - } + self.db.get_pdu(event_id) } /// Returns the pdu. /// /// This does __NOT__ check the outliers `Tree`. 
pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) + self.db.get_pdu_from_id(pdu_id) } /// Returns the pdu as a `BTreeMap`. pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) + self.db.get_pdu_json_from_id(pdu_id) } /// Returns the `count` of this pdu's id. pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) + self.db.pdu_count(pdu_id) } /// Removes a pdu and creates a new one with the same id. #[tracing::instrument(skip(self))] fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - if self.pduid_pdu.get(pdu_id)?.is_some() { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), - )?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "PDU does not exist.", - )) - } + self.db.pdu_count(pdu_id, pdu: &PduEvent) } /// Creates a new persisted data unit and adds it to a room. @@ -803,7 +700,6 @@ } /// Returns an iterator over all PDUs in a room. - #[tracing::instrument(skip(self))] pub fn all_pdus<'a>( &'a self, user_id: &UserId, @@ -814,37 +710,13 @@ /// Returns an iterator over all events in a room that happened after the event with id `since` /// in chronological order. - #[tracing::instrument(skip(self))] pub fn pdus_since<'a>( &'a self, user_id: &UserId, room_id: &RoomId, since: u64, ) -> Result, PduEvent)>> + 'a> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Skip the first pdu if it's exactly at since, because we sent that last time - let mut first_pdu_id = prefix.clone(); - first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(&first_pdu_id, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) + self.db.pdus_since(user_id, room_id, since) } /// Returns an iterator over all events and their tokens in a room that happened before the @@ -856,32 +728,7 @@ room_id: &RoomId, until: u64, ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? 
- .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) + self.db.pdus_until(user_id, room_id, until) } /// Returns an iterator over all events and their token in a room that happened after the event @@ -893,32 +740,7 @@ room_id: &RoomId, from: u64, ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event - - let current: &[u8] = ¤t; - - let user_id = user_id.to_owned(); - - Ok(self - .pduid_pdu - .iter_from(current, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) + self.db.pdus_after(user_id, room_id, from) } /// Replace a PDU with the redacted form. diff --git a/src/service/transaction_ids/data.rs b/src/service/transaction_ids/data.rs new file mode 100644 index 00000000..f1ff5f88 --- /dev/null +++ b/src/service/transaction_ids/data.rs @@ -0,0 +1,16 @@ +pub trait Data { + pub fn add_txnid( + &self, + user_id: &UserId, + device_id: Option<&DeviceId>, + txn_id: &TransactionId, + data: &[u8], + ) -> Result<()>; + + pub fn existing_txnid( + &self, + user_id: &UserId, + device_id: Option<&DeviceId>, + txn_id: &TransactionId, + ) -> Result>>; +} diff --git a/src/service/transaction_ids/mod.rs b/src/service/transaction_ids/mod.rs new file mode 100644 index 00000000..d944847e --- /dev/null +++ b/src/service/transaction_ids/mod.rs @@ -0,0 +1,44 @@ +mod data; +pub use data::Data; + +use crate::service::*; + +pub struct Service { + db: D, +} + +impl Service<_> { + pub fn add_txnid( + &self, + user_id: &UserId, + device_id: Option<&DeviceId>, + txn_id: &TransactionId, + data: &[u8], + ) -> Result<()> { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(device_id.map(|d| d.as_bytes()).unwrap_or_default()); + key.push(0xff); + key.extend_from_slice(txn_id.as_bytes()); + + self.userdevicetxnid_response.insert(&key, data)?; + + Ok(()) + } + + pub fn existing_txnid( + &self, + user_id: &UserId, + device_id: Option<&DeviceId>, + txn_id: &TransactionId, + ) -> Result>> { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(device_id.map(|d| d.as_bytes()).unwrap_or_default()); + key.push(0xff); + key.extend_from_slice(txn_id.as_bytes()); + + // If there's no entry, this is a new transaction + self.userdevicetxnid_response.get(&key) + } +} diff --git a/src/service/users/data.rs b/src/service/users/data.rs index 7c15f1d8..d99d0328 100644 --- a/src/service/users/data.rs +++ b/src/service/users/data.rs @@ -1,396 +1,86 @@ -use crate::{utils, Error, Result}; -use ruma::{ - api::client::{device::Device, 
error::ErrorKind, filter::IncomingFilterDefinition}, - encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, - events::{AnyToDeviceEvent, StateEventType}, - serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, MxcUri, RoomAliasId, - UInt, UserId, -}; -use std::{collections::BTreeMap, mem, sync::Arc}; -use tracing::warn; - -use super::abstraction::Tree; - -pub struct Users { - pub(super) userid_password: Arc, - pub(super) userid_displayname: Arc, - pub(super) userid_avatarurl: Arc, - pub(super) userid_blurhash: Arc, - pub(super) userdeviceid_token: Arc, - pub(super) userdeviceid_metadata: Arc, // This is also used to check if a device exists - pub(super) userid_devicelistversion: Arc, // DevicelistVersion = u64 - pub(super) token_userdeviceid: Arc, - - pub(super) onetimekeyid_onetimekeys: Arc, // OneTimeKeyId = UserId + DeviceKeyId - pub(super) userid_lastonetimekeyupdate: Arc, // LastOneTimeKeyUpdate = Count - pub(super) keychangeid_userid: Arc, // KeyChangeId = UserId/RoomId + Count - pub(super) keyid_key: Arc, // KeyId = UserId + KeyId (depends on key type) - pub(super) userid_masterkeyid: Arc, - pub(super) userid_selfsigningkeyid: Arc, - pub(super) userid_usersigningkeyid: Arc, - - pub(super) userfilterid_filter: Arc, // UserFilterId = UserId + FilterId - - pub(super) todeviceid_events: Arc, // ToDeviceId = UserId + DeviceId + Count -} - -impl Users { +pub trait Data { /// Check if a user has an account on this homeserver. - #[tracing::instrument(skip(self, user_id))] - pub fn exists(&self, user_id: &UserId) -> Result { - Ok(self.userid_password.get(user_id.as_bytes())?.is_some()) - } + pub fn exists(&self, user_id: &UserId) -> Result; /// Check if account is deactivated - #[tracing::instrument(skip(self, user_id))] - pub fn is_deactivated(&self, user_id: &UserId) -> Result { - Ok(self - .userid_password - .get(user_id.as_bytes())? - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "User does not exist.", - ))? - .is_empty()) - } + pub fn is_deactivated(&self, user_id: &UserId) -> Result; /// Check if a user is an admin - #[tracing::instrument(skip(self, user_id, rooms, globals))] pub fn is_admin( &self, user_id: &UserId, rooms: &super::rooms::Rooms, globals: &super::globals::Globals, - ) -> Result { - let admin_room_alias_id = RoomAliasId::parse(format!("#admins:{}", globals.server_name())) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; - let admin_room_id = rooms.id_from_alias(&admin_room_alias_id)?.unwrap(); - - rooms.is_joined(user_id, &admin_room_id) - } + ) -> Result; /// Create a new user account on this homeserver. - #[tracing::instrument(skip(self, user_id, password))] - pub fn create(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { - self.set_password(user_id, password)?; - Ok(()) - } + pub fn create(&self, user_id: &UserId, password: Option<&str>) -> Result<()>; /// Returns the number of users registered on this server. - #[tracing::instrument(skip(self))] - pub fn count(&self) -> Result { - Ok(self.userid_password.iter().count()) - } + pub fn count(&self) -> Result; /// Find out which user an access token belongs to. - #[tracing::instrument(skip(self, token))] - pub fn find_from_token(&self, token: &str) -> Result, String)>> { - self.token_userdeviceid - .get(token.as_bytes())? 
- .map_or(Ok(None), |bytes| { - let mut parts = bytes.split(|&b| b == 0xff); - let user_bytes = parts.next().ok_or_else(|| { - Error::bad_database("User ID in token_userdeviceid is invalid.") - })?; - let device_bytes = parts.next().ok_or_else(|| { - Error::bad_database("Device ID in token_userdeviceid is invalid.") - })?; - - Ok(Some(( - UserId::parse(utils::string_from_bytes(user_bytes).map_err(|_| { - Error::bad_database("User ID in token_userdeviceid is invalid unicode.") - })?) - .map_err(|_| { - Error::bad_database("User ID in token_userdeviceid is invalid.") - })?, - utils::string_from_bytes(device_bytes).map_err(|_| { - Error::bad_database("Device ID in token_userdeviceid is invalid.") - })?, - ))) - }) - } + pub fn find_from_token(&self, token: &str) -> Result, String)>>; /// Returns an iterator over all users on this homeserver. - #[tracing::instrument(skip(self))] - pub fn iter(&self) -> impl Iterator>> + '_ { - self.userid_password.iter().map(|(bytes, _)| { - UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("User ID in userid_password is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("User ID in userid_password is invalid.")) - }) - } + pub fn iter(&self) -> impl Iterator>> + '_; /// Returns a list of local users as list of usernames. /// /// A user account is considered `local` if the length of it's password is greater then zero. - #[tracing::instrument(skip(self))] - pub fn list_local_users(&self) -> Result> { - let users: Vec = self - .userid_password - .iter() - .filter_map(|(username, pw)| self.get_username_with_valid_password(&username, &pw)) - .collect(); - Ok(users) - } + pub fn list_local_users(&self) -> Result>; /// Will only return with Some(username) if the password was not empty and the /// username could be successfully parsed. /// If utils::string_from_bytes(...) returns an error that username will be skipped /// and the error will be logged. - #[tracing::instrument(skip(self))] - fn get_username_with_valid_password(&self, username: &[u8], password: &[u8]) -> Option { - // A valid password is not empty - if password.is_empty() { - None - } else { - match utils::string_from_bytes(username) { - Ok(u) => Some(u), - Err(e) => { - warn!( - "Failed to parse username while calling get_local_users(): {}", - e.to_string() - ); - None - } - } - } - } + fn get_username_with_valid_password(&self, username: &[u8], password: &[u8]) -> Option; /// Returns the password hash for the given user. - #[tracing::instrument(skip(self, user_id))] - pub fn password_hash(&self, user_id: &UserId) -> Result> { - self.userid_password - .get(user_id.as_bytes())? 
- .map_or(Ok(None), |bytes| { - Ok(Some(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Password hash in db is not valid string.") - })?)) - }) - } + pub fn password_hash(&self, user_id: &UserId) -> Result>; /// Hash and set the user's password to the Argon2 hash - #[tracing::instrument(skip(self, user_id, password))] - pub fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { - if let Some(password) = password { - if let Ok(hash) = utils::calculate_hash(password) { - self.userid_password - .insert(user_id.as_bytes(), hash.as_bytes())?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Password does not meet the requirements.", - )) - } - } else { - self.userid_password.insert(user_id.as_bytes(), b"")?; - Ok(()) - } - } + pub fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()>; /// Returns the displayname of a user on this homeserver. - #[tracing::instrument(skip(self, user_id))] - pub fn displayname(&self, user_id: &UserId) -> Result> { - self.userid_displayname - .get(user_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Displayname in db is invalid.") - })?)) - }) - } + pub fn displayname(&self, user_id: &UserId) -> Result>; /// Sets a new displayname or removes it if displayname is None. You still need to nofify all rooms of this change. - #[tracing::instrument(skip(self, user_id, displayname))] - pub fn set_displayname(&self, user_id: &UserId, displayname: Option) -> Result<()> { - if let Some(displayname) = displayname { - self.userid_displayname - .insert(user_id.as_bytes(), displayname.as_bytes())?; - } else { - self.userid_displayname.remove(user_id.as_bytes())?; - } - - Ok(()) - } + pub fn set_displayname(&self, user_id: &UserId, displayname: Option) -> Result<()>; /// Get the avatar_url of a user. - #[tracing::instrument(skip(self, user_id))] - pub fn avatar_url(&self, user_id: &UserId) -> Result>> { - self.userid_avatarurl - .get(user_id.as_bytes())? - .map(|bytes| { - let s = utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Avatar URL in db is invalid."))?; - s.try_into() - .map_err(|_| Error::bad_database("Avatar URL in db is invalid.")) - }) - .transpose() - } + pub fn avatar_url(&self, user_id: &UserId) -> Result>>; /// Sets a new avatar_url or removes it if avatar_url is None. - #[tracing::instrument(skip(self, user_id, avatar_url))] - pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option>) -> Result<()> { - if let Some(avatar_url) = avatar_url { - self.userid_avatarurl - .insert(user_id.as_bytes(), avatar_url.to_string().as_bytes())?; - } else { - self.userid_avatarurl.remove(user_id.as_bytes())?; - } - - Ok(()) - } + pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option>) -> Result<()>; /// Get the blurhash of a user. - #[tracing::instrument(skip(self, user_id))] - pub fn blurhash(&self, user_id: &UserId) -> Result> { - self.userid_blurhash - .get(user_id.as_bytes())? - .map(|bytes| { - let s = utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Avatar URL in db is invalid."))?; - - Ok(s) - }) - .transpose() - } + pub fn blurhash(&self, user_id: &UserId) -> Result>; /// Sets a new avatar_url or removes it if avatar_url is None. 
- #[tracing::instrument(skip(self, user_id, blurhash))] - pub fn set_blurhash(&self, user_id: &UserId, blurhash: Option) -> Result<()> { - if let Some(blurhash) = blurhash { - self.userid_blurhash - .insert(user_id.as_bytes(), blurhash.as_bytes())?; - } else { - self.userid_blurhash.remove(user_id.as_bytes())?; - } - - Ok(()) - } + pub fn set_blurhash(&self, user_id: &UserId, blurhash: Option) -> Result<()>; /// Adds a new device to a user. - #[tracing::instrument(skip(self, user_id, device_id, token, initial_device_display_name))] pub fn create_device( &self, user_id: &UserId, device_id: &DeviceId, token: &str, initial_device_display_name: Option, - ) -> Result<()> { - // This method should never be called for nonexistent users. - assert!(self.exists(user_id)?); - - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); - - self.userid_devicelistversion - .increment(user_id.as_bytes())?; - - self.userdeviceid_metadata.insert( - &userdeviceid, - &serde_json::to_vec(&Device { - device_id: device_id.into(), - display_name: initial_device_display_name, - last_seen_ip: None, // TODO - last_seen_ts: Some(MilliSecondsSinceUnixEpoch::now()), - }) - .expect("Device::to_string never fails."), - )?; - - self.set_token(user_id, device_id, token)?; - - Ok(()) - } + ) -> Result<()>; /// Removes a device from a user. - #[tracing::instrument(skip(self, user_id, device_id))] - pub fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); - - // Remove tokens - if let Some(old_token) = self.userdeviceid_token.get(&userdeviceid)? { - self.userdeviceid_token.remove(&userdeviceid)?; - self.token_userdeviceid.remove(&old_token)?; - } - - // Remove todevice events - let mut prefix = userdeviceid.clone(); - prefix.push(0xff); - - for (key, _) in self.todeviceid_events.scan_prefix(prefix) { - self.todeviceid_events.remove(&key)?; - } - - // TODO: Remove onetimekeys - - self.userid_devicelistversion - .increment(user_id.as_bytes())?; - - self.userdeviceid_metadata.remove(&userdeviceid)?; - - Ok(()) - } + pub fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()>; /// Returns an iterator over all device ids of this user. - #[tracing::instrument(skip(self, user_id))] pub fn all_device_ids<'a>( &'a self, user_id: &UserId, - ) -> impl Iterator>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - // All devices have metadata - self.userdeviceid_metadata - .scan_prefix(prefix) - .map(|(bytes, _)| { - Ok(utils::string_from_bytes( - bytes - .rsplit(|&b| b == 0xff) - .next() - .ok_or_else(|| Error::bad_database("UserDevice ID in db is invalid."))?, - ) - .map_err(|_| Error::bad_database("Device ID in userdeviceid_metadata is invalid."))? - .into()) - }) - } + ) -> impl Iterator>> + 'a; /// Replaces the access token of one device. - #[tracing::instrument(skip(self, user_id, device_id, token))] - pub fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> { - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); - - // All devices have metadata - assert!(self.userdeviceid_metadata.get(&userdeviceid)?.is_some()); - - // Remove old token - if let Some(old_token) = self.userdeviceid_token.get(&userdeviceid)? 
{ - self.token_userdeviceid.remove(&old_token)?; - // It will be removed from userdeviceid_token by the insert later - } + pub fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()>; - // Assign token to user device combination - self.userdeviceid_token - .insert(&userdeviceid, token.as_bytes())?; - self.token_userdeviceid - .insert(token.as_bytes(), &userdeviceid)?; - - Ok(()) - } - - #[tracing::instrument(skip( - self, - user_id, - device_id, - one_time_key_key, - one_time_key_value, - globals - ))] pub fn add_one_time_key( &self, user_id: &UserId, @@ -398,121 +88,24 @@ impl Users { one_time_key_key: &DeviceKeyId, one_time_key_value: &Raw, globals: &super::globals::Globals, - ) -> Result<()> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - - // All devices have metadata - // Only existing devices should be able to call this. - assert!(self.userdeviceid_metadata.get(&key)?.is_some()); - - key.push(0xff); - // TODO: Use DeviceKeyId::to_string when it's available (and update everything, - // because there are no wrapping quotation marks anymore) - key.extend_from_slice( - serde_json::to_string(one_time_key_key) - .expect("DeviceKeyId::to_string always works") - .as_bytes(), - ); - - self.onetimekeyid_onetimekeys.insert( - &key, - &serde_json::to_vec(&one_time_key_value).expect("OneTimeKey::to_vec always works"), - )?; - - self.userid_lastonetimekeyupdate - .insert(user_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - - Ok(()) - } + ) -> Result<()>; - #[tracing::instrument(skip(self, user_id))] - pub fn last_one_time_keys_update(&self, user_id: &UserId) -> Result { - self.userid_lastonetimekeyupdate - .get(user_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") - }) - }) - .unwrap_or(Ok(0)) - } + pub fn last_one_time_keys_update(&self, user_id: &UserId) -> Result; - #[tracing::instrument(skip(self, user_id, device_id, key_algorithm, globals))] pub fn take_one_time_key( &self, user_id: &UserId, device_id: &DeviceId, key_algorithm: &DeviceKeyAlgorithm, globals: &super::globals::Globals, - ) -> Result, Raw)>> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.push(b'"'); // Annoying quotation mark - prefix.extend_from_slice(key_algorithm.as_ref().as_bytes()); - prefix.push(b':'); + ) -> Result, Raw)>>; - self.userid_lastonetimekeyupdate - .insert(user_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - - self.onetimekeyid_onetimekeys - .scan_prefix(prefix) - .next() - .map(|(key, value)| { - self.onetimekeyid_onetimekeys.remove(&key)?; - - Ok(( - serde_json::from_slice( - &*key - .rsplit(|&b| b == 0xff) - .next() - .ok_or_else(|| Error::bad_database("OneTimeKeyId in db is invalid."))?, - ) - .map_err(|_| Error::bad_database("OneTimeKeyId in db is invalid."))?, - serde_json::from_slice(&*value) - .map_err(|_| Error::bad_database("OneTimeKeys in db are invalid."))?, - )) - }) - .transpose() - } - - #[tracing::instrument(skip(self, user_id, device_id))] pub fn count_one_time_keys( &self, user_id: &UserId, device_id: &DeviceId, - ) -> Result> { - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); - - let mut counts = BTreeMap::new(); + ) -> Result>; - for algorithm in - self.onetimekeyid_onetimekeys - 
.scan_prefix(userdeviceid) - .map(|(bytes, _)| { - Ok::<_, Error>( - serde_json::from_slice::>( - &*bytes.rsplit(|&b| b == 0xff).next().ok_or_else(|| { - Error::bad_database("OneTimeKey ID in db is invalid.") - })?, - ) - .map_err(|_| Error::bad_database("DeviceKeyId in db is invalid."))? - .algorithm(), - ) - }) - { - *counts.entry(algorithm?).or_default() += UInt::from(1_u32); - } - - Ok(counts) - } - - #[tracing::instrument(skip(self, user_id, device_id, device_keys, rooms, globals))] pub fn add_device_keys( &self, user_id: &UserId, @@ -520,29 +113,8 @@ impl Users { device_keys: &Raw, rooms: &super::rooms::Rooms, globals: &super::globals::Globals, - ) -> Result<()> { - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); - - self.keyid_key.insert( - &userdeviceid, - &serde_json::to_vec(&device_keys).expect("DeviceKeys::to_vec always works"), - )?; + ) -> Result<()>; - self.mark_device_key_update(user_id, rooms, globals)?; - - Ok(()) - } - - #[tracing::instrument(skip( - self, - master_key, - self_signing_key, - user_signing_key, - rooms, - globals - ))] pub fn add_cross_signing_keys( &self, user_id: &UserId, @@ -551,114 +123,8 @@ impl Users { user_signing_key: &Option>, rooms: &super::rooms::Rooms, globals: &super::globals::Globals, - ) -> Result<()> { - // TODO: Check signatures - - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - // Master key - let mut master_key_ids = master_key - .deserialize() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid master key"))? - .keys - .into_values(); - - let master_key_id = master_key_ids.next().ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Master key contained no key.", - ))?; - - if master_key_ids.next().is_some() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Master key contained more than one key.", - )); - } - - let mut master_key_key = prefix.clone(); - master_key_key.extend_from_slice(master_key_id.as_bytes()); - - self.keyid_key - .insert(&master_key_key, master_key.json().get().as_bytes())?; - - self.userid_masterkeyid - .insert(user_id.as_bytes(), &master_key_key)?; - - // Self-signing key - if let Some(self_signing_key) = self_signing_key { - let mut self_signing_key_ids = self_signing_key - .deserialize() - .map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Invalid self signing key") - })? - .keys - .into_values(); - - let self_signing_key_id = self_signing_key_ids.next().ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Self signing key contained no key.", - ))?; - - if self_signing_key_ids.next().is_some() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Self signing key contained more than one key.", - )); - } - - let mut self_signing_key_key = prefix.clone(); - self_signing_key_key.extend_from_slice(self_signing_key_id.as_bytes()); + ) -> Result<()>; - self.keyid_key.insert( - &self_signing_key_key, - self_signing_key.json().get().as_bytes(), - )?; - - self.userid_selfsigningkeyid - .insert(user_id.as_bytes(), &self_signing_key_key)?; - } - - // User-signing key - if let Some(user_signing_key) = user_signing_key { - let mut user_signing_key_ids = user_signing_key - .deserialize() - .map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Invalid user signing key") - })? 
- .keys - .into_values(); - - let user_signing_key_id = user_signing_key_ids.next().ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "User signing key contained no key.", - ))?; - - if user_signing_key_ids.next().is_some() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "User signing key contained more than one key.", - )); - } - - let mut user_signing_key_key = prefix; - user_signing_key_key.extend_from_slice(user_signing_key_id.as_bytes()); - - self.keyid_key.insert( - &user_signing_key_key, - user_signing_key.json().get().as_bytes(), - )?; - - self.userid_usersigningkeyid - .insert(user_id.as_bytes(), &user_signing_key_key)?; - } - - self.mark_device_key_update(user_id, rooms, globals)?; - - Ok(()) - } - - #[tracing::instrument(skip(self, target_id, key_id, signature, sender_id, rooms, globals))] pub fn sign_key( &self, target_id: &UserId, @@ -667,196 +133,42 @@ impl Users { sender_id: &UserId, rooms: &super::rooms::Rooms, globals: &super::globals::Globals, - ) -> Result<()> { - let mut key = target_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(key_id.as_bytes()); - - let mut cross_signing_key: serde_json::Value = - serde_json::from_slice(&self.keyid_key.get(&key)?.ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Tried to sign nonexistent key.", - ))?) - .map_err(|_| Error::bad_database("key in keyid_key is invalid."))?; - - let signatures = cross_signing_key - .get_mut("signatures") - .ok_or_else(|| Error::bad_database("key in keyid_key has no signatures field."))? - .as_object_mut() - .ok_or_else(|| Error::bad_database("key in keyid_key has invalid signatures field."))? - .entry(sender_id.to_owned()) - .or_insert_with(|| serde_json::Map::new().into()); - - signatures - .as_object_mut() - .ok_or_else(|| Error::bad_database("signatures in keyid_key for a user is invalid."))? - .insert(signature.0, signature.1.into()); - - self.keyid_key.insert( - &key, - &serde_json::to_vec(&cross_signing_key).expect("CrossSigningKey::to_vec always works"), - )?; + ) -> Result<()>; - // TODO: Should we notify about this change? - self.mark_device_key_update(target_id, rooms, globals)?; - - Ok(()) - } - - #[tracing::instrument(skip(self, user_or_room_id, from, to))] pub fn keys_changed<'a>( &'a self, user_or_room_id: &str, from: u64, to: Option, - ) -> impl Iterator>> + 'a { - let mut prefix = user_or_room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut start = prefix.clone(); - start.extend_from_slice(&(from + 1).to_be_bytes()); - - let to = to.unwrap_or(u64::MAX); + ) -> impl Iterator>> + 'a; - self.keychangeid_userid - .iter_from(&start, false) - .take_while(move |(k, _)| { - k.starts_with(&prefix) - && if let Some(current) = k.splitn(2, |&b| b == 0xff).nth(1) { - if let Ok(c) = utils::u64_from_bytes(current) { - c <= to - } else { - warn!("BadDatabase: Could not parse keychangeid_userid bytes"); - false - } - } else { - warn!("BadDatabase: Could not parse keychangeid_userid"); - false - } - }) - .map(|(_, bytes)| { - UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("User ID in devicekeychangeid_userid is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("User ID in devicekeychangeid_userid is invalid.")) - }) - } - - #[tracing::instrument(skip(self, user_id, rooms, globals))] pub fn mark_device_key_update( &self, user_id: &UserId, rooms: &super::rooms::Rooms, globals: &super::globals::Globals, - ) -> Result<()> { - let count = globals.next_count()?.to_be_bytes(); - for room_id in rooms.rooms_joined(user_id).filter_map(|r| r.ok()) { - // Don't send key updates to unencrypted rooms - if rooms - .room_state_get(&room_id, &StateEventType::RoomEncryption, "")? - .is_none() - { - continue; - } - - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(&count); - - self.keychangeid_userid.insert(&key, user_id.as_bytes())?; - } + ) -> Result<()>; - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(&count); - self.keychangeid_userid.insert(&key, user_id.as_bytes())?; - - Ok(()) - } - - #[tracing::instrument(skip(self, user_id, device_id))] pub fn get_device_keys( &self, user_id: &UserId, device_id: &DeviceId, - ) -> Result>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - - self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { - Ok(Some(serde_json::from_slice(&bytes).map_err(|_| { - Error::bad_database("DeviceKeys in db are invalid.") - })?)) - }) - } + ) -> Result>>; - #[tracing::instrument(skip(self, user_id, allowed_signatures))] pub fn get_master_key bool>( &self, user_id: &UserId, allowed_signatures: F, - ) -> Result>> { - self.userid_masterkeyid - .get(user_id.as_bytes())? - .map_or(Ok(None), |key| { - self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { - let mut cross_signing_key = serde_json::from_slice::(&bytes) - .map_err(|_| Error::bad_database("CrossSigningKey in db is invalid."))?; - clean_signatures(&mut cross_signing_key, user_id, allowed_signatures)?; + ) -> Result>>; - Ok(Some(Raw::from_json( - serde_json::value::to_raw_value(&cross_signing_key) - .expect("Value to RawValue serialization"), - ))) - }) - }) - } - - #[tracing::instrument(skip(self, user_id, allowed_signatures))] pub fn get_self_signing_key bool>( &self, user_id: &UserId, allowed_signatures: F, - ) -> Result>> { - self.userid_selfsigningkeyid - .get(user_id.as_bytes())? - .map_or(Ok(None), |key| { - self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { - let mut cross_signing_key = serde_json::from_slice::(&bytes) - .map_err(|_| Error::bad_database("CrossSigningKey in db is invalid."))?; - clean_signatures(&mut cross_signing_key, user_id, allowed_signatures)?; - - Ok(Some(Raw::from_json( - serde_json::value::to_raw_value(&cross_signing_key) - .expect("Value to RawValue serialization"), - ))) - }) - }) - } + ) -> Result>>; - #[tracing::instrument(skip(self, user_id))] - pub fn get_user_signing_key(&self, user_id: &UserId) -> Result>> { - self.userid_usersigningkeyid - .get(user_id.as_bytes())? 
- .map_or(Ok(None), |key| { - self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { - Ok(Some(serde_json::from_slice(&bytes).map_err(|_| { - Error::bad_database("CrossSigningKey in db is invalid.") - })?)) - }) - }) - } + pub fn get_user_signing_key(&self, user_id: &UserId) -> Result>>; - #[tracing::instrument(skip( - self, - sender, - target_user_id, - target_device_id, - event_type, - content, - globals - ))] pub fn add_to_device_event( &self, sender: &UserId, @@ -865,237 +177,52 @@ impl Users { event_type: &str, content: serde_json::Value, globals: &super::globals::Globals, - ) -> Result<()> { - let mut key = target_user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(target_device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(&globals.next_count()?.to_be_bytes()); - - let mut json = serde_json::Map::new(); - json.insert("type".to_owned(), event_type.to_owned().into()); - json.insert("sender".to_owned(), sender.to_string().into()); - json.insert("content".to_owned(), content); + ) -> Result<()>; - let value = serde_json::to_vec(&json).expect("Map::to_vec always works"); - - self.todeviceid_events.insert(&key, &value)?; - - Ok(()) - } - - #[tracing::instrument(skip(self, user_id, device_id))] pub fn get_to_device_events( &self, user_id: &UserId, device_id: &DeviceId, - ) -> Result>> { - let mut events = Vec::new(); - - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); + ) -> Result>>; - for (_, value) in self.todeviceid_events.scan_prefix(prefix) { - events.push( - serde_json::from_slice(&value) - .map_err(|_| Error::bad_database("Event in todeviceid_events is invalid."))?, - ); - } - - Ok(events) - } - - #[tracing::instrument(skip(self, user_id, device_id, until))] pub fn remove_to_device_events( &self, user_id: &UserId, device_id: &DeviceId, until: u64, - ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - - let mut last = prefix.clone(); - last.extend_from_slice(&until.to_be_bytes()); + ) -> Result<()>; - for (key, _) in self - .todeviceid_events - .iter_from(&last, true) // this includes last - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(|(key, _)| { - Ok::<_, Error>(( - key.clone(), - utils::u64_from_bytes(&key[key.len() - mem::size_of::()..key.len()]) - .map_err(|_| Error::bad_database("ToDeviceId has invalid count bytes."))?, - )) - }) - .filter_map(|r| r.ok()) - .take_while(|&(_, count)| count <= until) - { - self.todeviceid_events.remove(&key)?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, user_id, device_id, device))] pub fn update_device_metadata( &self, user_id: &UserId, device_id: &DeviceId, device: &Device, - ) -> Result<()> { - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); - - // Only existing devices should be able to call this. - assert!(self.userdeviceid_metadata.get(&userdeviceid)?.is_some()); - - self.userid_devicelistversion - .increment(user_id.as_bytes())?; - - self.userdeviceid_metadata.insert( - &userdeviceid, - &serde_json::to_vec(device).expect("Device::to_string always works"), - )?; - - Ok(()) - } + ) -> Result<()>; /// Get device metadata. 
- #[tracing::instrument(skip(self, user_id, device_id))] pub fn get_device_metadata( &self, user_id: &UserId, device_id: &DeviceId, - ) -> Result> { - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); + ) -> Result>; - self.userdeviceid_metadata - .get(&userdeviceid)? - .map_or(Ok(None), |bytes| { - Ok(Some(serde_json::from_slice(&bytes).map_err(|_| { - Error::bad_database("Metadata in userdeviceid_metadata is invalid.") - })?)) - }) - } + pub fn get_devicelist_version(&self, user_id: &UserId) -> Result>; - #[tracing::instrument(skip(self, user_id))] - pub fn get_devicelist_version(&self, user_id: &UserId) -> Result> { - self.userid_devicelistversion - .get(user_id.as_bytes())? - .map_or(Ok(None), |bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid devicelistversion in db.")) - .map(Some) - }) - } - - #[tracing::instrument(skip(self, user_id))] pub fn all_devices_metadata<'a>( &'a self, user_id: &UserId, - ) -> impl Iterator> + 'a { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - - self.userdeviceid_metadata - .scan_prefix(key) - .map(|(_, bytes)| { - serde_json::from_slice::(&bytes) - .map_err(|_| Error::bad_database("Device in userdeviceid_metadata is invalid.")) - }) - } - - /// Deactivate account - #[tracing::instrument(skip(self, user_id))] - pub fn deactivate_account(&self, user_id: &UserId) -> Result<()> { - // Remove all associated devices - for device_id in self.all_device_ids(user_id) { - self.remove_device(user_id, &device_id?)?; - } - - // Set the password to "" to indicate a deactivated account. Hashes will never result in an - // empty string, so the user will not be able to log in again. Systems like changing the - // password without logging in should check if the account is deactivated. - self.userid_password.insert(user_id.as_bytes(), &[])?; - - // TODO: Unhook 3PID - Ok(()) - } + ) -> impl Iterator> + 'a; /// Creates a new sync filter. Returns the filter id. 
- #[tracing::instrument(skip(self))] pub fn create_filter( &self, user_id: &UserId, filter: &IncomingFilterDefinition, - ) -> Result { - let filter_id = utils::random_string(4); - - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(filter_id.as_bytes()); - - self.userfilterid_filter.insert( - &key, - &serde_json::to_vec(&filter).expect("filter is valid json"), - )?; + ) -> Result; - Ok(filter_id) - } - - #[tracing::instrument(skip(self))] pub fn get_filter( &self, user_id: &UserId, filter_id: &str, - ) -> Result> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(filter_id.as_bytes()); - - let raw = self.userfilterid_filter.get(&key)?; - - if let Some(raw) = raw { - serde_json::from_slice(&raw) - .map_err(|_| Error::bad_database("Invalid filter event in db.")) - } else { - Ok(None) - } - } -} - -/// Ensure that a user only sees signatures from themselves and the target user -fn clean_signatures bool>( - cross_signing_key: &mut serde_json::Value, - user_id: &UserId, - allowed_signatures: F, -) -> Result<(), Error> { - if let Some(signatures) = cross_signing_key - .get_mut("signatures") - .and_then(|v| v.as_object_mut()) - { - // Don't allocate for the full size of the current signatures, but require - // at most one resize if nothing is dropped - let new_capacity = signatures.len() / 2; - for (user, signature) in - mem::replace(signatures, serde_json::Map::with_capacity(new_capacity)) - { - let id = <&UserId>::try_from(user.as_str()) - .map_err(|_| Error::bad_database("Invalid user ID in database."))?; - if id == user_id || allowed_signatures(id) { - signatures.insert(user, signature); - } - } - } - - Ok(()) + ) -> Result>; } diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 7c15f1d8..93d6ea52 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -1,276 +1,107 @@ -use crate::{utils, Error, Result}; -use ruma::{ - api::client::{device::Device, error::ErrorKind, filter::IncomingFilterDefinition}, - encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, - events::{AnyToDeviceEvent, StateEventType}, - serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, MxcUri, RoomAliasId, - UInt, UserId, -}; -use std::{collections::BTreeMap, mem, sync::Arc}; -use tracing::warn; +mod data; +pub use data::Data; -use super::abstraction::Tree; +use crate::service::*; -pub struct Users { - pub(super) userid_password: Arc, - pub(super) userid_displayname: Arc, - pub(super) userid_avatarurl: Arc, - pub(super) userid_blurhash: Arc, - pub(super) userdeviceid_token: Arc, - pub(super) userdeviceid_metadata: Arc, // This is also used to check if a device exists - pub(super) userid_devicelistversion: Arc, // DevicelistVersion = u64 - pub(super) token_userdeviceid: Arc, - - pub(super) onetimekeyid_onetimekeys: Arc, // OneTimeKeyId = UserId + DeviceKeyId - pub(super) userid_lastonetimekeyupdate: Arc, // LastOneTimeKeyUpdate = Count - pub(super) keychangeid_userid: Arc, // KeyChangeId = UserId/RoomId + Count - pub(super) keyid_key: Arc, // KeyId = UserId + KeyId (depends on key type) - pub(super) userid_masterkeyid: Arc, - pub(super) userid_selfsigningkeyid: Arc, - pub(super) userid_usersigningkeyid: Arc, - - pub(super) userfilterid_filter: Arc, // UserFilterId = UserId + FilterId - - pub(super) todeviceid_events: Arc, // ToDeviceId = UserId + DeviceId + Count +pub struct Service { + db: D, } -impl Users { +impl Service<_> { /// Check if a user has an account on this homeserver. 
- #[tracing::instrument(skip(self, user_id))] pub fn exists(&self, user_id: &UserId) -> Result { - Ok(self.userid_password.get(user_id.as_bytes())?.is_some()) + self.db.exists(user_id) } /// Check if account is deactivated - #[tracing::instrument(skip(self, user_id))] pub fn is_deactivated(&self, user_id: &UserId) -> Result { - Ok(self - .userid_password - .get(user_id.as_bytes())? - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "User does not exist.", - ))? - .is_empty()) + self.db.is_deactivated(user_id) } /// Check if a user is an admin - #[tracing::instrument(skip(self, user_id, rooms, globals))] pub fn is_admin( &self, user_id: &UserId, - rooms: &super::rooms::Rooms, - globals: &super::globals::Globals, ) -> Result { - let admin_room_alias_id = RoomAliasId::parse(format!("#admins:{}", globals.server_name())) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; - let admin_room_id = rooms.id_from_alias(&admin_room_alias_id)?.unwrap(); - - rooms.is_joined(user_id, &admin_room_id) + self.db.is_admin(user_id) } /// Create a new user account on this homeserver. - #[tracing::instrument(skip(self, user_id, password))] pub fn create(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { - self.set_password(user_id, password)?; - Ok(()) + self.db.set_password(user_id, password) } /// Returns the number of users registered on this server. - #[tracing::instrument(skip(self))] pub fn count(&self) -> Result { - Ok(self.userid_password.iter().count()) + self.db.count() } /// Find out which user an access token belongs to. - #[tracing::instrument(skip(self, token))] pub fn find_from_token(&self, token: &str) -> Result, String)>> { - self.token_userdeviceid - .get(token.as_bytes())? - .map_or(Ok(None), |bytes| { - let mut parts = bytes.split(|&b| b == 0xff); - let user_bytes = parts.next().ok_or_else(|| { - Error::bad_database("User ID in token_userdeviceid is invalid.") - })?; - let device_bytes = parts.next().ok_or_else(|| { - Error::bad_database("Device ID in token_userdeviceid is invalid.") - })?; - - Ok(Some(( - UserId::parse(utils::string_from_bytes(user_bytes).map_err(|_| { - Error::bad_database("User ID in token_userdeviceid is invalid unicode.") - })?) - .map_err(|_| { - Error::bad_database("User ID in token_userdeviceid is invalid.") - })?, - utils::string_from_bytes(device_bytes).map_err(|_| { - Error::bad_database("Device ID in token_userdeviceid is invalid.") - })?, - ))) - }) + self.db.find_from_token(token) } /// Returns an iterator over all users on this homeserver. - #[tracing::instrument(skip(self))] pub fn iter(&self) -> impl Iterator>> + '_ { - self.userid_password.iter().map(|(bytes, _)| { - UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("User ID in userid_password is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("User ID in userid_password is invalid.")) - }) + self.db.iter() } /// Returns a list of local users as list of usernames. /// /// A user account is considered `local` if the length of it's password is greater then zero. - #[tracing::instrument(skip(self))] pub fn list_local_users(&self) -> Result> { - let users: Vec = self - .userid_password - .iter() - .filter_map(|(username, pw)| self.get_username_with_valid_password(&username, &pw)) - .collect(); - Ok(users) + self.db.list_local_users() } /// Will only return with Some(username) if the password was not empty and the /// username could be successfully parsed. /// If utils::string_from_bytes(...) 
returns an error that username will be skipped /// and the error will be logged. - #[tracing::instrument(skip(self))] fn get_username_with_valid_password(&self, username: &[u8], password: &[u8]) -> Option { - // A valid password is not empty - if password.is_empty() { - None - } else { - match utils::string_from_bytes(username) { - Ok(u) => Some(u), - Err(e) => { - warn!( - "Failed to parse username while calling get_local_users(): {}", - e.to_string() - ); - None - } - } - } + self.db.get_username_with_valid_password(username, password) } /// Returns the password hash for the given user. - #[tracing::instrument(skip(self, user_id))] pub fn password_hash(&self, user_id: &UserId) -> Result> { - self.userid_password - .get(user_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Password hash in db is not valid string.") - })?)) - }) + self.db.password_hash(user_id) } /// Hash and set the user's password to the Argon2 hash - #[tracing::instrument(skip(self, user_id, password))] pub fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { - if let Some(password) = password { - if let Ok(hash) = utils::calculate_hash(password) { - self.userid_password - .insert(user_id.as_bytes(), hash.as_bytes())?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Password does not meet the requirements.", - )) - } - } else { - self.userid_password.insert(user_id.as_bytes(), b"")?; - Ok(()) - } + self.db.set_password(user_id, password) } /// Returns the displayname of a user on this homeserver. - #[tracing::instrument(skip(self, user_id))] pub fn displayname(&self, user_id: &UserId) -> Result> { - self.userid_displayname - .get(user_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Displayname in db is invalid.") - })?)) - }) + self.db.displayname(user_id) } /// Sets a new displayname or removes it if displayname is None. You still need to nofify all rooms of this change. - #[tracing::instrument(skip(self, user_id, displayname))] pub fn set_displayname(&self, user_id: &UserId, displayname: Option) -> Result<()> { - if let Some(displayname) = displayname { - self.userid_displayname - .insert(user_id.as_bytes(), displayname.as_bytes())?; - } else { - self.userid_displayname.remove(user_id.as_bytes())?; - } - - Ok(()) + self.db.set_displayname(user_id, displayname) } /// Get the avatar_url of a user. - #[tracing::instrument(skip(self, user_id))] pub fn avatar_url(&self, user_id: &UserId) -> Result>> { - self.userid_avatarurl - .get(user_id.as_bytes())? - .map(|bytes| { - let s = utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Avatar URL in db is invalid."))?; - s.try_into() - .map_err(|_| Error::bad_database("Avatar URL in db is invalid.")) - }) - .transpose() + self.db.avatar_url(user_id) } /// Sets a new avatar_url or removes it if avatar_url is None. - #[tracing::instrument(skip(self, user_id, avatar_url))] pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option>) -> Result<()> { - if let Some(avatar_url) = avatar_url { - self.userid_avatarurl - .insert(user_id.as_bytes(), avatar_url.to_string().as_bytes())?; - } else { - self.userid_avatarurl.remove(user_id.as_bytes())?; - } - - Ok(()) + self.db.set_avatar_url(user_id, avatar_url) } /// Get the blurhash of a user. 
- #[tracing::instrument(skip(self, user_id))] pub fn blurhash(&self, user_id: &UserId) -> Result> { - self.userid_blurhash - .get(user_id.as_bytes())? - .map(|bytes| { - let s = utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Avatar URL in db is invalid."))?; - - Ok(s) - }) - .transpose() + self.db.blurhash(user_id) } /// Sets a new avatar_url or removes it if avatar_url is None. - #[tracing::instrument(skip(self, user_id, blurhash))] pub fn set_blurhash(&self, user_id: &UserId, blurhash: Option) -> Result<()> { - if let Some(blurhash) = blurhash { - self.userid_blurhash - .insert(user_id.as_bytes(), blurhash.as_bytes())?; - } else { - self.userid_blurhash.remove(user_id.as_bytes())?; - } - - Ok(()) + self.db.set_blurhash(user_id, blurhash) } /// Adds a new device to a user. - #[tracing::instrument(skip(self, user_id, device_id, token, initial_device_display_name))] pub fn create_device( &self, user_id: &UserId, @@ -278,119 +109,27 @@ impl Users { token: &str, initial_device_display_name: Option, ) -> Result<()> { - // This method should never be called for nonexistent users. - assert!(self.exists(user_id)?); - - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); - - self.userid_devicelistversion - .increment(user_id.as_bytes())?; - - self.userdeviceid_metadata.insert( - &userdeviceid, - &serde_json::to_vec(&Device { - device_id: device_id.into(), - display_name: initial_device_display_name, - last_seen_ip: None, // TODO - last_seen_ts: Some(MilliSecondsSinceUnixEpoch::now()), - }) - .expect("Device::to_string never fails."), - )?; - - self.set_token(user_id, device_id, token)?; - - Ok(()) + self.db.create_device(user_id, device_id, token, initial_device_display_name) } /// Removes a device from a user. - #[tracing::instrument(skip(self, user_id, device_id))] pub fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); - - // Remove tokens - if let Some(old_token) = self.userdeviceid_token.get(&userdeviceid)? { - self.userdeviceid_token.remove(&userdeviceid)?; - self.token_userdeviceid.remove(&old_token)?; - } - - // Remove todevice events - let mut prefix = userdeviceid.clone(); - prefix.push(0xff); - - for (key, _) in self.todeviceid_events.scan_prefix(prefix) { - self.todeviceid_events.remove(&key)?; - } - - // TODO: Remove onetimekeys - - self.userid_devicelistversion - .increment(user_id.as_bytes())?; - - self.userdeviceid_metadata.remove(&userdeviceid)?; - - Ok(()) + self.db.remove_device(user_id, device_id) } /// Returns an iterator over all device ids of this user. - #[tracing::instrument(skip(self, user_id))] pub fn all_device_ids<'a>( &'a self, user_id: &UserId, ) -> impl Iterator>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - // All devices have metadata - self.userdeviceid_metadata - .scan_prefix(prefix) - .map(|(bytes, _)| { - Ok(utils::string_from_bytes( - bytes - .rsplit(|&b| b == 0xff) - .next() - .ok_or_else(|| Error::bad_database("UserDevice ID in db is invalid."))?, - ) - .map_err(|_| Error::bad_database("Device ID in userdeviceid_metadata is invalid."))? - .into()) - }) + self.db.all_device_ids(user_id) } /// Replaces the access token of one device. 
- #[tracing::instrument(skip(self, user_id, device_id, token))] pub fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> { - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); - - // All devices have metadata - assert!(self.userdeviceid_metadata.get(&userdeviceid)?.is_some()); - - // Remove old token - if let Some(old_token) = self.userdeviceid_token.get(&userdeviceid)? { - self.token_userdeviceid.remove(&old_token)?; - // It will be removed from userdeviceid_token by the insert later - } - - // Assign token to user device combination - self.userdeviceid_token - .insert(&userdeviceid, token.as_bytes())?; - self.token_userdeviceid - .insert(token.as_bytes(), &userdeviceid)?; - - Ok(()) + self.db.set_token(user_id, device_id, token) } - #[tracing::instrument(skip( - self, - user_id, - device_id, - one_time_key_key, - one_time_key_value, - globals - ))] pub fn add_one_time_key( &self, user_id: &UserId, @@ -399,464 +138,103 @@ impl Users { one_time_key_value: &Raw, globals: &super::globals::Globals, ) -> Result<()> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - - // All devices have metadata - // Only existing devices should be able to call this. - assert!(self.userdeviceid_metadata.get(&key)?.is_some()); - - key.push(0xff); - // TODO: Use DeviceKeyId::to_string when it's available (and update everything, - // because there are no wrapping quotation marks anymore) - key.extend_from_slice( - serde_json::to_string(one_time_key_key) - .expect("DeviceKeyId::to_string always works") - .as_bytes(), - ); - - self.onetimekeyid_onetimekeys.insert( - &key, - &serde_json::to_vec(&one_time_key_value).expect("OneTimeKey::to_vec always works"), - )?; - - self.userid_lastonetimekeyupdate - .insert(user_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - - Ok(()) + self.db.add_one_time_key(user_id, device_id, one_time_key_key, one_time_key_value) } - #[tracing::instrument(skip(self, user_id))] pub fn last_one_time_keys_update(&self, user_id: &UserId) -> Result { - self.userid_lastonetimekeyupdate - .get(user_id.as_bytes())? 
- .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") - }) - }) - .unwrap_or(Ok(0)) + self.db.last_one_time_keys_update(user_id) } - #[tracing::instrument(skip(self, user_id, device_id, key_algorithm, globals))] pub fn take_one_time_key( &self, user_id: &UserId, device_id: &DeviceId, key_algorithm: &DeviceKeyAlgorithm, - globals: &super::globals::Globals, ) -> Result, Raw)>> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.push(b'"'); // Annoying quotation mark - prefix.extend_from_slice(key_algorithm.as_ref().as_bytes()); - prefix.push(b':'); - - self.userid_lastonetimekeyupdate - .insert(user_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - - self.onetimekeyid_onetimekeys - .scan_prefix(prefix) - .next() - .map(|(key, value)| { - self.onetimekeyid_onetimekeys.remove(&key)?; - - Ok(( - serde_json::from_slice( - &*key - .rsplit(|&b| b == 0xff) - .next() - .ok_or_else(|| Error::bad_database("OneTimeKeyId in db is invalid."))?, - ) - .map_err(|_| Error::bad_database("OneTimeKeyId in db is invalid."))?, - serde_json::from_slice(&*value) - .map_err(|_| Error::bad_database("OneTimeKeys in db are invalid."))?, - )) - }) - .transpose() + self.db.take_one_time_key(user_id, device_id, key_algorithm) } - #[tracing::instrument(skip(self, user_id, device_id))] pub fn count_one_time_keys( &self, user_id: &UserId, device_id: &DeviceId, ) -> Result> { - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); - - let mut counts = BTreeMap::new(); - - for algorithm in - self.onetimekeyid_onetimekeys - .scan_prefix(userdeviceid) - .map(|(bytes, _)| { - Ok::<_, Error>( - serde_json::from_slice::>( - &*bytes.rsplit(|&b| b == 0xff).next().ok_or_else(|| { - Error::bad_database("OneTimeKey ID in db is invalid.") - })?, - ) - .map_err(|_| Error::bad_database("DeviceKeyId in db is invalid."))? - .algorithm(), - ) - }) - { - *counts.entry(algorithm?).or_default() += UInt::from(1_u32); - } - - Ok(counts) + self.db.count_one_time_keys(user_id, device_id) } - #[tracing::instrument(skip(self, user_id, device_id, device_keys, rooms, globals))] pub fn add_device_keys( &self, user_id: &UserId, device_id: &DeviceId, device_keys: &Raw, - rooms: &super::rooms::Rooms, - globals: &super::globals::Globals, ) -> Result<()> { - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); - - self.keyid_key.insert( - &userdeviceid, - &serde_json::to_vec(&device_keys).expect("DeviceKeys::to_vec always works"), - )?; - - self.mark_device_key_update(user_id, rooms, globals)?; - - Ok(()) + self.db.add_device_keys(user_id, device_id, device_keys) } - #[tracing::instrument(skip( - self, - master_key, - self_signing_key, - user_signing_key, - rooms, - globals - ))] pub fn add_cross_signing_keys( &self, user_id: &UserId, master_key: &Raw, self_signing_key: &Option>, user_signing_key: &Option>, - rooms: &super::rooms::Rooms, - globals: &super::globals::Globals, ) -> Result<()> { - // TODO: Check signatures - - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - // Master key - let mut master_key_ids = master_key - .deserialize() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid master key"))? 
- .keys - .into_values(); - - let master_key_id = master_key_ids.next().ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Master key contained no key.", - ))?; - - if master_key_ids.next().is_some() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Master key contained more than one key.", - )); - } - - let mut master_key_key = prefix.clone(); - master_key_key.extend_from_slice(master_key_id.as_bytes()); - - self.keyid_key - .insert(&master_key_key, master_key.json().get().as_bytes())?; - - self.userid_masterkeyid - .insert(user_id.as_bytes(), &master_key_key)?; - - // Self-signing key - if let Some(self_signing_key) = self_signing_key { - let mut self_signing_key_ids = self_signing_key - .deserialize() - .map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Invalid self signing key") - })? - .keys - .into_values(); - - let self_signing_key_id = self_signing_key_ids.next().ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Self signing key contained no key.", - ))?; - - if self_signing_key_ids.next().is_some() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Self signing key contained more than one key.", - )); - } - - let mut self_signing_key_key = prefix.clone(); - self_signing_key_key.extend_from_slice(self_signing_key_id.as_bytes()); - - self.keyid_key.insert( - &self_signing_key_key, - self_signing_key.json().get().as_bytes(), - )?; - - self.userid_selfsigningkeyid - .insert(user_id.as_bytes(), &self_signing_key_key)?; - } - - // User-signing key - if let Some(user_signing_key) = user_signing_key { - let mut user_signing_key_ids = user_signing_key - .deserialize() - .map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Invalid user signing key") - })? - .keys - .into_values(); - - let user_signing_key_id = user_signing_key_ids.next().ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "User signing key contained no key.", - ))?; - - if user_signing_key_ids.next().is_some() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "User signing key contained more than one key.", - )); - } - - let mut user_signing_key_key = prefix; - user_signing_key_key.extend_from_slice(user_signing_key_id.as_bytes()); - - self.keyid_key.insert( - &user_signing_key_key, - user_signing_key.json().get().as_bytes(), - )?; - - self.userid_usersigningkeyid - .insert(user_id.as_bytes(), &user_signing_key_key)?; - } - - self.mark_device_key_update(user_id, rooms, globals)?; - - Ok(()) + self.db.add_cross_signing_keys(user_id, master_key, self_signing_key, user_signing_key) } - #[tracing::instrument(skip(self, target_id, key_id, signature, sender_id, rooms, globals))] pub fn sign_key( &self, target_id: &UserId, key_id: &str, signature: (String, String), sender_id: &UserId, - rooms: &super::rooms::Rooms, - globals: &super::globals::Globals, ) -> Result<()> { - let mut key = target_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(key_id.as_bytes()); - - let mut cross_signing_key: serde_json::Value = - serde_json::from_slice(&self.keyid_key.get(&key)?.ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Tried to sign nonexistent key.", - ))?) - .map_err(|_| Error::bad_database("key in keyid_key is invalid."))?; - - let signatures = cross_signing_key - .get_mut("signatures") - .ok_or_else(|| Error::bad_database("key in keyid_key has no signatures field."))? - .as_object_mut() - .ok_or_else(|| Error::bad_database("key in keyid_key has invalid signatures field."))? 
- .entry(sender_id.to_owned()) - .or_insert_with(|| serde_json::Map::new().into()); - - signatures - .as_object_mut() - .ok_or_else(|| Error::bad_database("signatures in keyid_key for a user is invalid."))? - .insert(signature.0, signature.1.into()); - - self.keyid_key.insert( - &key, - &serde_json::to_vec(&cross_signing_key).expect("CrossSigningKey::to_vec always works"), - )?; - - // TODO: Should we notify about this change? - self.mark_device_key_update(target_id, rooms, globals)?; - - Ok(()) + self.db.sign_key(target_id, key_id, signature, sender_id) } - #[tracing::instrument(skip(self, user_or_room_id, from, to))] pub fn keys_changed<'a>( &'a self, user_or_room_id: &str, from: u64, to: Option, ) -> impl Iterator>> + 'a { - let mut prefix = user_or_room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut start = prefix.clone(); - start.extend_from_slice(&(from + 1).to_be_bytes()); - - let to = to.unwrap_or(u64::MAX); - - self.keychangeid_userid - .iter_from(&start, false) - .take_while(move |(k, _)| { - k.starts_with(&prefix) - && if let Some(current) = k.splitn(2, |&b| b == 0xff).nth(1) { - if let Ok(c) = utils::u64_from_bytes(current) { - c <= to - } else { - warn!("BadDatabase: Could not parse keychangeid_userid bytes"); - false - } - } else { - warn!("BadDatabase: Could not parse keychangeid_userid"); - false - } - }) - .map(|(_, bytes)| { - UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("User ID in devicekeychangeid_userid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("User ID in devicekeychangeid_userid is invalid.")) - }) + self.db.keys_changed(user_or_room_id, from, to) } - #[tracing::instrument(skip(self, user_id, rooms, globals))] pub fn mark_device_key_update( &self, user_id: &UserId, - rooms: &super::rooms::Rooms, - globals: &super::globals::Globals, ) -> Result<()> { - let count = globals.next_count()?.to_be_bytes(); - for room_id in rooms.rooms_joined(user_id).filter_map(|r| r.ok()) { - // Don't send key updates to unencrypted rooms - if rooms - .room_state_get(&room_id, &StateEventType::RoomEncryption, "")? - .is_none() - { - continue; - } - - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(&count); - - self.keychangeid_userid.insert(&key, user_id.as_bytes())?; - } - - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(&count); - self.keychangeid_userid.insert(&key, user_id.as_bytes())?; - - Ok(()) + self.db.mark_device_key_update(user_id) } - #[tracing::instrument(skip(self, user_id, device_id))] pub fn get_device_keys( &self, user_id: &UserId, device_id: &DeviceId, ) -> Result>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.as_bytes()); - - self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { - Ok(Some(serde_json::from_slice(&bytes).map_err(|_| { - Error::bad_database("DeviceKeys in db are invalid.") - })?)) - }) + self.db.get_device_keys(user_id, device_id) } - #[tracing::instrument(skip(self, user_id, allowed_signatures))] pub fn get_master_key bool>( &self, user_id: &UserId, allowed_signatures: F, ) -> Result>> { - self.userid_masterkeyid - .get(user_id.as_bytes())? 
- .map_or(Ok(None), |key| { - self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { - let mut cross_signing_key = serde_json::from_slice::(&bytes) - .map_err(|_| Error::bad_database("CrossSigningKey in db is invalid."))?; - clean_signatures(&mut cross_signing_key, user_id, allowed_signatures)?; - - Ok(Some(Raw::from_json( - serde_json::value::to_raw_value(&cross_signing_key) - .expect("Value to RawValue serialization"), - ))) - }) - }) + self.db.get_master_key(user_id, allow_signatures) } - #[tracing::instrument(skip(self, user_id, allowed_signatures))] pub fn get_self_signing_key bool>( &self, user_id: &UserId, allowed_signatures: F, ) -> Result>> { - self.userid_selfsigningkeyid - .get(user_id.as_bytes())? - .map_or(Ok(None), |key| { - self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { - let mut cross_signing_key = serde_json::from_slice::(&bytes) - .map_err(|_| Error::bad_database("CrossSigningKey in db is invalid."))?; - clean_signatures(&mut cross_signing_key, user_id, allowed_signatures)?; - - Ok(Some(Raw::from_json( - serde_json::value::to_raw_value(&cross_signing_key) - .expect("Value to RawValue serialization"), - ))) - }) - }) + self.db.get_self_signing_key(user_id, allowed_signatures) } - #[tracing::instrument(skip(self, user_id))] pub fn get_user_signing_key(&self, user_id: &UserId) -> Result>> { - self.userid_usersigningkeyid - .get(user_id.as_bytes())? - .map_or(Ok(None), |key| { - self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { - Ok(Some(serde_json::from_slice(&bytes).map_err(|_| { - Error::bad_database("CrossSigningKey in db is invalid.") - })?)) - }) - }) + self.db.get_user_signing_key(user_id) } - #[tracing::instrument(skip( - self, - sender, - target_user_id, - target_device_id, - event_type, - content, - globals - ))] pub fn add_to_device_event( &self, sender: &UserId, @@ -864,158 +242,57 @@ impl Users { target_device_id: &DeviceId, event_type: &str, content: serde_json::Value, - globals: &super::globals::Globals, ) -> Result<()> { - let mut key = target_user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(target_device_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(&globals.next_count()?.to_be_bytes()); - - let mut json = serde_json::Map::new(); - json.insert("type".to_owned(), event_type.to_owned().into()); - json.insert("sender".to_owned(), sender.to_string().into()); - json.insert("content".to_owned(), content); - - let value = serde_json::to_vec(&json).expect("Map::to_vec always works"); - - self.todeviceid_events.insert(&key, &value)?; - - Ok(()) + self.db.add_to_device_event(sender, target_user_id, target_device_id, event_type, content) } - #[tracing::instrument(skip(self, user_id, device_id))] pub fn get_to_device_events( &self, user_id: &UserId, device_id: &DeviceId, ) -> Result>> { - let mut events = Vec::new(); - - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - - for (_, value) in self.todeviceid_events.scan_prefix(prefix) { - events.push( - serde_json::from_slice(&value) - .map_err(|_| Error::bad_database("Event in todeviceid_events is invalid."))?, - ); - } - - Ok(events) + self.get_to_device_events(user_id, device_id) } - #[tracing::instrument(skip(self, user_id, device_id, until))] pub fn remove_to_device_events( &self, user_id: &UserId, device_id: &DeviceId, until: u64, ) -> Result<()> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - - 
let mut last = prefix.clone(); - last.extend_from_slice(&until.to_be_bytes()); - - for (key, _) in self - .todeviceid_events - .iter_from(&last, true) // this includes last - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(|(key, _)| { - Ok::<_, Error>(( - key.clone(), - utils::u64_from_bytes(&key[key.len() - mem::size_of::()..key.len()]) - .map_err(|_| Error::bad_database("ToDeviceId has invalid count bytes."))?, - )) - }) - .filter_map(|r| r.ok()) - .take_while(|&(_, count)| count <= until) - { - self.todeviceid_events.remove(&key)?; - } - - Ok(()) + self.db.remove_to_device_events(user_id, device_id, until) } - #[tracing::instrument(skip(self, user_id, device_id, device))] pub fn update_device_metadata( &self, user_id: &UserId, device_id: &DeviceId, device: &Device, ) -> Result<()> { - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); - - // Only existing devices should be able to call this. - assert!(self.userdeviceid_metadata.get(&userdeviceid)?.is_some()); - - self.userid_devicelistversion - .increment(user_id.as_bytes())?; - - self.userdeviceid_metadata.insert( - &userdeviceid, - &serde_json::to_vec(device).expect("Device::to_string always works"), - )?; - - Ok(()) + self.db.update_device_metadata(user_id, device_id, device) } /// Get device metadata. - #[tracing::instrument(skip(self, user_id, device_id))] pub fn get_device_metadata( &self, user_id: &UserId, device_id: &DeviceId, ) -> Result> { - let mut userdeviceid = user_id.as_bytes().to_vec(); - userdeviceid.push(0xff); - userdeviceid.extend_from_slice(device_id.as_bytes()); - - self.userdeviceid_metadata - .get(&userdeviceid)? - .map_or(Ok(None), |bytes| { - Ok(Some(serde_json::from_slice(&bytes).map_err(|_| { - Error::bad_database("Metadata in userdeviceid_metadata is invalid.") - })?)) - }) + self.get_device_metadata(user_id, device_id) } - #[tracing::instrument(skip(self, user_id))] pub fn get_devicelist_version(&self, user_id: &UserId) -> Result> { - self.userid_devicelistversion - .get(user_id.as_bytes())? - .map_or(Ok(None), |bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid devicelistversion in db.")) - .map(Some) - }) + self.db.devicelist_version(user_id) } - #[tracing::instrument(skip(self, user_id))] pub fn all_devices_metadata<'a>( &'a self, user_id: &UserId, ) -> impl Iterator> + 'a { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - - self.userdeviceid_metadata - .scan_prefix(key) - .map(|(_, bytes)| { - serde_json::from_slice::(&bytes) - .map_err(|_| Error::bad_database("Device in userdeviceid_metadata is invalid.")) - }) + self.db.all_devices_metadata(user_id) } /// Deactivate account - #[tracing::instrument(skip(self, user_id))] pub fn deactivate_account(&self, user_id: &UserId) -> Result<()> { // Remove all associated devices for device_id in self.all_device_ids(user_id) { @@ -1032,44 +309,20 @@ impl Users { } /// Creates a new sync filter. Returns the filter id. 
- #[tracing::instrument(skip(self))] pub fn create_filter( &self, user_id: &UserId, filter: &IncomingFilterDefinition, ) -> Result { - let filter_id = utils::random_string(4); - - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(filter_id.as_bytes()); - - self.userfilterid_filter.insert( - &key, - &serde_json::to_vec(&filter).expect("filter is valid json"), - )?; - - Ok(filter_id) + self.db.create_filter(user_id, filter) } - #[tracing::instrument(skip(self))] pub fn get_filter( &self, user_id: &UserId, filter_id: &str, ) -> Result> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(filter_id.as_bytes()); - - let raw = self.userfilterid_filter.get(&key)?; - - if let Some(raw) = raw { - serde_json::from_slice(&raw) - .map_err(|_| Error::bad_database("Invalid filter event in db.")) - } else { - Ok(None) - } + self.db.get_filter(user_id, filter_id) } } From fd0ea4bf717174726b895bbc03b9953845869733 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Sat, 13 Aug 2022 23:29:01 +0200 Subject: [PATCH 368/445] feat(database/presence): add skeleton for presence maintenance --- src/database/key_value/rooms/edus/presence.rs | 18 +++++++++++++++++- 1 file changed, 17 insertions(+), 1 deletion(-) diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs index 61bd9d60..1978ce7b 100644 --- a/src/database/key_value/rooms/edus/presence.rs +++ b/src/database/key_value/rooms/edus/presence.rs @@ -100,6 +100,23 @@ impl service::room::edus::presence::Data for KeyValueDatabase { Ok(hashmap) } + + fn presence_maintain(&self, db: Arc>) { + // TODO @M0dEx: move this to a timed tasks module + tokio::spawn(async move { + loop { + select! { + Some(user_id) = self.presence_timers.next() { + // TODO @M0dEx: would it be better to acquire the lock outside the loop? 
+ let guard = db.read().await; + + // TODO @M0dEx: add self.presence_timers + // TODO @M0dEx: maintain presence + } + } + } + }); + } } fn parse_presence_event(bytes: &[u8]) -> Result { @@ -121,4 +138,3 @@ fn parse_presence_event(bytes: &[u8]) -> Result { .map(|timestamp| current_timestamp - timestamp); } } - From 19743ae1950e152e40d88f804b14c32e0a0c2142 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 13:19:30 +0200 Subject: [PATCH 369/445] refactor: prepare service/rooms/user/data.rs from service/rooms/user/mod.rs --- src/service/rooms/user/{mod.rs => data.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/rooms/user/{mod.rs => data.rs} (100%) diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/data.rs similarity index 100% rename from src/service/rooms/user/mod.rs rename to src/service/rooms/user/data.rs From 7e0b8ec0acfb7756b7db28eaf2e9e0cc8bb73662 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 13:19:30 +0200 Subject: [PATCH 370/445] refactor: prepare database/key_value/rooms/user.rs from service/rooms/user/mod.rs --- .../rooms/user/mod.rs => database/key_value/rooms/user.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service/rooms/user/mod.rs => database/key_value/rooms/user.rs} (100%) diff --git a/src/service/rooms/user/mod.rs b/src/database/key_value/rooms/user.rs similarity index 100% rename from src/service/rooms/user/mod.rs rename to src/database/key_value/rooms/user.rs From 6d981f37a2cfcb07a0fc479daebd92b1375d2c0c Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 13:19:30 +0200 Subject: [PATCH 371/445] refactor: prepare database/key_value/rooms/state_accessor.rs from service/rooms/state_accessor/mod.rs --- .../mod.rs => database/key_value/rooms/state_accessor.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service/rooms/state_accessor/mod.rs => database/key_value/rooms/state_accessor.rs} (100%) diff --git a/src/service/rooms/state_accessor/mod.rs b/src/database/key_value/rooms/state_accessor.rs similarity index 100% rename from src/service/rooms/state_accessor/mod.rs rename to src/database/key_value/rooms/state_accessor.rs From 61f6ac0d66458dba58b28fca264780878806b1f7 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 13:19:31 +0200 Subject: [PATCH 372/445] refactor: prepare service/rooms/state_accessor/data.rs from service/rooms/state_accessor/mod.rs --- src/service/rooms/state_accessor/{mod.rs => data.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/rooms/state_accessor/{mod.rs => data.rs} (100%) diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/data.rs similarity index 100% rename from src/service/rooms/state_accessor/mod.rs rename to src/service/rooms/state_accessor/data.rs From 810a6baf344a0b974d0b2b6a430da982c26034f9 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 13:19:31 +0200 Subject: [PATCH 373/445] refactor: prepare service/uiaa/data.rs from service/uiaa/mod.rs --- src/service/{uiaa.rs => uiaa/data.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/{uiaa.rs => uiaa/data.rs} (100%) diff --git a/src/service/uiaa.rs b/src/service/uiaa/data.rs similarity index 100% rename from src/service/uiaa.rs rename to src/service/uiaa/data.rs From 213579ee9deb288caa90a48139f976adcfac5f35 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 13:19:31 
+0200 Subject: [PATCH 374/445] refactor: prepare database/key_value/uiaa.rs from service/uiaa/mod.rs --- src/{service => database/key_value}/uiaa.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service => database/key_value}/uiaa.rs (100%) diff --git a/src/service/uiaa.rs b/src/database/key_value/uiaa.rs similarity index 100% rename from src/service/uiaa.rs rename to src/database/key_value/uiaa.rs From 82e7f57b389d011bc8d80f9142f723b3cd1e1ad2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 14 Aug 2022 13:38:21 +0200 Subject: [PATCH 375/445] refactor state accessor, state cache, user, uiaa --- .../key_value/rooms/state_accessor.rs | 27 +-- src/database/key_value/rooms/state_cache.rs | 8 + src/database/key_value/rooms/user.rs | 20 +- src/database/key_value/uiaa.rs | 149 +------------ src/service/rooms/state_accessor/data.rs | 141 ++---------- src/service/rooms/state_accessor/mod.rs | 115 ++-------- src/service/rooms/state_cache/data.rs | 3 + src/service/rooms/state_cache/mod.rs | 65 +----- src/service/rooms/user/data.rs | 113 +--------- src/service/rooms/user/mod.rs | 102 ++------- src/service/uiaa/data.rs | 208 +----------------- src/service/uiaa/mod.rs | 96 +------- 12 files changed, 115 insertions(+), 932 deletions(-) create mode 100644 src/database/key_value/rooms/state_cache.rs create mode 100644 src/service/rooms/state_cache/data.rs diff --git a/src/database/key_value/rooms/state_accessor.rs b/src/database/key_value/rooms/state_accessor.rs index ae26a7c4..db81967d 100644 --- a/src/database/key_value/rooms/state_accessor.rs +++ b/src/database/key_value/rooms/state_accessor.rs @@ -1,7 +1,5 @@ - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { +impl service::room::state_accessor::Data for KeyValueDatabase { + async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { let full_state = self .load_shortstatehash_info(shortstatehash)? .pop() @@ -21,8 +19,7 @@ Ok(result) } - #[tracing::instrument(skip(self))] - pub async fn state_full( + async fn state_full( &self, shortstatehash: u64, ) -> Result>> { @@ -59,8 +56,7 @@ } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get_id( + fn state_get_id( &self, shortstatehash: u64, event_type: &StateEventType, @@ -86,8 +82,7 @@ } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get( + fn state_get( &self, shortstatehash: u64, event_type: &StateEventType, @@ -98,7 +93,7 @@ } /// Returns the state hash for this pdu. - pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { + fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { self.eventid_shorteventid .get(event_id.as_bytes())? .map_or(Ok(None), |shorteventid| { @@ -116,8 +111,7 @@ } /// Returns the full room state. - #[tracing::instrument(skip(self))] - pub async fn room_state_full( + async fn room_state_full( &self, room_id: &RoomId, ) -> Result>> { @@ -129,8 +123,7 @@ } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). 
- #[tracing::instrument(skip(self))] - pub fn room_state_get_id( + fn room_state_get_id( &self, room_id: &RoomId, event_type: &StateEventType, @@ -144,8 +137,7 @@ } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get( + fn room_state_get( &self, room_id: &RoomId, event_type: &StateEventType, @@ -157,4 +149,3 @@ Ok(None) } } - diff --git a/src/database/key_value/rooms/state_cache.rs b/src/database/key_value/rooms/state_cache.rs new file mode 100644 index 00000000..37814020 --- /dev/null +++ b/src/database/key_value/rooms/state_cache.rs @@ -0,0 +1,8 @@ +impl service::room::state_cache::Data for KeyValueDatabase { + fn mark_as_once_joined(user_id: &UserId, room_id: &RoomId) -> Result<()> { + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + self.roomuseroncejoinedids.insert(&userroom_id, &[])?; + } +} diff --git a/src/database/key_value/rooms/user.rs b/src/database/key_value/rooms/user.rs index 976ab5b3..52145ced 100644 --- a/src/database/key_value/rooms/user.rs +++ b/src/database/key_value/rooms/user.rs @@ -1,6 +1,5 @@ - - #[tracing::instrument(skip(self))] - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { +impl service::room::user::Data for KeyValueDatabase { + fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); userroom_id.extend_from_slice(room_id.as_bytes()); @@ -13,8 +12,7 @@ Ok(()) } - #[tracing::instrument(skip(self))] - pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { + fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); userroom_id.extend_from_slice(room_id.as_bytes()); @@ -28,8 +26,7 @@ .unwrap_or(Ok(0)) } - #[tracing::instrument(skip(self))] - pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { + fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); userroom_id.extend_from_slice(room_id.as_bytes()); @@ -43,7 +40,7 @@ .unwrap_or(Ok(0)) } - pub fn associate_token_shortstatehash( + fn associate_token_shortstatehash( &self, room_id: &RoomId, token: u64, @@ -58,7 +55,7 @@ .insert(&key, &shortstatehash.to_be_bytes()) } - pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { + fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); let mut key = shortroomid.to_be_bytes().to_vec(); @@ -74,8 +71,7 @@ .transpose() } - #[tracing::instrument(skip(self))] - pub fn get_shared_rooms<'a>( + fn get_shared_rooms<'a>( &'a self, users: Vec>, ) -> Result>> + 'a> { @@ -111,4 +107,4 @@ .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) })) } - +} diff --git a/src/database/key_value/uiaa.rs b/src/database/key_value/uiaa.rs index 12373139..4d1dac57 100644 --- a/src/database/key_value/uiaa.rs +++ b/src/database/key_value/uiaa.rs @@ -1,149 +1,4 @@ -use std::{ - collections::BTreeMap, - sync::{Arc, RwLock}, -}; - -use crate::{client_server::SESSION_ID_LENGTH, utils, Error, Result}; -use ruma::{ - api::client::{ - error::ErrorKind, - uiaa::{ - AuthType, IncomingAuthData, IncomingPassword, - 
IncomingUserIdentifier::UserIdOrLocalpart, UiaaInfo, - }, - }, - signatures::CanonicalJsonValue, - DeviceId, UserId, -}; -use tracing::error; - -use super::abstraction::Tree; - -pub struct Uiaa { - pub(super) userdevicesessionid_uiaainfo: Arc, // User-interactive authentication - pub(super) userdevicesessionid_uiaarequest: - RwLock, Box, String), CanonicalJsonValue>>, -} - -impl Uiaa { - /// Creates a new Uiaa session. Make sure the session token is unique. - pub fn create( - &self, - user_id: &UserId, - device_id: &DeviceId, - uiaainfo: &UiaaInfo, - json_body: &CanonicalJsonValue, - ) -> Result<()> { - self.set_uiaa_request( - user_id, - device_id, - uiaainfo.session.as_ref().expect("session should be set"), // TODO: better session error handling (why is it optional in ruma?) - json_body, - )?; - self.update_uiaa_session( - user_id, - device_id, - uiaainfo.session.as_ref().expect("session should be set"), - Some(uiaainfo), - ) - } - - pub fn try_auth( - &self, - user_id: &UserId, - device_id: &DeviceId, - auth: &IncomingAuthData, - uiaainfo: &UiaaInfo, - users: &super::users::Users, - globals: &super::globals::Globals, - ) -> Result<(bool, UiaaInfo)> { - let mut uiaainfo = auth - .session() - .map(|session| self.get_uiaa_session(user_id, device_id, session)) - .unwrap_or_else(|| Ok(uiaainfo.clone()))?; - - if uiaainfo.session.is_none() { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - } - - match auth { - // Find out what the user completed - IncomingAuthData::Password(IncomingPassword { - identifier, - password, - .. - }) => { - let username = match identifier { - UserIdOrLocalpart(username) => username, - _ => { - return Err(Error::BadRequest( - ErrorKind::Unrecognized, - "Identifier type not recognized.", - )) - } - }; - - let user_id = - UserId::parse_with_server_name(username.clone(), globals.server_name()) - .map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "User ID is invalid.") - })?; - - // Check if password is correct - if let Some(hash) = users.password_hash(&user_id)? { - let hash_matches = - argon2::verify_encoded(&hash, password.as_bytes()).unwrap_or(false); - - if !hash_matches { - uiaainfo.auth_error = Some(ruma::api::client::error::ErrorBody { - kind: ErrorKind::Forbidden, - message: "Invalid username or password.".to_owned(), - }); - return Ok((false, uiaainfo)); - } - } - - // Password was correct! Let's add it to `completed` - uiaainfo.completed.push(AuthType::Password); - } - IncomingAuthData::Dummy(_) => { - uiaainfo.completed.push(AuthType::Dummy); - } - k => error!("type not supported: {:?}", k), - } - - // Check if a flow now succeeds - let mut completed = false; - 'flows: for flow in &mut uiaainfo.flows { - for stage in &flow.stages { - if !uiaainfo.completed.contains(stage) { - continue 'flows; - } - } - // We didn't break, so this flow succeeded! - completed = true; - } - - if !completed { - self.update_uiaa_session( - user_id, - device_id, - uiaainfo.session.as_ref().expect("session is always set"), - Some(&uiaainfo), - )?; - return Ok((false, uiaainfo)); - } - - // UIAA was successful! 
Remove this session and return true - self.update_uiaa_session( - user_id, - device_id, - uiaainfo.session.as_ref().expect("session is always set"), - None, - )?; - Ok((true, uiaainfo)) - } - +impl service::uiaa::Data for KeyValueDatabase { fn set_uiaa_request( &self, user_id: &UserId, @@ -162,7 +17,7 @@ impl Uiaa { Ok(()) } - pub fn get_uiaa_request( + fn get_uiaa_request( &self, user_id: &UserId, device_id: &DeviceId, diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs index ae26a7c4..a2b76e46 100644 --- a/src/service/rooms/state_accessor/data.rs +++ b/src/service/rooms/state_accessor/data.rs @@ -1,160 +1,51 @@ +pub trait Data { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] - pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - let mut result = BTreeMap::new(); - let mut i = 0; - for compressed in full_state.into_iter() { - let parsed = self.parse_compressed_state_event(compressed)?; - result.insert(parsed.0, parsed.1); + async fn state_full_ids(&self, shortstatehash: u64) -> Result>>; - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - Ok(result) - } - - #[tracing::instrument(skip(self))] - pub async fn state_full( + async fn state_full( &self, shortstatehash: u64, - ) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - let mut result = HashMap::new(); - let mut i = 0; - for compressed in full_state { - let (_, eventid) = self.parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.get_pdu(&eventid)? { - result.insert( - ( - pdu.kind.to_string().into(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - ); - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) - } + ) -> Result>>; /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get_id( + fn state_get_id( &self, shortstatehash: u64, event_type: &StateEventType, state_key: &str, - ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { - Some(s) => s, - None => return Ok(None), - }; - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .into_iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - self.parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) - } + ) -> Result>>; /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get( + fn state_get( &self, shortstatehash: u64, event_type: &StateEventType, state_key: &str, - ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) - } + ) -> Result>>; /// Returns the state hash for this pdu. - pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.eventid_shorteventid - .get(event_id.as_bytes())? - .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? 
- .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) - }) - .transpose() - }) - } + fn pdu_shortstatehash(&self, event_id: &EventId) -> Result>; /// Returns the full room state. - #[tracing::instrument(skip(self))] - pub async fn room_state_full( + async fn room_state_full( &self, room_id: &RoomId, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) - } - } + ) -> Result>>; /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get_id( + fn room_state_get_id( &self, room_id: &RoomId, event_type: &StateEventType, state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } + ) -> Result>>; /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get( + fn room_state_get( &self, room_id: &RoomId, event_type: &StateEventType, state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - + ) -> Result>>; +} diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index ae26a7c4..28a49a98 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -1,24 +1,18 @@ +mod data; +pub use data::Data; + +use crate::service::*; + +pub struct Service { + db: D, +} + +impl Service<_> { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. #[tracing::instrument(skip(self))] pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - let mut result = BTreeMap::new(); - let mut i = 0; - for compressed in full_state.into_iter() { - let parsed = self.parse_compressed_state_event(compressed)?; - result.insert(parsed.0, parsed.1); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - Ok(result) + self.db.state_full_ids(shortstatehash) } #[tracing::instrument(skip(self))] @@ -26,36 +20,7 @@ &self, shortstatehash: u64, ) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - let mut result = HashMap::new(); - let mut i = 0; - for compressed in full_state { - let (_, eventid) = self.parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.get_pdu(&eventid)? { - result.insert( - ( - pdu.kind.to_string().into(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - ); - } - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - Ok(result) + self.db.state_full(shortstatehash) } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). @@ -66,23 +31,7 @@ event_type: &StateEventType, state_key: &str, ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? 
{ - Some(s) => s, - None => return Ok(None), - }; - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .into_iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - self.parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) + self.db.state_get_id(shortstatehash, event_type, state_key) } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). @@ -93,26 +42,12 @@ event_type: &StateEventType, state_key: &str, ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) + self.db.pdu_state_get(event_id) } /// Returns the state hash for this pdu. pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.eventid_shorteventid - .get(event_id.as_bytes())? - .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) - }) - .transpose() - }) + self.db.pdu_shortstatehash(event_id) } /// Returns the full room state. @@ -121,11 +56,7 @@ &self, room_id: &RoomId, ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_full(current_shortstatehash).await - } else { - Ok(HashMap::new()) - } + self.db.room_state_full(room_id) } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). @@ -136,11 +67,7 @@ event_type: &StateEventType, state_key: &str, ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } + self.db.room_state_get_id(room_id, event_type, state_key) } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). @@ -151,10 +78,6 @@ event_type: &StateEventType, state_key: &str, ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } + self.db.room_state_get(room_id, event_type, state_key) } - +} diff --git a/src/service/rooms/state_cache/data.rs b/src/service/rooms/state_cache/data.rs new file mode 100644 index 00000000..166d4f6b --- /dev/null +++ b/src/service/rooms/state_cache/data.rs @@ -0,0 +1,3 @@ +pub trait Data { + fn mark_as_once_joined(user_id: &UserId, room_id: &RoomId) -> Result<()>; +} diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index e7f457e6..778679de 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -1,4 +1,13 @@ +mod data; +pub use data::Data; +use crate::service::*; + +pub struct Service { + db: D, +} + +impl Service<_> { /// Update current membership data. #[tracing::instrument(skip(self, last_state, db))] pub fn update_membership( @@ -25,10 +34,6 @@ serverroom_id.push(0xff); serverroom_id.extend_from_slice(room_id.as_bytes()); - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - let mut roomuser_id = room_id.as_bytes().to_vec(); roomuser_id.push(0xff); roomuser_id.extend_from_slice(user_id.as_bytes()); @@ -38,7 +43,7 @@ // Check if the user never joined this room if !self.once_joined(user_id, room_id)? 
{ // Add the user ID to the join list then - self.roomuseroncejoinedids.insert(&userroom_id, &[])?; + self.db.mark_as_once_joined(user_id, room_id)?; // Check if the room has a predecessor if let Some(predecessor) = self @@ -116,10 +121,6 @@ } } - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } self.userroomid_joined.insert(&userroom_id, &[])?; self.roomuserid_joined.insert(&roomuser_id, &[])?; self.userroomid_invitestate.remove(&userroom_id)?; @@ -150,10 +151,6 @@ return Ok(()); } - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } self.userroomid_invitestate.insert( &userroom_id, &serde_json::to_vec(&last_state.unwrap_or_default()) @@ -167,16 +164,6 @@ self.roomuserid_leftcount.remove(&roomuser_id)?; } MembershipState::Leave | MembershipState::Ban => { - if update_joined_count - && self - .room_members(room_id) - .chain(self.room_members_invited(room_id)) - .filter_map(|r| r.ok()) - .all(|u| u.server_name() != user_id.server_name()) - { - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } self.userroomid_leftstate.insert( &userroom_id, &serde_json::to_vec(&Vec::>::new()).unwrap(), @@ -231,36 +218,6 @@ .unwrap() .insert(room_id.to_owned(), Arc::new(real_users)); - for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { - if !joined_servers.remove(&old_joined_server) { - // Server not in room anymore - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(old_joined_server.as_bytes()); - - let mut serverroom_id = old_joined_server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - } - - // Now only new servers are in joined_servers anymore - for server in joined_servers { - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(server.as_bytes()); - - let mut serverroom_id = server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.appservice_in_room_cache .write() .unwrap() @@ -714,4 +671,4 @@ Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) } - +} diff --git a/src/service/rooms/user/data.rs b/src/service/rooms/user/data.rs index 976ab5b3..47a44eef 100644 --- a/src/service/rooms/user/data.rs +++ b/src/service/rooms/user/data.rs @@ -1,114 +1,21 @@ +pub trait Data { + fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; - #[tracing::instrument(skip(self))] - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); + fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result; - self.userroomid_notificationcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - self.userroomid_highlightcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; + fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result; - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn notification_count(&self, user_id: 
&UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid notification count in db.")) - }) - .unwrap_or(Ok(0)) - } - - #[tracing::instrument(skip(self))] - pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_highlightcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid highlight count in db.")) - }) - .unwrap_or(Ok(0)) - } - - pub fn associate_token_shortstatehash( + fn associate_token_shortstatehash( &self, room_id: &RoomId, token: u64, shortstatehash: u64, - ) -> Result<()> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .insert(&key, &shortstatehash.to_be_bytes()) - } + ) -> Result<()>; - pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); + fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result>; - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") - }) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn get_shared_rooms<'a>( + fn get_shared_rooms<'a>( &'a self, users: Vec>, - ) -> Result>> + 'a> { - let iterators = users.into_iter().map(move |user_id| { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_joined - .scan_prefix(prefix) - .map(|(key, _)| { - let roomid_index = key - .iter() - .enumerate() - .find(|(_, &b)| b == 0xff) - .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? - .0 - + 1; // +1 because the room id starts AFTER the separator - - let room_id = key[roomid_index..].to_vec(); - - Ok::<_, Error>(room_id) - }) - .filter_map(|r| r.ok()) - }); - - // We use the default compare function because keys are sorted correctly (not reversed) - Ok(utils::common_elements(iterators, Ord::cmp) - .expect("users is not empty") - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { - Error::bad_database("Invalid RoomId bytes in userroomid_joined") - })?) 
- .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - })) - } - + ) -> Result>> + 'a>; +} diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs index 976ab5b3..45fb3551 100644 --- a/src/service/rooms/user/mod.rs +++ b/src/service/rooms/user/mod.rs @@ -1,46 +1,23 @@ +mod data; +pub use data::Data; - #[tracing::instrument(skip(self))] - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); +use crate::service::*; - self.userroomid_notificationcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - self.userroomid_highlightcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; +pub struct Service { + db: D, +} - Ok(()) +impl Service<_> { + pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { + self.db.reset_notification_counts(user_id, room_id) } - #[tracing::instrument(skip(self))] pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid notification count in db.")) - }) - .unwrap_or(Ok(0)) + self.db.notification_count(user_id, room_id) } - #[tracing::instrument(skip(self))] pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_highlightcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid highlight count in db.")) - }) - .unwrap_or(Ok(0)) + self.db.highlight_count(user_id, room_id) } pub fn associate_token_shortstatehash( @@ -49,66 +26,17 @@ token: u64, shortstatehash: u64, ) -> Result<()> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .insert(&key, &shortstatehash.to_be_bytes()) + self.db.associate_token_shortstatehash(user_id, room_id) } pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") - }) - }) - .transpose() + self.db.get_token_shortstatehash(room_id, token) } - #[tracing::instrument(skip(self))] pub fn get_shared_rooms<'a>( &'a self, users: Vec>, ) -> Result>> + 'a> { - let iterators = users.into_iter().map(move |user_id| { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_joined - .scan_prefix(prefix) - .map(|(key, _)| { - let roomid_index = key - .iter() - .enumerate() - .find(|(_, &b)| b == 0xff) - .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? 
- .0 - + 1; // +1 because the room id starts AFTER the separator - - let room_id = key[roomid_index..].to_vec(); - - Ok::<_, Error>(room_id) - }) - .filter_map(|r| r.ok()) - }); - - // We use the default compare function because keys are sorted correctly (not reversed) - Ok(utils::common_elements(iterators, Ord::cmp) - .expect("users is not empty") - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { - Error::bad_database("Invalid RoomId bytes in userroomid_joined") - })?) - .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - })) + self.db.get_shared_rooms(users) } - +} diff --git a/src/service/uiaa/data.rs b/src/service/uiaa/data.rs index 12373139..40e69bda 100644 --- a/src/service/uiaa/data.rs +++ b/src/service/uiaa/data.rs @@ -1,179 +1,18 @@ -use std::{ - collections::BTreeMap, - sync::{Arc, RwLock}, -}; - -use crate::{client_server::SESSION_ID_LENGTH, utils, Error, Result}; -use ruma::{ - api::client::{ - error::ErrorKind, - uiaa::{ - AuthType, IncomingAuthData, IncomingPassword, - IncomingUserIdentifier::UserIdOrLocalpart, UiaaInfo, - }, - }, - signatures::CanonicalJsonValue, - DeviceId, UserId, -}; -use tracing::error; - -use super::abstraction::Tree; - -pub struct Uiaa { - pub(super) userdevicesessionid_uiaainfo: Arc, // User-interactive authentication - pub(super) userdevicesessionid_uiaarequest: - RwLock, Box, String), CanonicalJsonValue>>, -} - -impl Uiaa { - /// Creates a new Uiaa session. Make sure the session token is unique. - pub fn create( - &self, - user_id: &UserId, - device_id: &DeviceId, - uiaainfo: &UiaaInfo, - json_body: &CanonicalJsonValue, - ) -> Result<()> { - self.set_uiaa_request( - user_id, - device_id, - uiaainfo.session.as_ref().expect("session should be set"), // TODO: better session error handling (why is it optional in ruma?) - json_body, - )?; - self.update_uiaa_session( - user_id, - device_id, - uiaainfo.session.as_ref().expect("session should be set"), - Some(uiaainfo), - ) - } - - pub fn try_auth( - &self, - user_id: &UserId, - device_id: &DeviceId, - auth: &IncomingAuthData, - uiaainfo: &UiaaInfo, - users: &super::users::Users, - globals: &super::globals::Globals, - ) -> Result<(bool, UiaaInfo)> { - let mut uiaainfo = auth - .session() - .map(|session| self.get_uiaa_session(user_id, device_id, session)) - .unwrap_or_else(|| Ok(uiaainfo.clone()))?; - - if uiaainfo.session.is_none() { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - } - - match auth { - // Find out what the user completed - IncomingAuthData::Password(IncomingPassword { - identifier, - password, - .. - }) => { - let username = match identifier { - UserIdOrLocalpart(username) => username, - _ => { - return Err(Error::BadRequest( - ErrorKind::Unrecognized, - "Identifier type not recognized.", - )) - } - }; - - let user_id = - UserId::parse_with_server_name(username.clone(), globals.server_name()) - .map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "User ID is invalid.") - })?; - - // Check if password is correct - if let Some(hash) = users.password_hash(&user_id)? { - let hash_matches = - argon2::verify_encoded(&hash, password.as_bytes()).unwrap_or(false); - - if !hash_matches { - uiaainfo.auth_error = Some(ruma::api::client::error::ErrorBody { - kind: ErrorKind::Forbidden, - message: "Invalid username or password.".to_owned(), - }); - return Ok((false, uiaainfo)); - } - } - - // Password was correct! 
Let's add it to `completed` - uiaainfo.completed.push(AuthType::Password); - } - IncomingAuthData::Dummy(_) => { - uiaainfo.completed.push(AuthType::Dummy); - } - k => error!("type not supported: {:?}", k), - } - - // Check if a flow now succeeds - let mut completed = false; - 'flows: for flow in &mut uiaainfo.flows { - for stage in &flow.stages { - if !uiaainfo.completed.contains(stage) { - continue 'flows; - } - } - // We didn't break, so this flow succeeded! - completed = true; - } - - if !completed { - self.update_uiaa_session( - user_id, - device_id, - uiaainfo.session.as_ref().expect("session is always set"), - Some(&uiaainfo), - )?; - return Ok((false, uiaainfo)); - } - - // UIAA was successful! Remove this session and return true - self.update_uiaa_session( - user_id, - device_id, - uiaainfo.session.as_ref().expect("session is always set"), - None, - )?; - Ok((true, uiaainfo)) - } - +pub trait Data { fn set_uiaa_request( &self, user_id: &UserId, device_id: &DeviceId, session: &str, request: &CanonicalJsonValue, - ) -> Result<()> { - self.userdevicesessionid_uiaarequest - .write() - .unwrap() - .insert( - (user_id.to_owned(), device_id.to_owned(), session.to_owned()), - request.to_owned(), - ); - - Ok(()) - } + ) -> Result<()>; - pub fn get_uiaa_request( + fn get_uiaa_request( &self, user_id: &UserId, device_id: &DeviceId, session: &str, - ) -> Option { - self.userdevicesessionid_uiaarequest - .read() - .unwrap() - .get(&(user_id.to_owned(), device_id.to_owned(), session.to_owned())) - .map(|j| j.to_owned()) - } + ) -> Option; fn update_uiaa_session( &self, @@ -181,47 +20,12 @@ impl Uiaa { device_id: &DeviceId, session: &str, uiaainfo: Option<&UiaaInfo>, - ) -> Result<()> { - let mut userdevicesessionid = user_id.as_bytes().to_vec(); - userdevicesessionid.push(0xff); - userdevicesessionid.extend_from_slice(device_id.as_bytes()); - userdevicesessionid.push(0xff); - userdevicesessionid.extend_from_slice(session.as_bytes()); - - if let Some(uiaainfo) = uiaainfo { - self.userdevicesessionid_uiaainfo.insert( - &userdevicesessionid, - &serde_json::to_vec(&uiaainfo).expect("UiaaInfo::to_vec always works"), - )?; - } else { - self.userdevicesessionid_uiaainfo - .remove(&userdevicesessionid)?; - } - - Ok(()) - } + ) -> Result<()>; fn get_uiaa_session( &self, user_id: &UserId, device_id: &DeviceId, session: &str, - ) -> Result { - let mut userdevicesessionid = user_id.as_bytes().to_vec(); - userdevicesessionid.push(0xff); - userdevicesessionid.extend_from_slice(device_id.as_bytes()); - userdevicesessionid.push(0xff); - userdevicesessionid.extend_from_slice(session.as_bytes()); - - serde_json::from_slice( - &self - .userdevicesessionid_uiaainfo - .get(&userdevicesessionid)? 
- .ok_or(Error::BadRequest( - ErrorKind::Forbidden, - "UIAA session does not exist.", - ))?, - ) - .map_err(|_| Error::bad_database("UiaaInfo in userdeviceid_uiaainfo is invalid.")) - } + ) -> Result; } diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index 12373139..593ea5f2 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -1,31 +1,13 @@ -use std::{ - collections::BTreeMap, - sync::{Arc, RwLock}, -}; +mod data; +pub use data::Data; -use crate::{client_server::SESSION_ID_LENGTH, utils, Error, Result}; -use ruma::{ - api::client::{ - error::ErrorKind, - uiaa::{ - AuthType, IncomingAuthData, IncomingPassword, - IncomingUserIdentifier::UserIdOrLocalpart, UiaaInfo, - }, - }, - signatures::CanonicalJsonValue, - DeviceId, UserId, -}; -use tracing::error; +use crate::service::*; -use super::abstraction::Tree; - -pub struct Uiaa { - pub(super) userdevicesessionid_uiaainfo: Arc, // User-interactive authentication - pub(super) userdevicesessionid_uiaarequest: - RwLock, Box, String), CanonicalJsonValue>>, +pub struct Service { + db: D, } -impl Uiaa { +impl Service<_> { /// Creates a new Uiaa session. Make sure the session token is unique. pub fn create( &self, @@ -144,35 +126,13 @@ impl Uiaa { Ok((true, uiaainfo)) } - fn set_uiaa_request( - &self, - user_id: &UserId, - device_id: &DeviceId, - session: &str, - request: &CanonicalJsonValue, - ) -> Result<()> { - self.userdevicesessionid_uiaarequest - .write() - .unwrap() - .insert( - (user_id.to_owned(), device_id.to_owned(), session.to_owned()), - request.to_owned(), - ); - - Ok(()) - } - pub fn get_uiaa_request( &self, user_id: &UserId, device_id: &DeviceId, session: &str, ) -> Option { - self.userdevicesessionid_uiaarequest - .read() - .unwrap() - .get(&(user_id.to_owned(), device_id.to_owned(), session.to_owned())) - .map(|j| j.to_owned()) + self.db.get_uiaa_request(user_id, device_id, session) } fn update_uiaa_session( @@ -182,46 +142,6 @@ impl Uiaa { session: &str, uiaainfo: Option<&UiaaInfo>, ) -> Result<()> { - let mut userdevicesessionid = user_id.as_bytes().to_vec(); - userdevicesessionid.push(0xff); - userdevicesessionid.extend_from_slice(device_id.as_bytes()); - userdevicesessionid.push(0xff); - userdevicesessionid.extend_from_slice(session.as_bytes()); - - if let Some(uiaainfo) = uiaainfo { - self.userdevicesessionid_uiaainfo.insert( - &userdevicesessionid, - &serde_json::to_vec(&uiaainfo).expect("UiaaInfo::to_vec always works"), - )?; - } else { - self.userdevicesessionid_uiaainfo - .remove(&userdevicesessionid)?; - } - - Ok(()) - } - - fn get_uiaa_session( - &self, - user_id: &UserId, - device_id: &DeviceId, - session: &str, - ) -> Result { - let mut userdevicesessionid = user_id.as_bytes().to_vec(); - userdevicesessionid.push(0xff); - userdevicesessionid.extend_from_slice(device_id.as_bytes()); - userdevicesessionid.push(0xff); - userdevicesessionid.extend_from_slice(session.as_bytes()); - - serde_json::from_slice( - &self - .userdevicesessionid_uiaainfo - .get(&userdevicesessionid)? - .ok_or(Error::BadRequest( - ErrorKind::Forbidden, - "UIAA session does not exist.", - ))?, - ) - .map_err(|_| Error::bad_database("UiaaInfo in userdeviceid_uiaainfo is invalid.")) + self.db.update_uiaa_session(user_id, device_id, session, uiaainfo) } } From 057f8364cc317dc8646043abd6c8ff3ef759625f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 6 Sep 2022 23:15:09 +0200 Subject: [PATCH 376/445] fix: some compile time errors Only 174 errors left! 
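
For readers following this refactor: the shape these patches keep introducing is a storage
trait (`Data`) behind a thin `Service` wrapper that owns the business logic and simply
delegates key-value work to whichever backend implements the trait. The snippet below is a
minimal, self-contained illustration of that shape only, not the actual conduit code; the
`MemoryData` backend, the `UserId`/`RoomId` type aliases and the `main` function are
invented stand-ins, and it shows a compiling form of the wrapper that the in-progress
diffs above are still converging on.

    // Illustrative only: a compiling miniature of the Data-trait/Service split.
    // `MemoryData`, the type aliases and `main` are stand-ins, not conduit types.
    type UserId = str;
    type RoomId = str;
    type Result<T> = std::result::Result<T, Box<dyn std::error::Error>>;

    // Storage interface: one method per lookup the service layer needs.
    pub trait Data {
        fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result<u64>;
    }

    // Business-logic layer, generic over any storage backend.
    pub struct Service<D: Data> {
        db: D,
    }

    impl<D: Data> Service<D> {
        pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result<u64> {
            // No byte/key mangling here any more; the backend builds its own keys.
            self.db.notification_count(user_id, room_id)
        }
    }

    // Toy backend standing in for the sled/rocksdb/sqlite key-value trees.
    struct MemoryData;

    impl Data for MemoryData {
        fn notification_count(&self, _user_id: &UserId, _room_id: &RoomId) -> Result<u64> {
            Ok(0)
        }
    }

    fn main() -> Result<()> {
        let service = Service { db: MemoryData };
        let count = service.notification_count("@alice:example.org", "!room:example.org")?;
        println!("unread notifications: {count}");
        Ok(())
    }

The point of the split is that key construction and (de)serialization stay inside the
backend-specific `Data` implementations, so the sqlite/rocksdb trees can change without
touching service logic, and route handlers can call `services()` instead of threading a
database guard through every function.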
--- Cargo.lock | 6 +- Cargo.toml | 2 + src/api/appservice_server.rs | 7 +- src/api/client_server/account.rs | 86 +-- src/api/client_server/alias.rs | 40 +- src/api/client_server/backup.rs | 107 ++-- src/api/client_server/capabilities.rs | 13 +- src/api/client_server/config.rs | 20 +- src/api/client_server/context.rs | 37 +- src/api/client_server/device.rs | 37 +- src/api/client_server/directory.rs | 40 +- src/api/client_server/filter.rs | 8 +- src/api/client_server/keys.rs | 83 +-- src/api/client_server/media.rs | 47 +- src/api/client_server/membership.rs | 570 ++++++++---------- src/api/client_server/message.rs | 37 +- src/api/client_server/presence.rs | 19 +- src/api/client_server/profile.rs | 81 ++- src/api/client_server/push.rs | 58 +- src/api/client_server/read_marker.rs | 31 +- src/api/client_server/redact.rs | 10 +- src/api/client_server/report.rs | 9 +- src/api/client_server/room.rs | 146 ++--- src/api/client_server/search.rs | 11 +- src/api/client_server/session.rs | 33 +- src/api/client_server/state.rs | 43 +- src/api/client_server/sync.rs | 142 ++--- src/api/client_server/tag.rs | 21 +- src/api/client_server/to_device.rs | 23 +- src/api/client_server/typing.rs | 12 +- src/api/client_server/user_directory.rs | 15 +- src/api/client_server/voip.rs | 15 +- src/api/mod.rs | 4 + src/api/ruma_wrapper/axum.rs | 18 +- src/api/server_server.rs | 403 ++++++------- src/database/abstraction.rs | 4 +- src/database/abstraction/rocksdb.rs | 8 +- src/database/abstraction/sqlite.rs | 8 +- src/database/key_value/appservice.rs | 4 +- src/database/key_value/mod.rs | 14 +- src/database/key_value/pusher.rs | 4 + src/database/key_value/rooms/alias.rs | 8 +- src/database/key_value/rooms/directory.rs | 10 +- src/database/key_value/rooms/edus/mod.rs | 3 + src/database/key_value/rooms/edus/presence.rs | 12 +- .../key_value/rooms/edus/read_receipt.rs | 14 +- src/database/key_value/rooms/edus/typing.rs | 13 +- src/database/key_value/rooms/lazy_load.rs | 6 +- src/database/key_value/rooms/metadata.rs | 6 +- src/database/key_value/rooms/mod.rs | 7 +- src/database/key_value/rooms/outlier.rs | 6 +- src/database/key_value/rooms/pdu_metadata.rs | 8 +- src/database/key_value/rooms/search.rs | 12 +- src/database/key_value/rooms/state.rs | 23 +- .../key_value/rooms/state_accessor.rs | 10 +- src/database/key_value/rooms/state_cache.rs | 10 +- .../key_value/rooms/state_compressor.rs | 19 +- src/database/key_value/rooms/timeline.rs | 31 +- src/database/key_value/rooms/user.rs | 6 +- src/database/key_value/transaction_ids.rs | 10 +- src/database/key_value/uiaa.rs | 6 + src/database/key_value/users.rs | 138 ++--- src/database/mod.rs | 221 ++++--- src/lib.rs | 29 +- src/main.rs | 71 ++- src/service/account_data.rs | 12 +- src/service/admin.rs | 179 +++--- src/service/appservice/data.rs | 11 +- src/service/key_backups.rs | 21 +- src/service/media.rs | 1 - src/service/mod.rs | 28 + src/service/pdu.rs | 5 +- src/service/pusher/data.rs | 8 +- src/service/pusher/mod.rs | 64 +- src/service/rooms/alias/data.rs | 10 +- src/service/rooms/alias/mod.rs | 7 +- src/service/rooms/auth_chain/data.rs | 2 + src/service/rooms/auth_chain/mod.rs | 2 + src/service/rooms/directory/data.rs | 2 + src/service/rooms/directory/mod.rs | 9 +- src/service/rooms/edus/mod.rs | 6 + src/service/rooms/edus/presence/data.rs | 4 + src/service/rooms/edus/presence/mod.rs | 5 +- src/service/rooms/edus/read_receipt/data.rs | 2 + src/service/rooms/edus/read_receipt/mod.rs | 7 +- src/service/rooms/edus/typing/data.rs | 4 + src/service/rooms/edus/typing/mod.rs | 2 +- 
src/service/rooms/event_handler/mod.rs | 498 ++++++++------- src/service/rooms/lazy_loading/data.rs | 2 + src/service/rooms/lazy_loading/mod.rs | 7 +- src/service/rooms/metadata/data.rs | 2 + src/service/rooms/metadata/mod.rs | 1 + src/service/rooms/mod.rs | 251 ++------ src/service/rooms/outlier/data.rs | 4 + src/service/rooms/outlier/mod.rs | 3 +- src/service/rooms/pdu_metadata/data.rs | 4 + src/service/rooms/pdu_metadata/mod.rs | 3 + src/service/rooms/search/data.rs | 6 +- src/service/rooms/search/mod.rs | 3 +- src/service/rooms/short/mod.rs | 6 +- src/service/rooms/state/data.rs | 12 +- src/service/rooms/state/mod.rs | 66 +- src/service/rooms/state_accessor/data.rs | 6 + src/service/rooms/state_accessor/mod.rs | 7 +- src/service/rooms/state_cache/data.rs | 2 + src/service/rooms/state_cache/mod.rs | 34 +- src/service/rooms/state_compressor/data.rs | 4 +- src/service/rooms/state_compressor/mod.rs | 17 +- src/service/rooms/timeline/data.rs | 31 +- src/service/rooms/timeline/mod.rs | 53 +- src/service/rooms/user/mod.rs | 1 + src/service/transaction_ids/data.rs | 4 +- src/service/transaction_ids/mod.rs | 1 + src/service/uiaa/data.rs | 2 + src/service/uiaa/mod.rs | 10 +- src/service/users/data.rs | 108 ++-- src/service/users/mod.rs | 24 +- src/utils/{utils.rs => mod.rs} | 2 + 118 files changed, 2113 insertions(+), 2407 deletions(-) create mode 100644 src/api/mod.rs create mode 100644 src/database/key_value/rooms/edus/mod.rs create mode 100644 src/service/mod.rs rename src/utils/{utils.rs => mod.rs} (99%) diff --git a/Cargo.lock b/Cargo.lock index d8d791f0..c074c760 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -98,9 +98,9 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.56" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96cf8829f67d2eab0b2dfa42c5d0ef737e0724e4a82b01b3e292456202b19716" +checksum = "76464446b8bc32758d7e88ee1a804d9914cd9b1cb264c029899680b0be29826f" dependencies = [ "proc-macro2", "quote", @@ -408,6 +408,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" name = "conduit" version = "0.3.0-next" dependencies = [ + "async-trait", "axum", "axum-server", "base64 0.13.0", @@ -422,6 +423,7 @@ dependencies = [ "http", "image", "jsonwebtoken", + "lazy_static", "lru-cache", "num_cpus", "opentelemetry", diff --git a/Cargo.toml b/Cargo.toml index f150c4e7..b88674dd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -90,6 +90,8 @@ figment = { version = "0.10.6", features = ["env", "toml"] } tikv-jemalloc-ctl = { version = "0.4.2", features = ["use_std"], optional = true } tikv-jemallocator = { version = "0.4.1", features = ["unprefixed_malloc_on_supported_platforms"], optional = true } +lazy_static = "1.4.0" +async-trait = "0.1.57" [features] default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "jemalloc"] diff --git a/src/api/appservice_server.rs b/src/api/appservice_server.rs index ce122dad..1f6e2c9d 100644 --- a/src/api/appservice_server.rs +++ b/src/api/appservice_server.rs @@ -1,12 +1,11 @@ -use crate::{utils, Error, Result}; +use crate::{utils, Error, Result, services}; use bytes::BytesMut; use ruma::api::{IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken}; use std::{fmt::Debug, mem, time::Duration}; use tracing::warn; -#[tracing::instrument(skip(globals, request))] +#[tracing::instrument(skip(request))] pub(crate) async fn send_request( - globals: &crate::database::globals::Globals, registration: serde_yaml::Value, request: T, ) -> Result @@ -46,7 +45,7 @@ where 
*reqwest_request.timeout_mut() = Some(Duration::from_secs(30)); let url = reqwest_request.url().clone(); - let mut response = globals.default_client().execute(reqwest_request).await?; + let mut response = services().globals.default_client().execute(reqwest_request).await?; // reqwest::Response -> http::Response conversion let status = response.status(); diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index dc0782d1..848bfaa7 100644 --- a/src/api/client_server/account.rs +++ b/src/api/client_server/account.rs @@ -2,9 +2,7 @@ use std::sync::Arc; use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; use crate::{ - database::{admin::make_user_admin, DatabaseGuard}, - pdu::PduBuilder, - utils, Database, Error, Result, Ruma, + utils, Error, Result, Ruma, services, }; use ruma::{ api::client::{ @@ -42,15 +40,14 @@ const RANDOM_USER_ID_LENGTH: usize = 10; /// /// Note: This will not reserve the username, so the username might become invalid when trying to register pub async fn get_register_available_route( - db: DatabaseGuard, body: Ruma, ) -> Result { // Validate user id let user_id = - UserId::parse_with_server_name(body.username.to_lowercase(), db.globals.server_name()) + UserId::parse_with_server_name(body.username.to_lowercase(), services().globals.server_name()) .ok() .filter(|user_id| { - !user_id.is_historical() && user_id.server_name() == db.globals.server_name() + !user_id.is_historical() && user_id.server_name() == services().globals.server_name() }) .ok_or(Error::BadRequest( ErrorKind::InvalidUsername, @@ -58,7 +55,7 @@ pub async fn get_register_available_route( ))?; // Check if username is creative enough - if db.users.exists(&user_id)? { + if services().users.exists(&user_id)? { return Err(Error::BadRequest( ErrorKind::UserInUse, "Desired user ID is already taken.", @@ -85,10 +82,9 @@ pub async fn get_register_available_route( /// - Creates a new account and populates it with default account data /// - If `inhibit_login` is false: Creates a device and returns device id and access_token pub async fn register_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if !db.globals.allow_registration() && !body.from_appservice { + if !services().globals.allow_registration() && !body.from_appservice { return Err(Error::BadRequest( ErrorKind::Forbidden, "Registration has been disabled.", @@ -100,17 +96,17 @@ pub async fn register_route( let user_id = match (&body.username, is_guest) { (Some(username), false) => { let proposed_user_id = - UserId::parse_with_server_name(username.to_lowercase(), db.globals.server_name()) + UserId::parse_with_server_name(username.to_lowercase(), services().globals.server_name()) .ok() .filter(|user_id| { !user_id.is_historical() - && user_id.server_name() == db.globals.server_name() + && user_id.server_name() == services().globals.server_name() }) .ok_or(Error::BadRequest( ErrorKind::InvalidUsername, "Username is invalid.", ))?; - if db.users.exists(&proposed_user_id)? { + if services().users.exists(&proposed_user_id)? { return Err(Error::BadRequest( ErrorKind::UserInUse, "Desired user ID is already taken.", @@ -121,10 +117,10 @@ pub async fn register_route( _ => loop { let proposed_user_id = UserId::parse_with_server_name( utils::random_string(RANDOM_USER_ID_LENGTH).to_lowercase(), - db.globals.server_name(), + services().globals.server_name(), ) .unwrap(); - if !db.users.exists(&proposed_user_id)? { + if !services().users.exists(&proposed_user_id)? 
{ break proposed_user_id; } }, @@ -143,14 +139,12 @@ pub async fn register_route( if !body.from_appservice { if let Some(auth) = &body.auth { - let (worked, uiaainfo) = db.uiaa.try_auth( - &UserId::parse_with_server_name("", db.globals.server_name()) + let (worked, uiaainfo) = services().uiaa.try_auth( + &UserId::parse_with_server_name("", services().globals.server_name()) .expect("we know this is valid"), "".into(), auth, &uiaainfo, - &db.users, - &db.globals, )?; if !worked { return Err(Error::Uiaa(uiaainfo)); @@ -158,8 +152,8 @@ pub async fn register_route( // Success! } else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa.create( - &UserId::parse_with_server_name("", db.globals.server_name()) + services().uiaa.create( + &UserId::parse_with_server_name("", services().globals.server_name()) .expect("we know this is valid"), "".into(), &uiaainfo, @@ -178,15 +172,15 @@ pub async fn register_route( }; // Create user - db.users.create(&user_id, password)?; + services().users.create(&user_id, password)?; // Default to pretty displayname let displayname = format!("{} ⚡️", user_id.localpart()); - db.users + services().users .set_displayname(&user_id, Some(displayname.clone()))?; // Initial account data - db.account_data.update( + services().account_data.update( None, &user_id, GlobalAccountDataEventType::PushRules.to_string().into(), @@ -195,7 +189,6 @@ pub async fn register_route( global: push::Ruleset::server_default(&user_id), }, }, - &db.globals, )?; // Inhibit login does not work for guests @@ -219,7 +212,7 @@ pub async fn register_route( let token = utils::random_string(TOKEN_LENGTH); // Create device for this account - db.users.create_device( + services().users.create_device( &user_id, &device_id, &token, @@ -227,7 +220,7 @@ pub async fn register_route( )?; info!("New user {} registered on this server.", user_id); - db.admin + services().admin .send_message(RoomMessageEventContent::notice_plain(format!( "New user {} registered on this server.", user_id @@ -235,14 +228,12 @@ pub async fn register_route( // If this is the first real user, grant them admin privileges // Note: the server user, @conduit:servername, is generated first - if db.users.count()? == 2 { - make_user_admin(&db, &user_id, displayname).await?; + if services().users.count()? == 2 { + services().admin.make_user_admin(&user_id, displayname).await?; warn!("Granting {} admin privileges as the first user", user_id); } - db.flush()?; - Ok(register::v3::Response { access_token: Some(token), user_id, @@ -265,7 +256,6 @@ pub async fn register_route( /// - Forgets to-device events /// - Triggers device list updates pub async fn change_password_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -282,13 +272,11 @@ pub async fn change_password_route( }; if let Some(auth) = &body.auth { - let (worked, uiaainfo) = db.uiaa.try_auth( + let (worked, uiaainfo) = services().uiaa.try_auth( sender_user, sender_device, auth, &uiaainfo, - &db.users, - &db.globals, )?; if !worked { return Err(Error::Uiaa(uiaainfo)); @@ -296,32 +284,30 @@ pub async fn change_password_route( // Success! 
} else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa + services().uiaa .create(sender_user, sender_device, &uiaainfo, &json)?; return Err(Error::Uiaa(uiaainfo)); } else { return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); } - db.users + services().users .set_password(sender_user, Some(&body.new_password))?; if body.logout_devices { // Logout all devices except the current one - for id in db + for id in services() .users .all_device_ids(sender_user) .filter_map(|id| id.ok()) .filter(|id| id != sender_device) { - db.users.remove_device(sender_user, &id)?; + services().users.remove_device(sender_user, &id)?; } } - db.flush()?; - info!("User {} changed their password.", sender_user); - db.admin + services().admin .send_message(RoomMessageEventContent::notice_plain(format!( "User {} changed their password.", sender_user @@ -336,7 +322,6 @@ pub async fn change_password_route( /// /// Note: Also works for Application Services pub async fn whoami_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -345,7 +330,7 @@ pub async fn whoami_route( Ok(whoami::v3::Response { user_id: sender_user.clone(), device_id, - is_guest: db.users.is_deactivated(&sender_user)?, + is_guest: services().users.is_deactivated(&sender_user)?, }) } @@ -360,7 +345,6 @@ pub async fn whoami_route( /// - Triggers device list updates /// - Removes ability to log in again pub async fn deactivate_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -377,13 +361,11 @@ pub async fn deactivate_route( }; if let Some(auth) = &body.auth { - let (worked, uiaainfo) = db.uiaa.try_auth( + let (worked, uiaainfo) = services().uiaa.try_auth( sender_user, sender_device, auth, &uiaainfo, - &db.users, - &db.globals, )?; if !worked { return Err(Error::Uiaa(uiaainfo)); @@ -391,7 +373,7 @@ pub async fn deactivate_route( // Success! } else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa + services().uiaa .create(sender_user, sender_device, &uiaainfo, &json)?; return Err(Error::Uiaa(uiaainfo)); } else { @@ -399,20 +381,18 @@ pub async fn deactivate_route( } // Make the user leave all rooms before deactivation - db.rooms.leave_all_rooms(&sender_user, &db).await?; + services().rooms.leave_all_rooms(&sender_user).await?; // Remove devices and mark account as deactivated - db.users.deactivate_account(sender_user)?; + services().users.deactivate_account(sender_user)?; info!("User {} deactivated their account.", sender_user); - db.admin + services().admin .send_message(RoomMessageEventContent::notice_plain(format!( "User {} deactivated their account.", sender_user ))); - db.flush()?; - Ok(deactivate::v3::Response { id_server_unbind_result: ThirdPartyIdRemovalStatus::NoSupport, }) diff --git a/src/api/client_server/alias.rs b/src/api/client_server/alias.rs index 90e9d2c3..7aa5fb2c 100644 --- a/src/api/client_server/alias.rs +++ b/src/api/client_server/alias.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, Database, Error, Result, Ruma}; +use crate::{Error, Result, Ruma, services}; use regex::Regex; use ruma::{ api::{ @@ -16,24 +16,21 @@ use ruma::{ /// /// Creates a new room alias on this server. 
pub async fn create_alias_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if body.room_alias.server_name() != db.globals.server_name() { + if body.room_alias.server_name() != services().globals.server_name() { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Alias is from another server.", )); } - if db.rooms.id_from_alias(&body.room_alias)?.is_some() { + if services().rooms.id_from_alias(&body.room_alias)?.is_some() { return Err(Error::Conflict("Alias already exists.")); } - db.rooms - .set_alias(&body.room_alias, Some(&body.room_id), &db.globals)?; - - db.flush()?; + services().rooms + .set_alias(&body.room_alias, Some(&body.room_id))?; Ok(create_alias::v3::Response::new()) } @@ -45,22 +42,19 @@ pub async fn create_alias_route( /// - TODO: additional access control checks /// - TODO: Update canonical alias event pub async fn delete_alias_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if body.room_alias.server_name() != db.globals.server_name() { + if body.room_alias.server_name() != services().globals.server_name() { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Alias is from another server.", )); } - db.rooms.set_alias(&body.room_alias, None, &db.globals)?; + services().rooms.set_alias(&body.room_alias, None)?; // TODO: update alt_aliases? - db.flush()?; - Ok(delete_alias::v3::Response::new()) } @@ -70,21 +64,18 @@ pub async fn delete_alias_route( /// /// - TODO: Suggest more servers to join via pub async fn get_alias_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - get_alias_helper(&db, &body.room_alias).await + get_alias_helper(&body.room_alias).await } pub(crate) async fn get_alias_helper( - db: &Database, room_alias: &RoomAliasId, ) -> Result { - if room_alias.server_name() != db.globals.server_name() { - let response = db + if room_alias.server_name() != services().globals.server_name() { + let response = services() .sending .send_federation_request( - &db.globals, room_alias.server_name(), federation::query::get_room_information::v1::Request { room_alias }, ) @@ -97,10 +88,10 @@ pub(crate) async fn get_alias_helper( } let mut room_id = None; - match db.rooms.id_from_alias(room_alias)? { + match services().rooms.id_from_alias(room_alias)? { Some(r) => room_id = Some(r), None => { - for (_id, registration) in db.appservice.all()? { + for (_id, registration) in services().appservice.all()? { let aliases = registration .get("namespaces") .and_then(|ns| ns.get("aliases")) @@ -115,17 +106,16 @@ pub(crate) async fn get_alias_helper( if aliases .iter() .any(|aliases| aliases.is_match(room_alias.as_str())) - && db + && services() .sending .send_appservice_request( - &db.globals, registration, appservice::query::query_room_alias::v1::Request { room_alias }, ) .await .is_ok() { - room_id = Some(db.rooms.id_from_alias(room_alias)?.ok_or_else(|| { + room_id = Some(services().rooms.id_from_alias(room_alias)?.ok_or_else(|| { Error::bad_config("Appservice lied to us. 
Room does not exist.") })?); break; @@ -146,6 +136,6 @@ pub(crate) async fn get_alias_helper( Ok(get_alias::v3::Response::new( room_id, - vec![db.globals.server_name().to_owned()], + vec![services().globals.server_name().to_owned()], )) } diff --git a/src/api/client_server/backup.rs b/src/api/client_server/backup.rs index 067f20cd..e4138938 100644 --- a/src/api/client_server/backup.rs +++ b/src/api/client_server/backup.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, Error, Result, Ruma}; +use crate::{Error, Result, Ruma, services}; use ruma::api::client::{ backup::{ add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session, @@ -14,15 +14,12 @@ use ruma::api::client::{ /// /// Creates a new backup. pub async fn create_backup_version_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let version = db + let version = services() .key_backups - .create_backup(sender_user, &body.algorithm, &db.globals)?; - - db.flush()?; + .create_backup(sender_user, &body.algorithm)?; Ok(create_backup_version::v3::Response { version }) } @@ -31,14 +28,11 @@ pub async fn create_backup_version_route( /// /// Update information about an existing backup. Only `auth_data` can be modified. pub async fn update_backup_version_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - db.key_backups - .update_backup(sender_user, &body.version, &body.algorithm, &db.globals)?; - - db.flush()?; + services().key_backups + .update_backup(sender_user, &body.version, &body.algorithm)?; Ok(update_backup_version::v3::Response {}) } @@ -47,13 +41,12 @@ pub async fn update_backup_version_route( /// /// Get information about the latest backup version. pub async fn get_latest_backup_info_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let (version, algorithm) = - db.key_backups + services().key_backups .get_latest_backup(sender_user)? .ok_or(Error::BadRequest( ErrorKind::NotFound, @@ -62,8 +55,8 @@ pub async fn get_latest_backup_info_route( Ok(get_latest_backup_info::v3::Response { algorithm, - count: (db.key_backups.count_keys(sender_user, &version)? as u32).into(), - etag: db.key_backups.get_etag(sender_user, &version)?, + count: (services().key_backups.count_keys(sender_user, &version)? as u32).into(), + etag: services().key_backups.get_etag(sender_user, &version)?, version, }) } @@ -72,11 +65,10 @@ pub async fn get_latest_backup_info_route( /// /// Get information about an existing backup. pub async fn get_backup_info_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let algorithm = db + let algorithm = services() .key_backups .get_backup(sender_user, &body.version)? .ok_or(Error::BadRequest( @@ -86,8 +78,8 @@ pub async fn get_backup_info_route( Ok(get_backup_info::v3::Response { algorithm, - count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), - etag: db.key_backups.get_etag(sender_user, &body.version)?, + count: (services().key_backups.count_keys(sender_user, &body.version)? 
as u32).into(), + etag: services().key_backups.get_etag(sender_user, &body.version)?, version: body.version.to_owned(), }) } @@ -98,14 +90,11 @@ pub async fn get_backup_info_route( /// /// - Deletes both information about the backup, as well as all key data related to the backup pub async fn delete_backup_version_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - db.key_backups.delete_backup(sender_user, &body.version)?; - - db.flush()?; + services().key_backups.delete_backup(sender_user, &body.version)?; Ok(delete_backup_version::v3::Response {}) } @@ -118,13 +107,12 @@ pub async fn delete_backup_version_route( /// - Adds the keys to the backup /// - Returns the new number of keys in this backup and the etag pub async fn add_backup_keys_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if Some(&body.version) - != db + != services() .key_backups .get_latest_backup_version(sender_user)? .as_ref() @@ -137,22 +125,19 @@ pub async fn add_backup_keys_route( for (room_id, room) in &body.rooms { for (session_id, key_data) in &room.sessions { - db.key_backups.add_key( + services().key_backups.add_key( sender_user, &body.version, room_id, session_id, key_data, - &db.globals, )? } } - db.flush()?; - Ok(add_backup_keys::v3::Response { - count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), - etag: db.key_backups.get_etag(sender_user, &body.version)?, + count: (services().key_backups.count_keys(sender_user, &body.version)? as u32).into(), + etag: services().key_backups.get_etag(sender_user, &body.version)?, }) } @@ -164,13 +149,12 @@ pub async fn add_backup_keys_route( /// - Adds the keys to the backup /// - Returns the new number of keys in this backup and the etag pub async fn add_backup_keys_for_room_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if Some(&body.version) - != db + != services() .key_backups .get_latest_backup_version(sender_user)? .as_ref() @@ -182,21 +166,18 @@ pub async fn add_backup_keys_for_room_route( } for (session_id, key_data) in &body.sessions { - db.key_backups.add_key( + services().key_backups.add_key( sender_user, &body.version, &body.room_id, session_id, key_data, - &db.globals, )? } - db.flush()?; - Ok(add_backup_keys_for_room::v3::Response { - count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), - etag: db.key_backups.get_etag(sender_user, &body.version)?, + count: (services().key_backups.count_keys(sender_user, &body.version)? as u32).into(), + etag: services().key_backups.get_etag(sender_user, &body.version)?, }) } @@ -208,13 +189,12 @@ pub async fn add_backup_keys_for_room_route( /// - Adds the keys to the backup /// - Returns the new number of keys in this backup and the etag pub async fn add_backup_keys_for_session_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if Some(&body.version) - != db + != services() .key_backups .get_latest_backup_version(sender_user)? 
.as_ref() @@ -225,20 +205,17 @@ pub async fn add_backup_keys_for_session_route( )); } - db.key_backups.add_key( + services().key_backups.add_key( sender_user, &body.version, &body.room_id, &body.session_id, &body.session_data, - &db.globals, )?; - db.flush()?; - Ok(add_backup_keys_for_session::v3::Response { - count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), - etag: db.key_backups.get_etag(sender_user, &body.version)?, + count: (services().key_backups.count_keys(sender_user, &body.version)? as u32).into(), + etag: services().key_backups.get_etag(sender_user, &body.version)?, }) } @@ -246,12 +223,11 @@ pub async fn add_backup_keys_for_session_route( /// /// Retrieves all keys from the backup. pub async fn get_backup_keys_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let rooms = db.key_backups.get_all(sender_user, &body.version)?; + let rooms = services().key_backups.get_all(sender_user, &body.version)?; Ok(get_backup_keys::v3::Response { rooms }) } @@ -260,12 +236,11 @@ pub async fn get_backup_keys_route( /// /// Retrieves all keys from the backup for a given room. pub async fn get_backup_keys_for_room_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sessions = db + let sessions = services() .key_backups .get_room(sender_user, &body.version, &body.room_id)?; @@ -276,12 +251,11 @@ pub async fn get_backup_keys_for_room_route( /// /// Retrieves a key from the backup. pub async fn get_backup_keys_for_session_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let key_data = db + let key_data = services() .key_backups .get_session(sender_user, &body.version, &body.room_id, &body.session_id)? .ok_or(Error::BadRequest( @@ -296,18 +270,15 @@ pub async fn get_backup_keys_for_session_route( /// /// Delete the keys from the backup. pub async fn delete_backup_keys_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - db.key_backups.delete_all_keys(sender_user, &body.version)?; - - db.flush()?; + services().key_backups.delete_all_keys(sender_user, &body.version)?; Ok(delete_backup_keys::v3::Response { - count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), - etag: db.key_backups.get_etag(sender_user, &body.version)?, + count: (services().key_backups.count_keys(sender_user, &body.version)? as u32).into(), + etag: services().key_backups.get_etag(sender_user, &body.version)?, }) } @@ -315,19 +286,16 @@ pub async fn delete_backup_keys_route( /// /// Delete the keys from the backup for a given room. pub async fn delete_backup_keys_for_room_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - db.key_backups + services().key_backups .delete_room_keys(sender_user, &body.version, &body.room_id)?; - db.flush()?; - Ok(delete_backup_keys_for_room::v3::Response { - count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), - etag: db.key_backups.get_etag(sender_user, &body.version)?, + count: (services().key_backups.count_keys(sender_user, &body.version)? 
as u32).into(), + etag: services().key_backups.get_etag(sender_user, &body.version)?, }) } @@ -335,18 +303,15 @@ pub async fn delete_backup_keys_for_room_route( /// /// Delete a key from the backup. pub async fn delete_backup_keys_for_session_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - db.key_backups + services().key_backups .delete_room_key(sender_user, &body.version, &body.room_id, &body.session_id)?; - db.flush()?; - Ok(delete_backup_keys_for_session::v3::Response { - count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), - etag: db.key_backups.get_etag(sender_user, &body.version)?, + count: (services().key_backups.count_keys(sender_user, &body.version)? as u32).into(), + etag: services().key_backups.get_etag(sender_user, &body.version)?, }) } diff --git a/src/api/client_server/capabilities.rs b/src/api/client_server/capabilities.rs index 417ad29d..e4283b72 100644 --- a/src/api/client_server/capabilities.rs +++ b/src/api/client_server/capabilities.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, Result, Ruma}; +use crate::{Result, Ruma, services}; use ruma::api::client::discovery::get_capabilities::{ self, Capabilities, RoomVersionStability, RoomVersionsCapability, }; @@ -8,26 +8,25 @@ use std::collections::BTreeMap; /// /// Get information on the supported feature set and other relevent capabilities of this server. pub async fn get_capabilities_route( - db: DatabaseGuard, _body: Ruma, ) -> Result { let mut available = BTreeMap::new(); - if db.globals.allow_unstable_room_versions() { - for room_version in &db.globals.unstable_room_versions { + if services().globals.allow_unstable_room_versions() { + for room_version in &services().globals.unstable_room_versions { available.insert(room_version.clone(), RoomVersionStability::Stable); } } else { - for room_version in &db.globals.unstable_room_versions { + for room_version in &services().globals.unstable_room_versions { available.insert(room_version.clone(), RoomVersionStability::Unstable); } } - for room_version in &db.globals.stable_room_versions { + for room_version in &services().globals.stable_room_versions { available.insert(room_version.clone(), RoomVersionStability::Stable); } let mut capabilities = Capabilities::new(); capabilities.room_versions = RoomVersionsCapability { - default: db.globals.default_room_version(), + default: services().globals.default_room_version(), available, }; diff --git a/src/api/client_server/config.rs b/src/api/client_server/config.rs index 6184e0bc..36f4fcb7 100644 --- a/src/api/client_server/config.rs +++ b/src/api/client_server/config.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, Error, Result, Ruma}; +use crate::{Error, Result, Ruma, services}; use ruma::{ api::client::{ config::{ @@ -17,7 +17,6 @@ use serde_json::{json, value::RawValue as RawJsonValue}; /// /// Sets some account data for the sender user. 
pub async fn set_global_account_data_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -27,7 +26,7 @@ pub async fn set_global_account_data_route( let event_type = body.event_type.to_string(); - db.account_data.update( + services().account_data.update( None, sender_user, event_type.clone().into(), @@ -35,11 +34,8 @@ pub async fn set_global_account_data_route( "type": event_type, "content": data, }), - &db.globals, )?; - db.flush()?; - Ok(set_global_account_data::v3::Response {}) } @@ -47,7 +43,6 @@ pub async fn set_global_account_data_route( /// /// Sets some room account data for the sender user. pub async fn set_room_account_data_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -57,7 +52,7 @@ pub async fn set_room_account_data_route( let event_type = body.event_type.to_string(); - db.account_data.update( + services().account_data.update( Some(&body.room_id), sender_user, event_type.clone().into(), @@ -65,11 +60,8 @@ pub async fn set_room_account_data_route( "type": event_type, "content": data, }), - &db.globals, )?; - db.flush()?; - Ok(set_room_account_data::v3::Response {}) } @@ -77,12 +69,11 @@ pub async fn set_room_account_data_route( /// /// Gets some account data for the sender user. pub async fn get_global_account_data_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event: Box = db + let event: Box = services() .account_data .get(None, sender_user, body.event_type.clone().into())? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; @@ -98,12 +89,11 @@ pub async fn get_global_account_data_route( /// /// Gets some room account data for the sender user. pub async fn get_room_account_data_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event: Box = db + let event: Box = services() .account_data .get( Some(&body.room_id), diff --git a/src/api/client_server/context.rs b/src/api/client_server/context.rs index e93f5a5b..3551dcfd 100644 --- a/src/api/client_server/context.rs +++ b/src/api/client_server/context.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, Error, Result, Ruma}; +use crate::{Error, Result, Ruma, services}; use ruma::{ api::client::{context::get_context, error::ErrorKind, filter::LazyLoadOptions}, events::StateEventType, @@ -13,7 +13,6 @@ use tracing::error; /// - Only works if the user is joined (TODO: always allow, but only show events if the user was /// joined, depending on history_visibility) pub async fn get_context_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -28,7 +27,7 @@ pub async fn get_context_route( let mut lazy_loaded = HashSet::new(); - let base_pdu_id = db + let base_pdu_id = services() .rooms .get_pdu_id(&body.event_id)? .ok_or(Error::BadRequest( @@ -36,9 +35,9 @@ pub async fn get_context_route( "Base event id not found.", ))?; - let base_token = db.rooms.pdu_count(&base_pdu_id)?; + let base_token = services().rooms.pdu_count(&base_pdu_id)?; - let base_event = db + let base_event = services() .rooms .get_pdu_from_id(&base_pdu_id)? .ok_or(Error::BadRequest( @@ -48,14 +47,14 @@ pub async fn get_context_route( let room_id = base_event.room_id.clone(); - if !db.rooms.is_joined(sender_user, &room_id)? 
{ + if !services().rooms.is_joined(sender_user, &room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", )); } - if !db.rooms.lazy_load_was_sent_before( + if !services().rooms.lazy_load_was_sent_before( sender_user, sender_device, &room_id, @@ -67,7 +66,7 @@ pub async fn get_context_route( let base_event = base_event.to_room_event(); - let events_before: Vec<_> = db + let events_before: Vec<_> = services() .rooms .pdus_until(sender_user, &room_id, base_token)? .take( @@ -80,7 +79,7 @@ pub async fn get_context_route( .collect(); for (_, event) in &events_before { - if !db.rooms.lazy_load_was_sent_before( + if !services().rooms.lazy_load_was_sent_before( sender_user, sender_device, &room_id, @@ -93,7 +92,7 @@ pub async fn get_context_route( let start_token = events_before .last() - .and_then(|(pdu_id, _)| db.rooms.pdu_count(pdu_id).ok()) + .and_then(|(pdu_id, _)| services().rooms.pdu_count(pdu_id).ok()) .map(|count| count.to_string()); let events_before: Vec<_> = events_before @@ -101,7 +100,7 @@ pub async fn get_context_route( .map(|(_, pdu)| pdu.to_room_event()) .collect(); - let events_after: Vec<_> = db + let events_after: Vec<_> = services() .rooms .pdus_after(sender_user, &room_id, base_token)? .take( @@ -114,7 +113,7 @@ pub async fn get_context_route( .collect(); for (_, event) in &events_after { - if !db.rooms.lazy_load_was_sent_before( + if !services().rooms.lazy_load_was_sent_before( sender_user, sender_device, &room_id, @@ -125,23 +124,23 @@ pub async fn get_context_route( } } - let shortstatehash = match db.rooms.pdu_shortstatehash( + let shortstatehash = match services().rooms.pdu_shortstatehash( events_after .last() .map_or(&*body.event_id, |(_, e)| &*e.event_id), )? { Some(s) => s, - None => db + None => services() .rooms .current_shortstatehash(&room_id)? .expect("All rooms have state"), }; - let state_ids = db.rooms.state_full_ids(shortstatehash).await?; + let state_ids = services().rooms.state_full_ids(shortstatehash).await?; let end_token = events_after .last() - .and_then(|(pdu_id, _)| db.rooms.pdu_count(pdu_id).ok()) + .and_then(|(pdu_id, _)| services().rooms.pdu_count(pdu_id).ok()) .map(|count| count.to_string()); let events_after: Vec<_> = events_after @@ -152,10 +151,10 @@ pub async fn get_context_route( let mut state = Vec::new(); for (shortstatekey, id) in state_ids { - let (event_type, state_key) = db.rooms.get_statekey_from_short(shortstatekey)?; + let (event_type, state_key) = services().rooms.get_statekey_from_short(shortstatekey)?; if event_type != StateEventType::RoomMember { - let pdu = match db.rooms.get_pdu(&id)? { + let pdu = match services().rooms.get_pdu(&id)? { Some(pdu) => pdu, None => { error!("Pdu in state not found: {}", id); @@ -164,7 +163,7 @@ pub async fn get_context_route( }; state.push(pdu.to_state_event()); } else if !lazy_load_enabled || lazy_loaded.contains(&state_key) { - let pdu = match db.rooms.get_pdu(&id)? { + let pdu = match services().rooms.get_pdu(&id)? 
{ Some(pdu) => pdu, None => { error!("Pdu in state not found: {}", id); diff --git a/src/api/client_server/device.rs b/src/api/client_server/device.rs index b100bf22..2f559939 100644 --- a/src/api/client_server/device.rs +++ b/src/api/client_server/device.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, utils, Error, Result, Ruma}; +use crate::{utils, Error, Result, Ruma, services}; use ruma::api::client::{ device::{self, delete_device, delete_devices, get_device, get_devices, update_device}, error::ErrorKind, @@ -11,12 +11,11 @@ use super::SESSION_ID_LENGTH; /// /// Get metadata on all devices of the sender user. pub async fn get_devices_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let devices: Vec = db + let devices: Vec = services() .users .all_devices_metadata(sender_user) .filter_map(|r| r.ok()) // Filter out buggy devices @@ -29,12 +28,11 @@ pub async fn get_devices_route( /// /// Get metadata on a single device of the sender user. pub async fn get_device_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let device = db + let device = services() .users .get_device_metadata(sender_user, &body.body.device_id)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?; @@ -46,23 +44,20 @@ pub async fn get_device_route( /// /// Updates the metadata on a given device of the sender user. pub async fn update_device_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let mut device = db + let mut device = services() .users .get_device_metadata(sender_user, &body.device_id)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?; device.display_name = body.display_name.clone(); - db.users + services().users .update_device_metadata(sender_user, &body.device_id, &device)?; - db.flush()?; - Ok(update_device::v3::Response {}) } @@ -76,7 +71,6 @@ pub async fn update_device_route( /// - Forgets to-device events /// - Triggers device list updates pub async fn delete_device_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -94,13 +88,11 @@ pub async fn delete_device_route( }; if let Some(auth) = &body.auth { - let (worked, uiaainfo) = db.uiaa.try_auth( + let (worked, uiaainfo) = services().uiaa.try_auth( sender_user, sender_device, auth, &uiaainfo, - &db.users, - &db.globals, )?; if !worked { return Err(Error::Uiaa(uiaainfo)); @@ -108,16 +100,14 @@ pub async fn delete_device_route( // Success! 
} else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa + services().uiaa .create(sender_user, sender_device, &uiaainfo, &json)?; return Err(Error::Uiaa(uiaainfo)); } else { return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); } - db.users.remove_device(sender_user, &body.device_id)?; - - db.flush()?; + services().users.remove_device(sender_user, &body.device_id)?; Ok(delete_device::v3::Response {}) } @@ -134,7 +124,6 @@ pub async fn delete_device_route( /// - Forgets to-device events /// - Triggers device list updates pub async fn delete_devices_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -152,13 +141,11 @@ pub async fn delete_devices_route( }; if let Some(auth) = &body.auth { - let (worked, uiaainfo) = db.uiaa.try_auth( + let (worked, uiaainfo) = services().uiaa.try_auth( sender_user, sender_device, auth, &uiaainfo, - &db.users, - &db.globals, )?; if !worked { return Err(Error::Uiaa(uiaainfo)); @@ -166,7 +153,7 @@ pub async fn delete_devices_route( // Success! } else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa + services().uiaa .create(sender_user, sender_device, &uiaainfo, &json)?; return Err(Error::Uiaa(uiaainfo)); } else { @@ -174,10 +161,8 @@ pub async fn delete_devices_route( } for device_id in &body.devices { - db.users.remove_device(sender_user, device_id)? + services().users.remove_device(sender_user, device_id)? } - db.flush()?; - Ok(delete_devices::v3::Response {}) } diff --git a/src/api/client_server/directory.rs b/src/api/client_server/directory.rs index 4e4a3225..87493fa0 100644 --- a/src/api/client_server/directory.rs +++ b/src/api/client_server/directory.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, Database, Error, Result, Ruma}; +use crate::{Error, Result, Ruma, services}; use ruma::{ api::{ client::{ @@ -37,11 +37,9 @@ use tracing::{info, warn}; /// /// - Rooms are ordered by the number of joined members pub async fn get_public_rooms_filtered_route( - db: DatabaseGuard, body: Ruma, ) -> Result { get_public_rooms_filtered_helper( - &db, body.server.as_deref(), body.limit, body.since.as_deref(), @@ -57,11 +55,9 @@ pub async fn get_public_rooms_filtered_route( /// /// - Rooms are ordered by the number of joined members pub async fn get_public_rooms_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let response = get_public_rooms_filtered_helper( - &db, body.server.as_deref(), body.limit, body.since.as_deref(), @@ -84,17 +80,16 @@ pub async fn get_public_rooms_route( /// /// - TODO: Access control checks pub async fn set_room_visibility_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); match &body.visibility { room::Visibility::Public => { - db.rooms.set_public(&body.room_id, true)?; + services().rooms.set_public(&body.room_id, true)?; info!("{} made {} public", sender_user, body.room_id); } - room::Visibility::Private => db.rooms.set_public(&body.room_id, false)?, + room::Visibility::Private => services().rooms.set_public(&body.room_id, false)?, _ => { return Err(Error::BadRequest( ErrorKind::InvalidParam, @@ -103,8 +98,6 @@ pub async fn set_room_visibility_route( } } - db.flush()?; - Ok(set_room_visibility::v3::Response {}) } @@ -112,11 +105,10 @@ pub async fn set_room_visibility_route( /// /// Gets the visibility of a given room in the room 
directory. pub async fn get_room_visibility_route( - db: DatabaseGuard, body: Ruma, ) -> Result { Ok(get_room_visibility::v3::Response { - visibility: if db.rooms.is_public_room(&body.room_id)? { + visibility: if services().rooms.is_public_room(&body.room_id)? { room::Visibility::Public } else { room::Visibility::Private @@ -125,19 +117,17 @@ pub async fn get_room_visibility_route( } pub(crate) async fn get_public_rooms_filtered_helper( - db: &Database, server: Option<&ServerName>, limit: Option, since: Option<&str>, filter: &IncomingFilter, _network: &IncomingRoomNetwork, ) -> Result { - if let Some(other_server) = server.filter(|server| *server != db.globals.server_name().as_str()) + if let Some(other_server) = server.filter(|server| *server != services().globals.server_name().as_str()) { - let response = db + let response = services() .sending .send_federation_request( - &db.globals, other_server, federation::directory::get_public_rooms_filtered::v1::Request { limit, @@ -184,14 +174,14 @@ pub(crate) async fn get_public_rooms_filtered_helper( } } - let mut all_rooms: Vec<_> = db + let mut all_rooms: Vec<_> = services() .rooms .public_rooms() .map(|room_id| { let room_id = room_id?; let chunk = PublicRoomsChunk { - canonical_alias: db + canonical_alias: services() .rooms .room_state_get(&room_id, &StateEventType::RoomCanonicalAlias, "")? .map_or(Ok(None), |s| { @@ -201,7 +191,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( Error::bad_database("Invalid canonical alias event in database.") }) })?, - name: db + name: services() .rooms .room_state_get(&room_id, &StateEventType::RoomName, "")? .map_or(Ok(None), |s| { @@ -211,7 +201,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( Error::bad_database("Invalid room name event in database.") }) })?, - num_joined_members: db + num_joined_members: services() .rooms .room_joined_count(&room_id)? .unwrap_or_else(|| { @@ -220,7 +210,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( }) .try_into() .expect("user count should not be that big"), - topic: db + topic: services() .rooms .room_state_get(&room_id, &StateEventType::RoomTopic, "")? .map_or(Ok(None), |s| { @@ -230,7 +220,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( Error::bad_database("Invalid room topic event in database.") }) })?, - world_readable: db + world_readable: services() .rooms .room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")? .map_or(Ok(false), |s| { @@ -244,7 +234,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( ) }) })?, - guest_can_join: db + guest_can_join: services() .rooms .room_state_get(&room_id, &StateEventType::RoomGuestAccess, "")? .map_or(Ok(false), |s| { @@ -256,7 +246,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( Error::bad_database("Invalid room guest access event in database.") }) })?, - avatar_url: db + avatar_url: services() .rooms .room_state_get(&room_id, &StateEventType::RoomAvatar, "")? .map(|s| { @@ -269,7 +259,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( .transpose()? // url is now an Option so we must flatten .flatten(), - join_rule: db + join_rule: services() .rooms .room_state_get(&room_id, &StateEventType::RoomJoinRules, "")? 
.map(|s| { diff --git a/src/api/client_server/filter.rs b/src/api/client_server/filter.rs index 6522c900..e0c95066 100644 --- a/src/api/client_server/filter.rs +++ b/src/api/client_server/filter.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, Error, Result, Ruma}; +use crate::{Error, Result, Ruma, services}; use ruma::api::client::{ error::ErrorKind, filter::{create_filter, get_filter}, @@ -10,11 +10,10 @@ use ruma::api::client::{ /// /// - A user can only access their own filters pub async fn get_filter_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let filter = match db.users.get_filter(sender_user, &body.filter_id)? { + let filter = match services().users.get_filter(sender_user, &body.filter_id)? { Some(filter) => filter, None => return Err(Error::BadRequest(ErrorKind::NotFound, "Filter not found.")), }; @@ -26,11 +25,10 @@ pub async fn get_filter_route( /// /// Creates a new filter to be used by other endpoints. pub async fn create_filter_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(create_filter::v3::Response::new( - db.users.create_filter(sender_user, &body.filter)?, + services().users.create_filter(sender_user, &body.filter)?, )) } diff --git a/src/api/client_server/keys.rs b/src/api/client_server/keys.rs index c4f91cb2..698bd1ec 100644 --- a/src/api/client_server/keys.rs +++ b/src/api/client_server/keys.rs @@ -1,5 +1,5 @@ use super::SESSION_ID_LENGTH; -use crate::{database::DatabaseGuard, utils, Database, Error, Result, Ruma}; +use crate::{utils, Error, Result, Ruma, services}; use futures_util::{stream::FuturesUnordered, StreamExt}; use ruma::{ api::{ @@ -26,39 +26,34 @@ use std::collections::{BTreeMap, HashMap, HashSet}; /// - Adds one time keys /// - If there are no device keys yet: Adds device keys (TODO: merge with existing keys?) pub async fn upload_keys_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); for (key_key, key_value) in &body.one_time_keys { - db.users - .add_one_time_key(sender_user, sender_device, key_key, key_value, &db.globals)?; + services().users + .add_one_time_key(sender_user, sender_device, key_key, key_value)?; } if let Some(device_keys) = &body.device_keys { // TODO: merge this and the existing event? // This check is needed to assure that signatures are kept - if db + if services() .users .get_device_keys(sender_user, sender_device)? .is_none() { - db.users.add_device_keys( + services().users.add_device_keys( sender_user, sender_device, device_keys, - &db.rooms, - &db.globals, )?; } } - db.flush()?; - Ok(upload_keys::v3::Response { - one_time_key_counts: db.users.count_one_time_keys(sender_user, sender_device)?, + one_time_key_counts: services().users.count_one_time_keys(sender_user, sender_device)?, }) } @@ -70,7 +65,6 @@ pub async fn upload_keys_route( /// - Gets master keys, self-signing keys, user signing keys and device keys. 
/// - The master and self-signing keys contain signatures that the user is allowed to see pub async fn get_keys_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -79,7 +73,6 @@ pub async fn get_keys_route( Some(sender_user), &body.device_keys, |u| u == sender_user, - &db, ) .await?; @@ -90,12 +83,9 @@ pub async fn get_keys_route( /// /// Claims one-time keys pub async fn claim_keys_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - let response = claim_keys_helper(&body.one_time_keys, &db).await?; - - db.flush()?; + let response = claim_keys_helper(&body.one_time_keys).await?; Ok(response) } @@ -106,7 +96,6 @@ pub async fn claim_keys_route( /// /// - Requires UIAA to verify password pub async fn upload_signing_keys_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -124,13 +113,11 @@ pub async fn upload_signing_keys_route( }; if let Some(auth) = &body.auth { - let (worked, uiaainfo) = db.uiaa.try_auth( + let (worked, uiaainfo) = services().uiaa.try_auth( sender_user, sender_device, auth, &uiaainfo, - &db.users, - &db.globals, )?; if !worked { return Err(Error::Uiaa(uiaainfo)); @@ -138,7 +125,7 @@ pub async fn upload_signing_keys_route( // Success! } else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa + services().uiaa .create(sender_user, sender_device, &uiaainfo, &json)?; return Err(Error::Uiaa(uiaainfo)); } else { @@ -146,18 +133,14 @@ pub async fn upload_signing_keys_route( } if let Some(master_key) = &body.master_key { - db.users.add_cross_signing_keys( + services().users.add_cross_signing_keys( sender_user, master_key, &body.self_signing_key, &body.user_signing_key, - &db.rooms, - &db.globals, )?; } - db.flush()?; - Ok(upload_signing_keys::v3::Response {}) } @@ -165,7 +148,6 @@ pub async fn upload_signing_keys_route( /// /// Uploads end-to-end key signatures from the sender user. pub async fn upload_signatures_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -205,20 +187,16 @@ pub async fn upload_signatures_route( ))? 
.to_owned(), ); - db.users.sign_key( + services().users.sign_key( user_id, key_id, signature, sender_user, - &db.rooms, - &db.globals, )?; } } } - db.flush()?; - Ok(upload_signatures::v3::Response { failures: BTreeMap::new(), // TODO: integrate }) @@ -230,7 +208,6 @@ pub async fn upload_signatures_route( /// /// - TODO: left users pub async fn get_key_changes_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -238,7 +215,7 @@ pub async fn get_key_changes_route( let mut device_list_updates = HashSet::new(); device_list_updates.extend( - db.users + services().users .keys_changed( sender_user.as_str(), body.from @@ -253,9 +230,9 @@ pub async fn get_key_changes_route( .filter_map(|r| r.ok()), ); - for room_id in db.rooms.rooms_joined(sender_user).filter_map(|r| r.ok()) { + for room_id in services().rooms.rooms_joined(sender_user).filter_map(|r| r.ok()) { device_list_updates.extend( - db.users + services().users .keys_changed( &room_id.to_string(), body.from.parse().map_err(|_| { @@ -278,7 +255,6 @@ pub(crate) async fn get_keys_helper bool>( sender_user: Option<&UserId>, device_keys_input: &BTreeMap, Vec>>, allowed_signatures: F, - db: &Database, ) -> Result { let mut master_keys = BTreeMap::new(); let mut self_signing_keys = BTreeMap::new(); @@ -290,7 +266,7 @@ pub(crate) async fn get_keys_helper bool>( for (user_id, device_ids) in device_keys_input { let user_id: &UserId = &**user_id; - if user_id.server_name() != db.globals.server_name() { + if user_id.server_name() != services().globals.server_name() { get_over_federation .entry(user_id.server_name()) .or_insert_with(Vec::new) @@ -300,10 +276,10 @@ pub(crate) async fn get_keys_helper bool>( if device_ids.is_empty() { let mut container = BTreeMap::new(); - for device_id in db.users.all_device_ids(user_id) { + for device_id in services().users.all_device_ids(user_id) { let device_id = device_id?; - if let Some(mut keys) = db.users.get_device_keys(user_id, &device_id)? { - let metadata = db + if let Some(mut keys) = services().users.get_device_keys(user_id, &device_id)? { + let metadata = services() .users .get_device_metadata(user_id, &device_id)? .ok_or_else(|| { @@ -319,8 +295,8 @@ pub(crate) async fn get_keys_helper bool>( } else { for device_id in device_ids { let mut container = BTreeMap::new(); - if let Some(mut keys) = db.users.get_device_keys(user_id, device_id)? { - let metadata = db.users.get_device_metadata(user_id, device_id)?.ok_or( + if let Some(mut keys) = services().users.get_device_keys(user_id, device_id)? { + let metadata = services().users.get_device_metadata(user_id, device_id)?.ok_or( Error::BadRequest( ErrorKind::InvalidParam, "Tried to get keys for nonexistent device.", @@ -335,17 +311,17 @@ pub(crate) async fn get_keys_helper bool>( } } - if let Some(master_key) = db.users.get_master_key(user_id, &allowed_signatures)? { + if let Some(master_key) = services().users.get_master_key(user_id, &allowed_signatures)? { master_keys.insert(user_id.to_owned(), master_key); } - if let Some(self_signing_key) = db + if let Some(self_signing_key) = services() .users .get_self_signing_key(user_id, &allowed_signatures)? { self_signing_keys.insert(user_id.to_owned(), self_signing_key); } if Some(user_id) == sender_user { - if let Some(user_signing_key) = db.users.get_user_signing_key(user_id)? { + if let Some(user_signing_key) = services().users.get_user_signing_key(user_id)? 
{ user_signing_keys.insert(user_id.to_owned(), user_signing_key); } } @@ -362,9 +338,8 @@ pub(crate) async fn get_keys_helper bool>( } ( server, - db.sending + services().sending .send_federation_request( - &db.globals, server, federation::keys::get_keys::v1::Request { device_keys: device_keys_input_fed, @@ -417,14 +392,13 @@ fn add_unsigned_device_display_name( pub(crate) async fn claim_keys_helper( one_time_keys_input: &BTreeMap, BTreeMap, DeviceKeyAlgorithm>>, - db: &Database, ) -> Result { let mut one_time_keys = BTreeMap::new(); let mut get_over_federation = BTreeMap::new(); for (user_id, map) in one_time_keys_input { - if user_id.server_name() != db.globals.server_name() { + if user_id.server_name() != services().globals.server_name() { get_over_federation .entry(user_id.server_name()) .or_insert_with(Vec::new) @@ -434,8 +408,8 @@ pub(crate) async fn claim_keys_helper( let mut container = BTreeMap::new(); for (device_id, key_algorithm) in map { if let Some(one_time_keys) = - db.users - .take_one_time_key(user_id, device_id, key_algorithm, &db.globals)? + services().users + .take_one_time_key(user_id, device_id, key_algorithm)? { let mut c = BTreeMap::new(); c.insert(one_time_keys.0, one_time_keys.1); @@ -453,10 +427,9 @@ pub(crate) async fn claim_keys_helper( one_time_keys_input_fed.insert(user_id.clone(), keys.clone()); } // Ignore failures - if let Ok(keys) = db + if let Ok(keys) = services() .sending .send_federation_request( - &db.globals, server, federation::keys::claim_keys::v1::Request { one_time_keys: one_time_keys_input_fed, diff --git a/src/api/client_server/media.rs b/src/api/client_server/media.rs index a9a6d6cd..f0da0849 100644 --- a/src/api/client_server/media.rs +++ b/src/api/client_server/media.rs @@ -1,6 +1,5 @@ use crate::{ - database::{media::FileMeta, DatabaseGuard}, - utils, Error, Result, Ruma, + utils, Error, Result, Ruma, services, service::media::FileMeta, }; use ruma::api::client::{ error::ErrorKind, @@ -16,11 +15,10 @@ const MXC_LENGTH: usize = 32; /// /// Returns max upload size. 
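[Editor's note] The pattern running through all of these hunks is the same: handlers stop taking a `db: DatabaseGuard` argument (and stop threading `&db.globals` / `&db.rooms` through helper calls) and instead reach their dependencies through a global `services()` accessor. A minimal, self-contained sketch of such an accessor is shown below; it uses `std::sync::OnceLock` and illustrative stand-in types, not Conduit's real `Services` struct, whose wiring may differ.

    use std::sync::OnceLock;

    // Illustrative stand-ins for the real service objects.
    struct Globals { server_name: String }
    struct Services { globals: Globals }

    // Set once at startup, read from every request handler afterwards.
    static SERVICES: OnceLock<Services> = OnceLock::new();

    fn init_services(services: Services) {
        SERVICES
            .set(services)
            .unwrap_or_else(|_| panic!("services initialised twice"));
    }

    // The accessor handlers call instead of receiving a database guard.
    fn services() -> &'static Services {
        SERVICES.get().expect("services() called before initialisation")
    }

    fn main() {
        init_services(Services {
            globals: Globals { server_name: "example.org".to_owned() },
        });
        assert_eq!(services().globals.server_name, "example.org");
    }

Once something like this is initialised at startup, a route handler no longer needs the request to carry a database handle, which is exactly what the signature changes in this patch reflect.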
pub async fn get_media_config_route( - db: DatabaseGuard, _body: Ruma, ) -> Result { Ok(get_media_config::v3::Response { - upload_size: db.globals.max_request_size().into(), + upload_size: services().globals.max_request_size().into(), }) } @@ -31,19 +29,17 @@ pub async fn get_media_config_route( /// - Some metadata will be saved in the database /// - Media will be saved in the media/ directory pub async fn create_content_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let mxc = format!( "mxc://{}/{}", - db.globals.server_name(), + services().globals.server_name(), utils::random_string(MXC_LENGTH) ); - db.media + services().media .create( mxc.clone(), - &db.globals, &body .filename .as_ref() @@ -54,8 +50,6 @@ pub async fn create_content_route( ) .await?; - db.flush()?; - Ok(create_content::v3::Response { content_uri: mxc.try_into().expect("Invalid mxc:// URI"), blurhash: None, @@ -63,15 +57,13 @@ pub async fn create_content_route( } pub async fn get_remote_content( - db: &DatabaseGuard, mxc: &str, server_name: &ruma::ServerName, media_id: &str, ) -> Result { - let content_response = db + let content_response = services() .sending .send_federation_request( - &db.globals, server_name, get_content::v3::Request { allow_remote: false, @@ -81,10 +73,9 @@ pub async fn get_remote_content( ) .await?; - db.media + services().media .create( mxc.to_string(), - &db.globals, &content_response.content_disposition.as_deref(), &content_response.content_type.as_deref(), &content_response.file, @@ -100,7 +91,6 @@ pub async fn get_remote_content( /// /// - Only allows federation if `allow_remote` is true pub async fn get_content_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); @@ -109,16 +99,16 @@ pub async fn get_content_route( content_disposition, content_type, file, - }) = db.media.get(&db.globals, &mxc).await? + }) = services().media.get(&mxc).await? { Ok(get_content::v3::Response { file, content_type, content_disposition, }) - } else if &*body.server_name != db.globals.server_name() && body.allow_remote { + } else if &*body.server_name != services().globals.server_name() && body.allow_remote { let remote_content_response = - get_remote_content(&db, &mxc, &body.server_name, &body.media_id).await?; + get_remote_content(&mxc, &body.server_name, &body.media_id).await?; Ok(remote_content_response) } else { Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) @@ -131,7 +121,6 @@ pub async fn get_content_route( /// /// - Only allows federation if `allow_remote` is true pub async fn get_content_as_filename_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); @@ -140,16 +129,16 @@ pub async fn get_content_as_filename_route( content_disposition: _, content_type, file, - }) = db.media.get(&db.globals, &mxc).await? + }) = services().media.get(&mxc).await? 
{ Ok(get_content_as_filename::v3::Response { file, content_type, content_disposition: Some(format!("inline; filename={}", body.filename)), }) - } else if &*body.server_name != db.globals.server_name() && body.allow_remote { + } else if &*body.server_name != services().globals.server_name() && body.allow_remote { let remote_content_response = - get_remote_content(&db, &mxc, &body.server_name, &body.media_id).await?; + get_remote_content(&mxc, &body.server_name, &body.media_id).await?; Ok(get_content_as_filename::v3::Response { content_disposition: Some(format!("inline: filename={}", body.filename)), @@ -167,18 +156,16 @@ pub async fn get_content_as_filename_route( /// /// - Only allows federation if `allow_remote` is true pub async fn get_content_thumbnail_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); if let Some(FileMeta { content_type, file, .. - }) = db + }) = services() .media .get_thumbnail( &mxc, - &db.globals, body.width .try_into() .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?, @@ -189,11 +176,10 @@ pub async fn get_content_thumbnail_route( .await? { Ok(get_content_thumbnail::v3::Response { file, content_type }) - } else if &*body.server_name != db.globals.server_name() && body.allow_remote { - let get_thumbnail_response = db + } else if &*body.server_name != services().globals.server_name() && body.allow_remote { + let get_thumbnail_response = services() .sending .send_federation_request( - &db.globals, &body.server_name, get_content_thumbnail::v3::Request { allow_remote: false, @@ -206,10 +192,9 @@ pub async fn get_content_thumbnail_route( ) .await?; - db.media + services().media .upload_thumbnail( mxc, - &db.globals, &None, &get_thumbnail_response.content_type, body.width.try_into().expect("all UInts are valid u32s"), diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index ecd26d1a..b000ec1b 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -1,9 +1,3 @@ -use crate::{ - client_server, - database::DatabaseGuard, - pdu::{EventHash, PduBuilder, PduEvent}, - server_server, utils, Database, Error, Result, Ruma, -}; use ruma::{ api::{ client::{ @@ -29,13 +23,17 @@ use ruma::{ }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ - collections::{hash_map::Entry, BTreeMap, HashMap}, + collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, iter, sync::{Arc, RwLock}, time::{Duration, Instant}, }; use tracing::{debug, error, warn}; +use crate::{services, PduEvent, service::pdu::{gen_event_id_canonical_json, PduBuilder}, Error, api::{server_server}, utils, Ruma}; + +use super::get_alias_helper; + /// # `POST /_matrix/client/r0/rooms/{roomId}/join` /// /// Tries to join the sender user into a room. @@ -43,14 +41,13 @@ use tracing::{debug, error, warn}; /// - If the server knowns about this room: creates the join event and does auth rules locally /// - If the server does not know about the room: asks other servers over federation pub async fn join_room_by_id_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut servers = Vec::new(); // There is no body.server_name for /roomId/join servers.extend( - db.rooms + services().rooms .invite_state(sender_user, &body.room_id)? 
.unwrap_or_default() .iter() @@ -64,7 +61,6 @@ pub async fn join_room_by_id_route( servers.push(body.room_id.server_name().to_owned()); let ret = join_room_by_id_helper( - &db, body.sender_user.as_deref(), &body.room_id, &servers, @@ -72,8 +68,6 @@ pub async fn join_room_by_id_route( ) .await; - db.flush()?; - ret } @@ -84,7 +78,6 @@ pub async fn join_room_by_id_route( /// - If the server knowns about this room: creates the join event and does auth rules locally /// - If the server does not know about the room: asks other servers over federation pub async fn join_room_by_id_or_alias_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_deref().expect("user is authenticated"); @@ -94,7 +87,7 @@ pub async fn join_room_by_id_or_alias_route( Ok(room_id) => { let mut servers = body.server_name.clone(); servers.extend( - db.rooms + services().rooms .invite_state(sender_user, &room_id)? .unwrap_or_default() .iter() @@ -109,14 +102,13 @@ pub async fn join_room_by_id_or_alias_route( (servers, room_id) } Err(room_alias) => { - let response = client_server::get_alias_helper(&db, &room_alias).await?; + let response = get_alias_helper(&room_alias).await?; (response.servers.into_iter().collect(), response.room_id) } }; let join_room_response = join_room_by_id_helper( - &db, Some(sender_user), &room_id, &servers, @@ -124,8 +116,6 @@ pub async fn join_room_by_id_or_alias_route( ) .await?; - db.flush()?; - Ok(join_room_by_id_or_alias::v3::Response { room_id: join_room_response.room_id, }) @@ -137,14 +127,11 @@ pub async fn join_room_by_id_or_alias_route( /// /// - This should always work if the user is currently joined. pub async fn leave_room_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - db.rooms.leave_room(sender_user, &body.room_id, &db).await?; - - db.flush()?; + services().rooms.leave_room(sender_user, &body.room_id).await?; Ok(leave_room::v3::Response::new()) } @@ -153,14 +140,12 @@ pub async fn leave_room_route( /// /// Tries to send an invite event into the room. pub async fn invite_user_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if let invite_user::v3::IncomingInvitationRecipient::UserId { user_id } = &body.recipient { - invite_helper(sender_user, user_id, &body.room_id, &db, false).await?; - db.flush()?; + invite_helper(sender_user, user_id, &body.room_id, false).await?; Ok(invite_user::v3::Response {}) } else { Err(Error::BadRequest(ErrorKind::NotFound, "User not found.")) @@ -171,13 +156,12 @@ pub async fn invite_user_route( /// /// Tries to send a kick event into the room. 
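[Editor's note] Both join handlers above build their list of candidate homeservers by taking the server part of each `sender` in the stripped invite-state events (plus the room ID's own server). A rough stand-alone sketch of that extraction follows; it uses plain `serde_json` values instead of ruma's stripped-event types and a naive user-ID split, both simplifications for illustration only (assumes a `serde_json` dependency).

    use std::collections::HashSet;

    // Given the raw JSON of stripped state events, collect the server names of
    // their senders; these are the servers that can be asked to assist a join.
    fn candidate_servers(stripped_events: &[serde_json::Value]) -> HashSet<String> {
        stripped_events
            .iter()
            .filter_map(|event| event.get("sender"))
            .filter_map(|sender| sender.as_str())
            // "@alice:example.org" -> "example.org" (crude; real code parses a UserId)
            .filter_map(|user_id| user_id.split_once(':').map(|(_, server)| server.to_owned()))
            .collect()
    }

    fn main() {
        let events = vec![
            serde_json::json!({ "type": "m.room.member", "sender": "@alice:example.org" }),
            serde_json::json!({ "type": "m.room.create", "sender": "@bob:matrix.org" }),
        ];
        let servers = candidate_servers(&events);
        assert!(servers.contains("example.org") && servers.contains("matrix.org"));
    }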
pub async fn kick_user_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut event: RoomMemberEventContent = serde_json::from_str( - db.rooms + services().rooms .room_state_get( &body.room_id, &StateEventType::RoomMember, @@ -196,7 +180,7 @@ pub async fn kick_user_route( // TODO: reason let mutex_state = Arc::clone( - db.globals + services().globals .roomid_mutex_state .write() .unwrap() @@ -205,7 +189,7 @@ pub async fn kick_user_route( ); let state_lock = mutex_state.lock().await; - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), @@ -215,14 +199,11 @@ pub async fn kick_user_route( }, sender_user, &body.room_id, - &db, &state_lock, )?; drop(state_lock); - db.flush()?; - Ok(kick_user::v3::Response::new()) } @@ -230,14 +211,13 @@ pub async fn kick_user_route( /// /// Tries to send a ban event into the room. pub async fn ban_user_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // TODO: reason - let event = db + let event = services() .rooms .room_state_get( &body.room_id, @@ -247,11 +227,11 @@ pub async fn ban_user_route( .map_or( Ok(RoomMemberEventContent { membership: MembershipState::Ban, - displayname: db.users.displayname(&body.user_id)?, - avatar_url: db.users.avatar_url(&body.user_id)?, + displayname: services().users.displayname(&body.user_id)?, + avatar_url: services().users.avatar_url(&body.user_id)?, is_direct: None, third_party_invite: None, - blurhash: db.users.blurhash(&body.user_id)?, + blurhash: services().users.blurhash(&body.user_id)?, reason: None, join_authorized_via_users_server: None, }), @@ -266,7 +246,7 @@ pub async fn ban_user_route( )?; let mutex_state = Arc::clone( - db.globals + services().globals .roomid_mutex_state .write() .unwrap() @@ -275,7 +255,7 @@ pub async fn ban_user_route( ); let state_lock = mutex_state.lock().await; - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), @@ -285,14 +265,11 @@ pub async fn ban_user_route( }, sender_user, &body.room_id, - &db, &state_lock, )?; drop(state_lock); - db.flush()?; - Ok(ban_user::v3::Response::new()) } @@ -300,13 +277,12 @@ pub async fn ban_user_route( /// /// Tries to send an unban event into the room. 
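[Editor's note] kick, ban and unban above (like most PDU-producing handlers in this patch) serialise room-state writes through `roomid_mutex_state`: one `Arc`'d async mutex per room, stored in a map behind a `std` RwLock, with the `Arc` cloned out before awaiting so the map guard is not held across the `.await`. A small sketch of that locking shape is below; it assumes a tokio dependency and uses placeholder types rather than the crate's own.

    use std::collections::HashMap;
    use std::sync::{Arc, RwLock};
    use tokio::sync::Mutex;

    type RoomId = String; // stand-in for ruma's RoomId

    #[derive(Default)]
    struct Globals {
        // One async mutex per room; the outer RwLock only guards the map itself.
        roomid_mutex_state: RwLock<HashMap<RoomId, Arc<Mutex<()>>>>,
    }

    async fn with_room_lock(globals: &Globals, room_id: &str) {
        // Clone the Arc out of the map so the RwLock guard drops before awaiting.
        let mutex = Arc::clone(
            globals
                .roomid_mutex_state
                .write()
                .unwrap()
                .entry(room_id.to_owned())
                .or_default(),
        );
        let _state_lock = mutex.lock().await;
        // ... build and append the PDU while holding the per-room lock ...
    }

    #[tokio::main]
    async fn main() {
        let globals = Globals::default();
        with_room_lock(&globals, "!room:example.org").await;
    }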
pub async fn unban_user_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut event: RoomMemberEventContent = serde_json::from_str( - db.rooms + services().rooms .room_state_get( &body.room_id, &StateEventType::RoomMember, @@ -324,7 +300,7 @@ pub async fn unban_user_route( event.membership = MembershipState::Leave; let mutex_state = Arc::clone( - db.globals + services().globals .roomid_mutex_state .write() .unwrap() @@ -333,7 +309,7 @@ pub async fn unban_user_route( ); let state_lock = mutex_state.lock().await; - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), @@ -343,14 +319,11 @@ pub async fn unban_user_route( }, sender_user, &body.room_id, - &db, &state_lock, )?; drop(state_lock); - db.flush()?; - Ok(unban_user::v3::Response::new()) } @@ -363,14 +336,11 @@ pub async fn unban_user_route( /// Note: Other devices of the user have no way of knowing the room was forgotten, so this has to /// be called from every device pub async fn forget_room_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - db.rooms.forget(&body.room_id, sender_user)?; - - db.flush()?; + services().rooms.forget(&body.room_id, sender_user)?; Ok(forget_room::v3::Response::new()) } @@ -379,13 +349,12 @@ pub async fn forget_room_route( /// /// Lists all rooms the user has joined. pub async fn joined_rooms_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(joined_rooms::v3::Response { - joined_rooms: db + joined_rooms: services() .rooms .rooms_joined(sender_user) .filter_map(|r| r.ok()) @@ -399,13 +368,12 @@ pub async fn joined_rooms_route( /// /// - Only works if the user is currently joined pub async fn get_member_events_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // TODO: check history visibility? - if !db.rooms.is_joined(sender_user, &body.room_id)? { + if !services().rooms.is_joined(sender_user, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -413,7 +381,7 @@ pub async fn get_member_events_route( } Ok(get_member_events::v3::Response { - chunk: db + chunk: services() .rooms .room_state_full(&body.room_id) .await? @@ -431,12 +399,11 @@ pub async fn get_member_events_route( /// - The sender user must be in the room /// - TODO: An appservice just needs a puppet joined pub async fn joined_members_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !db.rooms.is_joined(sender_user, &body.room_id)? { + if !services().rooms.is_joined(sender_user, &body.room_id)? 
{ return Err(Error::BadRequest( ErrorKind::Forbidden, "You aren't a member of the room.", @@ -444,9 +411,9 @@ pub async fn joined_members_route( } let mut joined = BTreeMap::new(); - for user_id in db.rooms.room_members(&body.room_id).filter_map(|r| r.ok()) { - let display_name = db.users.displayname(&user_id)?; - let avatar_url = db.users.avatar_url(&user_id)?; + for user_id in services().rooms.room_members(&body.room_id).filter_map(|r| r.ok()) { + let display_name = services().users.displayname(&user_id)?; + let avatar_url = services().users.avatar_url(&user_id)?; joined.insert( user_id, @@ -460,9 +427,7 @@ pub async fn joined_members_route( Ok(joined_members::v3::Response { joined }) } -#[tracing::instrument(skip(db))] async fn join_room_by_id_helper( - db: &Database, sender_user: Option<&UserId>, room_id: &RoomId, servers: &[Box], @@ -471,7 +436,7 @@ async fn join_room_by_id_helper( let sender_user = sender_user.expect("user is authenticated"); let mutex_state = Arc::clone( - db.globals + services().globals .roomid_mutex_state .write() .unwrap() @@ -481,21 +446,20 @@ async fn join_room_by_id_helper( let state_lock = mutex_state.lock().await; // Ask a remote server if we don't have this room - if !db.rooms.exists(room_id)? { + if !services().rooms.exists(room_id)? { let mut make_join_response_and_server = Err(Error::BadServerResponse( "No server available to assist in joining.", )); for remote_server in servers { - let make_join_response = db + let make_join_response = services() .sending .send_federation_request( - &db.globals, remote_server, federation::membership::prepare_join_event::v1::Request { room_id, user_id: sender_user, - ver: &db.globals.supported_room_versions(), + ver: &services().globals.supported_room_versions(), }, ) .await; @@ -510,7 +474,7 @@ async fn join_room_by_id_helper( let (make_join_response, remote_server) = make_join_response_and_server?; let room_version = match make_join_response.room_version { - Some(room_version) if db.rooms.is_supported_version(&db, &room_version) => room_version, + Some(room_version) if services().rooms.is_supported_version(&room_version) => room_version, _ => return Err(Error::BadServerResponse("Room version is not supported")), }; @@ -522,7 +486,7 @@ async fn join_room_by_id_helper( // TODO: Is origin needed? 
join_event_stub.insert( "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), + CanonicalJsonValue::String(services().globals.server_name().as_str().to_owned()), ); join_event_stub.insert( "origin_server_ts".to_owned(), @@ -536,11 +500,11 @@ async fn join_room_by_id_helper( "content".to_owned(), to_canonical_value(RoomMemberEventContent { membership: MembershipState::Join, - displayname: db.users.displayname(sender_user)?, - avatar_url: db.users.avatar_url(sender_user)?, + displayname: services().users.displayname(sender_user)?, + avatar_url: services().users.avatar_url(sender_user)?, is_direct: None, third_party_invite: None, - blurhash: db.users.blurhash(sender_user)?, + blurhash: services().users.blurhash(sender_user)?, reason: None, join_authorized_via_users_server: None, }) @@ -552,8 +516,8 @@ async fn join_room_by_id_helper( // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), + services().globals.server_name().as_str(), + services().globals.keypair(), &mut join_event_stub, &room_version, ) @@ -577,10 +541,9 @@ async fn join_room_by_id_helper( // It has enough fields to be called a proper event now let join_event = join_event_stub; - let send_join_response = db + let send_join_response = services() .sending .send_federation_request( - &db.globals, remote_server, federation::membership::create_join_event::v2::Request { room_id, @@ -590,7 +553,7 @@ async fn join_room_by_id_helper( ) .await?; - db.rooms.get_or_create_shortroomid(room_id, &db.globals)?; + services().rooms.get_or_create_shortroomid(room_id, &services().globals)?; let parsed_pdu = PduEvent::from_id_val(event_id, join_event.clone()) .map_err(|_| Error::BadServerResponse("Invalid join event PDU."))?; @@ -602,7 +565,6 @@ async fn join_room_by_id_helper( &send_join_response, &room_version, &pub_key_map, - db, ) .await?; @@ -610,7 +572,7 @@ async fn join_room_by_id_helper( .room_state .state .iter() - .map(|pdu| validate_and_add_event_id(pdu, &room_version, &pub_key_map, db)) + .map(|pdu| validate_and_add_event_id(pdu, &room_version, &pub_key_map)) { let (event_id, value) = match result { Ok(t) => t, @@ -622,29 +584,27 @@ async fn join_room_by_id_helper( Error::BadServerResponse("Invalid PDU in send_join response.") })?; - db.rooms.add_pdu_outlier(&event_id, &value)?; + services().rooms.add_pdu_outlier(&event_id, &value)?; if let Some(state_key) = &pdu.state_key { - let shortstatekey = db.rooms.get_or_create_shortstatekey( + let shortstatekey = services().rooms.get_or_create_shortstatekey( &pdu.kind.to_string().into(), state_key, - &db.globals, )?; state.insert(shortstatekey, pdu.event_id.clone()); } } - let incoming_shortstatekey = db.rooms.get_or_create_shortstatekey( + let incoming_shortstatekey = services().rooms.get_or_create_shortstatekey( &parsed_pdu.kind.to_string().into(), parsed_pdu .state_key .as_ref() .expect("Pdu is a membership state event"), - &db.globals, )?; state.insert(incoming_shortstatekey, parsed_pdu.event_id.clone()); - let create_shortstatekey = db + let create_shortstatekey = services() .rooms .get_shortstatekey(&StateEventType::RoomCreate, "")? 
.expect("Room exists"); @@ -653,56 +613,54 @@ async fn join_room_by_id_helper( return Err(Error::BadServerResponse("State contained no create event.")); } - db.rooms.force_state( + services().rooms.force_state( room_id, state .into_iter() - .map(|(k, id)| db.rooms.compress_state_event(k, &id, &db.globals)) + .map(|(k, id)| services().rooms.compress_state_event(k, &id)) .collect::>()?, - db, )?; for result in send_join_response .room_state .auth_chain .iter() - .map(|pdu| validate_and_add_event_id(pdu, &room_version, &pub_key_map, db)) + .map(|pdu| validate_and_add_event_id(pdu, &room_version, &pub_key_map)) { let (event_id, value) = match result { Ok(t) => t, Err(_) => continue, }; - db.rooms.add_pdu_outlier(&event_id, &value)?; + services().rooms.add_pdu_outlier(&event_id, &value)?; } // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. - let statehashid = db.rooms.append_to_state(&parsed_pdu, &db.globals)?; + let statehashid = services().rooms.append_to_state(&parsed_pdu)?; - db.rooms.append_pdu( + services().rooms.append_pdu( &parsed_pdu, join_event, iter::once(&*parsed_pdu.event_id), - db, )?; // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist - db.rooms.set_room_state(room_id, statehashid)?; + services().rooms.set_room_state(room_id, statehashid)?; } else { let event = RoomMemberEventContent { membership: MembershipState::Join, - displayname: db.users.displayname(sender_user)?, - avatar_url: db.users.avatar_url(sender_user)?, + displayname: services().users.displayname(sender_user)?, + avatar_url: services().users.avatar_url(sender_user)?, is_direct: None, third_party_invite: None, - blurhash: db.users.blurhash(sender_user)?, + blurhash: services().users.blurhash(sender_user)?, reason: None, join_authorized_via_users_server: None, }; - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), @@ -712,15 +670,13 @@ async fn join_room_by_id_helper( }, sender_user, room_id, - db, + services(), &state_lock, )?; } drop(state_lock); - db.flush()?; - Ok(join_room_by_id::v3::Response::new(room_id.to_owned())) } @@ -728,7 +684,6 @@ fn validate_and_add_event_id( pdu: &RawJsonValue, room_version: &RoomVersionId, pub_key_map: &RwLock>>, - db: &Database, ) -> Result<(Box, CanonicalJsonObject)> { let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); @@ -741,14 +696,14 @@ fn validate_and_add_event_id( )) .expect("ruma's reference hashes are valid event ids"); - let back_off = |id| match db.globals.bad_event_ratelimiter.write().unwrap().entry(id) { + let back_off = |id| match services().globals.bad_event_ratelimiter.write().unwrap().entry(id) { Entry::Vacant(e) => { e.insert((Instant::now(), 1)); } Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), }; - if let Some((time, tries)) = db + if let Some((time, tries)) = services() .globals .bad_event_ratelimiter .read() @@ -791,13 +746,12 @@ pub(crate) async fn invite_helper<'a>( sender_user: &UserId, user_id: &UserId, room_id: &RoomId, - db: &Database, is_direct: bool, ) -> Result<()> { - if user_id.server_name() != db.globals.server_name() { - let (room_version_id, pdu_json, invite_room_state) = { + if 
user_id.server_name() != services().globals.server_name() { + let (pdu_json, invite_room_state) = { let mutex_state = Arc::clone( - db.globals + services().globals .roomid_mutex_state .write() .unwrap() @@ -818,36 +772,38 @@ pub(crate) async fn invite_helper<'a>( }) .expect("member event is valid value"); - let state_key = user_id.to_string(); - let kind = StateEventType::RoomMember; - - let (pdu, pdu_json) = create_hash_and_sign_event(); + let (pdu, pdu_json) = services().rooms.timeline.create_hash_and_sign_event(PduBuilder { + event_type: RoomEventType::RoomMember, + content, + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, sender_user, room_id, &state_lock); - let invite_room_state = db.rooms.calculate_invite_state(&pdu)?; + let invite_room_state = services().rooms.calculate_invite_state(&pdu)?; drop(state_lock); - (room_version_id, pdu_json, invite_room_state) + (pdu_json, invite_room_state) }; // Generate event id let expected_event_id = format!( "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) + ruma::signatures::reference_hash(&pdu_json, &services().rooms.state.get_room_version(&room_id)?) .expect("ruma can calculate reference hashes") ); let expected_event_id = <&EventId>::try_from(expected_event_id.as_str()) .expect("ruma's reference hashes are valid event ids"); - let response = db + let response = services() .sending .send_federation_request( - &db.globals, user_id.server_name(), create_invite::v2::Request { room_id, event_id: expected_event_id, - room_version: &room_version_id, + room_version: &services().state.get_room_version(&room_id)?, event: &PduEvent::convert_to_outgoing_federation_event(pdu_json.clone()), invite_room_state: &invite_room_state, }, @@ -857,7 +813,7 @@ pub(crate) async fn invite_helper<'a>( let pub_key_map = RwLock::new(BTreeMap::new()); // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(&response.event, &db) + let (event_id, value) = match gen_event_id_canonical_json(&response.event) { Ok(t) => t, Err(_) => { @@ -882,13 +838,12 @@ pub(crate) async fn invite_helper<'a>( ) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; - let pdu_id = server_server::handle_incoming_pdu( + let pdu_id = services().rooms.event_handler.handle_incoming_pdu( &origin, &event_id, room_id, value, true, - db, &pub_key_map, ) .await @@ -903,18 +858,18 @@ pub(crate) async fn invite_helper<'a>( "Could not accept incoming PDU as timeline event.", ))?; - let servers = db + let servers = services() .rooms .room_servers(room_id) .filter_map(|r| r.ok()) - .filter(|server| &**server != db.globals.server_name()); + .filter(|server| &**server != services().globals.server_name()); - db.sending.send_pdu(servers, &pdu_id)?; + services().sending.send_pdu(servers, &pdu_id)?; return Ok(()); } - if !db.rooms.is_joined(sender_user, &room_id)? { + if !services().rooms.is_joined(sender_user, &room_id)? 
{ return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -922,7 +877,7 @@ pub(crate) async fn invite_helper<'a>( } let mutex_state = Arc::clone( - db.globals + services().globals .roomid_mutex_state .write() .unwrap() @@ -931,16 +886,16 @@ pub(crate) async fn invite_helper<'a>( ); let state_lock = mutex_state.lock().await; - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Invite, - displayname: db.users.displayname(user_id)?, - avatar_url: db.users.avatar_url(user_id)?, + displayname: services().users.displayname(user_id)?, + avatar_url: services().users.avatar_url(user_id)?, is_direct: Some(is_direct), third_party_invite: None, - blurhash: db.users.blurhash(user_id)?, + blurhash: services().users.blurhash(user_id)?, reason: None, join_authorized_via_users_server: None, }) @@ -951,7 +906,6 @@ pub(crate) async fn invite_helper<'a>( }, sender_user, room_id, - db, &state_lock, )?; @@ -960,208 +914,196 @@ pub(crate) async fn invite_helper<'a>( Ok(()) } - // Make a user leave all their joined rooms - #[tracing::instrument(skip(self, db))] - pub async fn leave_all_rooms(&self, user_id: &UserId, db: &Database) -> Result<()> { - let all_rooms = db - .rooms - .rooms_joined(user_id) - .chain(db.rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) - .collect::>(); - - for room_id in all_rooms { - let room_id = match room_id { - Ok(room_id) => room_id, - Err(_) => continue, - }; - - let _ = self.leave_room(user_id, &room_id, db).await; - } +// Make a user leave all their joined rooms +pub async fn leave_all_rooms(user_id: &UserId) -> Result<()> { + let all_rooms = services() + .rooms + .rooms_joined(user_id) + .chain(services().rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) + .collect::>(); + + for room_id in all_rooms { + let room_id = match room_id { + Ok(room_id) => room_id, + Err(_) => continue, + }; - Ok(()) + let _ = leave_room(user_id, &room_id).await; } - #[tracing::instrument(skip(self, db))] - pub async fn leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - // Ask a remote server if we don't have this room - if !self.exists(room_id)? && room_id.server_name() != db.globals.server_name() { - if let Err(e) = self.remote_leave_room(user_id, room_id, db).await { - warn!("Failed to leave room {} remotely: {}", user_id, e); - // Don't tell the client about this error - } - - let last_state = self - .invite_state(user_id, room_id)? - .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?; + Ok(()) +} - // We always drop the invite, we can't rely on other servers - self.update_membership( - room_id, - user_id, - MembershipState::Leave, - user_id, - last_state, - db, - true, - )?; - } else { - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; +pub async fn leave_room( + user_id: &UserId, + room_id: &RoomId, +) -> Result<()> { + // Ask a remote server if we don't have this room + if !services().rooms.metadata.exists(room_id)? 
&& room_id.server_name() != services().globals.server_name() { + if let Err(e) = remote_leave_room(user_id, room_id).await { + warn!("Failed to leave room {} remotely: {}", user_id, e); + // Don't tell the client about this error + } - let mut event: RoomMemberEventContent = serde_json::from_str( - self.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot leave a room you are not a member of.", - ))? - .content - .get(), - ) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; + let last_state = services().rooms.state_cache + .invite_state(user_id, room_id)? + .map_or_else(|| services().rooms.left_state(user_id, room_id), |s| Ok(Some(s)))?; - event.membership = MembershipState::Leave; + // We always drop the invite, we can't rely on other servers + services().rooms.state_cache.update_membership( + room_id, + user_id, + MembershipState::Leave, + user_id, + last_state, + true, + )?; + } else { + let mutex_state = Arc::clone( + services().globals + .roomid_mutex_state + .write() + .unwrap() + .entry(room_id.to_owned()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + let mut event: RoomMemberEventContent = serde_json::from_str( + services().rooms.state.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? + .ok_or(Error::BadRequest( + ErrorKind::BadState, + "Cannot leave a room you are not a member of.", + ))? + .content + .get(), + ) + .map_err(|_| Error::bad_database("Invalid member event in database."))?; - self.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - user_id, - room_id, - db, - &state_lock, - )?; - } + event.membership = MembershipState::Leave; - Ok(()) + services().rooms.timeline.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomMember, + content: to_raw_value(&event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + user_id, + room_id, + &state_lock, + )?; } - #[tracing::instrument(skip(self, db))] - async fn remote_leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - let mut make_leave_response_and_server = Err(Error::BadServerResponse( - "No server available to assist in leaving.", - )); + Ok(()) +} - let invite_state = db - .rooms - .invite_state(user_id, room_id)? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "User is not invited.", - ))?; +async fn remote_leave_room( + user_id: &UserId, + room_id: &RoomId, +) -> Result<()> { + let mut make_leave_response_and_server = Err(Error::BadServerResponse( + "No server available to assist in leaving.", + )); - let servers: HashSet<_> = invite_state - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::parse(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); + let invite_state = services() + .rooms + .invite_state(user_id, room_id)? 
+ .ok_or(Error::BadRequest( + ErrorKind::BadState, + "User is not invited.", + ))?; - for remote_server in servers { - let make_leave_response = db - .sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, - ) - .await; + let servers: HashSet<_> = invite_state + .iter() + .filter_map(|event| serde_json::from_str(event.json().get()).ok()) + .filter_map(|event: serde_json::Value| event.get("sender").cloned()) + .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) + .filter_map(|sender| UserId::parse(sender).ok()) + .map(|user| user.server_name().to_owned()) + .collect(); + + for remote_server in servers { + let make_leave_response = services() + .sending + .send_federation_request( + &remote_server, + federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, + ) + .await; - make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); + make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); - if make_leave_response_and_server.is_ok() { - break; - } + if make_leave_response_and_server.is_ok() { + break; } + } - let (make_leave_response, remote_server) = make_leave_response_and_server?; - - let room_version_id = match make_leave_response.room_version { - Some(version) if self.is_supported_version(&db, &version) => version, - _ => return Err(Error::BadServerResponse("Room version is not supported")), - }; - - let mut leave_event_stub = - serde_json::from_str::(make_leave_response.event.get()).map_err( - |_| Error::BadServerResponse("Invalid make_leave event json received from server."), - )?; + let (make_leave_response, remote_server) = make_leave_response_and_server?; - // TODO: Is origin needed? - leave_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - leave_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms - leave_event_stub.remove("event_id"); + let room_version_id = match make_leave_response.room_version { + Some(version) if services().rooms.is_supported_version(&version) => version, + _ => return Err(Error::BadServerResponse("Room version is not supported")), + }; - // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut leave_event_stub, - &room_version_id, - ) - .expect("event is valid, we just created it"); + let mut leave_event_stub = + serde_json::from_str::(make_leave_response.event.get()).map_err( + |_| Error::BadServerResponse("Invalid make_leave event json received from server."), + )?; - // Generate event id - let event_id = EventId::parse(format!( - "${}", - ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); + // TODO: Is origin needed? 
+ leave_event_stub.insert( + "origin".to_owned(), + CanonicalJsonValue::String(services().globals.server_name().as_str().to_owned()), + ); + leave_event_stub.insert( + "origin_server_ts".to_owned(), + CanonicalJsonValue::Integer( + utils::millis_since_unix_epoch() + .try_into() + .expect("Timestamp is valid js_int value"), + ), + ); + // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms + leave_event_stub.remove("event_id"); + + // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present + ruma::signatures::hash_and_sign_event( + services().globals.server_name().as_str(), + services().globals.keypair(), + &mut leave_event_stub, + &room_version_id, + ) + .expect("event is valid, we just created it"); - // Add event_id back - leave_event_stub.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); + // Generate event id + let event_id = EventId::parse(format!( + "${}", + ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) + .expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are valid event ids"); - // It has enough fields to be called a proper event now - let leave_event = leave_event_stub; + // Add event_id back + leave_event_stub.insert( + "event_id".to_owned(), + CanonicalJsonValue::String(event_id.as_str().to_owned()), + ); - db.sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::create_leave_event::v2::Request { - room_id, - event_id: &event_id, - pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), - }, - ) - .await?; + // It has enough fields to be called a proper event now + let leave_event = leave_event_stub; - Ok(()) - } + services().sending + .send_federation_request( + &remote_server, + federation::membership::create_leave_event::v2::Request { + room_id, + event_id: &event_id, + pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), + }, + ) + .await?; + Ok(()) +} diff --git a/src/api/client_server/message.rs b/src/api/client_server/message.rs index 1348132f..861f9c13 100644 --- a/src/api/client_server/message.rs +++ b/src/api/client_server/message.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, Error, Result, Ruma}; +use crate::{utils, Error, Result, Ruma, services, service::pdu::PduBuilder}; use ruma::{ api::client::{ error::ErrorKind, @@ -19,14 +19,13 @@ use std::{ /// - The only requirement for the content is that it has to be valid json /// - Tries to send the event into the room, auth rules will determine if it is allowed pub async fn send_message_event_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_deref(); let mutex_state = Arc::clone( - db.globals + services().globals .roomid_mutex_state .write() .unwrap() @@ -37,7 +36,7 @@ pub async fn send_message_event_route( // Forbid m.room.encrypted if encryption is disabled if RoomEventType::RoomEncrypted == body.event_type.to_string().into() - && !db.globals.allow_encryption() + && !services().globals.allow_encryption() { return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -47,7 +46,7 @@ pub async fn send_message_event_route( // Check if this is a new transaction id if let Some(response) = - db.transaction_ids + services().transaction_ids .existing_txnid(sender_user, sender_device, &body.txn_id)? 
{ // The client might have sent a txnid of the /sendToDevice endpoint @@ -69,7 +68,7 @@ pub async fn send_message_event_route( let mut unsigned = BTreeMap::new(); unsigned.insert("transaction_id".to_owned(), body.txn_id.to_string().into()); - let event_id = db.rooms.build_and_append_pdu( + let event_id = services().rooms.build_and_append_pdu( PduBuilder { event_type: body.event_type.to_string().into(), content: serde_json::from_str(body.body.body.json().get()) @@ -80,11 +79,10 @@ pub async fn send_message_event_route( }, sender_user, &body.room_id, - &db, &state_lock, )?; - db.transaction_ids.add_txnid( + services().transaction_ids.add_txnid( sender_user, sender_device, &body.txn_id, @@ -93,8 +91,6 @@ pub async fn send_message_event_route( drop(state_lock); - db.flush()?; - Ok(send_message_event::v3::Response::new( (*event_id).to_owned(), )) @@ -107,13 +103,12 @@ pub async fn send_message_event_route( /// - Only works if the user is joined (TODO: always allow, but only show events where the user was /// joined, depending on history_visibility) pub async fn get_message_events_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); - if !db.rooms.is_joined(sender_user, &body.room_id)? { + if !services().rooms.is_joined(sender_user, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -133,7 +128,7 @@ pub async fn get_message_events_route( let to = body.to.as_ref().map(|t| t.parse()); - db.rooms + services().rooms .lazy_load_confirm_delivery(sender_user, sender_device, &body.room_id, from)?; // Use limit or else 10 @@ -147,13 +142,13 @@ pub async fn get_message_events_route( match body.dir { get_message_events::v3::Direction::Forward => { - let events_after: Vec<_> = db + let events_after: Vec<_> = services() .rooms .pdus_after(sender_user, &body.room_id, from)? .take(limit) .filter_map(|r| r.ok()) // Filter out buggy events .filter_map(|(pdu_id, pdu)| { - db.rooms + services().rooms .pdu_count(&pdu_id) .map(|pdu_count| (pdu_count, pdu)) .ok() @@ -162,7 +157,7 @@ pub async fn get_message_events_route( .collect(); for (_, event) in &events_after { - if !db.rooms.lazy_load_was_sent_before( + if !services().rooms.lazy_load_was_sent_before( sender_user, sender_device, &body.room_id, @@ -184,13 +179,13 @@ pub async fn get_message_events_route( resp.chunk = events_after; } get_message_events::v3::Direction::Backward => { - let events_before: Vec<_> = db + let events_before: Vec<_> = services() .rooms .pdus_until(sender_user, &body.room_id, from)? .take(limit) .filter_map(|r| r.ok()) // Filter out buggy events .filter_map(|(pdu_id, pdu)| { - db.rooms + services().rooms .pdu_count(&pdu_id) .map(|pdu_count| (pdu_count, pdu)) .ok() @@ -199,7 +194,7 @@ pub async fn get_message_events_route( .collect(); for (_, event) in &events_before { - if !db.rooms.lazy_load_was_sent_before( + if !services().rooms.lazy_load_was_sent_before( sender_user, sender_device, &body.room_id, @@ -225,7 +220,7 @@ pub async fn get_message_events_route( resp.state = Vec::new(); for ll_id in &lazy_loaded { if let Some(member_event) = - db.rooms + services().rooms .room_state_get(&body.room_id, &StateEventType::RoomMember, ll_id.as_str())? 
{ resp.state.push(member_event.to_state_event()); @@ -233,7 +228,7 @@ pub async fn get_message_events_route( } if let Some(next_token) = next_token { - db.rooms.lazy_load_mark_sent( + services().rooms.lazy_load_mark_sent( sender_user, sender_device, &body.room_id, diff --git a/src/api/client_server/presence.rs b/src/api/client_server/presence.rs index 773fef47..bc220b80 100644 --- a/src/api/client_server/presence.rs +++ b/src/api/client_server/presence.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, utils, Result, Ruma}; +use crate::{utils, Result, Ruma, services}; use ruma::api::client::presence::{get_presence, set_presence}; use std::time::Duration; @@ -6,22 +6,21 @@ use std::time::Duration; /// /// Sets the presence state of the sender user. pub async fn set_presence_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - for room_id in db.rooms.rooms_joined(sender_user) { + for room_id in services().rooms.rooms_joined(sender_user) { let room_id = room_id?; - db.rooms.edus.update_presence( + services().rooms.edus.update_presence( sender_user, &room_id, ruma::events::presence::PresenceEvent { content: ruma::events::presence::PresenceEventContent { - avatar_url: db.users.avatar_url(sender_user)?, + avatar_url: services().users.avatar_url(sender_user)?, currently_active: None, - displayname: db.users.displayname(sender_user)?, + displayname: services().users.displayname(sender_user)?, last_active_ago: Some( utils::millis_since_unix_epoch() .try_into() @@ -32,12 +31,9 @@ pub async fn set_presence_route( }, sender: sender_user.clone(), }, - &db.globals, )?; } - db.flush()?; - Ok(set_presence::v3::Response {}) } @@ -47,20 +43,19 @@ pub async fn set_presence_route( /// /// - Only works if you share a room with the user pub async fn get_presence_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut presence_event = None; - for room_id in db + for room_id in services() .rooms .get_shared_rooms(vec![sender_user.clone(), body.user_id.clone()])? { let room_id = room_id?; - if let Some(presence) = db + if let Some(presence) = services() .rooms .edus .get_last_presence_event(sender_user, &room_id)? 
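[Editor's note] The presence update above fills `last_active_ago` from `utils::millis_since_unix_epoch()` and then narrows it with `try_into()`. An illustrative stand-in for that helper, built only on `std` (the crate's own version may differ in detail):

    use std::time::{SystemTime, UNIX_EPOCH};

    // Illustrative equivalent of the crate's `utils::millis_since_unix_epoch()`.
    fn millis_since_unix_epoch() -> u64 {
        SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .expect("system clock is set after the Unix epoch")
            .as_millis() as u64
    }

    fn main() {
        // The presence handlers narrow this to the integer type ruma expects
        // via `.try_into()`.
        println!("{} ms since the Unix epoch", millis_since_unix_epoch());
    }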
diff --git a/src/api/client_server/profile.rs b/src/api/client_server/profile.rs index acea19f0..7a87bcd1 100644 --- a/src/api/client_server/profile.rs +++ b/src/api/client_server/profile.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, Error, Result, Ruma}; +use crate::{utils, Error, Result, Ruma, services, service::pdu::PduBuilder}; use ruma::{ api::{ client::{ @@ -20,16 +20,15 @@ use std::sync::Arc; /// /// - Also makes sure other users receive the update using presence EDUs pub async fn set_displayname_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - db.users + services().users .set_displayname(sender_user, body.displayname.clone())?; // Send a new membership event and presence update into all joined rooms - let all_rooms_joined: Vec<_> = db + let all_rooms_joined: Vec<_> = services() .rooms .rooms_joined(sender_user) .filter_map(|r| r.ok()) @@ -40,7 +39,7 @@ pub async fn set_displayname_route( content: to_raw_value(&RoomMemberEventContent { displayname: body.displayname.clone(), ..serde_json::from_str( - db.rooms + services().rooms .room_state_get( &room_id, &StateEventType::RoomMember, @@ -70,7 +69,7 @@ pub async fn set_displayname_route( for (pdu_builder, room_id) in all_rooms_joined { let mutex_state = Arc::clone( - db.globals + services().globals .roomid_mutex_state .write() .unwrap() @@ -79,19 +78,19 @@ pub async fn set_displayname_route( ); let state_lock = mutex_state.lock().await; - let _ = db + let _ = services() .rooms - .build_and_append_pdu(pdu_builder, sender_user, &room_id, &db, &state_lock); + .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock); // Presence update - db.rooms.edus.update_presence( + services().rooms.edus.update_presence( sender_user, &room_id, ruma::events::presence::PresenceEvent { content: ruma::events::presence::PresenceEventContent { - avatar_url: db.users.avatar_url(sender_user)?, + avatar_url: services().users.avatar_url(sender_user)?, currently_active: None, - displayname: db.users.displayname(sender_user)?, + displayname: services().users.displayname(sender_user)?, last_active_ago: Some( utils::millis_since_unix_epoch() .try_into() @@ -102,12 +101,9 @@ pub async fn set_displayname_route( }, sender: sender_user.clone(), }, - &db.globals, )?; } - db.flush()?; - Ok(set_display_name::v3::Response {}) } @@ -117,14 +113,12 @@ pub async fn set_displayname_route( /// /// - If user is on another server: Fetches displayname over federation pub async fn get_displayname_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if body.user_id.server_name() != db.globals.server_name() { - let response = db + if body.user_id.server_name() != services().globals.server_name() { + let response = services() .sending .send_federation_request( - &db.globals, body.user_id.server_name(), federation::query::get_profile_information::v1::Request { user_id: &body.user_id, @@ -139,7 +133,7 @@ pub async fn get_displayname_route( } Ok(get_display_name::v3::Response { - displayname: db.users.displayname(&body.user_id)?, + displayname: services().users.displayname(&body.user_id)?, }) } @@ -149,18 +143,17 @@ pub async fn get_displayname_route( /// /// - Also makes sure other users receive the update using presence EDUs pub async fn set_avatar_url_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - db.users + services().users .set_avatar_url(sender_user, 
body.avatar_url.clone())?; - db.users.set_blurhash(sender_user, body.blurhash.clone())?; + services().users.set_blurhash(sender_user, body.blurhash.clone())?; // Send a new membership event and presence update into all joined rooms - let all_joined_rooms: Vec<_> = db + let all_joined_rooms: Vec<_> = services() .rooms .rooms_joined(sender_user) .filter_map(|r| r.ok()) @@ -171,7 +164,7 @@ pub async fn set_avatar_url_route( content: to_raw_value(&RoomMemberEventContent { avatar_url: body.avatar_url.clone(), ..serde_json::from_str( - db.rooms + services().rooms .room_state_get( &room_id, &StateEventType::RoomMember, @@ -201,7 +194,7 @@ pub async fn set_avatar_url_route( for (pdu_builder, room_id) in all_joined_rooms { let mutex_state = Arc::clone( - db.globals + services().globals .roomid_mutex_state .write() .unwrap() @@ -210,19 +203,19 @@ pub async fn set_avatar_url_route( ); let state_lock = mutex_state.lock().await; - let _ = db + let _ = services() .rooms - .build_and_append_pdu(pdu_builder, sender_user, &room_id, &db, &state_lock); + .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock); // Presence update - db.rooms.edus.update_presence( + services().rooms.edus.update_presence( sender_user, &room_id, ruma::events::presence::PresenceEvent { content: ruma::events::presence::PresenceEventContent { - avatar_url: db.users.avatar_url(sender_user)?, + avatar_url: services().users.avatar_url(sender_user)?, currently_active: None, - displayname: db.users.displayname(sender_user)?, + displayname: services().users.displayname(sender_user)?, last_active_ago: Some( utils::millis_since_unix_epoch() .try_into() @@ -233,12 +226,10 @@ pub async fn set_avatar_url_route( }, sender: sender_user.clone(), }, - &db.globals, + &services().globals, )?; } - db.flush()?; - Ok(set_avatar_url::v3::Response {}) } @@ -248,14 +239,12 @@ pub async fn set_avatar_url_route( /// /// - If user is on another server: Fetches avatar_url and blurhash over federation pub async fn get_avatar_url_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if body.user_id.server_name() != db.globals.server_name() { - let response = db + if body.user_id.server_name() != services().globals.server_name() { + let response = services() .sending .send_federation_request( - &db.globals, body.user_id.server_name(), federation::query::get_profile_information::v1::Request { user_id: &body.user_id, @@ -271,8 +260,8 @@ pub async fn get_avatar_url_route( } Ok(get_avatar_url::v3::Response { - avatar_url: db.users.avatar_url(&body.user_id)?, - blurhash: db.users.blurhash(&body.user_id)?, + avatar_url: services().users.avatar_url(&body.user_id)?, + blurhash: services().users.blurhash(&body.user_id)?, }) } @@ -282,14 +271,12 @@ pub async fn get_avatar_url_route( /// /// - If user is on another server: Fetches profile over federation pub async fn get_profile_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if body.user_id.server_name() != db.globals.server_name() { - let response = db + if body.user_id.server_name() != services().globals.server_name() { + let response = services() .sending .send_federation_request( - &db.globals, body.user_id.server_name(), federation::query::get_profile_information::v1::Request { user_id: &body.user_id, @@ -305,7 +292,7 @@ pub async fn get_profile_route( }); } - if !db.users.exists(&body.user_id)? { + if !services().users.exists(&body.user_id)? 
{ // Return 404 if this user doesn't exist return Err(Error::BadRequest( ErrorKind::NotFound, @@ -314,8 +301,8 @@ pub async fn get_profile_route( } Ok(get_profile::v3::Response { - avatar_url: db.users.avatar_url(&body.user_id)?, - blurhash: db.users.blurhash(&body.user_id)?, - displayname: db.users.displayname(&body.user_id)?, + avatar_url: services().users.avatar_url(&body.user_id)?, + blurhash: services().users.blurhash(&body.user_id)?, + displayname: services().users.displayname(&body.user_id)?, }) } diff --git a/src/api/client_server/push.rs b/src/api/client_server/push.rs index dc45ea0b..112fa002 100644 --- a/src/api/client_server/push.rs +++ b/src/api/client_server/push.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, Error, Result, Ruma}; +use crate::{Error, Result, Ruma, services}; use ruma::{ api::client::{ error::ErrorKind, @@ -16,12 +16,11 @@ use ruma::{ /// /// Retrieves the push rules event for this user. pub async fn get_pushrules_all_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event: PushRulesEvent = db + let event: PushRulesEvent = services() .account_data .get( None, @@ -42,12 +41,11 @@ pub async fn get_pushrules_all_route( /// /// Retrieves a single specified push rule for this user. pub async fn get_pushrule_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event: PushRulesEvent = db + let event: PushRulesEvent = services() .account_data .get( None, @@ -98,7 +96,6 @@ pub async fn get_pushrule_route( /// /// Creates a single specified push rule for this user. pub async fn set_pushrule_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -111,7 +108,7 @@ pub async fn set_pushrule_route( )); } - let mut event: PushRulesEvent = db + let mut event: PushRulesEvent = services() .account_data .get( None, @@ -186,16 +183,13 @@ pub async fn set_pushrule_route( _ => {} } - db.account_data.update( + services().account_data.update( None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into(), &event, - &db.globals, )?; - db.flush()?; - Ok(set_pushrule::v3::Response {}) } @@ -203,7 +197,6 @@ pub async fn set_pushrule_route( /// /// Gets the actions of a single specified push rule for this user. pub async fn get_pushrule_actions_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -215,7 +208,7 @@ pub async fn get_pushrule_actions_route( )); } - let mut event: PushRulesEvent = db + let mut event: PushRulesEvent = services() .account_data .get( None, @@ -252,8 +245,6 @@ pub async fn get_pushrule_actions_route( _ => None, }; - db.flush()?; - Ok(get_pushrule_actions::v3::Response { actions: actions.unwrap_or_default(), }) @@ -263,7 +254,6 @@ pub async fn get_pushrule_actions_route( /// /// Sets the actions of a single specified push rule for this user. 
pub async fn set_pushrule_actions_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -275,7 +265,7 @@ pub async fn set_pushrule_actions_route( )); } - let mut event: PushRulesEvent = db + let mut event: PushRulesEvent = services() .account_data .get( None, @@ -322,16 +312,13 @@ pub async fn set_pushrule_actions_route( _ => {} }; - db.account_data.update( + services().account_data.update( None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into(), &event, - &db.globals, )?; - db.flush()?; - Ok(set_pushrule_actions::v3::Response {}) } @@ -339,7 +326,6 @@ pub async fn set_pushrule_actions_route( /// /// Gets the enabled status of a single specified push rule for this user. pub async fn get_pushrule_enabled_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -351,7 +337,7 @@ pub async fn get_pushrule_enabled_route( )); } - let mut event: PushRulesEvent = db + let mut event: PushRulesEvent = services() .account_data .get( None, @@ -393,8 +379,6 @@ pub async fn get_pushrule_enabled_route( _ => false, }; - db.flush()?; - Ok(get_pushrule_enabled::v3::Response { enabled }) } @@ -402,7 +386,6 @@ pub async fn get_pushrule_enabled_route( /// /// Sets the enabled status of a single specified push rule for this user. pub async fn set_pushrule_enabled_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -414,7 +397,7 @@ pub async fn set_pushrule_enabled_route( )); } - let mut event: PushRulesEvent = db + let mut event: PushRulesEvent = services() .account_data .get( None, @@ -466,16 +449,13 @@ pub async fn set_pushrule_enabled_route( _ => {} } - db.account_data.update( + services().account_data.update( None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into(), &event, - &db.globals, )?; - db.flush()?; - Ok(set_pushrule_enabled::v3::Response {}) } @@ -483,7 +463,6 @@ pub async fn set_pushrule_enabled_route( /// /// Deletes a single specified push rule for this user. pub async fn delete_pushrule_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -495,7 +474,7 @@ pub async fn delete_pushrule_route( )); } - let mut event: PushRulesEvent = db + let mut event: PushRulesEvent = services() .account_data .get( None, @@ -537,16 +516,13 @@ pub async fn delete_pushrule_route( _ => {} } - db.account_data.update( + services().account_data.update( None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into(), &event, - &db.globals, )?; - db.flush()?; - Ok(delete_pushrule::v3::Response {}) } @@ -554,13 +530,12 @@ pub async fn delete_pushrule_route( /// /// Gets all currently active pushers for the sender user. 
pub async fn get_pushers_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(get_pushers::v3::Response { - pushers: db.pusher.get_pushers(sender_user)?, + pushers: services().pusher.get_pushers(sender_user)?, }) } @@ -570,15 +545,12 @@ pub async fn get_pushers_route( /// /// - TODO: Handle `append` pub async fn set_pushers_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let pusher = body.pusher.clone(); - db.pusher.set_pusher(sender_user, pusher)?; - - db.flush()?; + services().pusher.set_pusher(sender_user, pusher)?; Ok(set_pusher::v3::Response::default()) } diff --git a/src/api/client_server/read_marker.rs b/src/api/client_server/read_marker.rs index 91988a47..284ae65e 100644 --- a/src/api/client_server/read_marker.rs +++ b/src/api/client_server/read_marker.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, Error, Result, Ruma}; +use crate::{Error, Result, Ruma, services}; use ruma::{ api::client::{error::ErrorKind, read_marker::set_read_marker, receipt::create_receipt}, events::RoomAccountDataEventType, @@ -14,7 +14,6 @@ use std::collections::BTreeMap; /// - Updates fully-read account data event to `fully_read` /// - If `read_receipt` is set: Update private marker and public read receipt EDU pub async fn set_read_marker_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -24,25 +23,23 @@ pub async fn set_read_marker_route( event_id: body.fully_read.clone(), }, }; - db.account_data.update( + services().account_data.update( Some(&body.room_id), sender_user, RoomAccountDataEventType::FullyRead, &fully_read_event, - &db.globals, )?; if let Some(event) = &body.read_receipt { - db.rooms.edus.private_read_set( + services().rooms.edus.private_read_set( &body.room_id, sender_user, - db.rooms.get_pdu_count(event)?.ok_or(Error::BadRequest( + services().rooms.get_pdu_count(event)?.ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Event does not exist.", ))?, - &db.globals, )?; - db.rooms + services().rooms .reset_notification_counts(sender_user, &body.room_id)?; let mut user_receipts = BTreeMap::new(); @@ -59,19 +56,16 @@ pub async fn set_read_marker_route( let mut receipt_content = BTreeMap::new(); receipt_content.insert(event.to_owned(), receipts); - db.rooms.edus.readreceipt_update( + services().rooms.edus.readreceipt_update( sender_user, &body.room_id, ruma::events::receipt::ReceiptEvent { content: ruma::events::receipt::ReceiptEventContent(receipt_content), room_id: body.room_id.clone(), }, - &db.globals, )?; } - db.flush()?; - Ok(set_read_marker::v3::Response {}) } @@ -79,23 +73,21 @@ pub async fn set_read_marker_route( /// /// Sets private read marker and public read receipt EDU. pub async fn create_receipt_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - db.rooms.edus.private_read_set( + services().rooms.edus.private_read_set( &body.room_id, sender_user, - db.rooms + services().rooms .get_pdu_count(&body.event_id)? 
.ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Event does not exist.", ))?, - &db.globals, )?; - db.rooms + services().rooms .reset_notification_counts(sender_user, &body.room_id)?; let mut user_receipts = BTreeMap::new(); @@ -111,17 +103,16 @@ pub async fn create_receipt_route( let mut receipt_content = BTreeMap::new(); receipt_content.insert(body.event_id.to_owned(), receipts); - db.rooms.edus.readreceipt_update( + services().rooms.edus.readreceipt_update( sender_user, &body.room_id, ruma::events::receipt::ReceiptEvent { content: ruma::events::receipt::ReceiptEventContent(receipt_content), room_id: body.room_id.clone(), }, - &db.globals, )?; - db.flush()?; + services().flush()?; Ok(create_receipt::v3::Response {}) } diff --git a/src/api/client_server/redact.rs b/src/api/client_server/redact.rs index 059e0f52..d6699bcf 100644 --- a/src/api/client_server/redact.rs +++ b/src/api/client_server/redact.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use crate::{database::DatabaseGuard, pdu::PduBuilder, Result, Ruma}; +use crate::{Result, Ruma, services, service::pdu::PduBuilder}; use ruma::{ api::client::redact::redact_event, events::{room::redaction::RoomRedactionEventContent, RoomEventType}, @@ -14,14 +14,13 @@ use serde_json::value::to_raw_value; /// /// - TODO: Handle txn id pub async fn redact_event_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let body = body.body; let mutex_state = Arc::clone( - db.globals + services().globals .roomid_mutex_state .write() .unwrap() @@ -30,7 +29,7 @@ pub async fn redact_event_route( ); let state_lock = mutex_state.lock().await; - let event_id = db.rooms.build_and_append_pdu( + let event_id = services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomRedaction, content: to_raw_value(&RoomRedactionEventContent { @@ -43,14 +42,11 @@ pub async fn redact_event_route( }, sender_user, &body.room_id, - &db, &state_lock, )?; drop(state_lock); - db.flush()?; - let event_id = (*event_id).to_owned(); Ok(redact_event::v3::Response { event_id }) } diff --git a/src/api/client_server/report.rs b/src/api/client_server/report.rs index 14768e1c..2c2a5493 100644 --- a/src/api/client_server/report.rs +++ b/src/api/client_server/report.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, utils::HtmlEscape, Error, Result, Ruma}; +use crate::{utils::HtmlEscape, Error, Result, Ruma, services}; use ruma::{ api::client::{error::ErrorKind, room::report_content}, events::room::message, @@ -10,12 +10,11 @@ use ruma::{ /// Reports an inappropriate event to homeserver admins /// pub async fn report_event_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let pdu = match db.rooms.get_pdu(&body.event_id)? { + let pdu = match services().rooms.get_pdu(&body.event_id)? 
{ Some(pdu) => pdu, _ => { return Err(Error::BadRequest( @@ -39,7 +38,7 @@ pub async fn report_event_route( )); }; - db.admin + services().admin .send_message(message::RoomMessageEventContent::text_html( format!( "Report received from: {}\n\n\ @@ -66,7 +65,5 @@ pub async fn report_event_route( ), )); - db.flush()?; - Ok(report_content::v3::Response {}) } diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs index 5ae7224c..14affc65 100644 --- a/src/api/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -1,5 +1,5 @@ use crate::{ - client_server::invite_helper, database::DatabaseGuard, pdu::PduBuilder, Error, Result, Ruma, + Error, Result, Ruma, service::pdu::PduBuilder, services, api::client_server::invite_helper, }; use ruma::{ api::client::{ @@ -46,19 +46,18 @@ use tracing::{info, warn}; /// - Send events implied by `name` and `topic` /// - Send invite events pub async fn create_room_route( - db: DatabaseGuard, body: Ruma, ) -> Result { use create_room::v3::RoomPreset; let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let room_id = RoomId::new(db.globals.server_name()); + let room_id = RoomId::new(services().globals.server_name()); - db.rooms.get_or_create_shortroomid(&room_id, &db.globals)?; + services().rooms.get_or_create_shortroomid(&room_id)?; let mutex_state = Arc::clone( - db.globals + services().globals .roomid_mutex_state .write() .unwrap() @@ -67,9 +66,9 @@ pub async fn create_room_route( ); let state_lock = mutex_state.lock().await; - if !db.globals.allow_room_creation() + if !services().globals.allow_room_creation() && !body.from_appservice - && !db.users.is_admin(sender_user, &db.rooms, &db.globals)? + && !services().users.is_admin(sender_user)? { return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -83,12 +82,12 @@ pub async fn create_room_route( .map_or(Ok(None), |localpart| { // TODO: Check for invalid characters and maximum length let alias = - RoomAliasId::parse(format!("#{}:{}", localpart, db.globals.server_name())) + RoomAliasId::parse(format!("#{}:{}", localpart, services().globals.server_name())) .map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias.") })?; - if db.rooms.id_from_alias(&alias)?.is_some() { + if services().rooms.id_from_alias(&alias)?.is_some() { Err(Error::BadRequest( ErrorKind::RoomInUse, "Room alias already exists.", @@ -100,7 +99,7 @@ pub async fn create_room_route( let room_version = match body.room_version.clone() { Some(room_version) => { - if db.rooms.is_supported_version(&db, &room_version) { + if services().rooms.is_supported_version(&services(), &room_version) { room_version } else { return Err(Error::BadRequest( @@ -109,7 +108,7 @@ pub async fn create_room_route( )); } } - None => db.globals.default_room_version(), + None => services().globals.default_room_version(), }; let content = match &body.creation_content { @@ -163,7 +162,7 @@ pub async fn create_room_route( } // 1. The room create event - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomCreate, content: to_raw_value(&content).expect("event is valid, we just created it"), @@ -173,21 +172,20 @@ pub async fn create_room_route( }, sender_user, &room_id, - &db, &state_lock, )?; // 2. 
Let the room creator join - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Join, - displayname: db.users.displayname(sender_user)?, - avatar_url: db.users.avatar_url(sender_user)?, + displayname: services().users.displayname(sender_user)?, + avatar_url: services().users.avatar_url(sender_user)?, is_direct: Some(body.is_direct), third_party_invite: None, - blurhash: db.users.blurhash(sender_user)?, + blurhash: services().users.blurhash(sender_user)?, reason: None, join_authorized_via_users_server: None, }) @@ -198,7 +196,6 @@ pub async fn create_room_route( }, sender_user, &room_id, - &db, &state_lock, )?; @@ -240,7 +237,7 @@ pub async fn create_room_route( } } - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomPowerLevels, content: to_raw_value(&power_levels_content) @@ -251,13 +248,12 @@ pub async fn create_room_route( }, sender_user, &room_id, - &db, &state_lock, )?; // 4. Canonical room alias if let Some(room_alias_id) = &alias { - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomCanonicalAlias, content: to_raw_value(&RoomCanonicalAliasEventContent { @@ -271,7 +267,6 @@ pub async fn create_room_route( }, sender_user, &room_id, - &db, &state_lock, )?; } @@ -279,7 +274,7 @@ pub async fn create_room_route( // 5. Events set by preset // 5.1 Join Rules - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomJoinRules, content: to_raw_value(&RoomJoinRulesEventContent::new(match preset { @@ -294,12 +289,11 @@ pub async fn create_room_route( }, sender_user, &room_id, - &db, &state_lock, )?; // 5.2 History Visibility - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomHistoryVisibility, content: to_raw_value(&RoomHistoryVisibilityEventContent::new( @@ -312,12 +306,11 @@ pub async fn create_room_route( }, sender_user, &room_id, - &db, &state_lock, )?; // 5.3 Guest Access - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomGuestAccess, content: to_raw_value(&RoomGuestAccessEventContent::new(match preset { @@ -331,7 +324,6 @@ pub async fn create_room_route( }, sender_user, &room_id, - &db, &state_lock, )?; @@ -346,18 +338,18 @@ pub async fn create_room_route( pdu_builder.state_key.get_or_insert_with(|| "".to_owned()); // Silently skip encryption events if they are not allowed - if pdu_builder.event_type == RoomEventType::RoomEncryption && !db.globals.allow_encryption() + if pdu_builder.event_type == RoomEventType::RoomEncryption && !services().globals.allow_encryption() { continue; } - db.rooms - .build_and_append_pdu(pdu_builder, sender_user, &room_id, &db, &state_lock)?; + services().rooms + .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock)?; } // 7. 
Events implied by name and topic if let Some(name) = &body.name { - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomName, content: to_raw_value(&RoomNameEventContent::new(Some(name.clone()))) @@ -368,13 +360,12 @@ pub async fn create_room_route( }, sender_user, &room_id, - &db, &state_lock, )?; } if let Some(topic) = &body.topic { - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomTopic, content: to_raw_value(&RoomTopicEventContent { @@ -387,7 +378,6 @@ pub async fn create_room_route( }, sender_user, &room_id, - &db, &state_lock, )?; } @@ -395,22 +385,20 @@ pub async fn create_room_route( // 8. Events implied by invite (and TODO: invite_3pid) drop(state_lock); for user_id in &body.invite { - let _ = invite_helper(sender_user, user_id, &room_id, &db, body.is_direct).await; + let _ = invite_helper(sender_user, user_id, &room_id, body.is_direct).await; } // Homeserver specific stuff if let Some(alias) = alias { - db.rooms.set_alias(&alias, Some(&room_id), &db.globals)?; + services().rooms.set_alias(&alias, Some(&room_id))?; } if body.visibility == room::Visibility::Public { - db.rooms.set_public(&room_id, true)?; + services().rooms.set_public(&room_id, true)?; } info!("{} created a room", sender_user); - db.flush()?; - Ok(create_room::v3::Response::new(room_id)) } @@ -420,12 +408,11 @@ pub async fn create_room_route( /// /// - You have to currently be joined to the room (TODO: Respect history visibility) pub async fn get_room_event_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !db.rooms.is_joined(sender_user, &body.room_id)? { + if !services().rooms.is_joined(sender_user, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -433,7 +420,7 @@ pub async fn get_room_event_route( } Ok(get_room_event::v3::Response { - event: db + event: services() .rooms .get_pdu(&body.event_id)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))? @@ -447,12 +434,11 @@ pub async fn get_room_event_route( /// /// - Only users joined to the room are allowed to call this TODO: Allow any user to call it if history_visibility is world readable pub async fn get_room_aliases_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !db.rooms.is_joined(sender_user, &body.room_id)? { + if !services().rooms.is_joined(sender_user, &body.room_id)? 
{ return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -460,7 +446,7 @@ pub async fn get_room_aliases_route( } Ok(aliases::v3::Response { - aliases: db + aliases: services() .rooms .room_aliases(&body.room_id) .filter_map(|a| a.ok()) @@ -479,12 +465,11 @@ pub async fn get_room_aliases_route( /// - Moves local aliases /// - Modifies old room power levels to prevent users from speaking pub async fn upgrade_room_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !db.rooms.is_supported_version(&db, &body.new_version) { + if !services().rooms.is_supported_version(&body.new_version) { return Err(Error::BadRequest( ErrorKind::UnsupportedRoomVersion, "This server does not support that room version.", @@ -492,12 +477,12 @@ pub async fn upgrade_room_route( } // Create a replacement room - let replacement_room = RoomId::new(db.globals.server_name()); - db.rooms - .get_or_create_shortroomid(&replacement_room, &db.globals)?; + let replacement_room = RoomId::new(services().globals.server_name()); + services().rooms + .get_or_create_shortroomid(&replacement_room)?; let mutex_state = Arc::clone( - db.globals + services().globals .roomid_mutex_state .write() .unwrap() @@ -508,7 +493,7 @@ pub async fn upgrade_room_route( // Send a m.room.tombstone event to the old room to indicate that it is not intended to be used any further // Fail if the sender does not have the required permissions - let tombstone_event_id = db.rooms.build_and_append_pdu( + let tombstone_event_id = services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomTombstone, content: to_raw_value(&RoomTombstoneEventContent { @@ -522,14 +507,13 @@ pub async fn upgrade_room_route( }, sender_user, &body.room_id, - &db, &state_lock, )?; // Change lock to replacement room drop(state_lock); let mutex_state = Arc::clone( - db.globals + services().globals .roomid_mutex_state .write() .unwrap() @@ -540,7 +524,7 @@ pub async fn upgrade_room_route( // Get the old room creation event let mut create_event_content = serde_json::from_str::( - db.rooms + services().rooms .room_state_get(&body.room_id, &StateEventType::RoomCreate, "")? .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? 
.content @@ -588,7 +572,7 @@ pub async fn upgrade_room_route( )); } - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomCreate, content: to_raw_value(&create_event_content) @@ -599,21 +583,20 @@ pub async fn upgrade_room_route( }, sender_user, &replacement_room, - &db, &state_lock, )?; // Join the new room - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Join, - displayname: db.users.displayname(sender_user)?, - avatar_url: db.users.avatar_url(sender_user)?, + displayname: services().users.displayname(sender_user)?, + avatar_url: services().users.avatar_url(sender_user)?, is_direct: None, third_party_invite: None, - blurhash: db.users.blurhash(sender_user)?, + blurhash: services().users.blurhash(sender_user)?, reason: None, join_authorized_via_users_server: None, }) @@ -624,7 +607,6 @@ pub async fn upgrade_room_route( }, sender_user, &replacement_room, - &db, &state_lock, )?; @@ -643,12 +625,12 @@ pub async fn upgrade_room_route( // Replicate transferable state events to the new room for event_type in transferable_state_events { - let event_content = match db.rooms.room_state_get(&body.room_id, &event_type, "")? { + let event_content = match services().rooms.room_state_get(&body.room_id, &event_type, "")? { Some(v) => v.content.clone(), None => continue, // Skipping missing events. }; - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: event_type.to_string().into(), content: event_content, @@ -658,20 +640,19 @@ pub async fn upgrade_room_route( }, sender_user, &replacement_room, - &db, &state_lock, )?; } // Moves any local aliases to the new room - for alias in db.rooms.room_aliases(&body.room_id).filter_map(|r| r.ok()) { - db.rooms - .set_alias(&alias, Some(&replacement_room), &db.globals)?; + for alias in services().rooms.room_aliases(&body.room_id).filter_map(|r| r.ok()) { + services().rooms + .set_alias(&alias, Some(&replacement_room))?; } // Get the old room power levels let mut power_levels_event_content: RoomPowerLevelsEventContent = serde_json::from_str( - db.rooms + services().rooms .room_state_get(&body.room_id, &StateEventType::RoomPowerLevels, "")? .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? .content @@ -685,7 +666,7 @@ pub async fn upgrade_room_route( power_levels_event_content.invite = new_level; // Modify the power levels in the old room to prevent sending of events and inviting new users - let _ = db.rooms.build_and_append_pdu( + let _ = services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomPowerLevels, content: to_raw_value(&power_levels_event_content) @@ -696,35 +677,12 @@ pub async fn upgrade_room_route( }, sender_user, &body.room_id, - &db, &state_lock, )?; drop(state_lock); - db.flush()?; - // Return the replacement room id Ok(upgrade_room::v3::Response { replacement_room }) } - /// Returns the room's version. 
- #[tracing::instrument(skip(self))] - pub fn get_room_version(&self, room_id: &RoomId) -> Result { - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - let room_version = create_event_content - .map(|create_event| create_event.room_version) - .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; - Ok(room_version) - } - diff --git a/src/api/client_server/search.rs b/src/api/client_server/search.rs index 686e3b5e..b7eecd5a 100644 --- a/src/api/client_server/search.rs +++ b/src/api/client_server/search.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, Error, Result, Ruma}; +use crate::{Error, Result, Ruma, services}; use ruma::api::client::{ error::ErrorKind, search::search_events::{ @@ -15,7 +15,6 @@ use std::collections::BTreeMap; /// /// - Only works if the user is currently joined to the room (TODO: Respect history visibility) pub async fn search_events_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -24,7 +23,7 @@ pub async fn search_events_route( let filter = &search_criteria.filter; let room_ids = filter.rooms.clone().unwrap_or_else(|| { - db.rooms + services().rooms .rooms_joined(sender_user) .filter_map(|r| r.ok()) .collect() @@ -35,14 +34,14 @@ pub async fn search_events_route( let mut searches = Vec::new(); for room_id in room_ids { - if !db.rooms.is_joined(sender_user, &room_id)? { + if !services().rooms.is_joined(sender_user, &room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", )); } - if let Some(search) = db + if let Some(search) = services() .rooms .search_pdus(&room_id, &search_criteria.search_term)? { @@ -85,7 +84,7 @@ pub async fn search_events_route( start: None, }, rank: None, - result: db + result: services() .rooms .get_pdu_from_id(result)? .map(|pdu| pdu.to_room_event()), diff --git a/src/api/client_server/session.rs b/src/api/client_server/session.rs index c2a79ca6..7feeb66c 100644 --- a/src/api/client_server/session.rs +++ b/src/api/client_server/session.rs @@ -1,5 +1,5 @@ use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH}; -use crate::{database::DatabaseGuard, utils, Error, Result, Ruma}; +use crate::{utils, Error, Result, Ruma, services}; use ruma::{ api::client::{ error::ErrorKind, @@ -41,7 +41,6 @@ pub async fn get_login_types_route( /// Note: You can use [`GET /_matrix/client/r0/login`](fn.get_supported_versions_route.html) to see /// supported login types. 
pub async fn login_route( - db: DatabaseGuard, body: Ruma, ) -> Result { // Validate login method @@ -57,11 +56,11 @@ pub async fn login_route( return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type.")); }; let user_id = - UserId::parse_with_server_name(username.to_owned(), db.globals.server_name()) + UserId::parse_with_server_name(username.to_owned(), services().globals.server_name()) .map_err(|_| { Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") })?; - let hash = db.users.password_hash(&user_id)?.ok_or(Error::BadRequest( + let hash = services().users.password_hash(&user_id)?.ok_or(Error::BadRequest( ErrorKind::Forbidden, "Wrong username or password.", ))?; @@ -85,7 +84,7 @@ pub async fn login_route( user_id } login::v3::IncomingLoginInfo::Token(login::v3::IncomingToken { token }) => { - if let Some(jwt_decoding_key) = db.globals.jwt_decoding_key() { + if let Some(jwt_decoding_key) = services().globals.jwt_decoding_key() { let token = jsonwebtoken::decode::( token, jwt_decoding_key, @@ -93,7 +92,7 @@ pub async fn login_route( ) .map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Token is invalid."))?; let username = token.claims.sub; - UserId::parse_with_server_name(username, db.globals.server_name()).map_err( + UserId::parse_with_server_name(username, services().globals.server_name()).map_err( |_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."), )? } else { @@ -122,15 +121,15 @@ pub async fn login_route( // Determine if device_id was provided and exists in the db for this user let device_exists = body.device_id.as_ref().map_or(false, |device_id| { - db.users + services().users .all_device_ids(&user_id) .any(|x| x.as_ref().map_or(false, |v| v == device_id)) }); if device_exists { - db.users.set_token(&user_id, &device_id, &token)?; + services().users.set_token(&user_id, &device_id, &token)?; } else { - db.users.create_device( + services().users.create_device( &user_id, &device_id, &token, @@ -140,12 +139,10 @@ pub async fn login_route( info!("{} logged in", user_id); - db.flush()?; - Ok(login::v3::Response { user_id, access_token: token, - home_server: Some(db.globals.server_name().to_owned()), + home_server: Some(services().globals.server_name().to_owned()), device_id, well_known: None, }) @@ -160,15 +157,12 @@ pub async fn login_route( /// - Forgets to-device events /// - Triggers device list updates pub async fn logout_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); - db.users.remove_device(sender_user, sender_device)?; - - db.flush()?; + services().users.remove_device(sender_user, sender_device)?; Ok(logout::v3::Response::new()) } @@ -185,16 +179,13 @@ pub async fn logout_route( /// Note: This is equivalent to calling [`GET /_matrix/client/r0/logout`](fn.logout_route.html) /// from each device of this user. 
pub async fn logout_all_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - for device_id in db.users.all_device_ids(sender_user).flatten() { - db.users.remove_device(sender_user, &device_id)?; + for device_id in services().users.all_device_ids(sender_user).flatten() { + services().users.remove_device(sender_user, &device_id)?; } - db.flush()?; - Ok(logout_all::v3::Response::new()) } diff --git a/src/api/client_server/state.rs b/src/api/client_server/state.rs index 4df953cf..4e8d594e 100644 --- a/src/api/client_server/state.rs +++ b/src/api/client_server/state.rs @@ -1,7 +1,7 @@ use std::sync::Arc; use crate::{ - database::DatabaseGuard, pdu::PduBuilder, Database, Error, Result, Ruma, RumaResponse, + Error, Result, Ruma, RumaResponse, services, service::pdu::PduBuilder, }; use ruma::{ api::client::{ @@ -27,13 +27,11 @@ use ruma::{ /// - Tries to send the event into the room, auth rules will determine if it is allowed /// - If event is new canonical_alias: Rejects if alias is incorrect pub async fn send_state_event_for_key_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let event_id = send_state_event_for_key_helper( - &db, sender_user, &body.room_id, &body.event_type, @@ -42,8 +40,6 @@ pub async fn send_state_event_for_key_route( ) .await?; - db.flush()?; - let event_id = (*event_id).to_owned(); Ok(send_state_event::v3::Response { event_id }) } @@ -56,13 +52,12 @@ pub async fn send_state_event_for_key_route( /// - Tries to send the event into the room, auth rules will determine if it is allowed /// - If event is new canonical_alias: Rejects if alias is incorrect pub async fn send_state_event_for_empty_key_route( - db: DatabaseGuard, body: Ruma, ) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // Forbid m.room.encryption if encryption is disabled - if body.event_type == StateEventType::RoomEncryption && !db.globals.allow_encryption() { + if body.event_type == StateEventType::RoomEncryption && !services().globals.allow_encryption() { return Err(Error::BadRequest( ErrorKind::Forbidden, "Encryption has been disabled", @@ -70,7 +65,6 @@ pub async fn send_state_event_for_empty_key_route( } let event_id = send_state_event_for_key_helper( - &db, sender_user, &body.room_id, &body.event_type.to_string().into(), @@ -79,8 +73,6 @@ pub async fn send_state_event_for_empty_key_route( ) .await?; - db.flush()?; - let event_id = (*event_id).to_owned(); Ok(send_state_event::v3::Response { event_id }.into()) } @@ -91,7 +83,6 @@ pub async fn send_state_event_for_empty_key_route( /// /// - If not joined: Only works if current room history visibility is world readable pub async fn get_state_events_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -99,9 +90,9 @@ pub async fn get_state_events_route( #[allow(clippy::blocks_in_if_conditions)] // Users not in the room should not be able to access the state unless history_visibility is // WorldReadable - if !db.rooms.is_joined(sender_user, &body.room_id)? + if !services().rooms.is_joined(sender_user, &body.room_id)? && !matches!( - db.rooms + services().rooms .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? 
.map(|event| { serde_json::from_str(event.content.get()) @@ -122,7 +113,7 @@ pub async fn get_state_events_route( } Ok(get_state_events::v3::Response { - room_state: db + room_state: services() .rooms .room_state_full(&body.room_id) .await? @@ -138,7 +129,6 @@ pub async fn get_state_events_route( /// /// - If not joined: Only works if current room history visibility is world readable pub async fn get_state_events_for_key_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -146,9 +136,9 @@ pub async fn get_state_events_for_key_route( #[allow(clippy::blocks_in_if_conditions)] // Users not in the room should not be able to access the state unless history_visibility is // WorldReadable - if !db.rooms.is_joined(sender_user, &body.room_id)? + if !services().rooms.is_joined(sender_user, &body.room_id)? && !matches!( - db.rooms + services().rooms .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? .map(|event| { serde_json::from_str(event.content.get()) @@ -168,7 +158,7 @@ pub async fn get_state_events_for_key_route( )); } - let event = db + let event = services() .rooms .room_state_get(&body.room_id, &body.event_type, &body.state_key)? .ok_or(Error::BadRequest( @@ -188,7 +178,6 @@ pub async fn get_state_events_for_key_route( /// /// - If not joined: Only works if current room history visibility is world readable pub async fn get_state_events_for_empty_key_route( - db: DatabaseGuard, body: Ruma, ) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); @@ -196,9 +185,9 @@ pub async fn get_state_events_for_empty_key_route( #[allow(clippy::blocks_in_if_conditions)] // Users not in the room should not be able to access the state unless history_visibility is // WorldReadable - if !db.rooms.is_joined(sender_user, &body.room_id)? + if !services().rooms.is_joined(sender_user, &body.room_id)? && !matches!( - db.rooms + services().rooms .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? .map(|event| { serde_json::from_str(event.content.get()) @@ -218,7 +207,7 @@ pub async fn get_state_events_for_empty_key_route( )); } - let event = db + let event = services() .rooms .room_state_get(&body.room_id, &body.event_type, "")? .ok_or(Error::BadRequest( @@ -234,7 +223,6 @@ pub async fn get_state_events_for_empty_key_route( } async fn send_state_event_for_key_helper( - db: &Database, sender: &UserId, room_id: &RoomId, event_type: &StateEventType, @@ -255,8 +243,8 @@ async fn send_state_event_for_key_helper( } for alias in aliases { - if alias.server_name() != db.globals.server_name() - || db + if alias.server_name() != services().globals.server_name() + || services() .rooms .id_from_alias(&alias)? 
.filter(|room| room == room_id) // Make sure it's the right room @@ -272,7 +260,7 @@ async fn send_state_event_for_key_helper( } let mutex_state = Arc::clone( - db.globals + services().globals .roomid_mutex_state .write() .unwrap() @@ -281,7 +269,7 @@ async fn send_state_event_for_key_helper( ); let state_lock = mutex_state.lock().await; - let event_id = db.rooms.build_and_append_pdu( + let event_id = services().rooms.build_and_append_pdu( PduBuilder { event_type: event_type.to_string().into(), content: serde_json::from_str(json.json().get()).expect("content is valid json"), @@ -291,7 +279,6 @@ async fn send_state_event_for_key_helper( }, sender_user, room_id, - db, &state_lock, )?; diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 0c294b7e..cc4ebf6e 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, Database, Error, Result, Ruma, RumaResponse}; +use crate::{Error, Result, Ruma, RumaResponse, services}; use ruma::{ api::client::{ filter::{IncomingFilterDefinition, LazyLoadOptions}, @@ -55,16 +55,13 @@ use tracing::error; /// - Sync is handled in an async task, multiple requests from the same device with the same /// `since` will be cached pub async fn sync_events_route( - db: DatabaseGuard, body: Ruma, ) -> Result> { let sender_user = body.sender_user.expect("user is authenticated"); let sender_device = body.sender_device.expect("user is authenticated"); let body = body.body; - let arc_db = Arc::new(db); - - let mut rx = match arc_db + let mut rx = match services() .globals .sync_receivers .write() @@ -77,7 +74,6 @@ pub async fn sync_events_route( v.insert((body.since.to_owned(), rx.clone())); tokio::spawn(sync_helper_wrapper( - Arc::clone(&arc_db), sender_user.clone(), sender_device.clone(), body, @@ -93,7 +89,6 @@ pub async fn sync_events_route( o.insert((body.since.clone(), rx.clone())); tokio::spawn(sync_helper_wrapper( - Arc::clone(&arc_db), sender_user.clone(), sender_device.clone(), body, @@ -127,7 +122,6 @@ pub async fn sync_events_route( } async fn sync_helper_wrapper( - db: Arc, sender_user: Box, sender_device: Box, body: sync_events::v3::IncomingRequest, @@ -136,7 +130,6 @@ async fn sync_helper_wrapper( let since = body.since.clone(); let r = sync_helper( - Arc::clone(&db), sender_user.clone(), sender_device.clone(), body, @@ -145,7 +138,7 @@ async fn sync_helper_wrapper( if let Ok((_, caching_allowed)) = r { if !caching_allowed { - match db + match services() .globals .sync_receivers .write() @@ -163,13 +156,10 @@ async fn sync_helper_wrapper( } } - drop(db); - let _ = tx.send(Some(r.map(|(r, _)| r))); } async fn sync_helper( - db: Arc, sender_user: Box, sender_device: Box, body: sync_events::v3::IncomingRequest, @@ -182,19 +172,19 @@ async fn sync_helper( }; // TODO: match body.set_presence { - db.rooms.edus.ping_presence(&sender_user)?; + services().rooms.edus.ping_presence(&sender_user)?; // Setup watchers, so if there's no response, we can wait for them - let watcher = db.watch(&sender_user, &sender_device); + let watcher = services().watch(&sender_user, &sender_device); - let next_batch = db.globals.current_count()?; + let next_batch = services().globals.current_count()?; let next_batch_string = next_batch.to_string(); // Load filter let filter = match body.filter { None => IncomingFilterDefinition::default(), Some(IncomingFilter::FilterDefinition(filter)) => filter, - Some(IncomingFilter::FilterId(filter_id)) => db + Some(IncomingFilter::FilterId(filter_id)) => 
services() .users .get_filter(&sender_user, &filter_id)? .unwrap_or_default(), @@ -221,12 +211,12 @@ async fn sync_helper( // Look for device list updates of this account device_list_updates.extend( - db.users + services().users .keys_changed(&sender_user.to_string(), since, None) .filter_map(|r| r.ok()), ); - let all_joined_rooms = db.rooms.rooms_joined(&sender_user).collect::>(); + let all_joined_rooms = services().rooms.rooms_joined(&sender_user).collect::>(); for room_id in all_joined_rooms { let room_id = room_id?; @@ -234,7 +224,7 @@ async fn sync_helper( // Get and drop the lock to wait for remaining operations to finish // This will make sure the we have all events until next_batch let mutex_insert = Arc::clone( - db.globals + services().globals .roomid_mutex_insert .write() .unwrap() @@ -247,8 +237,8 @@ async fn sync_helper( let timeline_pdus; let limited; - if db.rooms.last_timeline_count(&sender_user, &room_id)? > since { - let mut non_timeline_pdus = db + if services().rooms.last_timeline_count(&sender_user, &room_id)? > since { + let mut non_timeline_pdus = services() .rooms .pdus_until(&sender_user, &room_id, u64::MAX)? .filter_map(|r| { @@ -259,7 +249,7 @@ async fn sync_helper( r.ok() }) .take_while(|(pduid, _)| { - db.rooms + services().rooms .pdu_count(pduid) .map_or(false, |count| count > since) }); @@ -282,7 +272,7 @@ async fn sync_helper( } let send_notification_counts = !timeline_pdus.is_empty() - || db + || services() .rooms .edus .last_privateread_update(&sender_user, &room_id)? @@ -293,24 +283,24 @@ async fn sync_helper( timeline_users.insert(event.sender.as_str().to_owned()); } - db.rooms + services().rooms .lazy_load_confirm_delivery(&sender_user, &sender_device, &room_id, since)?; // Database queries: - let current_shortstatehash = if let Some(s) = db.rooms.current_shortstatehash(&room_id)? { + let current_shortstatehash = if let Some(s) = services().rooms.current_shortstatehash(&room_id)? { s } else { error!("Room {} has no state", room_id); continue; }; - let since_shortstatehash = db.rooms.get_token_shortstatehash(&room_id, since)?; + let since_shortstatehash = services().rooms.get_token_shortstatehash(&room_id, since)?; // Calculates joined_member_count, invited_member_count and heroes let calculate_counts = || { - let joined_member_count = db.rooms.room_joined_count(&room_id)?.unwrap_or(0); - let invited_member_count = db.rooms.room_invited_count(&room_id)?.unwrap_or(0); + let joined_member_count = services().rooms.room_joined_count(&room_id)?.unwrap_or(0); + let invited_member_count = services().rooms.room_invited_count(&room_id)?.unwrap_or(0); // Recalculate heroes (first 5 members) let mut heroes = Vec::new(); @@ -319,7 +309,7 @@ async fn sync_helper( // Go through all PDUs and for each member event, check if the user is still joined or // invited until we have 5 or we reach the end - for hero in db + for hero in services() .rooms .all_pdus(&sender_user, &room_id)? .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus @@ -339,8 +329,8 @@ async fn sync_helper( if matches!( content.membership, MembershipState::Join | MembershipState::Invite - ) && (db.rooms.is_joined(&user_id, &room_id)? - || db.rooms.is_invited(&user_id, &room_id)?) + ) && (services().rooms.is_joined(&user_id, &room_id)? + || services().rooms.is_invited(&user_id, &room_id)?) 
{ Ok::<_, Error>(Some(state_key.clone())) } else { @@ -381,17 +371,17 @@ async fn sync_helper( let (joined_member_count, invited_member_count, heroes) = calculate_counts()?; - let current_state_ids = db.rooms.state_full_ids(current_shortstatehash).await?; + let current_state_ids = services().rooms.state_full_ids(current_shortstatehash).await?; let mut state_events = Vec::new(); let mut lazy_loaded = HashSet::new(); let mut i = 0; for (shortstatekey, id) in current_state_ids { - let (event_type, state_key) = db.rooms.get_statekey_from_short(shortstatekey)?; + let (event_type, state_key) = services().rooms.get_statekey_from_short(shortstatekey)?; if event_type != StateEventType::RoomMember { - let pdu = match db.rooms.get_pdu(&id)? { + let pdu = match services().rooms.get_pdu(&id)? { Some(pdu) => pdu, None => { error!("Pdu in state not found: {}", id); @@ -408,7 +398,7 @@ async fn sync_helper( || body.full_state || timeline_users.contains(&state_key) { - let pdu = match db.rooms.get_pdu(&id)? { + let pdu = match services().rooms.get_pdu(&id)? { Some(pdu) => pdu, None => { error!("Pdu in state not found: {}", id); @@ -430,12 +420,12 @@ async fn sync_helper( } // Reset lazy loading because this is an initial sync - db.rooms + services().rooms .lazy_load_reset(&sender_user, &sender_device, &room_id)?; // The state_events above should contain all timeline_users, let's mark them as lazy // loaded. - db.rooms.lazy_load_mark_sent( + services().rooms.lazy_load_mark_sent( &sender_user, &sender_device, &room_id, @@ -457,7 +447,7 @@ async fn sync_helper( // Incremental /sync let since_shortstatehash = since_shortstatehash.unwrap(); - let since_sender_member: Option = db + let since_sender_member: Option = services() .rooms .state_get( since_shortstatehash, @@ -477,12 +467,12 @@ async fn sync_helper( let mut lazy_loaded = HashSet::new(); if since_shortstatehash != current_shortstatehash { - let current_state_ids = db.rooms.state_full_ids(current_shortstatehash).await?; - let since_state_ids = db.rooms.state_full_ids(since_shortstatehash).await?; + let current_state_ids = services().rooms.state_full_ids(current_shortstatehash).await?; + let since_state_ids = services().rooms.state_full_ids(since_shortstatehash).await?; for (key, id) in current_state_ids { if body.full_state || since_state_ids.get(&key) != Some(&id) { - let pdu = match db.rooms.get_pdu(&id)? { + let pdu = match services().rooms.get_pdu(&id)? { Some(pdu) => pdu, None => { error!("Pdu in state not found: {}", id); @@ -515,14 +505,14 @@ async fn sync_helper( continue; } - if !db.rooms.lazy_load_was_sent_before( + if !services().rooms.lazy_load_was_sent_before( &sender_user, &sender_device, &room_id, &event.sender, )? || lazy_load_send_redundant { - if let Some(member_event) = db.rooms.room_state_get( + if let Some(member_event) = services().rooms.room_state_get( &room_id, &StateEventType::RoomMember, event.sender.as_str(), @@ -533,7 +523,7 @@ async fn sync_helper( } } - db.rooms.lazy_load_mark_sent( + services().rooms.lazy_load_mark_sent( &sender_user, &sender_device, &room_id, @@ -541,13 +531,13 @@ async fn sync_helper( next_batch, ); - let encrypted_room = db + let encrypted_room = services() .rooms .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")? 
.is_some(); let since_encryption = - db.rooms + services().rooms .state_get(since_shortstatehash, &StateEventType::RoomEncryption, "")?; // Calculations: @@ -580,7 +570,7 @@ async fn sync_helper( match new_membership { MembershipState::Join => { // A new user joined an encrypted room - if !share_encrypted_room(&db, &sender_user, &user_id, &room_id)? { + if !share_encrypted_room(&sender_user, &user_id, &room_id)? { device_list_updates.insert(user_id); } } @@ -597,7 +587,7 @@ async fn sync_helper( if joined_since_last_sync && encrypted_room || new_encrypted_room { // If the user is in a new encrypted room, give them all joined users device_list_updates.extend( - db.rooms + services().rooms .room_members(&room_id) .flatten() .filter(|user_id| { @@ -606,7 +596,7 @@ async fn sync_helper( }) .filter(|user_id| { // Only send keys if the sender doesn't share an encrypted room with the target already - !share_encrypted_room(&db, &sender_user, user_id, &room_id) + !share_encrypted_room(&sender_user, user_id, &room_id) .unwrap_or(false) }), ); @@ -629,14 +619,14 @@ async fn sync_helper( // Look for device list updates in this room device_list_updates.extend( - db.users + services().users .keys_changed(&room_id.to_string(), since, None) .filter_map(|r| r.ok()), ); let notification_count = if send_notification_counts { Some( - db.rooms + services().rooms .notification_count(&sender_user, &room_id)? .try_into() .expect("notification count can't go that high"), @@ -647,7 +637,7 @@ async fn sync_helper( let highlight_count = if send_notification_counts { Some( - db.rooms + services().rooms .highlight_count(&sender_user, &room_id)? .try_into() .expect("highlight count can't go that high"), @@ -659,7 +649,7 @@ async fn sync_helper( let prev_batch = timeline_pdus .first() .map_or(Ok::<_, Error>(None), |(pdu_id, _)| { - Ok(Some(db.rooms.pdu_count(pdu_id)?.to_string())) + Ok(Some(services().rooms.pdu_count(pdu_id)?.to_string())) })?; let room_events: Vec<_> = timeline_pdus @@ -667,7 +657,7 @@ async fn sync_helper( .map(|(_, pdu)| pdu.to_sync_room_event()) .collect(); - let mut edus: Vec<_> = db + let mut edus: Vec<_> = services() .rooms .edus .readreceipts_since(&room_id, since) @@ -675,10 +665,10 @@ async fn sync_helper( .map(|(_, _, v)| v) .collect(); - if db.rooms.edus.last_typing_update(&room_id, &db.globals)? > since { + if services().rooms.edus.last_typing_update(&room_id, &services().globals)? > since { edus.push( serde_json::from_str( - &serde_json::to_string(&db.rooms.edus.typings_all(&room_id)?) + &serde_json::to_string(&services().rooms.edus.typings_all(&room_id)?) .expect("event is valid, we just created it"), ) .expect("event is valid, we just created it"), @@ -686,12 +676,12 @@ async fn sync_helper( } // Save the state after this sync so we can send the correct state diff next sync - db.rooms + services().rooms .associate_token_shortstatehash(&room_id, next_batch, current_shortstatehash)?; let joined_room = JoinedRoom { account_data: RoomAccountData { - events: db + events: services() .account_data .changes_since(Some(&room_id), &sender_user, since)? .into_iter() @@ -731,9 +721,9 @@ async fn sync_helper( // Take presence updates from this room for (user_id, presence) in - db.rooms + services().rooms .edus - .presence_since(&room_id, since, &db.rooms, &db.globals)? + .presence_since(&room_id, since)? 
{ match presence_updates.entry(user_id) { Entry::Vacant(v) => { @@ -765,14 +755,14 @@ async fn sync_helper( } let mut left_rooms = BTreeMap::new(); - let all_left_rooms: Vec<_> = db.rooms.rooms_left(&sender_user).collect(); + let all_left_rooms: Vec<_> = services().rooms.rooms_left(&sender_user).collect(); for result in all_left_rooms { let (room_id, left_state_events) = result?; { // Get and drop the lock to wait for remaining operations to finish let mutex_insert = Arc::clone( - db.globals + services().globals .roomid_mutex_insert .write() .unwrap() @@ -783,7 +773,7 @@ async fn sync_helper( drop(insert_lock); } - let left_count = db.rooms.get_left_count(&room_id, &sender_user)?; + let left_count = services().rooms.get_left_count(&room_id, &sender_user)?; // Left before last sync if Some(since) >= left_count { @@ -807,14 +797,14 @@ async fn sync_helper( } let mut invited_rooms = BTreeMap::new(); - let all_invited_rooms: Vec<_> = db.rooms.rooms_invited(&sender_user).collect(); + let all_invited_rooms: Vec<_> = services().rooms.rooms_invited(&sender_user).collect(); for result in all_invited_rooms { let (room_id, invite_state_events) = result?; { // Get and drop the lock to wait for remaining operations to finish let mutex_insert = Arc::clone( - db.globals + services().globals .roomid_mutex_insert .write() .unwrap() @@ -825,7 +815,7 @@ async fn sync_helper( drop(insert_lock); } - let invite_count = db.rooms.get_invite_count(&room_id, &sender_user)?; + let invite_count = services().rooms.get_invite_count(&room_id, &sender_user)?; // Invited before last sync if Some(since) >= invite_count { @@ -843,13 +833,13 @@ async fn sync_helper( } for user_id in left_encrypted_users { - let still_share_encrypted_room = db + let still_share_encrypted_room = services() .rooms .get_shared_rooms(vec![sender_user.clone(), user_id.clone()])? .filter_map(|r| r.ok()) .filter_map(|other_room_id| { Some( - db.rooms + services().rooms .room_state_get(&other_room_id, &StateEventType::RoomEncryption, "") .ok()? .is_some(), @@ -864,7 +854,7 @@ async fn sync_helper( } // Remove all to-device events the device received *last time* - db.users + services().users .remove_to_device_events(&sender_user, &sender_device, since)?; let response = sync_events::v3::Response { @@ -882,7 +872,7 @@ async fn sync_helper( .collect(), }, account_data: GlobalAccountData { - events: db + events: services() .account_data .changes_since(None, &sender_user, since)? .into_iter() @@ -897,9 +887,9 @@ async fn sync_helper( changed: device_list_updates.into_iter().collect(), left: device_list_left.into_iter().collect(), }, - device_one_time_keys_count: db.users.count_one_time_keys(&sender_user, &sender_device)?, + device_one_time_keys_count: services().users.count_one_time_keys(&sender_user, &sender_device)?, to_device: ToDevice { - events: db + events: services() .users .get_to_device_events(&sender_user, &sender_device)?, }, @@ -928,21 +918,19 @@ async fn sync_helper( } } -#[tracing::instrument(skip(db))] fn share_encrypted_room( - db: &Database, sender_user: &UserId, user_id: &UserId, ignore_room: &RoomId, ) -> Result { - Ok(db + Ok(services() .rooms .get_shared_rooms(vec![sender_user.to_owned(), user_id.to_owned()])? .filter_map(|r| r.ok()) .filter(|room_id| room_id != ignore_room) .filter_map(|other_room_id| { Some( - db.rooms + services().rooms .room_state_get(&other_room_id, &StateEventType::RoomEncryption, "") .ok()? 
.is_some(), diff --git a/src/api/client_server/tag.rs b/src/api/client_server/tag.rs index 98d895cd..bbea2d58 100644 --- a/src/api/client_server/tag.rs +++ b/src/api/client_server/tag.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, Result, Ruma}; +use crate::{Result, Ruma, services}; use ruma::{ api::client::tag::{create_tag, delete_tag, get_tags}, events::{ @@ -14,12 +14,11 @@ use std::collections::BTreeMap; /// /// - Inserts the tag into the tag event of the room account data. pub async fn update_tag_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let mut tags_event = db + let mut tags_event = services() .account_data .get( Some(&body.room_id), @@ -36,16 +35,13 @@ pub async fn update_tag_route( .tags .insert(body.tag.clone().into(), body.tag_info.clone()); - db.account_data.update( + services().account_data.update( Some(&body.room_id), sender_user, RoomAccountDataEventType::Tag, &tags_event, - &db.globals, )?; - db.flush()?; - Ok(create_tag::v3::Response {}) } @@ -55,12 +51,11 @@ pub async fn update_tag_route( /// /// - Removes the tag from the tag event of the room account data. pub async fn delete_tag_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let mut tags_event = db + let mut tags_event = services() .account_data .get( Some(&body.room_id), @@ -74,16 +69,13 @@ pub async fn delete_tag_route( }); tags_event.content.tags.remove(&body.tag.clone().into()); - db.account_data.update( + services().account_data.update( Some(&body.room_id), sender_user, RoomAccountDataEventType::Tag, &tags_event, - &db.globals, )?; - db.flush()?; - Ok(delete_tag::v3::Response {}) } @@ -93,13 +85,12 @@ pub async fn delete_tag_route( /// /// - Gets the tag event of the room account data. pub async fn get_tags_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); Ok(get_tags::v3::Response { - tags: db + tags: services() .account_data .get( Some(&body.room_id), diff --git a/src/api/client_server/to_device.rs b/src/api/client_server/to_device.rs index 51441dd4..3a2f6c09 100644 --- a/src/api/client_server/to_device.rs +++ b/src/api/client_server/to_device.rs @@ -1,7 +1,7 @@ use ruma::events::ToDeviceEventType; use std::collections::BTreeMap; -use crate::{database::DatabaseGuard, Error, Result, Ruma}; +use crate::{Error, Result, Ruma, services}; use ruma::{ api::{ client::{error::ErrorKind, to_device::send_event_to_device}, @@ -14,14 +14,13 @@ use ruma::{ /// /// Send a to-device event to a set of client devices. pub async fn send_event_to_device_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_deref(); // Check if this is a new transaction id - if db + if services() .transaction_ids .existing_txnid(sender_user, sender_device, &body.txn_id)? 
.is_some() @@ -31,13 +30,13 @@ pub async fn send_event_to_device_route( for (target_user_id, map) in &body.messages { for (target_device_id_maybe, event) in map { - if target_user_id.server_name() != db.globals.server_name() { + if target_user_id.server_name() != services().globals.server_name() { let mut map = BTreeMap::new(); map.insert(target_device_id_maybe.clone(), event.clone()); let mut messages = BTreeMap::new(); messages.insert(target_user_id.clone(), map); - db.sending.send_reliable_edu( + services().sending.send_reliable_edu( target_user_id.server_name(), serde_json::to_vec(&federation::transactions::edu::Edu::DirectToDevice( DirectDeviceContent { @@ -48,14 +47,14 @@ pub async fn send_event_to_device_route( }, )) .expect("DirectToDevice EDU can be serialized"), - db.globals.next_count()?, + services().globals.next_count()?, )?; continue; } match target_device_id_maybe { - DeviceIdOrAllDevices::DeviceId(target_device_id) => db.users.add_to_device_event( + DeviceIdOrAllDevices::DeviceId(target_device_id) => services().users.add_to_device_event( sender_user, target_user_id, &target_device_id, @@ -63,12 +62,11 @@ pub async fn send_event_to_device_route( event.deserialize_as().map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid") })?, - &db.globals, )?, DeviceIdOrAllDevices::AllDevices => { - for target_device_id in db.users.all_device_ids(target_user_id) { - db.users.add_to_device_event( + for target_device_id in services().users.all_device_ids(target_user_id) { + services().users.add_to_device_event( sender_user, target_user_id, &target_device_id?, @@ -76,7 +74,6 @@ pub async fn send_event_to_device_route( event.deserialize_as().map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid") })?, - &db.globals, )?; } } @@ -85,10 +82,8 @@ pub async fn send_event_to_device_route( } // Save transaction id with empty data - db.transaction_ids + services().transaction_ids .add_txnid(sender_user, sender_device, &body.txn_id, &[])?; - db.flush()?; - Ok(send_event_to_device::v3::Response {}) } diff --git a/src/api/client_server/typing.rs b/src/api/client_server/typing.rs index cac5a5fd..afd5d6b3 100644 --- a/src/api/client_server/typing.rs +++ b/src/api/client_server/typing.rs @@ -1,18 +1,17 @@ -use crate::{database::DatabaseGuard, utils, Error, Result, Ruma}; +use crate::{utils, Error, Result, Ruma, services}; use ruma::api::client::{error::ErrorKind, typing::create_typing_event}; /// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}` /// /// Sets the typing state of the sender user. pub async fn create_typing_event_route( - db: DatabaseGuard, body: Ruma, ) -> Result { use create_typing_event::v3::Typing; let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !db.rooms.is_joined(sender_user, &body.room_id)? { + if !services().rooms.is_joined(sender_user, &body.room_id)? 
{ return Err(Error::BadRequest( ErrorKind::Forbidden, "You are not in this room.", @@ -20,16 +19,15 @@ pub async fn create_typing_event_route( } if let Typing::Yes(duration) = body.state { - db.rooms.edus.typing_add( + services().rooms.edus.typing_add( sender_user, &body.room_id, duration.as_millis() as u64 + utils::millis_since_unix_epoch(), - &db.globals, )?; } else { - db.rooms + services().rooms .edus - .typing_remove(sender_user, &body.room_id, &db.globals)?; + .typing_remove(sender_user, &body.room_id)?; } Ok(create_typing_event::v3::Response {}) diff --git a/src/api/client_server/user_directory.rs b/src/api/client_server/user_directory.rs index 349c1399..60b4e2fa 100644 --- a/src/api/client_server/user_directory.rs +++ b/src/api/client_server/user_directory.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, Result, Ruma}; +use crate::{Result, Ruma, services}; use ruma::{ api::client::user_directory::search_users, events::{ @@ -14,20 +14,19 @@ use ruma::{ /// - Hides any local users that aren't in any public rooms (i.e. those that have the join rule set to public) /// and don't share a room with the sender pub async fn search_users_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let limit = u64::from(body.limit) as usize; - let mut users = db.users.iter().filter_map(|user_id| { + let mut users = services().users.iter().filter_map(|user_id| { // Filter out buggy users (they should not exist, but you never know...) let user_id = user_id.ok()?; let user = search_users::v3::User { user_id: user_id.clone(), - display_name: db.users.displayname(&user_id).ok()?, - avatar_url: db.users.avatar_url(&user_id).ok()?, + display_name: services().users.displayname(&user_id).ok()?, + avatar_url: services().users.avatar_url(&user_id).ok()?, }; let user_id_matches = user @@ -50,11 +49,11 @@ pub async fn search_users_route( } let user_is_in_public_rooms = - db.rooms + services().rooms .rooms_joined(&user_id) .filter_map(|r| r.ok()) .any(|room| { - db.rooms + services().rooms .room_state_get(&room, &StateEventType::RoomJoinRules, "") .map_or(false, |event| { event.map_or(false, |event| { @@ -70,7 +69,7 @@ pub async fn search_users_route( return Some(user); } - let user_is_in_shared_rooms = db + let user_is_in_shared_rooms = services() .rooms .get_shared_rooms(vec![sender_user.clone(), user_id.clone()]) .ok()? diff --git a/src/api/client_server/voip.rs b/src/api/client_server/voip.rs index 7e9de31e..2a804f97 100644 --- a/src/api/client_server/voip.rs +++ b/src/api/client_server/voip.rs @@ -1,4 +1,4 @@ -use crate::{database::DatabaseGuard, Result, Ruma}; +use crate::{Result, Ruma, services}; use hmac::{Hmac, Mac, NewMac}; use ruma::{api::client::voip::get_turn_server_info, SecondsSinceUnixEpoch}; use sha1::Sha1; @@ -10,16 +10,15 @@ type HmacSha1 = Hmac; /// /// TODO: Returns information about the recommended turn server. 
pub async fn turn_server_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let turn_secret = db.globals.turn_secret(); + let turn_secret = services().globals.turn_secret(); let (username, password) = if !turn_secret.is_empty() { let expiry = SecondsSinceUnixEpoch::from_system_time( - SystemTime::now() + Duration::from_secs(db.globals.turn_ttl()), + SystemTime::now() + Duration::from_secs(services().globals.turn_ttl()), ) .expect("time is valid"); @@ -34,15 +33,15 @@ pub async fn turn_server_route( (username, password) } else { ( - db.globals.turn_username().clone(), - db.globals.turn_password().clone(), + services().globals.turn_username().clone(), + services().globals.turn_password().clone(), ) }; Ok(get_turn_server_info::v3::Response { username, password, - uris: db.globals.turn_uris().to_vec(), - ttl: Duration::from_secs(db.globals.turn_ttl()), + uris: services().globals.turn_uris().to_vec(), + ttl: Duration::from_secs(services().globals.turn_ttl()), }) } diff --git a/src/api/mod.rs b/src/api/mod.rs new file mode 100644 index 00000000..68589be7 --- /dev/null +++ b/src/api/mod.rs @@ -0,0 +1,4 @@ +pub mod client_server; +pub mod server_server; +pub mod appservice_server; +pub mod ruma_wrapper; diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index 45e9d9a8..babf2a74 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -24,7 +24,7 @@ use serde::Deserialize; use tracing::{debug, error, warn}; use super::{Ruma, RumaResponse}; -use crate::{database::DatabaseGuard, server_server, Error, Result}; +use crate::{Error, Result, api::server_server, services}; #[async_trait] impl FromRequest for Ruma @@ -44,7 +44,6 @@ where } let metadata = T::METADATA; - let db = DatabaseGuard::from_request(req).await?; let auth_header = Option::>>::from_request(req).await?; let path_params = Path::>::from_request(req).await?; @@ -71,7 +70,7 @@ where let mut json_body = serde_json::from_slice::(&body).ok(); - let appservices = db.appservice.all().unwrap(); + let appservices = services().appservice.all().unwrap(); let appservice_registration = appservices.iter().find(|(_id, registration)| { registration .get("as_token") @@ -91,14 +90,14 @@ where .unwrap() .as_str() .unwrap(), - db.globals.server_name(), + services().globals.server_name(), ) .unwrap() }, |s| UserId::parse(s).unwrap(), ); - if !db.users.exists(&user_id).unwrap() { + if !services().users.exists(&user_id).unwrap() { return Err(Error::BadRequest( ErrorKind::Forbidden, "User does not exist.", @@ -124,7 +123,7 @@ where } }; - match db.users.find_from_token(token).unwrap() { + match services().users.find_from_token(token).unwrap() { None => { return Err(Error::BadRequest( ErrorKind::UnknownToken { soft_logout: false }, @@ -185,7 +184,7 @@ where ( "destination".to_owned(), CanonicalJsonValue::String( - db.globals.server_name().as_str().to_owned(), + services().globals.server_name().as_str().to_owned(), ), ), ( @@ -199,7 +198,6 @@ where }; let keys_result = server_server::fetch_signing_keys( - &db, &x_matrix.origin, vec![x_matrix.key.to_owned()], ) @@ -251,7 +249,7 @@ where if let Some(CanonicalJsonValue::Object(json_body)) = &mut json_body { let user_id = sender_user.clone().unwrap_or_else(|| { - UserId::parse_with_server_name("", db.globals.server_name()) + UserId::parse_with_server_name("", services().globals.server_name()) .expect("we know this is valid") }); @@ -261,7 +259,7 @@ where .and_then(|auth| auth.get("session")) 
.and_then(|session| session.as_str()) .and_then(|session| { - db.uiaa.get_uiaa_request( + services().uiaa.get_uiaa_request( &user_id, &sender_device.clone().unwrap_or_else(|| "".into()), session, diff --git a/src/api/server_server.rs b/src/api/server_server.rs index f60f735a..776777d1 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -1,8 +1,6 @@ use crate::{ - client_server::{self, claim_keys_helper, get_keys_helper}, - database::{rooms::CompressedStateEvent, DatabaseGuard}, - pdu::EventHash, - utils, Database, Error, PduEvent, Result, Ruma, + api::client_server::{self, claim_keys_helper, get_keys_helper}, + utils, Error, PduEvent, Result, Ruma, services, service::pdu::{gen_event_id_canonical_json, PduBuilder}, }; use axum::{response::IntoResponse, Json}; use futures_util::{stream::FuturesUnordered, StreamExt}; @@ -126,22 +124,21 @@ impl FedDest { } } -#[tracing::instrument(skip(globals, request))] +#[tracing::instrument(skip(request))] pub(crate) async fn send_request( - globals: &crate::database::globals::Globals, destination: &ServerName, request: T, ) -> Result where T: Debug, { - if !globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } let mut write_destination_to_cache = false; - let cached_result = globals + let cached_result = services().globals .actual_destination_cache .read() .unwrap() @@ -153,7 +150,7 @@ where } else { write_destination_to_cache = true; - let result = find_actual_destination(globals, destination).await; + let result = find_actual_destination(destination).await; (result.0, result.1.into_uri_string()) }; @@ -194,15 +191,15 @@ where .to_string() .into(), ); - request_map.insert("origin".to_owned(), globals.server_name().as_str().into()); + request_map.insert("origin".to_owned(), services().globals.server_name().as_str().into()); request_map.insert("destination".to_owned(), destination.as_str().into()); let mut request_json = serde_json::from_value(request_map.into()).expect("valid JSON is valid BTreeMap"); ruma::signatures::sign_json( - globals.server_name().as_str(), - globals.keypair(), + services().globals.server_name().as_str(), + services().globals.keypair(), &mut request_json, ) .expect("our request json is what ruma expects"); @@ -227,7 +224,7 @@ where AUTHORIZATION, HeaderValue::from_str(&format!( "X-Matrix origin={},key=\"{}\",sig=\"{}\"", - globals.server_name(), + services().globals.server_name(), s.0, s.1 )) @@ -241,7 +238,7 @@ where let url = reqwest_request.url().clone(); - let response = globals.federation_client().execute(reqwest_request).await; + let response = services().globals.federation_client().execute(reqwest_request).await; match response { Ok(mut response) => { @@ -281,7 +278,7 @@ where if status == 200 { let response = T::IncomingResponse::try_from_http_response(http_response); if response.is_ok() && write_destination_to_cache { - globals.actual_destination_cache.write().unwrap().insert( + services().globals.actual_destination_cache.write().unwrap().insert( Box::::from(destination), (actual_destination, host), ); @@ -332,9 +329,7 @@ fn add_port_to_hostname(destination_str: &str) -> FedDest { /// Returns: actual_destination, host header /// Implemented according to the specification at https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names /// Numbers in comments below refer to bullet points in linked section of specification -#[tracing::instrument(skip(globals))] async fn find_actual_destination( - globals: 
&crate::database::globals::Globals, destination: &'_ ServerName, ) -> (FedDest, FedDest) { let destination_str = destination.as_str().to_owned(); @@ -350,7 +345,7 @@ async fn find_actual_destination( let (host, port) = destination_str.split_at(pos); FedDest::Named(host.to_owned(), port.to_owned()) } else { - match request_well_known(globals, destination.as_str()).await { + match request_well_known(destination.as_str()).await { // 3: A .well-known file is available Some(delegated_hostname) => { hostname = add_port_to_hostname(&delegated_hostname).into_uri_string(); @@ -364,17 +359,17 @@ async fn find_actual_destination( } else { // Delegated hostname has no port in this branch if let Some(hostname_override) = - query_srv_record(globals, &delegated_hostname).await + query_srv_record(&delegated_hostname).await { // 3.3: SRV lookup successful let force_port = hostname_override.port(); - if let Ok(override_ip) = globals + if let Ok(override_ip) = services().globals .dns_resolver() .lookup_ip(hostname_override.hostname()) .await { - globals.tls_name_override.write().unwrap().insert( + services().globals.tls_name_override.write().unwrap().insert( delegated_hostname.clone(), ( override_ip.iter().collect(), @@ -400,17 +395,17 @@ async fn find_actual_destination( } // 4: No .well-known or an error occured None => { - match query_srv_record(globals, &destination_str).await { + match query_srv_record(&destination_str).await { // 4: SRV record found Some(hostname_override) => { let force_port = hostname_override.port(); - if let Ok(override_ip) = globals + if let Ok(override_ip) = services().globals .dns_resolver() .lookup_ip(hostname_override.hostname()) .await { - globals.tls_name_override.write().unwrap().insert( + services().globals.tls_name_override.write().unwrap().insert( hostname.clone(), (override_ip.iter().collect(), force_port.unwrap_or(8448)), ); @@ -448,12 +443,10 @@ async fn find_actual_destination( (actual_destination, hostname) } -#[tracing::instrument(skip(globals))] async fn query_srv_record( - globals: &crate::database::globals::Globals, hostname: &'_ str, ) -> Option { - if let Ok(Some(host_port)) = globals + if let Ok(Some(host_port)) = services().globals .dns_resolver() .srv_lookup(format!("_matrix._tcp.{}", hostname)) .await @@ -472,13 +465,11 @@ async fn query_srv_record( } } -#[tracing::instrument(skip(globals))] async fn request_well_known( - globals: &crate::database::globals::Globals, destination: &str, ) -> Option { let body: serde_json::Value = serde_json::from_str( - &globals + &services().globals .default_client() .get(&format!( "https://{}/.well-known/matrix/server", @@ -499,10 +490,9 @@ async fn request_well_known( /// /// Get version information on this server. pub async fn get_server_version_route( - db: DatabaseGuard, _body: Ruma, ) -> Result { - if !db.globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -521,24 +511,24 @@ pub async fn get_server_version_route( /// - Matrix does not support invalidating public keys, so the key returned by this will be valid /// forever. 
// Response type for this endpoint is Json because we need to calculate a signature for the response -pub async fn get_server_keys_route(db: DatabaseGuard) -> Result { - if !db.globals.allow_federation() { +pub async fn get_server_keys_route() -> Result { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } let mut verify_keys: BTreeMap, VerifyKey> = BTreeMap::new(); verify_keys.insert( - format!("ed25519:{}", db.globals.keypair().version()) + format!("ed25519:{}", services().globals.keypair().version()) .try_into() .expect("found invalid server signing keys in DB"), VerifyKey { - key: Base64::new(db.globals.keypair().public_key().to_vec()), + key: Base64::new(services().globals.keypair().public_key().to_vec()), }, ); let mut response = serde_json::from_slice( get_server_keys::v2::Response { server_key: Raw::new(&ServerSigningKeys { - server_name: db.globals.server_name().to_owned(), + server_name: services().globals.server_name().to_owned(), verify_keys, old_verify_keys: BTreeMap::new(), signatures: BTreeMap::new(), @@ -556,8 +546,8 @@ pub async fn get_server_keys_route(db: DatabaseGuard) -> Result Result impl IntoResponse { - get_server_keys_route(db).await +pub async fn get_server_keys_deprecated_route() -> impl IntoResponse { + get_server_keys_route().await } /// # `POST /_matrix/federation/v1/publicRooms` /// /// Lists the public rooms on this server. pub async fn get_public_rooms_filtered_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if !db.globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } let response = client_server::get_public_rooms_filtered_helper( - &db, None, body.limit, body.since.as_deref(), @@ -608,15 +596,13 @@ pub async fn get_public_rooms_filtered_route( /// /// Lists the public rooms on this server. pub async fn get_public_rooms_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if !db.globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } let response = client_server::get_public_rooms_filtered_helper( - &db, None, body.limit, body.since.as_deref(), @@ -637,10 +623,9 @@ pub async fn get_public_rooms_route( /// /// Push EDUs and PDUs to this server. 
pub async fn send_transaction_message_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if !db.globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -663,7 +648,7 @@ pub async fn send_transaction_message_route( for pdu in &body.pdus { // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu, &db) { + let (event_id, value) = match gen_event_id_canonical_json(pdu) { Ok(t) => t, Err(_) => { // Event could not be converted to canonical json @@ -684,10 +669,10 @@ pub async fn send_transaction_message_route( } }; - acl_check(&sender_servername, &room_id, &db)?; + acl_check(&sender_servername, &room_id)?; let mutex = Arc::clone( - db.globals + services().globals .roomid_mutex_federation .write() .unwrap() @@ -698,13 +683,12 @@ pub async fn send_transaction_message_route( let start_time = Instant::now(); resolved_map.insert( event_id.clone(), - handle_incoming_pdu( + services().rooms.event_handler.handle_incoming_pdu( &sender_servername, &event_id, &room_id, value, true, - &db, &pub_key_map, ) .await @@ -743,7 +727,7 @@ pub async fn send_transaction_message_route( .event_ids .iter() .filter_map(|id| { - db.rooms.get_pdu_count(id).ok().flatten().map(|r| (id, r)) + services().rooms.get_pdu_count(id).ok().flatten().map(|r| (id, r)) }) .max_by_key(|(_, count)| *count) { @@ -760,11 +744,10 @@ pub async fn send_transaction_message_route( content: ReceiptEventContent(receipt_content), room_id: room_id.clone(), }; - db.rooms.edus.readreceipt_update( + services().rooms.edus.readreceipt_update( &user_id, &room_id, event, - &db.globals, )?; } else { // TODO fetch missing events @@ -774,26 +757,24 @@ pub async fn send_transaction_message_route( } } Edu::Typing(typing) => { - if db.rooms.is_joined(&typing.user_id, &typing.room_id)? { + if services().rooms.is_joined(&typing.user_id, &typing.room_id)? { if typing.typing { - db.rooms.edus.typing_add( + services().rooms.edus.typing_add( &typing.user_id, &typing.room_id, 3000 + utils::millis_since_unix_epoch(), - &db.globals, )?; } else { - db.rooms.edus.typing_remove( + services().rooms.edus.typing_remove( &typing.user_id, &typing.room_id, - &db.globals, )?; } } } Edu::DeviceListUpdate(DeviceListUpdateContent { user_id, .. }) => { - db.users - .mark_device_key_update(&user_id, &db.rooms, &db.globals)?; + services().users + .mark_device_key_update(&user_id)?; } Edu::DirectToDevice(DirectDeviceContent { sender, @@ -802,7 +783,7 @@ pub async fn send_transaction_message_route( messages, }) => { // Check if this is a new transaction id - if db + if services() .transaction_ids .existing_txnid(&sender, None, &message_id)? .is_some() @@ -814,7 +795,7 @@ pub async fn send_transaction_message_route( for (target_device_id_maybe, event) in map { match target_device_id_maybe { DeviceIdOrAllDevices::DeviceId(target_device_id) => { - db.users.add_to_device_event( + services().users.add_to_device_event( &sender, target_user_id, target_device_id, @@ -825,13 +806,12 @@ pub async fn send_transaction_message_route( "Event is invalid", ) })?, - &db.globals, )? 
} DeviceIdOrAllDevices::AllDevices => { - for target_device_id in db.users.all_device_ids(target_user_id) { - db.users.add_to_device_event( + for target_device_id in services().users.all_device_ids(target_user_id) { + services().users.add_to_device_event( &sender, target_user_id, &target_device_id?, @@ -842,7 +822,6 @@ pub async fn send_transaction_message_route( "Event is invalid", ) })?, - &db.globals, )?; } } @@ -851,7 +830,7 @@ pub async fn send_transaction_message_route( } // Save transaction id with empty data - db.transaction_ids + services().transaction_ids .add_txnid(&sender, None, &message_id, &[])?; } Edu::SigningKeyUpdate(SigningKeyUpdateContent { @@ -863,13 +842,11 @@ pub async fn send_transaction_message_route( continue; } if let Some(master_key) = master_key { - db.users.add_cross_signing_keys( + services().users.add_cross_signing_keys( &user_id, &master_key, &self_signing_key, &None, - &db.rooms, - &db.globals, )?; } } @@ -877,8 +854,6 @@ pub async fn send_transaction_message_route( } } - db.flush()?; - Ok(send_transaction_message::v1::Response { pdus: resolved_map }) } @@ -886,14 +861,13 @@ pub async fn send_transaction_message_route( /// fetch them from the server and save to our DB. #[tracing::instrument(skip_all)] pub(crate) async fn fetch_signing_keys( - db: &Database, origin: &ServerName, signature_ids: Vec, ) -> Result> { let contains_all_ids = |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); - let permit = db + let permit = services() .globals .servername_ratelimiter .read() @@ -904,7 +878,7 @@ pub(crate) async fn fetch_signing_keys( let permit = match permit { Some(p) => p, None => { - let mut write = db.globals.servername_ratelimiter.write().unwrap(); + let mut write = services().globals.servername_ratelimiter.write().unwrap(); let s = Arc::clone( write .entry(origin.to_owned()) @@ -916,7 +890,7 @@ pub(crate) async fn fetch_signing_keys( } .await; - let back_off = |id| match db + let back_off = |id| match services() .globals .bad_signature_ratelimiter .write() @@ -929,7 +903,7 @@ pub(crate) async fn fetch_signing_keys( hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), }; - if let Some((time, tries)) = db + if let Some((time, tries)) = services() .globals .bad_signature_ratelimiter .read() @@ -950,7 +924,7 @@ pub(crate) async fn fetch_signing_keys( trace!("Loading signing keys for {}", origin); - let mut result: BTreeMap<_, _> = db + let mut result: BTreeMap<_, _> = services() .globals .signing_keys_for(origin)? 
.into_iter() @@ -963,14 +937,14 @@ pub(crate) async fn fetch_signing_keys( debug!("Fetching signing keys for {} over federation", origin); - if let Some(server_key) = db + if let Some(server_key) = services() .sending - .send_federation_request(&db.globals, origin, get_server_keys::v2::Request::new()) + .send_federation_request(origin, get_server_keys::v2::Request::new()) .await .ok() .and_then(|resp| resp.server_key.deserialize().ok()) { - db.globals.add_signing_key(origin, server_key.clone())?; + services().globals.add_signing_key(origin, server_key.clone())?; result.extend( server_key @@ -990,12 +964,11 @@ pub(crate) async fn fetch_signing_keys( } } - for server in db.globals.trusted_servers() { + for server in services().globals.trusted_servers() { debug!("Asking {} for {}'s signing key", server, origin); - if let Some(server_keys) = db + if let Some(server_keys) = services() .sending .send_federation_request( - &db.globals, server, get_remote_server_keys::v2::Request::new( origin, @@ -1018,7 +991,7 @@ pub(crate) async fn fetch_signing_keys( { trace!("Got signing keys: {:?}", server_keys); for k in server_keys { - db.globals.add_signing_key(origin, k.clone())?; + services().globals.add_signing_key(origin, k.clone())?; result.extend( k.verify_keys .into_iter() @@ -1047,11 +1020,10 @@ pub(crate) async fn fetch_signing_keys( )) } -#[tracing::instrument(skip(starting_events, db))] +#[tracing::instrument(skip(starting_events))] pub(crate) async fn get_auth_chain<'a>( room_id: &RoomId, starting_events: Vec>, - db: &'a Database, ) -> Result> + 'a> { const NUM_BUCKETS: usize = 50; @@ -1059,7 +1031,7 @@ pub(crate) async fn get_auth_chain<'a>( let mut i = 0; for id in starting_events { - let short = db.rooms.get_or_create_shorteventid(&id, &db.globals)?; + let short = services().rooms.get_or_create_shorteventid(&id)?; let bucket_id = (short % NUM_BUCKETS as u64) as usize; buckets[bucket_id].insert((short, id.clone())); i += 1; @@ -1078,7 +1050,7 @@ pub(crate) async fn get_auth_chain<'a>( } let chunk_key: Vec = chunk.iter().map(|(short, _)| short).copied().collect(); - if let Some(cached) = db.rooms.get_auth_chain_from_cache(&chunk_key)? { + if let Some(cached) = services().rooms.get_auth_chain_from_cache(&chunk_key)? { hits += 1; full_auth_chain.extend(cached.iter().copied()); continue; @@ -1090,13 +1062,13 @@ pub(crate) async fn get_auth_chain<'a>( let mut misses2 = 0; let mut i = 0; for (sevent_id, event_id) in chunk { - if let Some(cached) = db.rooms.get_auth_chain_from_cache(&[sevent_id])? { + if let Some(cached) = services().rooms.get_auth_chain_from_cache(&[sevent_id])? 
{ hits2 += 1; chunk_cache.extend(cached.iter().copied()); } else { misses2 += 1; - let auth_chain = Arc::new(get_auth_chain_inner(room_id, &event_id, db)?); - db.rooms + let auth_chain = Arc::new(get_auth_chain_inner(room_id, &event_id)?); + services().rooms .cache_auth_chain(vec![sevent_id], Arc::clone(&auth_chain))?; println!( "cache missed event {} with auth chain len {}", @@ -1118,7 +1090,7 @@ pub(crate) async fn get_auth_chain<'a>( misses2 ); let chunk_cache = Arc::new(chunk_cache); - db.rooms + services().rooms .cache_auth_chain(chunk_key, Arc::clone(&chunk_cache))?; full_auth_chain.extend(chunk_cache.iter()); } @@ -1132,28 +1104,27 @@ pub(crate) async fn get_auth_chain<'a>( Ok(full_auth_chain .into_iter() - .filter_map(move |sid| db.rooms.get_eventid_from_short(sid).ok())) + .filter_map(move |sid| services().rooms.get_eventid_from_short(sid).ok())) } -#[tracing::instrument(skip(event_id, db))] +#[tracing::instrument(skip(event_id))] fn get_auth_chain_inner( room_id: &RoomId, event_id: &EventId, - db: &Database, ) -> Result> { let mut todo = vec![Arc::from(event_id)]; let mut found = HashSet::new(); while let Some(event_id) = todo.pop() { - match db.rooms.get_pdu(&event_id) { + match services().rooms.get_pdu(&event_id) { Ok(Some(pdu)) => { if pdu.room_id != room_id { return Err(Error::BadRequest(ErrorKind::Forbidden, "Evil event in db")); } for auth_event in &pdu.auth_events { - let sauthevent = db + let sauthevent = services() .rooms - .get_or_create_shorteventid(auth_event, &db.globals)?; + .get_or_create_shorteventid(auth_event)?; if !found.contains(&sauthevent) { found.insert(sauthevent); @@ -1179,10 +1150,9 @@ fn get_auth_chain_inner( /// /// - Only works if a user of this server is currently invited or joined the room pub async fn get_event_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if !db.globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -1191,7 +1161,7 @@ pub async fn get_event_route( .as_ref() .expect("server is authenticated"); - let event = db + let event = services() .rooms .get_pdu_json(&body.event_id)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; @@ -1204,7 +1174,7 @@ pub async fn get_event_route( let room_id = <&RoomId>::try_from(room_id_str) .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - if !db.rooms.server_in_room(sender_servername, room_id)? { + if !services().rooms.server_in_room(sender_servername, room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "Server is not in room", @@ -1212,7 +1182,7 @@ pub async fn get_event_route( } Ok(get_event::v1::Response { - origin: db.globals.server_name().to_owned(), + origin: services().globals.server_name().to_owned(), origin_server_ts: MilliSecondsSinceUnixEpoch::now(), pdu: PduEvent::convert_to_outgoing_federation_event(event), }) @@ -1222,10 +1192,9 @@ pub async fn get_event_route( /// /// Retrieves events that the sender is missing. pub async fn get_missing_events_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if !db.globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -1234,21 +1203,21 @@ pub async fn get_missing_events_route( .as_ref() .expect("server is authenticated"); - if !db.rooms.server_in_room(sender_servername, &body.room_id)? { + if !services().rooms.server_in_room(sender_servername, &body.room_id)? 
{ return Err(Error::BadRequest( ErrorKind::Forbidden, "Server is not in room", )); } - acl_check(sender_servername, &body.room_id, &db)?; + acl_check(sender_servername, &body.room_id)?; let mut queued_events = body.latest_events.clone(); let mut events = Vec::new(); let mut i = 0; while i < queued_events.len() && events.len() < u64::from(body.limit) as usize { - if let Some(pdu) = db.rooms.get_pdu_json(&queued_events[i])? { + if let Some(pdu) = services().rooms.get_pdu_json(&queued_events[i])? { let room_id_str = pdu .get("room_id") .and_then(|val| val.as_str()) @@ -1295,10 +1264,9 @@ pub async fn get_missing_events_route( /// /// - This does not include the event itself pub async fn get_event_authorization_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if !db.globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -1307,16 +1275,16 @@ pub async fn get_event_authorization_route( .as_ref() .expect("server is authenticated"); - if !db.rooms.server_in_room(sender_servername, &body.room_id)? { + if !services().rooms.server_in_room(sender_servername, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "Server is not in room.", )); } - acl_check(sender_servername, &body.room_id, &db)?; + acl_check(sender_servername, &body.room_id)?; - let event = db + let event = services() .rooms .get_pdu_json(&body.event_id)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; @@ -1329,11 +1297,11 @@ pub async fn get_event_authorization_route( let room_id = <&RoomId>::try_from(room_id_str) .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - let auth_chain_ids = get_auth_chain(room_id, vec![Arc::from(&*body.event_id)], &db).await?; + let auth_chain_ids = get_auth_chain(room_id, vec![Arc::from(&*body.event_id)]).await?; Ok(get_event_authorization::v1::Response { auth_chain: auth_chain_ids - .filter_map(|id| db.rooms.get_pdu_json(&id).ok()?) + .filter_map(|id| services().rooms.get_pdu_json(&id).ok()?) .map(PduEvent::convert_to_outgoing_federation_event) .collect(), }) @@ -1343,10 +1311,9 @@ pub async fn get_event_authorization_route( /// /// Retrieves the current state of the room. pub async fn get_room_state_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if !db.globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -1355,16 +1322,16 @@ pub async fn get_room_state_route( .as_ref() .expect("server is authenticated"); - if !db.rooms.server_in_room(sender_servername, &body.room_id)? { + if !services().rooms.server_in_room(sender_servername, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "Server is not in room.", )); } - acl_check(sender_servername, &body.room_id, &db)?; + acl_check(sender_servername, &body.room_id)?; - let shortstatehash = db + let shortstatehash = services() .rooms .pdu_shortstatehash(&body.event_id)? .ok_or(Error::BadRequest( @@ -1372,25 +1339,25 @@ pub async fn get_room_state_route( "Pdu state not found.", ))?; - let pdus = db + let pdus = services() .rooms .state_full_ids(shortstatehash) .await? 
.into_iter() .map(|(_, id)| { PduEvent::convert_to_outgoing_federation_event( - db.rooms.get_pdu_json(&id).unwrap().unwrap(), + services().rooms.get_pdu_json(&id).unwrap().unwrap(), ) }) .collect(); let auth_chain_ids = - get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db).await?; + get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)]).await?; Ok(get_room_state::v1::Response { auth_chain: auth_chain_ids .map(|id| { - db.rooms.get_pdu_json(&id).map(|maybe_json| { + services().rooms.get_pdu_json(&id).map(|maybe_json| { PduEvent::convert_to_outgoing_federation_event(maybe_json.unwrap()) }) }) @@ -1404,10 +1371,9 @@ pub async fn get_room_state_route( /// /// Retrieves the current state of the room. pub async fn get_room_state_ids_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if !db.globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -1416,16 +1382,16 @@ pub async fn get_room_state_ids_route( .as_ref() .expect("server is authenticated"); - if !db.rooms.server_in_room(sender_servername, &body.room_id)? { + if !services().rooms.server_in_room(sender_servername, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "Server is not in room.", )); } - acl_check(sender_servername, &body.room_id, &db)?; + acl_check(sender_servername, &body.room_id)?; - let shortstatehash = db + let shortstatehash = services() .rooms .pdu_shortstatehash(&body.event_id)? .ok_or(Error::BadRequest( @@ -1433,7 +1399,7 @@ pub async fn get_room_state_ids_route( "Pdu state not found.", ))?; - let pdu_ids = db + let pdu_ids = services() .rooms .state_full_ids(shortstatehash) .await? @@ -1442,7 +1408,7 @@ pub async fn get_room_state_ids_route( .collect(); let auth_chain_ids = - get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db).await?; + get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)]).await?; Ok(get_room_state_ids::v1::Response { auth_chain_ids: auth_chain_ids.map(|id| (*id).to_owned()).collect(), @@ -1454,14 +1420,13 @@ pub async fn get_room_state_ids_route( /// /// Creates a join template. pub async fn create_join_event_template_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if !db.globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } - if !db.rooms.exists(&body.room_id)? { + if !services().rooms.exists(&body.room_id)? 
{ return Err(Error::BadRequest( ErrorKind::NotFound, "Room is unknown to this server.", @@ -1473,11 +1438,21 @@ pub async fn create_join_event_template_route( .as_ref() .expect("server is authenticated"); - acl_check(sender_servername, &body.room_id, &db)?; + acl_check(sender_servername, &body.room_id)?; + + let mutex_state = Arc::clone( + services().globals + .roomid_mutex_state + .write() + .unwrap() + .entry(body.room_id.to_owned()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; // TODO: Conduit does not implement restricted join rules yet, we always reject let join_rules_event = - db.rooms + services().rooms .room_state_get(&body.room_id, &StateEventType::RoomJoinRules, "")?; let join_rules_event_content: Option = join_rules_event @@ -1502,7 +1477,8 @@ pub async fn create_join_event_template_route( } } - if !body.ver.contains(&room_version_id) { + let room_version_id = services().rooms.state.get_room_version(&body.room_id); + if !body.ver.contains(room_version_id) { return Err(Error::BadRequest( ErrorKind::IncompatibleRoomVersion { room_version: room_version_id, @@ -1523,10 +1499,15 @@ pub async fn create_join_event_template_route( }) .expect("member event is valid value"); - let state_key = body.user_id.to_string(); - let kind = StateEventType::RoomMember; + let (pdu, pdu_json) = services().rooms.timeline.create_hash_and_sign_event(PduBuilder { + event_type: RoomEventType::RoomMember, + content, + unsigned: None, + state_key: Some(body.user_id.to_string()), + redacts: None, + }, &body.user_id, &body.room_id, &state_lock); - let (pdu, pdu_json) = create_hash_and_sign_event(); + drop(state_lock); Ok(prepare_join_event::v1::Response { room_version: Some(room_version_id), @@ -1535,26 +1516,25 @@ pub async fn create_join_event_template_route( } async fn create_join_event( - db: &DatabaseGuard, sender_servername: &ServerName, room_id: &RoomId, pdu: &RawJsonValue, ) -> Result { - if !db.globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } - if !db.rooms.exists(room_id)? { + if !services().rooms.exists(room_id)? { return Err(Error::BadRequest( ErrorKind::NotFound, "Room is unknown to this server.", )); } - acl_check(sender_servername, room_id, db)?; + acl_check(sender_servername, room_id)?; // TODO: Conduit does not implement restricted join rules yet, we always reject - let join_rules_event = db + let join_rules_event = services() .rooms .room_state_get(room_id, &StateEventType::RoomJoinRules, "")?; @@ -1581,7 +1561,7 @@ async fn create_join_event( } // We need to return the state prior to joining, let's keep a reference to that here - let shortstatehash = db + let shortstatehash = services() .rooms .current_shortstatehash(room_id)? 
.ok_or(Error::BadRequest( @@ -1593,7 +1573,7 @@ async fn create_join_event( // let mut auth_cache = EventMap::new(); // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu, &db) { + let (event_id, value) = match gen_event_id_canonical_json(pdu) { Ok(t) => t, Err(_) => { // Event could not be converted to canonical json @@ -1614,7 +1594,7 @@ async fn create_join_event( .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; let mutex = Arc::clone( - db.globals + services().globals .roomid_mutex_federation .write() .unwrap() @@ -1622,7 +1602,7 @@ async fn create_join_event( .or_default(), ); let mutex_lock = mutex.lock().await; - let pdu_id = handle_incoming_pdu(&origin, &event_id, room_id, value, true, db, &pub_key_map) + let pdu_id = services().rooms.event_handler.handle_incoming_pdu(&origin, &event_id, room_id, value, true, &pub_key_map) .await .map_err(|e| { warn!("Error while handling incoming send join PDU: {}", e); @@ -1637,32 +1617,29 @@ async fn create_join_event( ))?; drop(mutex_lock); - let state_ids = db.rooms.state_full_ids(shortstatehash).await?; + let state_ids = services().rooms.state_full_ids(shortstatehash).await?; let auth_chain_ids = get_auth_chain( room_id, state_ids.iter().map(|(_, id)| id.clone()).collect(), - db, ) .await?; - let servers = db + let servers = services() .rooms .room_servers(room_id) .filter_map(|r| r.ok()) - .filter(|server| &**server != db.globals.server_name()); - - db.sending.send_pdu(servers, &pdu_id)?; + .filter(|server| &**server != services().globals.server_name()); - db.flush()?; + services().sending.send_pdu(servers, &pdu_id)?; Ok(RoomState { auth_chain: auth_chain_ids - .filter_map(|id| db.rooms.get_pdu_json(&id).ok().flatten()) + .filter_map(|id| services().rooms.get_pdu_json(&id).ok().flatten()) .map(PduEvent::convert_to_outgoing_federation_event) .collect(), state: state_ids .iter() - .filter_map(|(_, id)| db.rooms.get_pdu_json(id).ok().flatten()) + .filter_map(|(_, id)| services().rooms.get_pdu_json(id).ok().flatten()) .map(PduEvent::convert_to_outgoing_federation_event) .collect(), }) @@ -1672,7 +1649,6 @@ async fn create_join_event( /// /// Submits a signed join event. pub async fn create_join_event_v1_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_servername = body @@ -1680,7 +1656,7 @@ pub async fn create_join_event_v1_route( .as_ref() .expect("server is authenticated"); - let room_state = create_join_event(&db, sender_servername, &body.room_id, &body.pdu).await?; + let room_state = create_join_event(sender_servername, &body.room_id, &body.pdu).await?; Ok(create_join_event::v1::Response { room_state }) } @@ -1689,7 +1665,6 @@ pub async fn create_join_event_v1_route( /// /// Submits a signed join event. pub async fn create_join_event_v2_route( - db: DatabaseGuard, body: Ruma, ) -> Result { let sender_servername = body @@ -1697,7 +1672,7 @@ pub async fn create_join_event_v2_route( .as_ref() .expect("server is authenticated"); - let room_state = create_join_event(&db, sender_servername, &body.room_id, &body.pdu).await?; + let room_state = create_join_event(sender_servername, &body.room_id, &body.pdu).await?; Ok(create_join_event::v2::Response { room_state }) } @@ -1706,10 +1681,9 @@ pub async fn create_join_event_v2_route( /// /// Invites a remote user to a room. 
pub async fn create_invite_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if !db.globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -1718,9 +1692,9 @@ pub async fn create_invite_route( .as_ref() .expect("server is authenticated"); - acl_check(sender_servername, &body.room_id, &db)?; + acl_check(sender_servername, &body.room_id)?; - if !db.rooms.is_supported_version(&db, &body.room_version) { + if !services().rooms.is_supported_version(&body.room_version) { return Err(Error::BadRequest( ErrorKind::IncompatibleRoomVersion { room_version: body.room_version.clone(), @@ -1733,8 +1707,8 @@ pub async fn create_invite_route( .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invite event is invalid."))?; ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), + services().globals.server_name().as_str(), + services().globals.keypair(), &mut signed_event, &body.room_version, ) @@ -1793,20 +1767,17 @@ pub async fn create_invite_route( invite_state.push(pdu.to_stripped_state_event()); // If the room already exists, the remote server will notify us about the join via /send - if !db.rooms.exists(&pdu.room_id)? { - db.rooms.update_membership( + if !services().rooms.exists(&pdu.room_id)? { + services().rooms.update_membership( &body.room_id, &invited_user, MembershipState::Invite, &sender, Some(invite_state), - &db, true, )?; } - db.flush()?; - Ok(create_invite::v2::Response { event: PduEvent::convert_to_outgoing_federation_event(signed_event), }) @@ -1816,10 +1787,9 @@ pub async fn create_invite_route( /// /// Gets information on all devices of the user. pub async fn get_devices_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if !db.globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -1830,19 +1800,19 @@ pub async fn get_devices_route( Ok(get_devices::v1::Response { user_id: body.user_id.clone(), - stream_id: db + stream_id: services() .users .get_devicelist_version(&body.user_id)? .unwrap_or(0) .try_into() .expect("version will not grow that large"), - devices: db + devices: services() .users .all_devices_metadata(&body.user_id) .filter_map(|r| r.ok()) .filter_map(|metadata| { Some(UserDevice { - keys: db + keys: services() .users .get_device_keys(&body.user_id, &metadata.device_id) .ok()??, @@ -1851,10 +1821,10 @@ pub async fn get_devices_route( }) }) .collect(), - master_key: db + master_key: services() .users .get_master_key(&body.user_id, |u| u.server_name() == sender_servername)?, - self_signing_key: db + self_signing_key: services() .users .get_self_signing_key(&body.user_id, |u| u.server_name() == sender_servername)?, }) @@ -1864,14 +1834,13 @@ pub async fn get_devices_route( /// /// Resolve a room alias to a room id. pub async fn get_room_information_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if !db.globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } - let room_id = db + let room_id = services() .rooms .id_from_alias(&body.room_alias)? 
.ok_or(Error::BadRequest( @@ -1881,7 +1850,7 @@ pub async fn get_room_information_route( Ok(get_room_information::v1::Response { room_id, - servers: vec![db.globals.server_name().to_owned()], + servers: vec![services().globals.server_name().to_owned()], }) } @@ -1889,10 +1858,9 @@ pub async fn get_room_information_route( /// /// Gets information on a profile. pub async fn get_profile_information_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if !db.globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -1901,17 +1869,17 @@ pub async fn get_profile_information_route( let mut blurhash = None; match &body.field { - Some(ProfileField::DisplayName) => displayname = db.users.displayname(&body.user_id)?, + Some(ProfileField::DisplayName) => displayname = services().users.displayname(&body.user_id)?, Some(ProfileField::AvatarUrl) => { - avatar_url = db.users.avatar_url(&body.user_id)?; - blurhash = db.users.blurhash(&body.user_id)? + avatar_url = services().users.avatar_url(&body.user_id)?; + blurhash = services().users.blurhash(&body.user_id)? } // TODO: what to do with custom Some(_) => {} None => { - displayname = db.users.displayname(&body.user_id)?; - avatar_url = db.users.avatar_url(&body.user_id)?; - blurhash = db.users.blurhash(&body.user_id)?; + displayname = services().users.displayname(&body.user_id)?; + avatar_url = services().users.avatar_url(&body.user_id)?; + blurhash = services().users.blurhash(&body.user_id)?; } } @@ -1926,10 +1894,9 @@ pub async fn get_profile_information_route( /// /// Gets devices and identity keys for the given users. pub async fn get_keys_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if !db.globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } @@ -1937,12 +1904,9 @@ pub async fn get_keys_route( None, &body.device_keys, |u| Some(u.server_name()) == body.sender_servername.as_deref(), - &db, ) .await?; - db.flush()?; - Ok(get_keys::v1::Response { device_keys: result.device_keys, master_keys: result.master_keys, @@ -1954,16 +1918,13 @@ pub async fn get_keys_route( /// /// Claims one-time keys. 
pub async fn claim_keys_route( - db: DatabaseGuard, body: Ruma, ) -> Result { - if !db.globals.allow_federation() { + if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } - let result = claim_keys_helper(&body.one_time_keys, &db).await?; - - db.flush()?; + let result = claim_keys_helper(&body.one_time_keys).await?; Ok(claim_keys::v1::Response { one_time_keys: result.one_time_keys, @@ -1974,7 +1935,6 @@ pub async fn claim_keys_route( pub(crate) async fn fetch_required_signing_keys( event: &BTreeMap, pub_key_map: &RwLock>>, - db: &Database, ) -> Result<()> { let signatures = event .get("signatures") @@ -1996,7 +1956,6 @@ pub(crate) async fn fetch_required_signing_keys( let signature_ids = signature_object.keys().cloned().collect::>(); let fetch_res = fetch_signing_keys( - db, signature_server.as_str().try_into().map_err(|_| { Error::BadServerResponse("Invalid servername in signatures of server response pdu.") })?, @@ -2028,7 +1987,6 @@ fn get_server_keys_from_cache( servers: &mut BTreeMap, BTreeMap, QueryCriteria>>, room_version: &RoomVersionId, pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap>>, - db: &Database, ) -> Result<()> { let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); @@ -2043,7 +2001,7 @@ fn get_server_keys_from_cache( let event_id = <&EventId>::try_from(event_id.as_str()) .expect("ruma's reference hashes are valid event ids"); - if let Some((time, tries)) = db + if let Some((time, tries)) = services() .globals .bad_event_ratelimiter .read() @@ -2092,7 +2050,7 @@ fn get_server_keys_from_cache( trace!("Loading signing keys for {}", origin); - let result: BTreeMap<_, _> = db + let result: BTreeMap<_, _> = services() .globals .signing_keys_for(origin)? .into_iter() @@ -2114,7 +2072,6 @@ pub(crate) async fn fetch_join_signing_keys( event: &create_join_event::v2::Response, room_version: &RoomVersionId, pub_key_map: &RwLock>>, - db: &Database, ) -> Result<()> { let mut servers: BTreeMap, BTreeMap, QueryCriteria>> = BTreeMap::new(); @@ -2127,10 +2084,10 @@ pub(crate) async fn fetch_join_signing_keys( // Try to fetch keys, failure is okay // Servers we couldn't find in the cache will be added to `servers` for pdu in &event.room_state.state { - let _ = get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm, db); + let _ = get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm); } for pdu in &event.room_state.auth_chain { - let _ = get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm, db); + let _ = get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm); } drop(pkm); @@ -2141,12 +2098,11 @@ pub(crate) async fn fetch_join_signing_keys( return Ok(()); } - for server in db.globals.trusted_servers() { + for server in services().globals.trusted_servers() { trace!("Asking batch signing keys from trusted server {}", server); - if let Ok(keys) = db + if let Ok(keys) = services() .sending .send_federation_request( - &db.globals, server, get_remote_server_keys_batch::v2::Request { server_keys: servers.clone(), @@ -2164,7 +2120,7 @@ pub(crate) async fn fetch_join_signing_keys( // TODO: Check signature from trusted server? servers.remove(&k.server_name); - let result = db + let result = services() .globals .add_signing_key(&k.server_name, k.clone())? 
.into_iter() @@ -2184,9 +2140,8 @@ pub(crate) async fn fetch_join_signing_keys( .into_iter() .map(|(server, _)| async move { ( - db.sending + services().sending .send_federation_request( - &db.globals, &server, get_server_keys::v2::Request::new(), ) @@ -2198,7 +2153,7 @@ pub(crate) async fn fetch_join_signing_keys( while let Some(result) = futures.next().await { if let (Ok(get_keys_response), origin) = result { - let result: BTreeMap<_, _> = db + let result: BTreeMap<_, _> = services() .globals .add_signing_key(&origin, get_keys_response.server_key.deserialize().unwrap())? .into_iter() @@ -2216,8 +2171,8 @@ pub(crate) async fn fetch_join_signing_keys( } /// Returns Ok if the acl allows the server -fn acl_check(server_name: &ServerName, room_id: &RoomId, db: &Database) -> Result<()> { - let acl_event = match db +fn acl_check(server_name: &ServerName, room_id: &RoomId) -> Result<()> { + let acl_event = match services() .rooms .room_state_get(room_id, &StateEventType::RoomServerAcl, "")? { diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs index 29325bd6..93660f9f 100644 --- a/src/database/abstraction.rs +++ b/src/database/abstraction.rs @@ -30,7 +30,7 @@ pub trait KeyValueDatabaseEngine: Send + Sync { fn open(config: &Config) -> Result where Self: Sized; - fn open_tree(&self, name: &'static str) -> Result>; + fn open_tree(&self, name: &'static str) -> Result>; fn flush(&self) -> Result<()>; fn cleanup(&self) -> Result<()> { Ok(()) @@ -40,7 +40,7 @@ pub trait KeyValueDatabaseEngine: Send + Sync { } } -pub trait KeyValueTree: Send + Sync { +pub trait KvTree: Send + Sync { fn get(&self, key: &[u8]) -> Result>>; fn insert(&self, key: &[u8], value: &[u8]) -> Result<()>; diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index 2cf9d5ee..1388dc38 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -1,4 +1,4 @@ -use super::{super::Config, watchers::Watchers, DatabaseEngine, Tree}; +use super::{super::Config, watchers::Watchers, KvTree, KeyValueDatabaseEngine}; use crate::{utils, Result}; use std::{ future::Future, @@ -51,7 +51,7 @@ fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> rocksdb::O db_opts } -impl DatabaseEngine for Arc { +impl KeyValueDatabaseEngine for Arc { fn open(config: &Config) -> Result { let cache_capacity_bytes = (config.db_cache_capacity_mb * 1024.0 * 1024.0) as usize; let rocksdb_cache = rocksdb::Cache::new_lru_cache(cache_capacity_bytes).unwrap(); @@ -83,7 +83,7 @@ impl DatabaseEngine for Arc { })) } - fn open_tree(&self, name: &'static str) -> Result> { + fn open_tree(&self, name: &'static str) -> Result> { if !self.old_cfs.contains(&name.to_owned()) { // Create if it didn't exist let _ = self @@ -129,7 +129,7 @@ impl RocksDbEngineTree<'_> { } } -impl Tree for RocksDbEngineTree<'_> { +impl KvTree for RocksDbEngineTree<'_> { fn get(&self, key: &[u8]) -> Result>> { Ok(self.db.rocks.get_cf(&self.cf(), key)?) 
} diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index 7cfa81af..02d4dbd6 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -1,4 +1,4 @@ -use super::{watchers::Watchers, DatabaseEngine, Tree}; +use super::{watchers::Watchers, KeyValueDatabaseEngine, KvTree}; use crate::{database::Config, Result}; use parking_lot::{Mutex, MutexGuard}; use rusqlite::{Connection, DatabaseName::Main, OptionalExtension}; @@ -80,7 +80,7 @@ impl Engine { } } -impl DatabaseEngine for Arc { +impl KeyValueDatabaseEngine for Arc { fn open(config: &Config) -> Result { let path = Path::new(&config.database_path).join("conduit.db"); @@ -105,7 +105,7 @@ impl DatabaseEngine for Arc { Ok(arc) } - fn open_tree(&self, name: &str) -> Result> { + fn open_tree(&self, name: &str) -> Result> { self.write_lock().execute(&format!("CREATE TABLE IF NOT EXISTS {} ( \"key\" BLOB PRIMARY KEY, \"value\" BLOB NOT NULL )", name), [])?; Ok(Arc::new(SqliteTable { @@ -189,7 +189,7 @@ impl SqliteTable { } } -impl Tree for SqliteTable { +impl KvTree for SqliteTable { fn get(&self, key: &[u8]) -> Result>> { self.get_with_guard(self.engine.read_lock(), key) } diff --git a/src/database/key_value/appservice.rs b/src/database/key_value/appservice.rs index 66a2a5c8..eae2cfbc 100644 --- a/src/database/key_value/appservice.rs +++ b/src/database/key_value/appservice.rs @@ -1,6 +1,8 @@ +use crate::{database::KeyValueDatabase, service, utils, Error}; + impl service::appservice::Data for KeyValueDatabase { /// Registers an appservice and returns the ID to the caller - pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result { + fn register_appservice(&self, yaml: serde_yaml::Value) -> Result { // TODO: Rumaify let id = yaml.get("id").unwrap().as_str().unwrap(); self.id_appserviceregistrations.insert( diff --git a/src/database/key_value/mod.rs b/src/database/key_value/mod.rs index 0c09c17e..189571f6 100644 --- a/src/database/key_value/mod.rs +++ b/src/database/key_value/mod.rs @@ -1,13 +1,13 @@ -mod account_data; -mod admin; +//mod account_data; +//mod admin; mod appservice; -mod globals; -mod key_backups; -mod media; -mod pdu; +//mod globals; +//mod key_backups; +//mod media; +//mod pdu; mod pusher; mod rooms; -mod sending; +//mod sending; mod transaction_ids; mod uiaa; mod users; diff --git a/src/database/key_value/pusher.rs b/src/database/key_value/pusher.rs index 94374ab2..b77170db 100644 --- a/src/database/key_value/pusher.rs +++ b/src/database/key_value/pusher.rs @@ -1,3 +1,7 @@ +use ruma::{UserId, api::client::push::{set_pusher, get_pushers}}; + +use crate::{service, database::KeyValueDatabase, Error}; + impl service::pusher::Data for KeyValueDatabase { fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> { let mut key = sender.as_bytes().to_vec(); diff --git a/src/database/key_value/rooms/alias.rs b/src/database/key_value/rooms/alias.rs index b00eb3b1..a9236a75 100644 --- a/src/database/key_value/rooms/alias.rs +++ b/src/database/key_value/rooms/alias.rs @@ -1,4 +1,8 @@ -impl service::room::alias::Data for KeyValueDatabase { +use ruma::{RoomId, RoomAliasId, api::client::error::ErrorKind}; + +use crate::{service, database::KeyValueDatabase, utils, Error, services}; + +impl service::rooms::alias::Data for KeyValueDatabase { fn set_alias( &self, alias: &RoomAliasId, @@ -8,7 +12,7 @@ impl service::room::alias::Data for KeyValueDatabase { .insert(alias.alias().as_bytes(), room_id.as_bytes())?; let mut aliasid = 
room_id.as_bytes().to_vec(); aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); + aliasid.extend_from_slice(&services().globals.next_count()?.to_be_bytes()); self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; Ok(()) } diff --git a/src/database/key_value/rooms/directory.rs b/src/database/key_value/rooms/directory.rs index f42de45e..44a580c3 100644 --- a/src/database/key_value/rooms/directory.rs +++ b/src/database/key_value/rooms/directory.rs @@ -1,10 +1,14 @@ -impl service::room::directory::Data for KeyValueDatabase { +use ruma::RoomId; + +use crate::{service, database::KeyValueDatabase, utils, Error}; + +impl service::rooms::directory::Data for KeyValueDatabase { fn set_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.insert(room_id.as_bytes(), &[])?; + self.publicroomids.insert(room_id.as_bytes(), &[]) } fn set_not_public(&self, room_id: &RoomId) -> Result<()> { - self.publicroomids.remove(room_id.as_bytes())?; + self.publicroomids.remove(room_id.as_bytes()) } fn is_public_room(&self, room_id: &RoomId) -> Result { diff --git a/src/database/key_value/rooms/edus/mod.rs b/src/database/key_value/rooms/edus/mod.rs new file mode 100644 index 00000000..9ffd33da --- /dev/null +++ b/src/database/key_value/rooms/edus/mod.rs @@ -0,0 +1,3 @@ +mod presence; +mod typing; +mod read_receipt; diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs index 1978ce7b..9f3977db 100644 --- a/src/database/key_value/rooms/edus/presence.rs +++ b/src/database/key_value/rooms/edus/presence.rs @@ -1,4 +1,10 @@ -impl service::room::edus::presence::Data for KeyValueDatabase { +use std::collections::HashMap; + +use ruma::{UserId, RoomId, events::presence::PresenceEvent, presence::PresenceState, UInt}; + +use crate::{service, database::KeyValueDatabase, utils, Error, services}; + +impl service::rooms::edus::presence::Data for KeyValueDatabase { fn update_presence( &self, user_id: &UserId, @@ -7,7 +13,7 @@ impl service::room::edus::presence::Data for KeyValueDatabase { ) -> Result<()> { // TODO: Remove old entry? Or maybe just wipe completely from time to time? 
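A convention worth noting in these `Data` implementations: composite tree keys are built as the fixed part of the key, a `0xff` separator byte, then `services().globals.next_count()` encoded as a big-endian `u64`, so entries under the same prefix sort in insertion order. A small self-contained sketch of that layout (the helper name is invented):

    /// Hypothetical helper showing the key layout used by trees such as
    /// `aliasid_alias` and `presenceid_presence`: prefix ++ 0xff ++ count (big-endian).
    fn composite_key(prefix: &[u8], count: u64) -> Vec<u8> {
        let mut key = prefix.to_vec();
        key.push(0xff);
        key.extend_from_slice(&count.to_be_bytes());
        key
    }

    fn main() {
        let first = composite_key(b"!room:example.org", 1);
        let second = composite_key(b"!room:example.org", 2);
        // Big-endian counts make lexicographic key order match insertion order,
        // which is what the prefix scans over these trees rely on.
        assert!(first < second);
    }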
- let count = globals.next_count()?.to_be_bytes(); + let count = services().globals.next_count()?.to_be_bytes(); let mut presence_id = room_id.as_bytes().to_vec(); presence_id.push(0xff); @@ -101,6 +107,7 @@ impl service::room::edus::presence::Data for KeyValueDatabase { Ok(hashmap) } + /* fn presence_maintain(&self, db: Arc>) { // TODO @M0dEx: move this to a timed tasks module tokio::spawn(async move { @@ -117,6 +124,7 @@ impl service::room::edus::presence::Data for KeyValueDatabase { } }); } + */ } fn parse_presence_event(bytes: &[u8]) -> Result { diff --git a/src/database/key_value/rooms/edus/read_receipt.rs b/src/database/key_value/rooms/edus/read_receipt.rs index 556e697f..68aea165 100644 --- a/src/database/key_value/rooms/edus/read_receipt.rs +++ b/src/database/key_value/rooms/edus/read_receipt.rs @@ -1,4 +1,10 @@ -impl service::room::edus::read_receipt::Data for KeyValueDatabase { +use std::mem; + +use ruma::{UserId, RoomId, events::receipt::ReceiptEvent, serde::Raw, signatures::CanonicalJsonObject}; + +use crate::{database::KeyValueDatabase, service, utils, Error, services}; + +impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { fn readreceipt_update( &self, user_id: &UserId, @@ -28,7 +34,7 @@ impl service::room::edus::read_receipt::Data for KeyValueDatabase { } let mut room_latest_id = prefix; - room_latest_id.extend_from_slice(&globals.next_count()?.to_be_bytes()); + room_latest_id.extend_from_slice(&services().globals.next_count()?.to_be_bytes()); room_latest_id.push(0xff); room_latest_id.extend_from_slice(user_id.as_bytes()); @@ -40,7 +46,7 @@ impl service::room::edus::read_receipt::Data for KeyValueDatabase { Ok(()) } - pub fn readreceipts_since<'a>( + fn readreceipts_since<'a>( &'a self, room_id: &RoomId, since: u64, @@ -102,7 +108,7 @@ impl service::room::edus::read_receipt::Data for KeyValueDatabase { .insert(&key, &count.to_be_bytes())?; self.roomuserid_lastprivatereadupdate - .insert(&key, &globals.next_count()?.to_be_bytes())?; + .insert(&key, &services().globals.next_count()?.to_be_bytes()) } fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { diff --git a/src/database/key_value/rooms/edus/typing.rs b/src/database/key_value/rooms/edus/typing.rs index 8cfb432d..905bffc8 100644 --- a/src/database/key_value/rooms/edus/typing.rs +++ b/src/database/key_value/rooms/edus/typing.rs @@ -1,15 +1,20 @@ -impl service::room::edus::typing::Data for KeyValueDatabase { +use std::collections::HashSet; + +use ruma::{UserId, RoomId}; + +use crate::{database::KeyValueDatabase, service, utils, Error, services}; + +impl service::rooms::edus::typing::Data for KeyValueDatabase { fn typing_add( &self, user_id: &UserId, room_id: &RoomId, timeout: u64, - globals: &super::super::globals::Globals, ) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); - let count = globals.next_count()?.to_be_bytes(); + let count = services().globals.next_count()?.to_be_bytes(); let mut room_typing_id = prefix; room_typing_id.extend_from_slice(&timeout.to_be_bytes()); @@ -49,7 +54,7 @@ impl service::room::edus::typing::Data for KeyValueDatabase { if found_outdated { self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; + .insert(room_id.as_bytes(), &services().globals.next_count()?.to_be_bytes())?; } Ok(()) diff --git a/src/database/key_value/rooms/lazy_load.rs b/src/database/key_value/rooms/lazy_load.rs index 8abdce49..c230cbf7 100644 --- a/src/database/key_value/rooms/lazy_load.rs +++ 
b/src/database/key_value/rooms/lazy_load.rs @@ -1,4 +1,8 @@ -impl service::room::lazy_load::Data for KeyValueDatabase { +use ruma::{UserId, DeviceId, RoomId}; + +use crate::{service, database::KeyValueDatabase}; + +impl service::rooms::lazy_loading::Data for KeyValueDatabase { fn lazy_load_was_sent_before( &self, user_id: &UserId, diff --git a/src/database/key_value/rooms/metadata.rs b/src/database/key_value/rooms/metadata.rs index 37dd7173..b4cba2c6 100644 --- a/src/database/key_value/rooms/metadata.rs +++ b/src/database/key_value/rooms/metadata.rs @@ -1,4 +1,8 @@ -impl service::room::metadata::Data for KeyValueDatabase { +use ruma::RoomId; + +use crate::{service, database::KeyValueDatabase}; + +impl service::rooms::metadata::Data for KeyValueDatabase { fn exists(&self, room_id: &RoomId) -> Result { let prefix = match self.get_shortroomid(room_id)? { Some(b) => b.to_be_bytes().to_vec(), diff --git a/src/database/key_value/rooms/mod.rs b/src/database/key_value/rooms/mod.rs index 2a3f81d8..adb810ba 100644 --- a/src/database/key_value/rooms/mod.rs +++ b/src/database/key_value/rooms/mod.rs @@ -1,14 +1,13 @@ -mod state; mod alias; mod directory; mod edus; -mod event_handler; -mod lazy_loading; +//mod event_handler; +mod lazy_load; mod metadata; mod outlier; mod pdu_metadata; mod search; -mod short; +//mod short; mod state; mod state_accessor; mod state_cache; diff --git a/src/database/key_value/rooms/outlier.rs b/src/database/key_value/rooms/outlier.rs index c979d253..08299a0c 100644 --- a/src/database/key_value/rooms/outlier.rs +++ b/src/database/key_value/rooms/outlier.rs @@ -1,4 +1,8 @@ -impl service::room::outlier::Data for KeyValueDatabase { +use ruma::{EventId, signatures::CanonicalJsonObject}; + +use crate::{service, database::KeyValueDatabase, PduEvent, Error}; + +impl service::rooms::outlier::Data for KeyValueDatabase { fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { self.eventid_outlierpdu .get(event_id.as_bytes())? 
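The shape repeated across `src/database/key_value/*`: each service module owns a small `Data` trait describing the storage it needs, and `KeyValueDatabase` implements that trait against its trees, so service logic never names a concrete backend. A compressed sketch of the layering; the module, trait and field usage here are illustrative only, and how the real services hold their `Data` differs:

    // service side: the interface the business logic programs against
    mod service {
        pub mod rooms {
            pub mod metadata {
                pub trait Data: Send + Sync {
                    fn exists(&self, room_id: &str) -> bool;
                }

                pub struct Service<D: Data> {
                    pub db: D,
                }

                impl<D: Data> Service<D> {
                    pub fn exists(&self, room_id: &str) -> bool {
                        self.db.exists(room_id)
                    }
                }
            }
        }
    }

    // database side: the key-value backend implements the service's trait
    mod database {
        use std::collections::HashSet;

        pub struct KeyValueDatabase {
            // simplified to a set of known room ids for this sketch
            pub roomid_shortroomid: HashSet<String>,
        }

        impl crate::service::rooms::metadata::Data for KeyValueDatabase {
            fn exists(&self, room_id: &str) -> bool {
                self.roomid_shortroomid.contains(room_id)
            }
        }
    }

    fn main() {
        let db = database::KeyValueDatabase {
            roomid_shortroomid: ["!a:example.org".to_owned()].into_iter().collect(),
        };
        let metadata = service::rooms::metadata::Service { db };
        assert!(metadata.exists("!a:example.org"));
    }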
diff --git a/src/database/key_value/rooms/pdu_metadata.rs b/src/database/key_value/rooms/pdu_metadata.rs index 6b2171ca..602f3f6c 100644 --- a/src/database/key_value/rooms/pdu_metadata.rs +++ b/src/database/key_value/rooms/pdu_metadata.rs @@ -1,4 +1,10 @@ -impl service::room::pdu_metadata::Data for KeyValueDatabase { +use std::sync::Arc; + +use ruma::{RoomId, EventId}; + +use crate::{service, database::KeyValueDatabase}; + +impl service::rooms::pdu_metadata::Data for KeyValueDatabase { fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { for prev in event_ids { let mut key = room_id.as_bytes().to_vec(); diff --git a/src/database/key_value/rooms/search.rs b/src/database/key_value/rooms/search.rs index 1ffffe56..44663ff3 100644 --- a/src/database/key_value/rooms/search.rs +++ b/src/database/key_value/rooms/search.rs @@ -1,7 +1,12 @@ -impl service::room::search::Data for KeyValueDatabase { +use std::mem::size_of; +use ruma::RoomId; + +use crate::{service, database::KeyValueDatabase, utils}; + +impl service::rooms::search::Data for KeyValueDatabase { fn index_pdu<'a>(&self, room_id: &RoomId, pdu_id: u64, message_body: String) -> Result<()> { - let mut batch = body + let mut batch = message_body .split_terminator(|c: char| !c.is_alphanumeric()) .filter(|s| !s.is_empty()) .filter(|word| word.len() <= 50) @@ -14,7 +19,7 @@ impl service::room::search::Data for KeyValueDatabase { (key, Vec::new()) }); - self.tokenids.insert_batch(&mut batch)?; + self.tokenids.insert_batch(&mut batch) } fn search_pdus<'a>( @@ -64,3 +69,4 @@ impl service::room::search::Data for KeyValueDatabase { ) })) } +} diff --git a/src/database/key_value/rooms/state.rs b/src/database/key_value/rooms/state.rs index 5daf6c6a..192dbb83 100644 --- a/src/database/key_value/rooms/state.rs +++ b/src/database/key_value/rooms/state.rs @@ -1,4 +1,11 @@ -impl service::room::state::Data for KeyValueDatabase { +use ruma::{RoomId, EventId}; +use std::sync::Arc; +use std::{sync::MutexGuard, collections::HashSet}; +use std::fmt::Debug; + +use crate::{service, database::KeyValueDatabase, utils, Error}; + +impl service::rooms::state::Data for KeyValueDatabase { fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { self.roomid_shortstatehash .get(room_id.as_bytes())? 
@@ -9,21 +16,21 @@ impl service::room::state::Data for KeyValueDatabase { }) } - fn set_room_state(&self, room_id: &RoomId, new_shortstatehash: u64 - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex + fn set_room_state(&self, room_id: &RoomId, new_shortstatehash: u64, + _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()> { self.roomid_shortstatehash .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; Ok(()) } - fn set_event_state(&self) -> Result<()> { - db.shorteventid_shortstatehash + fn set_event_state(&self, shorteventid: Vec, shortstatehash: Vec) -> Result<()> { + self.shorteventid_shortstatehash .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; Ok(()) } - fn get_pdu_leaves(&self, room_id: &RoomId) -> Result>> { + fn get_forward_extremities(&self, room_id: &RoomId) -> Result>> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -38,11 +45,11 @@ impl service::room::state::Data for KeyValueDatabase { .collect() } - fn set_forward_extremities( + fn set_forward_extremities<'a>( &self, room_id: &RoomId, event_ids: impl IntoIterator + Debug, - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex + _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/key_value/rooms/state_accessor.rs b/src/database/key_value/rooms/state_accessor.rs index db81967d..ea15afc0 100644 --- a/src/database/key_value/rooms/state_accessor.rs +++ b/src/database/key_value/rooms/state_accessor.rs @@ -1,4 +1,11 @@ -impl service::room::state_accessor::Data for KeyValueDatabase { +use std::{collections::{BTreeMap, HashMap}, sync::Arc}; + +use crate::{database::KeyValueDatabase, service, PduEvent, Error, utils}; +use async_trait::async_trait; +use ruma::{EventId, events::StateEventType, RoomId}; + +#[async_trait] +impl service::rooms::state_accessor::Data for KeyValueDatabase { async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { let full_state = self .load_shortstatehash_info(shortstatehash)? 
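`set_room_state` and `set_forward_extremities` now take `_mutex_lock: &MutexGuard<'_, ()>` without ever reading it; the parameter only exists so that every caller must hold the per-room state mutex (`globals.roomid_mutex_state`) before mutating room state. A minimal sketch of that proof-by-parameter idea, using `std::sync` and invented names:

    use std::sync::{Mutex, MutexGuard};

    // Invented stand-in for the per-room state mutex the real code keeps
    // in `globals.roomid_mutex_state`.
    struct Room {
        state_mutex: Mutex<()>,
    }

    // The guard is never used; requiring `&MutexGuard<'_, ()>` forces the
    // caller to prove it acquired the room's state lock first.
    fn set_room_state(_room: &Room, _new_shortstatehash: u64, _state_lock: &MutexGuard<'_, ()>) {
        // the real implementation writes to roomid_shortstatehash here
    }

    fn main() {
        let room = Room { state_mutex: Mutex::new(()) };
        let guard = room.state_mutex.lock().unwrap();
        set_room_state(&room, 42, &guard); // will not compile without a live guard
        drop(guard);
    }

This keeps two tasks from computing and persisting conflicting room state concurrently, without scattering explicit lock checks through the storage layer.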
@@ -149,3 +156,4 @@ impl service::room::state_accessor::Data for KeyValueDatabase { Ok(None) } } +} diff --git a/src/database/key_value/rooms/state_cache.rs b/src/database/key_value/rooms/state_cache.rs index 37814020..567dc809 100644 --- a/src/database/key_value/rooms/state_cache.rs +++ b/src/database/key_value/rooms/state_cache.rs @@ -1,8 +1,12 @@ -impl service::room::state_cache::Data for KeyValueDatabase { - fn mark_as_once_joined(user_id: &UserId, room_id: &RoomId) -> Result<()> { +use ruma::{UserId, RoomId}; + +use crate::{service, database::KeyValueDatabase}; + +impl service::rooms::state_cache::Data for KeyValueDatabase { + fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); userroom_id.extend_from_slice(room_id.as_bytes()); - self.roomuseroncejoinedids.insert(&userroom_id, &[])?; + self.roomuseroncejoinedids.insert(&userroom_id, &[]) } } diff --git a/src/database/key_value/rooms/state_compressor.rs b/src/database/key_value/rooms/state_compressor.rs index 71a2f3a0..09e35660 100644 --- a/src/database/key_value/rooms/state_compressor.rs +++ b/src/database/key_value/rooms/state_compressor.rs @@ -1,11 +1,20 @@ -impl service::room::state_compressor::Data for KeyValueDatabase { - fn get_statediff(shortstatehash: u64) -> Result { +use std::{collections::HashSet, mem::size_of}; + +use crate::{service::{self, rooms::state_compressor::data::StateDiff}, database::KeyValueDatabase, Error, utils}; + +impl service::rooms::state_compressor::Data for KeyValueDatabase { + fn get_statediff(&self, shortstatehash: u64) -> Result { let value = self .shortstatehash_statediff .get(&shortstatehash.to_be_bytes())? .ok_or_else(|| Error::bad_database("State hash does not exist"))?; let parent = utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); + let parent = if parent != 0 { + Some(parent) + } else { + None + }; let mut add_mode = true; let mut added = HashSet::new(); @@ -26,10 +35,10 @@ impl service::room::state_compressor::Data for KeyValueDatabase { i += 2 * size_of::(); } - StateDiff { parent, added, removed } + Ok(StateDiff { parent, added, removed }) } - fn save_statediff(shortstatehash: u64, diff: StateDiff) -> Result<()> { + fn save_statediff(&self, shortstatehash: u64, diff: StateDiff) -> Result<()> { let mut value = diff.parent.to_be_bytes().to_vec(); for new in &diff.new { value.extend_from_slice(&new[..]); @@ -43,6 +52,6 @@ impl service::room::state_compressor::Data for KeyValueDatabase { } self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; + .insert(&shortstatehash.to_be_bytes(), &value) } } diff --git a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs index 58884ec3..cf93df12 100644 --- a/src/database/key_value/rooms/timeline.rs +++ b/src/database/key_value/rooms/timeline.rs @@ -1,4 +1,11 @@ -impl service::room::timeline::Data for KeyValueDatabase { +use std::{collections::hash_map, mem::size_of, sync::Arc}; + +use ruma::{UserId, RoomId, api::client::error::ErrorKind, EventId, signatures::CanonicalJsonObject}; +use tracing::error; + +use crate::{service, database::KeyValueDatabase, utils, Error, PduEvent}; + +impl service::rooms::timeline::Data for KeyValueDatabase { fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { match self .lasttimelinecount_cache @@ -37,7 +44,7 @@ impl service::room::timeline::Data for KeyValueDatabase { } /// Returns the json of a pdu. 
- pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { + fn get_pdu_json(&self, event_id: &EventId) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? .map_or_else( @@ -55,7 +62,7 @@ impl service::room::timeline::Data for KeyValueDatabase { } /// Returns the json of a pdu. - pub fn get_non_outlier_pdu_json( + fn get_non_outlier_pdu_json( &self, event_id: &EventId, ) -> Result> { @@ -74,14 +81,14 @@ impl service::room::timeline::Data for KeyValueDatabase { } /// Returns the pdu's id. - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { + fn get_pdu_id(&self, event_id: &EventId) -> Result>> { self.eventid_pduid.get(event_id.as_bytes()) } /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { + fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? .map(|pduid| { @@ -99,7 +106,7 @@ impl service::room::timeline::Data for KeyValueDatabase { /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result>> { + fn get_pdu(&self, event_id: &EventId) -> Result>> { if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { return Ok(Some(Arc::clone(p))); } @@ -135,7 +142,7 @@ impl service::room::timeline::Data for KeyValueDatabase { /// Returns the pdu. /// /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { + fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { Ok(Some( serde_json::from_slice(&pdu) @@ -145,7 +152,7 @@ impl service::room::timeline::Data for KeyValueDatabase { } /// Returns the pdu as a `BTreeMap`. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { + fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { Ok(Some( serde_json::from_slice(&pdu) @@ -155,7 +162,7 @@ impl service::room::timeline::Data for KeyValueDatabase { } /// Returns the `count` of this pdu's id. - pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { + fn pdu_count(&self, pdu_id: &[u8]) -> Result { utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) } @@ -178,7 +185,7 @@ impl service::room::timeline::Data for KeyValueDatabase { /// Returns an iterator over all events in a room that happened after the event with id `since` /// in chronological order. - pub fn pdus_since<'a>( + fn pdus_since<'a>( &'a self, user_id: &UserId, room_id: &RoomId, @@ -212,7 +219,7 @@ impl service::room::timeline::Data for KeyValueDatabase { /// Returns an iterator over all events and their tokens in a room that happened before the /// event with id `until` in reverse-chronological order. 
- pub fn pdus_until<'a>( + fn pdus_until<'a>( &'a self, user_id: &UserId, room_id: &RoomId, @@ -246,7 +253,7 @@ impl service::room::timeline::Data for KeyValueDatabase { })) } - pub fn pdus_after<'a>( + fn pdus_after<'a>( &'a self, user_id: &UserId, room_id: &RoomId, diff --git a/src/database/key_value/rooms/user.rs b/src/database/key_value/rooms/user.rs index 52145ced..2fc3b9f4 100644 --- a/src/database/key_value/rooms/user.rs +++ b/src/database/key_value/rooms/user.rs @@ -1,4 +1,8 @@ -impl service::room::user::Data for KeyValueDatabase { +use ruma::{UserId, RoomId}; + +use crate::{service, database::KeyValueDatabase, utils, Error}; + +impl service::rooms::user::Data for KeyValueDatabase { fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); diff --git a/src/database/key_value/transaction_ids.rs b/src/database/key_value/transaction_ids.rs index 81c1197d..6652a627 100644 --- a/src/database/key_value/transaction_ids.rs +++ b/src/database/key_value/transaction_ids.rs @@ -1,5 +1,9 @@ -impl service::pusher::Data for KeyValueDatabase { - pub fn add_txnid( +use ruma::{UserId, DeviceId, TransactionId}; + +use crate::{service, database::KeyValueDatabase}; + +impl service::transaction_ids::Data for KeyValueDatabase { + fn add_txnid( &self, user_id: &UserId, device_id: Option<&DeviceId>, @@ -17,7 +21,7 @@ impl service::pusher::Data for KeyValueDatabase { Ok(()) } - pub fn existing_txnid( + fn existing_txnid( &self, user_id: &UserId, device_id: Option<&DeviceId>, diff --git a/src/database/key_value/uiaa.rs b/src/database/key_value/uiaa.rs index 4d1dac57..b1960bd5 100644 --- a/src/database/key_value/uiaa.rs +++ b/src/database/key_value/uiaa.rs @@ -1,3 +1,9 @@ +use std::io::ErrorKind; + +use ruma::{UserId, DeviceId, signatures::CanonicalJsonValue, api::client::uiaa::UiaaInfo}; + +use crate::{database::KeyValueDatabase, service, Error}; + impl service::uiaa::Data for KeyValueDatabase { fn set_uiaa_request( &self, diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs index 5ef058f3..ea844903 100644 --- a/src/database/key_value/users.rs +++ b/src/database/key_value/users.rs @@ -1,11 +1,18 @@ +use std::{mem::size_of, collections::BTreeMap}; + +use ruma::{api::client::{filter::IncomingFilterDefinition, error::ErrorKind, device::Device}, UserId, RoomAliasId, MxcUri, DeviceId, MilliSecondsSinceUnixEpoch, DeviceKeyId, encryption::{OneTimeKey, CrossSigningKey, DeviceKeys}, serde::Raw, events::{AnyToDeviceEvent, StateEventType}, DeviceKeyAlgorithm, UInt}; +use tracing::warn; + +use crate::{service::{self, users::clean_signatures}, database::KeyValueDatabase, Error, utils, services}; + impl service::users::Data for KeyValueDatabase { /// Check if a user has an account on this homeserver. - pub fn exists(&self, user_id: &UserId) -> Result { + fn exists(&self, user_id: &UserId) -> Result { Ok(self.userid_password.get(user_id.as_bytes())?.is_some()) } /// Check if account is deactivated - pub fn is_deactivated(&self, user_id: &UserId) -> Result { + fn is_deactivated(&self, user_id: &UserId) -> Result { Ok(self .userid_password .get(user_id.as_bytes())? 
@@ -16,33 +23,13 @@ impl service::users::Data for KeyValueDatabase { .is_empty()) } - /// Check if a user is an admin - pub fn is_admin( - &self, - user_id: &UserId, - rooms: &super::rooms::Rooms, - globals: &super::globals::Globals, - ) -> Result { - let admin_room_alias_id = RoomAliasId::parse(format!("#admins:{}", globals.server_name())) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; - let admin_room_id = rooms.id_from_alias(&admin_room_alias_id)?.unwrap(); - - rooms.is_joined(user_id, &admin_room_id) - } - - /// Create a new user account on this homeserver. - pub fn create(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { - self.set_password(user_id, password)?; - Ok(()) - } - /// Returns the number of users registered on this server. - pub fn count(&self) -> Result { + fn count(&self) -> Result { Ok(self.userid_password.iter().count()) } /// Find out which user an access token belongs to. - pub fn find_from_token(&self, token: &str) -> Result, String)>> { + fn find_from_token(&self, token: &str) -> Result, String)>> { self.token_userdeviceid .get(token.as_bytes())? .map_or(Ok(None), |bytes| { @@ -69,7 +56,7 @@ impl service::users::Data for KeyValueDatabase { } /// Returns an iterator over all users on this homeserver. - pub fn iter(&self) -> impl Iterator>> + '_ { + fn iter(&self) -> impl Iterator>> + '_ { self.userid_password.iter().map(|(bytes, _)| { UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("User ID in userid_password is invalid unicode.") @@ -81,7 +68,7 @@ impl service::users::Data for KeyValueDatabase { /// Returns a list of local users as list of usernames. /// /// A user account is considered `local` if the length of it's password is greater then zero. - pub fn list_local_users(&self) -> Result> { + fn list_local_users(&self) -> Result> { let users: Vec = self .userid_password .iter() @@ -113,7 +100,7 @@ impl service::users::Data for KeyValueDatabase { } /// Returns the password hash for the given user. - pub fn password_hash(&self, user_id: &UserId) -> Result> { + fn password_hash(&self, user_id: &UserId) -> Result> { self.userid_password .get(user_id.as_bytes())? .map_or(Ok(None), |bytes| { @@ -124,7 +111,7 @@ impl service::users::Data for KeyValueDatabase { } /// Hash and set the user's password to the Argon2 hash - pub fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { + fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { if let Some(password) = password { if let Ok(hash) = utils::calculate_hash(password) { self.userid_password @@ -143,7 +130,7 @@ impl service::users::Data for KeyValueDatabase { } /// Returns the displayname of a user on this homeserver. - pub fn displayname(&self, user_id: &UserId) -> Result> { + fn displayname(&self, user_id: &UserId) -> Result> { self.userid_displayname .get(user_id.as_bytes())? .map_or(Ok(None), |bytes| { @@ -154,7 +141,7 @@ impl service::users::Data for KeyValueDatabase { } /// Sets a new displayname or removes it if displayname is None. You still need to nofify all rooms of this change. - pub fn set_displayname(&self, user_id: &UserId, displayname: Option) -> Result<()> { + fn set_displayname(&self, user_id: &UserId, displayname: Option) -> Result<()> { if let Some(displayname) = displayname { self.userid_displayname .insert(user_id.as_bytes(), displayname.as_bytes())?; @@ -166,7 +153,7 @@ impl service::users::Data for KeyValueDatabase { } /// Get the avatar_url of a user. 
- pub fn avatar_url(&self, user_id: &UserId) -> Result>> { + fn avatar_url(&self, user_id: &UserId) -> Result>> { self.userid_avatarurl .get(user_id.as_bytes())? .map(|bytes| { @@ -179,7 +166,7 @@ impl service::users::Data for KeyValueDatabase { } /// Sets a new avatar_url or removes it if avatar_url is None. - pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option>) -> Result<()> { + fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option>) -> Result<()> { if let Some(avatar_url) = avatar_url { self.userid_avatarurl .insert(user_id.as_bytes(), avatar_url.to_string().as_bytes())?; @@ -191,7 +178,7 @@ impl service::users::Data for KeyValueDatabase { } /// Get the blurhash of a user. - pub fn blurhash(&self, user_id: &UserId) -> Result> { + fn blurhash(&self, user_id: &UserId) -> Result> { self.userid_blurhash .get(user_id.as_bytes())? .map(|bytes| { @@ -204,7 +191,7 @@ impl service::users::Data for KeyValueDatabase { } /// Sets a new avatar_url or removes it if avatar_url is None. - pub fn set_blurhash(&self, user_id: &UserId, blurhash: Option) -> Result<()> { + fn set_blurhash(&self, user_id: &UserId, blurhash: Option) -> Result<()> { if let Some(blurhash) = blurhash { self.userid_blurhash .insert(user_id.as_bytes(), blurhash.as_bytes())?; @@ -216,7 +203,7 @@ impl service::users::Data for KeyValueDatabase { } /// Adds a new device to a user. - pub fn create_device( + fn create_device( &self, user_id: &UserId, device_id: &DeviceId, @@ -250,7 +237,7 @@ impl service::users::Data for KeyValueDatabase { } /// Removes a device from a user. - pub fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { + fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { let mut userdeviceid = user_id.as_bytes().to_vec(); userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); @@ -280,7 +267,7 @@ impl service::users::Data for KeyValueDatabase { } /// Returns an iterator over all device ids of this user. - pub fn all_device_ids<'a>( + fn all_device_ids<'a>( &'a self, user_id: &UserId, ) -> impl Iterator>> + 'a { @@ -302,7 +289,7 @@ impl service::users::Data for KeyValueDatabase { } /// Replaces the access token of one device. - pub fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> { + fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> { let mut userdeviceid = user_id.as_bytes().to_vec(); userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); @@ -325,13 +312,12 @@ impl service::users::Data for KeyValueDatabase { Ok(()) } - pub fn add_one_time_key( + fn add_one_time_key( &self, user_id: &UserId, device_id: &DeviceId, one_time_key_key: &DeviceKeyId, one_time_key_value: &Raw, - globals: &super::globals::Globals, ) -> Result<()> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); @@ -356,12 +342,12 @@ impl service::users::Data for KeyValueDatabase { )?; self.userid_lastonetimekeyupdate - .insert(user_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; + .insert(user_id.as_bytes(), &services().globals.next_count()?.to_be_bytes())?; Ok(()) } - pub fn last_one_time_keys_update(&self, user_id: &UserId) -> Result { + fn last_one_time_keys_update(&self, user_id: &UserId) -> Result { self.userid_lastonetimekeyupdate .get(user_id.as_bytes())? 
.map(|bytes| { @@ -372,12 +358,11 @@ impl service::users::Data for KeyValueDatabase { .unwrap_or(Ok(0)) } - pub fn take_one_time_key( + fn take_one_time_key( &self, user_id: &UserId, device_id: &DeviceId, key_algorithm: &DeviceKeyAlgorithm, - globals: &super::globals::Globals, ) -> Result, Raw)>> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); @@ -388,7 +373,7 @@ impl service::users::Data for KeyValueDatabase { prefix.push(b':'); self.userid_lastonetimekeyupdate - .insert(user_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; + .insert(user_id.as_bytes(), &services().globals.next_count()?.to_be_bytes())?; self.onetimekeyid_onetimekeys .scan_prefix(prefix) @@ -411,7 +396,7 @@ impl service::users::Data for KeyValueDatabase { .transpose() } - pub fn count_one_time_keys( + fn count_one_time_keys( &self, user_id: &UserId, device_id: &DeviceId, @@ -443,13 +428,11 @@ impl service::users::Data for KeyValueDatabase { Ok(counts) } - pub fn add_device_keys( + fn add_device_keys( &self, user_id: &UserId, device_id: &DeviceId, device_keys: &Raw, - rooms: &super::rooms::Rooms, - globals: &super::globals::Globals, ) -> Result<()> { let mut userdeviceid = user_id.as_bytes().to_vec(); userdeviceid.push(0xff); @@ -460,19 +443,17 @@ impl service::users::Data for KeyValueDatabase { &serde_json::to_vec(&device_keys).expect("DeviceKeys::to_vec always works"), )?; - self.mark_device_key_update(user_id, rooms, globals)?; + self.mark_device_key_update(user_id)?; Ok(()) } - pub fn add_cross_signing_keys( + fn add_cross_signing_keys( &self, user_id: &UserId, master_key: &Raw, self_signing_key: &Option>, user_signing_key: &Option>, - rooms: &super::rooms::Rooms, - globals: &super::globals::Globals, ) -> Result<()> { // TODO: Check signatures @@ -575,19 +556,17 @@ impl service::users::Data for KeyValueDatabase { .insert(user_id.as_bytes(), &user_signing_key_key)?; } - self.mark_device_key_update(user_id, rooms, globals)?; + self.mark_device_key_update(user_id)?; Ok(()) } - pub fn sign_key( + fn sign_key( &self, target_id: &UserId, key_id: &str, signature: (String, String), sender_id: &UserId, - rooms: &super::rooms::Rooms, - globals: &super::globals::Globals, ) -> Result<()> { let mut key = target_id.as_bytes().to_vec(); key.push(0xff); @@ -619,12 +598,12 @@ impl service::users::Data for KeyValueDatabase { )?; // TODO: Should we notify about this change? - self.mark_device_key_update(target_id, rooms, globals)?; + self.mark_device_key_update(target_id)?; Ok(()) } - pub fn keys_changed<'a>( + fn keys_changed<'a>( &'a self, user_or_room_id: &str, from: u64, @@ -662,16 +641,14 @@ impl service::users::Data for KeyValueDatabase { }) } - pub fn mark_device_key_update( + fn mark_device_key_update( &self, user_id: &UserId, - rooms: &super::rooms::Rooms, - globals: &super::globals::Globals, ) -> Result<()> { - let count = globals.next_count()?.to_be_bytes(); - for room_id in rooms.rooms_joined(user_id).filter_map(|r| r.ok()) { + let count = services().globals.next_count()?.to_be_bytes(); + for room_id in services().rooms.rooms_joined(user_id).filter_map(|r| r.ok()) { // Don't send key updates to unencrypted rooms - if rooms + if services().rooms .room_state_get(&room_id, &StateEventType::RoomEncryption, "")? 
.is_none() { @@ -693,7 +670,7 @@ impl service::users::Data for KeyValueDatabase { Ok(()) } - pub fn get_device_keys( + fn get_device_keys( &self, user_id: &UserId, device_id: &DeviceId, @@ -709,7 +686,7 @@ impl service::users::Data for KeyValueDatabase { }) } - pub fn get_master_key bool>( + fn get_master_key bool>( &self, user_id: &UserId, allowed_signatures: F, @@ -730,7 +707,7 @@ impl service::users::Data for KeyValueDatabase { }) } - pub fn get_self_signing_key bool>( + fn get_self_signing_key bool>( &self, user_id: &UserId, allowed_signatures: F, @@ -751,7 +728,7 @@ impl service::users::Data for KeyValueDatabase { }) } - pub fn get_user_signing_key(&self, user_id: &UserId) -> Result>> { + fn get_user_signing_key(&self, user_id: &UserId) -> Result>> { self.userid_usersigningkeyid .get(user_id.as_bytes())? .map_or(Ok(None), |key| { @@ -763,20 +740,19 @@ impl service::users::Data for KeyValueDatabase { }) } - pub fn add_to_device_event( + fn add_to_device_event( &self, sender: &UserId, target_user_id: &UserId, target_device_id: &DeviceId, event_type: &str, content: serde_json::Value, - globals: &super::globals::Globals, ) -> Result<()> { let mut key = target_user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(target_device_id.as_bytes()); key.push(0xff); - key.extend_from_slice(&globals.next_count()?.to_be_bytes()); + key.extend_from_slice(&services().globals.next_count()?.to_be_bytes()); let mut json = serde_json::Map::new(); json.insert("type".to_owned(), event_type.to_owned().into()); @@ -790,7 +766,7 @@ impl service::users::Data for KeyValueDatabase { Ok(()) } - pub fn get_to_device_events( + fn get_to_device_events( &self, user_id: &UserId, device_id: &DeviceId, @@ -812,7 +788,7 @@ impl service::users::Data for KeyValueDatabase { Ok(events) } - pub fn remove_to_device_events( + fn remove_to_device_events( &self, user_id: &UserId, device_id: &DeviceId, @@ -833,7 +809,7 @@ impl service::users::Data for KeyValueDatabase { .map(|(key, _)| { Ok::<_, Error>(( key.clone(), - utils::u64_from_bytes(&key[key.len() - mem::size_of::()..key.len()]) + utils::u64_from_bytes(&key[key.len() - size_of::()..key.len()]) .map_err(|_| Error::bad_database("ToDeviceId has invalid count bytes."))?, )) }) @@ -846,7 +822,7 @@ impl service::users::Data for KeyValueDatabase { Ok(()) } - pub fn update_device_metadata( + fn update_device_metadata( &self, user_id: &UserId, device_id: &DeviceId, @@ -871,7 +847,7 @@ impl service::users::Data for KeyValueDatabase { } /// Get device metadata. - pub fn get_device_metadata( + fn get_device_metadata( &self, user_id: &UserId, device_id: &DeviceId, @@ -889,7 +865,7 @@ impl service::users::Data for KeyValueDatabase { }) } - pub fn get_devicelist_version(&self, user_id: &UserId) -> Result> { + fn get_devicelist_version(&self, user_id: &UserId) -> Result> { self.userid_devicelistversion .get(user_id.as_bytes())? .map_or(Ok(None), |bytes| { @@ -899,7 +875,7 @@ impl service::users::Data for KeyValueDatabase { }) } - pub fn all_devices_metadata<'a>( + fn all_devices_metadata<'a>( &'a self, user_id: &UserId, ) -> impl Iterator> + 'a { @@ -915,7 +891,7 @@ impl service::users::Data for KeyValueDatabase { } /// Creates a new sync filter. Returns the filter id. 
- pub fn create_filter( + fn create_filter( &self, user_id: &UserId, filter: &IncomingFilterDefinition, @@ -934,7 +910,7 @@ impl service::users::Data for KeyValueDatabase { Ok(filter_id) } - pub fn get_filter( + fn get_filter( &self, user_id: &UserId, filter_id: &str, diff --git a/src/database/mod.rs b/src/database/mod.rs index a35228aa..12758af2 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -1,20 +1,7 @@ pub mod abstraction; +pub mod key_value; -pub mod account_data; -pub mod admin; -pub mod appservice; -pub mod globals; -pub mod key_backups; -pub mod media; -pub mod pusher; -pub mod rooms; -pub mod sending; -pub mod transaction_ids; -pub mod uiaa; -pub mod users; - -use self::admin::create_admin_room; -use crate::{utils, Config, Error, Result}; +use crate::{utils, Config, Error, Result, service::{users, globals, uiaa, rooms, account_data, media, key_backups, transaction_ids, sending, admin::{self, create_admin_room}, appservice, pusher}}; use abstraction::KeyValueDatabaseEngine; use directories::ProjectDirs; use futures_util::{stream::FuturesUnordered, StreamExt}; @@ -25,7 +12,7 @@ use ruma::{ GlobalAccountDataEvent, GlobalAccountDataEventType, }, push::Ruleset, - DeviceId, EventId, RoomId, UserId, + DeviceId, EventId, RoomId, UserId, signatures::CanonicalJsonValue, }; use std::{ collections::{BTreeMap, HashMap, HashSet}, @@ -38,21 +25,132 @@ use std::{ }; use tokio::sync::{mpsc, OwnedRwLockReadGuard, RwLock as TokioRwLock, Semaphore}; use tracing::{debug, error, info, warn}; +use abstraction::KvTree; pub struct KeyValueDatabase { _db: Arc, - pub globals: globals::Globals, - pub users: users::Users, - pub uiaa: uiaa::Uiaa, - pub rooms: rooms::Rooms, - pub account_data: account_data::AccountData, - pub media: media::Media, - pub key_backups: key_backups::KeyBackups, - pub transaction_ids: transaction_ids::TransactionIds, - pub sending: sending::Sending, - pub admin: admin::Admin, - pub appservice: appservice::Appservice, - pub pusher: pusher::PushData, + + //pub globals: globals::Globals, + pub(super) global: Arc, + pub(super) server_signingkeys: Arc, + + //pub users: users::Users, + pub(super) userid_password: Arc, + pub(super) userid_displayname: Arc, + pub(super) userid_avatarurl: Arc, + pub(super) userid_blurhash: Arc, + pub(super) userdeviceid_token: Arc, + pub(super) userdeviceid_metadata: Arc, // This is also used to check if a device exists + pub(super) userid_devicelistversion: Arc, // DevicelistVersion = u64 + pub(super) token_userdeviceid: Arc, + + pub(super) onetimekeyid_onetimekeys: Arc, // OneTimeKeyId = UserId + DeviceKeyId + pub(super) userid_lastonetimekeyupdate: Arc, // LastOneTimeKeyUpdate = Count + pub(super) keychangeid_userid: Arc, // KeyChangeId = UserId/RoomId + Count + pub(super) keyid_key: Arc, // KeyId = UserId + KeyId (depends on key type) + pub(super) userid_masterkeyid: Arc, + pub(super) userid_selfsigningkeyid: Arc, + pub(super) userid_usersigningkeyid: Arc, + + pub(super) userfilterid_filter: Arc, // UserFilterId = UserId + FilterId + + pub(super) todeviceid_events: Arc, // ToDeviceId = UserId + DeviceId + Count + + //pub uiaa: uiaa::Uiaa, + pub(super) userdevicesessionid_uiaainfo: Arc, // User-interactive authentication + pub(super) userdevicesessionid_uiaarequest: + RwLock, Box, String), CanonicalJsonValue>>, + + //pub edus: RoomEdus, + pub(super) readreceiptid_readreceipt: Arc, // ReadReceiptId = RoomId + Count + UserId + pub(super) roomuserid_privateread: Arc, // RoomUserId = Room + User, PrivateRead = Count + pub(super) 
roomuserid_lastprivatereadupdate: Arc, // LastPrivateReadUpdate = Count + pub(super) typingid_userid: Arc, // TypingId = RoomId + TimeoutTime + Count + pub(super) roomid_lasttypingupdate: Arc, // LastRoomTypingUpdate = Count + pub(super) presenceid_presence: Arc, // PresenceId = RoomId + Count + UserId + pub(super) userid_lastpresenceupdate: Arc, // LastPresenceUpdate = Count + + //pub rooms: rooms::Rooms, + pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count + pub(super) eventid_pduid: Arc, + pub(super) roomid_pduleaves: Arc, + pub(super) alias_roomid: Arc, + pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count + pub(super) publicroomids: Arc, + + pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount + + /// Participating servers in a room. + pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName + pub(super) serverroomids: Arc, // ServerRoomId = ServerName + RoomId + + pub(super) userroomid_joined: Arc, + pub(super) roomuserid_joined: Arc, + pub(super) roomid_joinedcount: Arc, + pub(super) roomid_invitedcount: Arc, + pub(super) roomuseroncejoinedids: Arc, + pub(super) userroomid_invitestate: Arc, // InviteState = Vec> + pub(super) roomuserid_invitecount: Arc, // InviteCount = Count + pub(super) userroomid_leftstate: Arc, + pub(super) roomuserid_leftcount: Arc, + + pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled + + pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId + + pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 + pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 + + /// Remember the current state hash of a room. + pub(super) roomid_shortstatehash: Arc, + pub(super) roomsynctoken_shortstatehash: Arc, + /// Remember the state hash at events in the past. + pub(super) shorteventid_shortstatehash: Arc, + /// StateKey = EventType + StateKey, ShortStateKey = Count + pub(super) statekey_shortstatekey: Arc, + pub(super) shortstatekey_statekey: Arc, + + pub(super) roomid_shortroomid: Arc, + + pub(super) shorteventid_eventid: Arc, + pub(super) eventid_shorteventid: Arc, + + pub(super) statehash_shortstatehash: Arc, + pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) + + pub(super) shorteventid_authchain: Arc, + + /// RoomId + EventId -> outlier PDU. + /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. + pub(super) eventid_outlierpdu: Arc, + pub(super) softfailedeventids: Arc, + + /// RoomId + EventId -> Parent PDU EventId. 
+ pub(super) referencedevents: Arc, + + //pub account_data: account_data::AccountData, + pub(super) roomuserdataid_accountdata: Arc, // RoomUserDataId = Room + User + Count + Type + pub(super) roomusertype_roomuserdataid: Arc, // RoomUserType = Room + User + Type + + //pub media: media::Media, + pub(super) mediaid_file: Arc, // MediaId = MXC + WidthHeight + ContentDisposition + ContentType + //pub key_backups: key_backups::KeyBackups, + pub(super) backupid_algorithm: Arc, // BackupId = UserId + Version(Count) + pub(super) backupid_etag: Arc, // BackupId = UserId + Version(Count) + pub(super) backupkeyid_backup: Arc, // BackupKeyId = UserId + Version + RoomId + SessionId + + //pub transaction_ids: transaction_ids::TransactionIds, + pub(super) userdevicetxnid_response: Arc, // Response can be empty (/sendToDevice) or the event id (/send) + //pub sending: sending::Sending, + pub(super) servername_educount: Arc, // EduCount: Count of last EDU sync + pub(super) servernameevent_data: Arc, // ServernameEvent = (+ / $)SenderKey / ServerName / UserId + PduId / Id (for edus), Data = EDU content + pub(super) servercurrentevent_data: Arc, // ServerCurrentEvents = (+ / $)ServerName / UserId + PduId / Id (for edus), Data = EDU content + + //pub appservice: appservice::Appservice, + pub(super) id_appserviceregistrations: Arc, + + //pub pusher: pusher::PushData, + pub(super) senderkey_pusher: Arc, } impl KeyValueDatabase { @@ -157,7 +255,6 @@ impl KeyValueDatabase { let db = Arc::new(TokioRwLock::from(Self { _db: builder.clone(), - users: users::Users { userid_password: builder.open_tree("userid_password")?, userid_displayname: builder.open_tree("userid_displayname")?, userid_avatarurl: builder.open_tree("userid_avatarurl")?, @@ -175,13 +272,9 @@ impl KeyValueDatabase { userid_usersigningkeyid: builder.open_tree("userid_usersigningkeyid")?, userfilterid_filter: builder.open_tree("userfilterid_filter")?, todeviceid_events: builder.open_tree("todeviceid_events")?, - }, - uiaa: uiaa::Uiaa { + userdevicesessionid_uiaainfo: builder.open_tree("userdevicesessionid_uiaainfo")?, userdevicesessionid_uiaarequest: RwLock::new(BTreeMap::new()), - }, - rooms: rooms::Rooms { - edus: rooms::RoomEdus { readreceiptid_readreceipt: builder.open_tree("readreceiptid_readreceipt")?, roomuserid_privateread: builder.open_tree("roomuserid_privateread")?, // "Private" read receipt roomuserid_lastprivatereadupdate: builder @@ -190,7 +283,6 @@ impl KeyValueDatabase { roomid_lasttypingupdate: builder.open_tree("roomid_lasttypingupdate")?, presenceid_presence: builder.open_tree("presenceid_presence")?, userid_lastpresenceupdate: builder.open_tree("userid_lastpresenceupdate")?, - }, pduid_pdu: builder.open_tree("pduid_pdu")?, eventid_pduid: builder.open_tree("eventid_pduid")?, roomid_pduleaves: builder.open_tree("roomid_pduleaves")?, @@ -239,74 +331,23 @@ impl KeyValueDatabase { softfailedeventids: builder.open_tree("softfailedeventids")?, referencedevents: builder.open_tree("referencedevents")?, - pdu_cache: Mutex::new(LruCache::new( - config - .pdu_cache_capacity - .try_into() - .expect("pdu cache capacity fits into usize"), - )), - auth_chain_cache: Mutex::new(LruCache::new( - (100_000.0 * config.conduit_cache_capacity_modifier) as usize, - )), - shorteventid_cache: Mutex::new(LruCache::new( - (100_000.0 * config.conduit_cache_capacity_modifier) as usize, - )), - eventidshort_cache: Mutex::new(LruCache::new( - (100_000.0 * config.conduit_cache_capacity_modifier) as usize, - )), - shortstatekey_cache: Mutex::new(LruCache::new( - 
(100_000.0 * config.conduit_cache_capacity_modifier) as usize, - )), - statekeyshort_cache: Mutex::new(LruCache::new( - (100_000.0 * config.conduit_cache_capacity_modifier) as usize, - )), - our_real_users_cache: RwLock::new(HashMap::new()), - appservice_in_room_cache: RwLock::new(HashMap::new()), - lazy_load_waiting: Mutex::new(HashMap::new()), - stateinfo_cache: Mutex::new(LruCache::new( - (100.0 * config.conduit_cache_capacity_modifier) as usize, - )), - lasttimelinecount_cache: Mutex::new(HashMap::new()), - }, - account_data: account_data::AccountData { roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, roomusertype_roomuserdataid: builder.open_tree("roomusertype_roomuserdataid")?, - }, - media: media::Media { mediaid_file: builder.open_tree("mediaid_file")?, - }, - key_backups: key_backups::KeyBackups { backupid_algorithm: builder.open_tree("backupid_algorithm")?, backupid_etag: builder.open_tree("backupid_etag")?, backupkeyid_backup: builder.open_tree("backupkeyid_backup")?, - }, - transaction_ids: transaction_ids::TransactionIds { userdevicetxnid_response: builder.open_tree("userdevicetxnid_response")?, - }, - sending: sending::Sending { servername_educount: builder.open_tree("servername_educount")?, servernameevent_data: builder.open_tree("servernameevent_data")?, servercurrentevent_data: builder.open_tree("servercurrentevent_data")?, - maximum_requests: Arc::new(Semaphore::new(config.max_concurrent_requests as usize)), - sender: sending_sender, - }, - admin: admin::Admin { - sender: admin_sender, - }, - appservice: appservice::Appservice { - cached_registrations: Arc::new(RwLock::new(HashMap::new())), id_appserviceregistrations: builder.open_tree("id_appserviceregistrations")?, - }, - pusher: pusher::PushData { senderkey_pusher: builder.open_tree("senderkey_pusher")?, - }, - globals: globals::Globals::load( - builder.open_tree("global")?, - builder.open_tree("server_signingkeys")?, - config.clone(), - )?, + global: builder.open_tree("global")?, + server_signingkeys: builder.open_tree("server_signingkeys")?, })); + // TODO: do this after constructing the db let guard = db.read().await; // Matrix resource ownership is based on the server name; changing it @@ -744,7 +785,7 @@ impl KeyValueDatabase { .bump_database_version(latest_database_version)?; // Create the admin room and server user on first run - create_admin_room(&guard).await?; + create_admin_room().await?; warn!( "Created new {} database with version {}", diff --git a/src/lib.rs b/src/lib.rs index c35a1293..0d058df3 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -9,17 +9,26 @@ mod config; mod database; -mod error; -mod pdu; -mod ruma_wrapper; +mod service; +pub mod api; mod utils; -pub mod appservice_server; -pub mod client_server; -pub mod server_server; +use std::cell::Cell; pub use config::Config; -pub use database::Database; -pub use error::{Error, Result}; -pub use pdu::PduEvent; -pub use ruma_wrapper::{Ruma, RumaResponse}; +pub use utils::error::{Error, Result}; +pub use service::{Services, pdu::PduEvent}; +pub use api::ruma_wrapper::{Ruma, RumaResponse}; + +use crate::database::KeyValueDatabase; + +pub static SERVICES: Cell> = Cell::new(None); + +enum ServicesEnum { + Rocksdb(Services) +} + +pub fn services() -> Services { + SERVICES.get().unwrap() +} + diff --git a/src/main.rs b/src/main.rs index a1af9761..543b953e 100644 --- a/src/main.rs +++ b/src/main.rs @@ -46,47 +46,44 @@ use tikv_jemallocator::Jemalloc; #[global_allocator] static GLOBAL: Jemalloc = Jemalloc; -lazy_static! 
{ - static ref DB: Database = { - let raw_config = - Figment::new() - .merge( - Toml::file(Env::var("CONDUIT_CONFIG").expect( - "The CONDUIT_CONFIG env var needs to be set. Example: /etc/conduit.toml", - )) - .nested(), - ) - .merge(Env::prefixed("CONDUIT_").global()); - - let config = match raw_config.extract::() { - Ok(s) => s, - Err(e) => { - eprintln!("It looks like your config is invalid. The following error occured while parsing it: {}", e); - std::process::exit(1); - } - }; - - config.warn_deprecated(); - - let db = match Database::load_or_create(&config).await { - Ok(db) => db, - Err(e) => { - eprintln!( - "The database couldn't be loaded or created. The following error occured: {}", - e - ); - std::process::exit(1); - } - }; - }; -} - #[tokio::main] async fn main() { - lazy_static::initialize(&DB); + // Initialize DB + let raw_config = + Figment::new() + .merge( + Toml::file(Env::var("CONDUIT_CONFIG").expect( + "The CONDUIT_CONFIG env var needs to be set. Example: /etc/conduit.toml", + )) + .nested(), + ) + .merge(Env::prefixed("CONDUIT_").global()); + + let config = match raw_config.extract::() { + Ok(s) => s, + Err(e) => { + eprintln!("It looks like your config is invalid. The following error occured while parsing it: {}", e); + std::process::exit(1); + } + }; + + config.warn_deprecated(); + + let db = match KeyValueDatabase::load_or_create(&config).await { + Ok(db) => db, + Err(e) => { + eprintln!( + "The database couldn't be loaded or created. The following error occured: {}", + e + ); + std::process::exit(1); + } + }; + + SERVICES.set(db).expect("this is the first and only time we initialize the SERVICE static"); let start = async { - run_server(&config).await.unwrap(); + run_server().await.unwrap(); }; if config.allow_jaeger { diff --git a/src/service/account_data.rs b/src/service/account_data.rs index d85918f6..70ad9f2a 100644 --- a/src/service/account_data.rs +++ b/src/service/account_data.rs @@ -8,23 +8,15 @@ use ruma::{ use serde::{de::DeserializeOwned, Serialize}; use std::{collections::HashMap, sync::Arc}; -use super::abstraction::Tree; - -pub struct AccountData { - pub(super) roomuserdataid_accountdata: Arc, // RoomUserDataId = Room + User + Count + Type - pub(super) roomusertype_roomuserdataid: Arc, // RoomUserType = Room + User + Type -} - impl AccountData { /// Places one event in the account data of the user and removes the previous entry. 
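The `lazy_static!` database global is gone: `main()` now builds the database once, stores it in the `SERVICES` static, and the rest of the code reads it back through `services()`. A minimal sketch of that pattern; the `Services` struct here is an invented stand-in, and `std::sync::OnceLock` is used only to keep the sketch self-contained rather than the container the patch itself uses:

    use std::sync::OnceLock;

    // Invented minimal stand-in for the crate's real `Services`.
    struct Services {
        server_name: String,
    }

    // Set exactly once during startup, read everywhere afterwards.
    static SERVICES: OnceLock<Services> = OnceLock::new();

    fn services() -> &'static Services {
        SERVICES.get().expect("SERVICES is initialized in main before use")
    }

    fn main() {
        // In the real server this is built from the parsed config and the opened database.
        SERVICES
            .set(Services { server_name: "example.org".to_owned() })
            .ok()
            .expect("services are initialized only once");

        // Handlers no longer receive a `db` argument; they call `services()` instead.
        assert_eq!(services().server_name, "example.org");
    }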
- #[tracing::instrument(skip(self, room_id, user_id, event_type, data, globals))] + #[tracing::instrument(skip(self, room_id, user_id, event_type, data))] pub fn update( &self, room_id: Option<&RoomId>, user_id: &UserId, event_type: RoomAccountDataEventType, data: &T, - globals: &super::globals::Globals, ) -> Result<()> { let mut prefix = room_id .map(|r| r.to_string()) @@ -36,7 +28,7 @@ impl AccountData { prefix.push(0xff); let mut roomuserdataid = prefix.clone(); - roomuserdataid.extend_from_slice(&globals.next_count()?.to_be_bytes()); + roomuserdataid.extend_from_slice(&services().globals.next_count()?.to_be_bytes()); roomuserdataid.push(0xff); roomuserdataid.extend_from_slice(event_type.to_string().as_bytes()); diff --git a/src/service/admin.rs b/src/service/admin.rs index 6f418ea8..ded0adb9 100644 --- a/src/service/admin.rs +++ b/src/service/admin.rs @@ -5,14 +5,6 @@ use std::{ time::Instant, }; -use crate::{ - client_server::AUTO_GEN_PASSWORD_LENGTH, - error::{Error, Result}, - pdu::PduBuilder, - server_server, utils, - utils::HtmlEscape, - Database, PduEvent, -}; use clap::Parser; use regex::Regex; use ruma::{ @@ -36,6 +28,10 @@ use ruma::{ use serde_json::value::to_raw_value; use tokio::sync::{mpsc, MutexGuard, RwLock, RwLockReadGuard}; +use crate::{services, Error, api::{server_server, client_server::AUTO_GEN_PASSWORD_LENGTH}, PduEvent, utils::{HtmlEscape, self}}; + +use super::pdu::PduBuilder; + #[derive(Debug)] pub enum AdminRoomEvent { ProcessMessage(String), @@ -50,22 +46,19 @@ pub struct Admin { impl Admin { pub fn start_handler( &self, - db: Arc>, mut receiver: mpsc::UnboundedReceiver, ) { tokio::spawn(async move { // TODO: Use futures when we have long admin commands //let mut futures = FuturesUnordered::new(); - let guard = db.read().await; - - let conduit_user = UserId::parse(format!("@conduit:{}", guard.globals.server_name())) + let conduit_user = UserId::parse(format!("@conduit:{}", services().globals.server_name())) .expect("@conduit:server_name is valid"); - let conduit_room = guard + let conduit_room = services() .rooms .id_from_alias( - format!("#admins:{}", guard.globals.server_name()) + format!("#admins:{}", services().globals.server_name()) .as_str() .try_into() .expect("#admins:server_name is a valid room alias"), @@ -73,12 +66,9 @@ impl Admin { .expect("Database data for admin room alias must be valid") .expect("Admin room must exist"); - drop(guard); - let send_message = |message: RoomMessageEventContent, - guard: RwLockReadGuard<'_, Database>, mutex_lock: &MutexGuard<'_, ()>| { - guard + services() .rooms .build_and_append_pdu( PduBuilder { @@ -91,7 +81,6 @@ impl Admin { }, &conduit_user, &conduit_room, - &guard, mutex_lock, ) .unwrap(); @@ -100,15 +89,13 @@ impl Admin { loop { tokio::select! 
{ Some(event) = receiver.recv() => { - let guard = db.read().await; - let message_content = match event { AdminRoomEvent::SendMessage(content) => content, - AdminRoomEvent::ProcessMessage(room_message) => process_admin_message(&*guard, room_message).await + AdminRoomEvent::ProcessMessage(room_message) => process_admin_message(room_message).await }; let mutex_state = Arc::clone( - guard.globals + services().globals .roomid_mutex_state .write() .unwrap() @@ -118,7 +105,7 @@ impl Admin { let state_lock = mutex_state.lock().await; - send_message(message_content, guard, &state_lock); + send_message(message_content, &state_lock); drop(state_lock); } @@ -141,7 +128,7 @@ impl Admin { } // Parse and process a message from the admin room -async fn process_admin_message(db: &Database, room_message: String) -> RoomMessageEventContent { +async fn process_admin_message(room_message: String) -> RoomMessageEventContent { let mut lines = room_message.lines(); let command_line = lines.next().expect("each string has at least one line"); let body: Vec<_> = lines.collect(); @@ -149,7 +136,7 @@ async fn process_admin_message(db: &Database, room_message: String) -> RoomMessa let admin_command = match parse_admin_command(&command_line) { Ok(command) => command, Err(error) => { - let server_name = db.globals.server_name(); + let server_name = services().globals.server_name(); let message = error .to_string() .replace("server.name", server_name.as_str()); @@ -159,7 +146,7 @@ async fn process_admin_message(db: &Database, room_message: String) -> RoomMessa } }; - match process_admin_command(db, admin_command, body).await { + match process_admin_command(admin_command, body).await { Ok(reply_message) => reply_message, Err(error) => { let markdown_message = format!( @@ -322,7 +309,6 @@ enum AdminCommand { } async fn process_admin_command( - db: &Database, command: AdminCommand, body: Vec<&str>, ) -> Result { @@ -332,7 +318,7 @@ async fn process_admin_command( let appservice_config = body[1..body.len() - 1].join("\n"); let parsed_config = serde_yaml::from_str::(&appservice_config); match parsed_config { - Ok(yaml) => match db.appservice.register_appservice(yaml) { + Ok(yaml) => match services().appservice.register_appservice(yaml) { Ok(id) => RoomMessageEventContent::text_plain(format!( "Appservice registered with ID: {}.", id @@ -355,7 +341,7 @@ async fn process_admin_command( } AdminCommand::UnregisterAppservice { appservice_identifier, - } => match db.appservice.unregister_appservice(&appservice_identifier) { + } => match services().appservice.unregister_appservice(&appservice_identifier) { Ok(()) => RoomMessageEventContent::text_plain("Appservice unregistered."), Err(e) => RoomMessageEventContent::text_plain(format!( "Failed to unregister appservice: {}", @@ -363,7 +349,7 @@ async fn process_admin_command( )), }, AdminCommand::ListAppservices => { - if let Ok(appservices) = db.appservice.iter_ids().map(|ids| ids.collect::>()) { + if let Ok(appservices) = services().appservice.iter_ids().map(|ids| ids.collect::>()) { let count = appservices.len(); let output = format!( "Appservices ({}): {}", @@ -380,14 +366,14 @@ async fn process_admin_command( } } AdminCommand::ListRooms => { - let room_ids = db.rooms.iter_ids(); + let room_ids = services().rooms.iter_ids(); let output = format!( "Rooms:\n{}", room_ids .filter_map(|r| r.ok()) .map(|id| id.to_string() + "\tMembers: " - + &db + + &services() .rooms .room_joined_count(&id) .ok() @@ -399,7 +385,7 @@ async fn process_admin_command( ); 
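// A small sketch of the clap wiring that `parse_admin_command` above relies
// on: `AdminCommand` derives `Parser`, so each enum variant becomes a
// kebab-case subcommand of the admin bot. The helper below is illustrative
// (names are assumptions, not copied from the patch); clap treats the first
// token as argv[0], so a dummy program name is prepended.
use clap::Parser;

#[derive(Parser)]
enum DemoAdminCommand {
    /// List all rooms this server participates in
    ListRooms,
    /// Show the server's current configuration
    ShowConfig,
}

fn parse_demo(command_line: &str) -> Result<DemoAdminCommand, clap::Error> {
    let argv = std::iter::once("admin").chain(command_line.split_whitespace());
    DemoAdminCommand::try_parse_from(argv)
}

// parse_demo("list-rooms") yields DemoAdminCommand::ListRooms.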
RoomMessageEventContent::text_plain(output) } - AdminCommand::ListLocalUsers => match db.users.list_local_users() { + AdminCommand::ListLocalUsers => match services().users.list_local_users() { Ok(users) => { let mut msg: String = format!("Found {} local user account(s):\n", users.len()); msg += &users.join("\n"); @@ -408,7 +394,7 @@ async fn process_admin_command( Err(e) => RoomMessageEventContent::text_plain(e.to_string()), }, AdminCommand::IncomingFederation => { - let map = db.globals.roomid_federationhandletime.read().unwrap(); + let map = services().globals.roomid_federationhandletime.read().unwrap(); let mut msg: String = format!("Handling {} incoming pdus:\n", map.len()); for (r, (e, i)) in map.iter() { @@ -425,7 +411,7 @@ async fn process_admin_command( } AdminCommand::GetAuthChain { event_id } => { let event_id = Arc::::from(event_id); - if let Some(event) = db.rooms.get_pdu_json(&event_id)? { + if let Some(event) = services().rooms.get_pdu_json(&event_id)? { let room_id_str = event .get("room_id") .and_then(|val| val.as_str()) @@ -435,7 +421,7 @@ async fn process_admin_command( Error::bad_database("Invalid room id field in event in database") })?; let start = Instant::now(); - let count = server_server::get_auth_chain(room_id, vec![event_id], db) + let count = server_server::get_auth_chain(room_id, vec![event_id]) .await? .count(); let elapsed = start.elapsed(); @@ -486,10 +472,10 @@ async fn process_admin_command( } AdminCommand::GetPdu { event_id } => { let mut outlier = false; - let mut pdu_json = db.rooms.get_non_outlier_pdu_json(&event_id)?; + let mut pdu_json = services().rooms.get_non_outlier_pdu_json(&event_id)?; if pdu_json.is_none() { outlier = true; - pdu_json = db.rooms.get_pdu_json(&event_id)?; + pdu_json = services().rooms.get_pdu_json(&event_id)?; } match pdu_json { Some(json) => { @@ -519,7 +505,7 @@ async fn process_admin_command( None => RoomMessageEventContent::text_plain("PDU not found."), } } - AdminCommand::DatabaseMemoryUsage => match db._db.memory_usage() { + AdminCommand::DatabaseMemoryUsage => match services()._db.memory_usage() { Ok(response) => RoomMessageEventContent::text_plain(response), Err(e) => RoomMessageEventContent::text_plain(format!( "Failed to get database memory usage: {}", @@ -528,12 +514,12 @@ async fn process_admin_command( }, AdminCommand::ShowConfig => { // Construct and send the response - RoomMessageEventContent::text_plain(format!("{}", db.globals.config)) + RoomMessageEventContent::text_plain(format!("{}", services().globals.config)) } AdminCommand::ResetPassword { username } => { let user_id = match UserId::parse_with_server_name( username.as_str().to_lowercase(), - db.globals.server_name(), + services().globals.server_name(), ) { Ok(id) => id, Err(e) => { @@ -545,10 +531,10 @@ async fn process_admin_command( }; // Check if the specified user is valid - if !db.users.exists(&user_id)? - || db.users.is_deactivated(&user_id)? + if !services().users.exists(&user_id)? + || services().users.is_deactivated(&user_id)? 
|| user_id - == UserId::parse_with_server_name("conduit", db.globals.server_name()) + == UserId::parse_with_server_name("conduit", services().globals.server_name()) .expect("conduit user exists") { return Ok(RoomMessageEventContent::text_plain( @@ -558,7 +544,7 @@ async fn process_admin_command( let new_password = utils::random_string(AUTO_GEN_PASSWORD_LENGTH); - match db.users.set_password(&user_id, Some(new_password.as_str())) { + match services().users.set_password(&user_id, Some(new_password.as_str())) { Ok(()) => RoomMessageEventContent::text_plain(format!( "Successfully reset the password for user {}: {}", user_id, new_password @@ -574,7 +560,7 @@ async fn process_admin_command( // Validate user id let user_id = match UserId::parse_with_server_name( username.as_str().to_lowercase(), - db.globals.server_name(), + services().globals.server_name(), ) { Ok(id) => id, Err(e) => { @@ -589,21 +575,21 @@ async fn process_admin_command( "userid {user_id} is not allowed due to historical" ))); } - if db.users.exists(&user_id)? { + if services().users.exists(&user_id)? { return Ok(RoomMessageEventContent::text_plain(format!( "userid {user_id} already exists" ))); } // Create user - db.users.create(&user_id, Some(password.as_str()))?; + services().users.create(&user_id, Some(password.as_str()))?; // Default to pretty displayname let displayname = format!("{} ⚡️", user_id.localpart()); - db.users + services().users .set_displayname(&user_id, Some(displayname.clone()))?; // Initial account data - db.account_data.update( + services().account_data.update( None, &user_id, ruma::events::GlobalAccountDataEventType::PushRules @@ -614,24 +600,21 @@ async fn process_admin_command( global: ruma::push::Ruleset::server_default(&user_id), }, }, - &db.globals, )?; // we dont add a device since we're not the user, just the creator - db.flush()?; - // Inhibit login does not work for guests RoomMessageEventContent::text_plain(format!( "Created user with user_id: {user_id} and password: {password}" )) } AdminCommand::DisableRoom { room_id } => { - db.rooms.disabledroomids.insert(room_id.as_bytes(), &[])?; + services().rooms.disabledroomids.insert(room_id.as_bytes(), &[])?; RoomMessageEventContent::text_plain("Room disabled.") } AdminCommand::EnableRoom { room_id } => { - db.rooms.disabledroomids.remove(room_id.as_bytes())?; + services().rooms.disabledroomids.remove(room_id.as_bytes())?; RoomMessageEventContent::text_plain("Room enabled.") } AdminCommand::DeactivateUser { @@ -639,16 +622,16 @@ async fn process_admin_command( user_id, } => { let user_id = Arc::::from(user_id); - if db.users.exists(&user_id)? { + if services().users.exists(&user_id)? 
{ RoomMessageEventContent::text_plain(format!( "Making {} leave all rooms before deactivation...", user_id )); - db.users.deactivate_account(&user_id)?; + services().users.deactivate_account(&user_id)?; if leave_rooms { - db.rooms.leave_all_rooms(&user_id, &db).await?; + services().rooms.leave_all_rooms(&user_id).await?; } RoomMessageEventContent::text_plain(format!( @@ -685,7 +668,7 @@ async fn process_admin_command( if !force { user_ids.retain(|&user_id| { - match db.users.is_admin(user_id, &db.rooms, &db.globals) { + match services().users.is_admin(user_id) { Ok(is_admin) => match is_admin { true => { admins.push(user_id.localpart()); @@ -699,7 +682,7 @@ async fn process_admin_command( } for &user_id in &user_ids { - match db.users.deactivate_account(user_id) { + match services().users.deactivate_account(user_id) { Ok(_) => deactivation_count += 1, Err(_) => {} } @@ -707,7 +690,7 @@ async fn process_admin_command( if leave_rooms { for &user_id in &user_ids { - let _ = db.rooms.leave_all_rooms(user_id, &db).await; + let _ = services().rooms.leave_all_rooms(user_id).await; } } @@ -814,13 +797,13 @@ fn usage_to_html(text: &str, server_name: &ServerName) -> String { /// /// Users in this room are considered admins by conduit, and the room can be /// used to issue admin commands by talking to the server user inside it. -pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { - let room_id = RoomId::new(db.globals.server_name()); +pub(crate) async fn create_admin_room() -> Result<()> { + let room_id = RoomId::new(services().globals.server_name()); - db.rooms.get_or_create_shortroomid(&room_id, &db.globals)?; + services().rooms.get_or_create_shortroomid(&room_id)?; let mutex_state = Arc::clone( - db.globals + services().globals .roomid_mutex_state .write() .unwrap() @@ -830,10 +813,10 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { let state_lock = mutex_state.lock().await; // Create a user for the server - let conduit_user = UserId::parse_with_server_name("conduit", db.globals.server_name()) + let conduit_user = UserId::parse_with_server_name("conduit", services().globals.server_name()) .expect("@conduit:server_name is valid"); - db.users.create(&conduit_user, None)?; + services().users.create(&conduit_user, None)?; let mut content = RoomCreateEventContent::new(conduit_user.clone()); content.federate = true; @@ -841,7 +824,7 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { content.room_version = RoomVersionId::V6; // 1. The room create event - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomCreate, content: to_raw_value(&content).expect("event is valid, we just created it"), @@ -851,12 +834,11 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { }, &conduit_user, &room_id, - &db, &state_lock, )?; // 2. 
Make conduit bot join - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { @@ -876,7 +858,6 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { }, &conduit_user, &room_id, - &db, &state_lock, )?; @@ -884,7 +865,7 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { let mut users = BTreeMap::new(); users.insert(conduit_user.clone(), 100.into()); - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomPowerLevels, content: to_raw_value(&RoomPowerLevelsEventContent { @@ -898,12 +879,11 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { }, &conduit_user, &room_id, - &db, &state_lock, )?; // 4.1 Join Rules - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomJoinRules, content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite)) @@ -914,12 +894,11 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { }, &conduit_user, &room_id, - &db, &state_lock, )?; // 4.2 History Visibility - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomHistoryVisibility, content: to_raw_value(&RoomHistoryVisibilityEventContent::new( @@ -932,12 +911,11 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { }, &conduit_user, &room_id, - &db, &state_lock, )?; // 4.3 Guest Access - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomGuestAccess, content: to_raw_value(&RoomGuestAccessEventContent::new(GuestAccess::Forbidden)) @@ -948,14 +926,13 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { }, &conduit_user, &room_id, - &db, &state_lock, )?; // 5. Events implied by name and topic - let room_name = RoomName::parse(format!("{} Admin Room", db.globals.server_name())) + let room_name = RoomName::parse(format!("{} Admin Room", services().globals.server_name())) .expect("Room name is valid"); - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomName, content: to_raw_value(&RoomNameEventContent::new(Some(room_name))) @@ -966,15 +943,14 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { }, &conduit_user, &room_id, - &db, &state_lock, )?; - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomTopic, content: to_raw_value(&RoomTopicEventContent { - topic: format!("Manage {}", db.globals.server_name()), + topic: format!("Manage {}", services().globals.server_name()), }) .expect("event is valid, we just created it"), unsigned: None, @@ -983,16 +959,15 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { }, &conduit_user, &room_id, - &db, &state_lock, )?; // 6. 
Room alias - let alias: Box = format!("#admins:{}", db.globals.server_name()) + let alias: Box = format!("#admins:{}", services().globals.server_name()) .try_into() .expect("#admins:server_name is a valid alias name"); - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomCanonicalAlias, content: to_raw_value(&RoomCanonicalAliasEventContent { @@ -1006,11 +981,10 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { }, &conduit_user, &room_id, - &db, &state_lock, )?; - db.rooms.set_alias(&alias, Some(&room_id), &db.globals)?; + services().rooms.set_alias(&alias, Some(&room_id))?; Ok(()) } @@ -1019,20 +993,19 @@ pub(crate) async fn create_admin_room(db: &Database) -> Result<()> { /// /// In conduit, this is equivalent to granting admin privileges. pub(crate) async fn make_user_admin( - db: &Database, user_id: &UserId, displayname: String, ) -> Result<()> { - let admin_room_alias: Box = format!("#admins:{}", db.globals.server_name()) + let admin_room_alias: Box = format!("#admins:{}", services().globals.server_name()) .try_into() .expect("#admins:server_name is a valid alias name"); - let room_id = db + let room_id = services() .rooms .id_from_alias(&admin_room_alias)? .expect("Admin room must exist"); let mutex_state = Arc::clone( - db.globals + services().globals .roomid_mutex_state .write() .unwrap() @@ -1042,11 +1015,11 @@ pub(crate) async fn make_user_admin( let state_lock = mutex_state.lock().await; // Use the server user to grant the new admin's power level - let conduit_user = UserId::parse_with_server_name("conduit", db.globals.server_name()) + let conduit_user = UserId::parse_with_server_name("conduit", services().globals.server_name()) .expect("@conduit:server_name is valid"); // Invite and join the real user - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { @@ -1066,10 +1039,9 @@ pub(crate) async fn make_user_admin( }, &conduit_user, &room_id, - &db, &state_lock, )?; - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { @@ -1089,7 +1061,6 @@ pub(crate) async fn make_user_admin( }, &user_id, &room_id, - &db, &state_lock, )?; @@ -1098,7 +1069,7 @@ pub(crate) async fn make_user_admin( users.insert(conduit_user.to_owned(), 100.into()); users.insert(user_id.to_owned(), 100.into()); - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomPowerLevels, content: to_raw_value(&RoomPowerLevelsEventContent { @@ -1112,17 +1083,16 @@ pub(crate) async fn make_user_admin( }, &conduit_user, &room_id, - &db, &state_lock, )?; // Send welcome message - db.rooms.build_and_append_pdu( + services().rooms.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMessage, content: to_raw_value(&RoomMessageEventContent::text_html( - format!("## Thank you for trying out Conduit!\n\nConduit is currently in Beta. 
This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nFor a list of available commands, send the following message in this room: `@conduit:{}: --help`\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`", db.globals.server_name()).to_owned(), - format!("

                Thank you for trying out Conduit!

                \n

                Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.

                \n

                Helpful links:

                \n
                \n

                Website: https://conduit.rs
                Git and Documentation: https://gitlab.com/famedly/conduit
                Report issues: https://gitlab.com/famedly/conduit/-/issues

                \n
                \n

                For a list of available commands, send the following message in this room: @conduit:{}: --help

                \n

                Here are some rooms you can join (by typing the command):

                \n

                Conduit room (Ask questions and get notified on updates):
                /join #conduit:fachschaften.org

                \n

                Conduit lounge (Off-topic, only Conduit users are allowed to join)
                /join #conduit-lounge:conduit.rs

                \n", db.globals.server_name()).to_owned(), + format!("## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nFor a list of available commands, send the following message in this room: `@conduit:{}: --help`\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`", services().globals.server_name()).to_owned(), + format!("

                Thank you for trying out Conduit!

                \n

                Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.

                \n

                Helpful links:

                \n
                \n

                Website: https://conduit.rs
                Git and Documentation: https://gitlab.com/famedly/conduit
                Report issues: https://gitlab.com/famedly/conduit/-/issues

                \n
                \n

                For a list of available commands, send the following message in this room: @conduit:{}: --help

                \n

                Here are some rooms you can join (by typing the command):

                \n

                Conduit room (Ask questions and get notified on updates):
                /join #conduit:fachschaften.org

                \n

                Conduit lounge (Off-topic, only Conduit users are allowed to join)
                /join #conduit-lounge:conduit.rs

                \n", services().globals.server_name()).to_owned(), )) .expect("event is valid, we just created it"), unsigned: None, @@ -1131,7 +1101,6 @@ pub(crate) async fn make_user_admin( }, &conduit_user, &room_id, - &db, &state_lock, )?; diff --git a/src/service/appservice/data.rs b/src/service/appservice/data.rs index fe57451f..eed84d59 100644 --- a/src/service/appservice/data.rs +++ b/src/service/appservice/data.rs @@ -1,17 +1,18 @@ pub trait Data { + type Iter: Iterator; /// Registers an appservice and returns the ID to the caller - pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result; + fn register_appservice(&self, yaml: serde_yaml::Value) -> Result; /// Remove an appservice registration /// /// # Arguments /// /// * `service_name` - the name you send to register the service previously - pub fn unregister_appservice(&self, service_name: &str) -> Result<()>; + fn unregister_appservice(&self, service_name: &str) -> Result<()>; - pub fn get_registration(&self, id: &str) -> Result>; + fn get_registration(&self, id: &str) -> Result>; - pub fn iter_ids(&self) -> Result> + '_>; + fn iter_ids(&self) -> Result>>; - pub fn all(&self) -> Result>; + fn all(&self) -> Result>; } diff --git a/src/service/key_backups.rs b/src/service/key_backups.rs index 10443f6b..be1d6b18 100644 --- a/src/service/key_backups.rs +++ b/src/service/key_backups.rs @@ -1,4 +1,4 @@ -use crate::{utils, Error, Result}; +use crate::{utils, Error, Result, services}; use ruma::{ api::client::{ backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, @@ -9,22 +9,13 @@ use ruma::{ }; use std::{collections::BTreeMap, sync::Arc}; -use super::abstraction::Tree; - -pub struct KeyBackups { - pub(super) backupid_algorithm: Arc, // BackupId = UserId + Version(Count) - pub(super) backupid_etag: Arc, // BackupId = UserId + Version(Count) - pub(super) backupkeyid_backup: Arc, // BackupKeyId = UserId + Version + RoomId + SessionId -} - impl KeyBackups { pub fn create_backup( &self, user_id: &UserId, backup_metadata: &Raw, - globals: &super::globals::Globals, ) -> Result { - let version = globals.next_count()?.to_string(); + let version = services().globals.next_count()?.to_string(); let mut key = user_id.as_bytes().to_vec(); key.push(0xff); @@ -35,7 +26,7 @@ impl KeyBackups { &serde_json::to_vec(backup_metadata).expect("BackupAlgorithm::to_vec always works"), )?; self.backupid_etag - .insert(&key, &globals.next_count()?.to_be_bytes())?; + .insert(&key, &services().globals.next_count()?.to_be_bytes())?; Ok(version) } @@ -61,7 +52,6 @@ impl KeyBackups { user_id: &UserId, version: &str, backup_metadata: &Raw, - globals: &super::globals::Globals, ) -> Result { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); @@ -77,7 +67,7 @@ impl KeyBackups { self.backupid_algorithm .insert(&key, backup_metadata.json().get().as_bytes())?; self.backupid_etag - .insert(&key, &globals.next_count()?.to_be_bytes())?; + .insert(&key, &services().globals.next_count()?.to_be_bytes())?; Ok(version.to_owned()) } @@ -157,7 +147,6 @@ impl KeyBackups { room_id: &RoomId, session_id: &str, key_data: &Raw, - globals: &super::globals::Globals, ) -> Result<()> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); @@ -171,7 +160,7 @@ impl KeyBackups { } self.backupid_etag - .insert(&key, &globals.next_count()?.to_be_bytes())?; + .insert(&key, &services().globals.next_count()?.to_be_bytes())?; key.push(0xff); key.extend_from_slice(room_id.as_bytes()); diff --git a/src/service/media.rs b/src/service/media.rs index a4bb4025..1bdf6d47 100644 
--- a/src/service/media.rs +++ b/src/service/media.rs @@ -1,4 +1,3 @@ -use crate::database::globals::Globals; use image::{imageops::FilterType, GenericImageView}; use super::abstraction::Tree; diff --git a/src/service/mod.rs b/src/service/mod.rs new file mode 100644 index 00000000..80239cbf --- /dev/null +++ b/src/service/mod.rs @@ -0,0 +1,28 @@ +pub mod pdu; +pub mod appservice; +pub mod pusher; +pub mod rooms; +pub mod transaction_ids; +pub mod uiaa; +pub mod users; +pub mod account_data; +pub mod admin; +pub mod globals; +pub mod key_backups; +pub mod media; +pub mod sending; + +pub struct Services { + pub appservice: appservice::Service, + pub pusher: pusher::Service, + pub rooms: rooms::Service, + pub transaction_ids: transaction_ids::Service, + pub uiaa: uiaa::Service, + pub users: users::Service, + //pub account_data: account_data::Service, + //pub admin: admin::Service, + pub globals: globals::Service, + //pub key_backups: key_backups::Service, + //pub media: media::Service, + //pub sending: sending::Service, +} diff --git a/src/service/pdu.rs b/src/service/pdu.rs index 20ec01ea..47e21a60 100644 --- a/src/service/pdu.rs +++ b/src/service/pdu.rs @@ -1,4 +1,4 @@ -use crate::{Database, Error}; +use crate::{Database, Error, services}; use ruma::{ events::{ room::member::RoomMemberEventContent, AnyEphemeralRoomEvent, AnyRoomEvent, AnyStateEvent, @@ -332,7 +332,6 @@ impl Ord for PduEvent { /// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. pub(crate) fn gen_event_id_canonical_json( pdu: &RawJsonValue, - db: &Database, ) -> crate::Result<(Box, CanonicalJsonObject)> { let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { warn!("Error parsing incoming event {:?}: {:?}", pdu, e); @@ -344,7 +343,7 @@ pub(crate) fn gen_event_id_canonical_json( .and_then(|id| RoomId::parse(id.as_str()?).ok()) .ok_or_else(|| Error::bad_database("PDU in db has invalid room_id."))?; - let room_version_id = db.rooms.get_room_version(&room_id); + let room_version_id = services().rooms.get_room_version(&room_id); let event_id = format!( "${}", diff --git a/src/service/pusher/data.rs b/src/service/pusher/data.rs index 468ad8b4..ef2b8193 100644 --- a/src/service/pusher/data.rs +++ b/src/service/pusher/data.rs @@ -1,11 +1,13 @@ +use ruma::{UserId, api::client::push::{set_pusher, get_pushers}}; + pub trait Data { fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()>; - pub fn get_pusher(&self, senderkey: &[u8]) -> Result>; + fn get_pusher(&self, senderkey: &[u8]) -> Result>; - pub fn get_pushers(&self, sender: &UserId) -> Result>; + fn get_pushers(&self, sender: &UserId) -> Result>; - pub fn get_pusher_senderkeys<'a>( + fn get_pusher_senderkeys<'a>( &'a self, sender: &UserId, ) -> impl Iterator> + 'a; diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 342763e8..87e91a14 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -1,7 +1,27 @@ mod data; pub use data::Data; -use crate::service::*; +use crate::{services, Error, PduEvent}; +use bytes::BytesMut; +use ruma::{ + api::{ + client::push::{get_pushers, set_pusher, PusherKind}, + push_gateway::send_event_notification::{ + self, + v1::{Device, Notification, NotificationCounts, NotificationPriority}, + }, + MatrixVersion, OutgoingRequest, SendAccessToken, + }, + events::{ + room::{name::RoomNameEventContent, power_levels::RoomPowerLevelsEventContent}, + AnySyncRoomEvent, RoomEventType, StateEventType, + }, + push::{Action, PushConditionRoomCtx, PushFormat, 
Ruleset, Tweak}, + serde::Raw, + uint, RoomId, UInt, UserId, +}; +use std::{fmt::Debug, mem}; +use tracing::{error, info, warn}; pub struct Service { db: D, @@ -27,9 +47,8 @@ impl Service<_> { self.db.get_pusher_senderkeys(sender) } - #[tracing::instrument(skip(globals, destination, request))] + #[tracing::instrument(skip(destination, request))] pub async fn send_request( - globals: &crate::database::globals::Globals, destination: &str, request: T, ) -> Result @@ -57,7 +76,7 @@ impl Service<_> { //*reqwest_request.timeout_mut() = Some(Duration::from_secs(5)); let url = reqwest_request.url().clone(); - let response = globals.default_client().execute(reqwest_request).await; + let response = services().globals.default_client().execute(reqwest_request).await; match response { Ok(mut response) => { @@ -105,19 +124,19 @@ impl Service<_> { } } - #[tracing::instrument(skip(user, unread, pusher, ruleset, pdu, db))] + #[tracing::instrument(skip(user, unread, pusher, ruleset, pdu))] pub async fn send_push_notice( + &self, user: &UserId, unread: UInt, pusher: &get_pushers::v3::Pusher, ruleset: Ruleset, pdu: &PduEvent, - db: &Database, ) -> Result<()> { let mut notify = None; let mut tweaks = Vec::new(); - let power_levels: RoomPowerLevelsEventContent = db + let power_levels: RoomPowerLevelsEventContent = services() .rooms .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? .map(|ev| { @@ -127,13 +146,12 @@ impl Service<_> { .transpose()? .unwrap_or_default(); - for action in get_actions( + for action in self.get_actions( user, &ruleset, &power_levels, &pdu.to_sync_room_event(), &pdu.room_id, - db, )? { let n = match action { Action::DontNotify => false, @@ -155,27 +173,26 @@ impl Service<_> { } if notify == Some(true) { - send_notice(unread, pusher, tweaks, pdu, db).await?; + self.send_notice(unread, pusher, tweaks, pdu).await?; } // Else the event triggered no actions Ok(()) } - #[tracing::instrument(skip(user, ruleset, pdu, db))] + #[tracing::instrument(skip(user, ruleset, pdu))] pub fn get_actions<'a>( + &self, user: &UserId, ruleset: &'a Ruleset, power_levels: &RoomPowerLevelsEventContent, pdu: &Raw, room_id: &RoomId, - db: &Database, ) -> Result<&'a [Action]> { let ctx = PushConditionRoomCtx { room_id: room_id.to_owned(), member_count: 10_u32.into(), // TODO: get member count efficiently - user_display_name: db - .users + user_display_name: services().users .displayname(user)? 
.unwrap_or_else(|| user.localpart().to_owned()), users_power_levels: power_levels.users.clone(), @@ -186,13 +203,13 @@ impl Service<_> { Ok(ruleset.get_actions(pdu, &ctx)) } - #[tracing::instrument(skip(unread, pusher, tweaks, event, db))] + #[tracing::instrument(skip(unread, pusher, tweaks, event))] async fn send_notice( + &self, unread: UInt, pusher: &get_pushers::v3::Pusher, tweaks: Vec, event: &PduEvent, - db: &Database, ) -> Result<()> { // TODO: email if pusher.kind == PusherKind::Email { @@ -240,12 +257,8 @@ impl Service<_> { } if event_id_only { - send_request( - &db.globals, - url, - send_event_notification::v1::Request::new(notifi), - ) - .await?; + self.send_request(url, send_event_notification::v1::Request::new(notifi)) + .await?; } else { notifi.sender = Some(&event.sender); notifi.event_type = Some(&event.kind); @@ -256,11 +269,11 @@ impl Service<_> { notifi.user_is_target = event.state_key.as_deref() == Some(event.sender.as_str()); } - let user_name = db.users.displayname(&event.sender)?; + let user_name = services().users.displayname(&event.sender)?; notifi.sender_display_name = user_name.as_deref(); let room_name = if let Some(room_name_pdu) = - db.rooms + services().rooms .room_state_get(&event.room_id, &StateEventType::RoomName, "")? { serde_json::from_str::(room_name_pdu.content.get()) @@ -272,8 +285,7 @@ impl Service<_> { notifi.room_name = room_name.as_deref(); - send_request( - &db.globals, + self.send_request( url, send_event_notification::v1::Request::new(notifi), ) diff --git a/src/service/rooms/alias/data.rs b/src/service/rooms/alias/data.rs index 9dbfc7b5..655f32aa 100644 --- a/src/service/rooms/alias/data.rs +++ b/src/service/rooms/alias/data.rs @@ -1,22 +1,24 @@ +use ruma::{RoomId, RoomAliasId}; + pub trait Data { /// Creates or updates the alias to the given room id. - pub fn set_alias( + fn set_alias( alias: &RoomAliasId, room_id: &RoomId ) -> Result<()>; /// Forgets about an alias. Returns an error if the alias did not exist. - pub fn remove_alias( + fn remove_alias( alias: &RoomAliasId, ) -> Result<()>; /// Looks up the roomid for the given alias. 
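// The alias service being rewritten here follows the storage-abstraction
// pattern this patch introduces for every service module: a `Data` trait
// describes the persistence calls, and the service struct only delegates to
// it. A self-contained sketch of that shape follows; the names and signatures
// are illustrative assumptions rather than copies of the trait in this hunk.
use ruma::{RoomAliasId, RoomId};

pub trait AliasData {
    fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> crate::Result<()>;
    fn resolve_local_alias(&self, alias: &RoomAliasId) -> crate::Result<Option<Box<RoomId>>>;
}

pub struct AliasService<D: AliasData> {
    db: D,
}

impl<D: AliasData> AliasService<D> {
    pub fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> crate::Result<()> {
        self.db.set_alias(alias, room_id)
    }

    pub fn resolve_local_alias(&self, alias: &RoomAliasId) -> crate::Result<Option<Box<RoomId>>> {
        self.db.resolve_local_alias(alias)
    }
}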
- pub fn resolve_local_alias( + fn resolve_local_alias( alias: &RoomAliasId, ) -> Result<()>; /// Returns all local aliases that point to the given room - pub fn local_aliases_for_room( + fn local_aliases_for_room( alias: &RoomAliasId, ) -> Result<()>; } diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index cfe05396..f46609aa 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -1,14 +1,13 @@ mod data; pub use data::Data; - -use crate::service::*; +use ruma::{RoomAliasId, RoomId}; pub struct Service { db: D, } impl Service<_> { - #[tracing::instrument(skip(self, globals))] + #[tracing::instrument(skip(self))] pub fn set_alias( &self, alias: &RoomAliasId, @@ -17,7 +16,7 @@ impl Service<_> { self.db.set_alias(alias, room_id) } - #[tracing::instrument(skip(self, globals))] + #[tracing::instrument(skip(self))] pub fn remove_alias( &self, alias: &RoomAliasId, diff --git a/src/service/rooms/auth_chain/data.rs b/src/service/rooms/auth_chain/data.rs index d8fde958..88c86fad 100644 --- a/src/service/rooms/auth_chain/data.rs +++ b/src/service/rooms/auth_chain/data.rs @@ -1,3 +1,5 @@ +use std::collections::HashSet; + pub trait Data { fn get_cached_eventid_authchain<'a>() -> Result>; fn cache_eventid_authchain<'a>(shorteventid: u64, auth_chain: &HashSet) -> Result>; diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index dfc289f3..e17c10a1 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -1,4 +1,6 @@ mod data; +use std::{sync::Arc, collections::HashSet}; + pub use data::Data; use crate::service::*; diff --git a/src/service/rooms/directory/data.rs b/src/service/rooms/directory/data.rs index 83d78853..e28cdd12 100644 --- a/src/service/rooms/directory/data.rs +++ b/src/service/rooms/directory/data.rs @@ -1,3 +1,5 @@ +use ruma::RoomId; + pub trait Data { /// Adds the room to the public room directory fn set_public(room_id: &RoomId) -> Result<()>; diff --git a/src/service/rooms/directory/mod.rs b/src/service/rooms/directory/mod.rs index b92933f4..cb9cda86 100644 --- a/src/service/rooms/directory/mod.rs +++ b/src/service/rooms/directory/mod.rs @@ -1,5 +1,6 @@ mod data; pub use data::Data; +use ruma::RoomId; use crate::service::*; @@ -10,21 +11,21 @@ pub struct Service { impl Service<_> { #[tracing::instrument(skip(self))] pub fn set_public(&self, room_id: &RoomId) -> Result<()> { - self.db.set_public(&self, room_id) + self.db.set_public(room_id) } #[tracing::instrument(skip(self))] pub fn set_not_public(&self, room_id: &RoomId) -> Result<()> { - self.db.set_not_public(&self, room_id) + self.db.set_not_public(room_id) } #[tracing::instrument(skip(self))] pub fn is_public_room(&self, room_id: &RoomId) -> Result { - self.db.is_public_room(&self, room_id) + self.db.is_public_room(room_id) } #[tracing::instrument(skip(self))] pub fn public_rooms(&self) -> impl Iterator>> + '_ { - self.db.public_rooms(&self, room_id) + self.db.public_rooms() } } diff --git a/src/service/rooms/edus/mod.rs b/src/service/rooms/edus/mod.rs index d8ce5300..5566fb2c 100644 --- a/src/service/rooms/edus/mod.rs +++ b/src/service/rooms/edus/mod.rs @@ -1,3 +1,9 @@ pub mod presence; pub mod read_receipt; pub mod typing; + +pub struct Service { + presence: presence::Service, + read_receipt: read_receipt::Service, + typing: typing::Service, +} diff --git a/src/service/rooms/edus/presence/data.rs b/src/service/rooms/edus/presence/data.rs index de72e219..8e3c672f 100644 --- 
a/src/service/rooms/edus/presence/data.rs +++ b/src/service/rooms/edus/presence/data.rs @@ -1,3 +1,7 @@ +use std::collections::HashMap; + +use ruma::{UserId, RoomId, events::presence::PresenceEvent}; + pub trait Data { /// Adds a presence event which will be saved until a new event replaces it. /// diff --git a/src/service/rooms/edus/presence/mod.rs b/src/service/rooms/edus/presence/mod.rs index 5793a799..5a988d4f 100644 --- a/src/service/rooms/edus/presence/mod.rs +++ b/src/service/rooms/edus/presence/mod.rs @@ -1,5 +1,8 @@ mod data; +use std::collections::HashMap; + pub use data::Data; +use ruma::{RoomId, UserId, events::presence::PresenceEvent}; use crate::service::*; @@ -108,7 +111,7 @@ impl Service<_> { }*/ /// Returns the most recent presence updates that happened after the event with id `since`. - #[tracing::instrument(skip(self, since, _rooms, _globals))] + #[tracing::instrument(skip(self, since, room_id))] pub fn presence_since( &self, room_id: &RoomId, diff --git a/src/service/rooms/edus/read_receipt/data.rs b/src/service/rooms/edus/read_receipt/data.rs index 4befcf2c..32b091f2 100644 --- a/src/service/rooms/edus/read_receipt/data.rs +++ b/src/service/rooms/edus/read_receipt/data.rs @@ -1,3 +1,5 @@ +use ruma::{RoomId, events::receipt::ReceiptEvent, UserId, serde::Raw}; + pub trait Data { /// Replaces the previous read receipt. fn readreceipt_update( diff --git a/src/service/rooms/edus/read_receipt/mod.rs b/src/service/rooms/edus/read_receipt/mod.rs index 9cd474fb..744fece1 100644 --- a/src/service/rooms/edus/read_receipt/mod.rs +++ b/src/service/rooms/edus/read_receipt/mod.rs @@ -1,7 +1,6 @@ mod data; pub use data::Data; - -use crate::service::*; +use ruma::{RoomId, UserId, events::receipt::ReceiptEvent, serde::Raw}; pub struct Service { db: D, @@ -15,7 +14,7 @@ impl Service<_> { room_id: &RoomId, event: ReceiptEvent, ) -> Result<()> { - self.db.readreceipt_update(user_id, room_id, event); + self.db.readreceipt_update(user_id, room_id, event) } /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. @@ -35,7 +34,7 @@ impl Service<_> { } /// Sets a private read marker at `count`. - #[tracing::instrument(skip(self, globals))] + #[tracing::instrument(skip(self))] pub fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()> { self.db.private_read_set(room_id, user_id, count) } diff --git a/src/service/rooms/edus/typing/data.rs b/src/service/rooms/edus/typing/data.rs index 83ff90ea..0c773135 100644 --- a/src/service/rooms/edus/typing/data.rs +++ b/src/service/rooms/edus/typing/data.rs @@ -1,3 +1,7 @@ +use std::collections::HashSet; + +use ruma::{UserId, RoomId}; + pub trait Data { /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is /// called. diff --git a/src/service/rooms/edus/typing/mod.rs b/src/service/rooms/edus/typing/mod.rs index b29c7888..68b9fd83 100644 --- a/src/service/rooms/edus/typing/mod.rs +++ b/src/service/rooms/edus/typing/mod.rs @@ -1,5 +1,6 @@ mod data; pub use data::Data; +use ruma::{UserId, RoomId}; use crate::service::*; @@ -66,7 +67,6 @@ impl Service<_> { */ /// Returns the count of the last typing update in this room. 
- #[tracing::instrument(skip(self, globals))] pub fn last_typing_update(&self, room_id: &RoomId) -> Result { self.db.last_typing_update(room_id) } diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 5b77586a..71529570 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -1,8 +1,29 @@ - /// An async function that can recursively call itself. type AsyncRecursiveType<'a, T> = Pin + 'a + Send>>; -use crate::service::*; +use std::{ + collections::{btree_map, hash_map, BTreeMap, HashMap, HashSet}, + pin::Pin, + sync::{Arc, RwLock}, + time::{Duration, Instant}, +}; + +use futures_util::Future; +use ruma::{ + api::{ + client::error::ErrorKind, + federation::event::{get_event, get_room_state_ids}, + }, + events::{room::create::RoomCreateEventContent, StateEventType}, + int, + serde::Base64, + signatures::CanonicalJsonValue, + state_res::{self, RoomVersion, StateMap}, + uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, +}; +use tracing::{error, info, trace, warn}; + +use crate::{service::*, services, Error, PduEvent}; pub struct Service; @@ -31,45 +52,47 @@ impl Service { /// it /// 14. Use state resolution to find new room state // We use some AsyncRecursiveType hacks here so we can call this async funtion recursively - #[tracing::instrument(skip(value, is_timeline_event, db, pub_key_map))] + #[tracing::instrument(skip(value, is_timeline_event, pub_key_map))] pub(crate) async fn handle_incoming_pdu<'a>( + &self, origin: &'a ServerName, event_id: &'a EventId, room_id: &'a RoomId, value: BTreeMap, is_timeline_event: bool, - db: &'a Database, pub_key_map: &'a RwLock>>, ) -> Result>> { - db.rooms.exists(room_id)?.ok_or(Error::BadRequest(ErrorKind::NotFound, "Room is unknown to this server"))?; + services().rooms.exists(room_id)?.ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Room is unknown to this server", + ))?; + + services() + .rooms + .is_disabled(room_id)? + .ok_or(Error::BadRequest( + ErrorKind::Forbidden, + "Federation of this room is currently disabled on this server.", + ))?; - db.rooms.is_disabled(room_id)?.ok_or(Error::BadRequest(ErrorKind::Forbidden, "Federation of this room is currently disabled on this server."))?; - // 1. Skip the PDU if we already have it as a timeline event - if let Some(pdu_id) = db.rooms.get_pdu_id(event_id)? { - return Some(pdu_id.to_vec()); + if let Some(pdu_id) = services().rooms.get_pdu_id(event_id)? { + return Ok(Some(pdu_id.to_vec())); } - let create_event = db + let create_event = services() .rooms .room_state_get(room_id, &StateEventType::RoomCreate, "")? .ok_or_else(|| Error::bad_database("Failed to find create event in db."))?; - let first_pdu_in_room = db + let first_pdu_in_room = services() .rooms .first_pdu_in_room(room_id)? .ok_or_else(|| Error::bad_database("Failed to find first pdu in db."))?; - let (incoming_pdu, val) = handle_outlier_pdu( - origin, - &create_event, - event_id, - room_id, - value, - db, - pub_key_map, - ) - .await?; + let (incoming_pdu, val) = self + .handle_outlier_pdu(origin, &create_event, event_id, room_id, value, pub_key_map) + .await?; // 8. if not timeline event: stop if !is_timeline_event { @@ -82,15 +105,27 @@ impl Service { } // 9. Fetch any missing prev events doing all checks listed here starting at 1. 
These are timeline events - let sorted_prev_events = fetch_unknown_prev_events(incoming_pdu.prev_events.clone()); + let (sorted_prev_events, eventid_info) = self.fetch_unknown_prev_events( + origin, + &create_event, + room_id, + pub_key_map, + incoming_pdu.prev_events.clone(), + ); let mut errors = 0; - for prev_id in dbg!(sorted) { + for prev_id in dbg!(sorted_prev_events) { // Check for disabled again because it might have changed - db.rooms.is_disabled(room_id)?.ok_or(Error::BadRequest(ErrorKind::Forbidden, "Federation of - this room is currently disabled on this server."))?; - - if let Some((time, tries)) = db + services() + .rooms + .is_disabled(room_id)? + .ok_or(Error::BadRequest( + ErrorKind::Forbidden, + "Federation of + this room is currently disabled on this server.", + ))?; + + if let Some((time, tries)) = services() .globals .bad_event_ratelimiter .read() @@ -120,26 +155,27 @@ impl Service { } let start_time = Instant::now(); - db.globals + services() + .globals .roomid_federationhandletime .write() .unwrap() .insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time)); - if let Err(e) = upgrade_outlier_to_timeline_pdu( - pdu, - json, - &create_event, - origin, - db, - room_id, - pub_key_map, - ) - .await + if let Err(e) = self + .upgrade_outlier_to_timeline_pdu( + pdu, + json, + &create_event, + origin, + room_id, + pub_key_map, + ) + .await { errors += 1; warn!("Prev event {} failed: {}", prev_id, e); - match db + match services() .globals .bad_event_ratelimiter .write() @@ -155,7 +191,8 @@ impl Service { } } let elapsed = start_time.elapsed(); - db.globals + services() + .globals .roomid_federationhandletime .write() .unwrap() @@ -172,22 +209,23 @@ impl Service { // Done with prev events, now handling the incoming event let start_time = Instant::now(); - db.globals + services() + .globals .roomid_federationhandletime .write() .unwrap() .insert(room_id.to_owned(), (event_id.to_owned(), start_time)); - let r = upgrade_outlier_to_timeline_pdu( + let r = services().rooms.event_handler.upgrade_outlier_to_timeline_pdu( incoming_pdu, val, &create_event, origin, - db, room_id, pub_key_map, ) .await; - db.globals + services() + .globals .roomid_federationhandletime .write() .unwrap() @@ -196,22 +234,23 @@ impl Service { r } - #[tracing::instrument(skip(create_event, value, db, pub_key_map))] + #[tracing::instrument(skip(create_event, value, pub_key_map))] fn handle_outlier_pdu<'a>( + &self, origin: &'a ServerName, create_event: &'a PduEvent, event_id: &'a EventId, room_id: &'a RoomId, value: BTreeMap, - db: &'a Database, pub_key_map: &'a RwLock>>, - ) -> AsyncRecursiveType<'a, Result<(Arc, BTreeMap), String>> { + ) -> AsyncRecursiveType<'a, Result<(Arc, BTreeMap), String>> + { Box::pin(async move { // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json // We go through all the signatures we see on the value and fetch the corresponding signing // keys - fetch_required_signing_keys(&value, pub_key_map, db) + self.fetch_required_signing_keys(&value, pub_key_map, db) .await?; // 2. 
Check signatures, otherwise drop @@ -223,7 +262,8 @@ impl Service { })?; let room_version_id = &create_event_content.room_version; - let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); + let room_version = + RoomVersion::new(room_version_id).expect("room version is supported"); let mut val = match ruma::signatures::verify_event( &*pub_key_map.read().map_err(|_| "RwLock is poisoned.")?, @@ -261,8 +301,7 @@ impl Service { // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" // NOTE: Step 5 is not applied anymore because it failed too often warn!("Fetching auth events for {}", incoming_pdu.event_id); - fetch_and_handle_outliers( - db, + self.fetch_and_handle_outliers( origin, &incoming_pdu .auth_events @@ -284,7 +323,7 @@ impl Service { // Build map of auth events let mut auth_events = HashMap::new(); for id in &incoming_pdu.auth_events { - let auth_event = match db.rooms.get_pdu(id)? { + let auth_event = match services().rooms.get_pdu(id)? { Some(e) => e, None => { warn!("Could not find auth event {}", id); @@ -303,8 +342,9 @@ impl Service { v.insert(auth_event); } hash_map::Entry::Occupied(_) => { - return Err(Error::BadRequest(ErrorKind::InvalidParam, - "Auth event's type and state_key combination exists multiple times." + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Auth event's type and state_key combination exists multiple times.", )); } } @@ -316,7 +356,10 @@ impl Service { .map(|a| a.as_ref()) != Some(create_event) { - return Err(Error::BadRequest(ErrorKind::InvalidParam("Incoming event refers to wrong create event."))); + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Incoming event refers to wrong create event.", + )); } if !state_res::event_auth::auth_check( @@ -325,15 +368,21 @@ impl Service { None::, // TODO: third party invite |k, s| auth_events.get(&(k.to_string().into(), s.to_owned())), ) - .map_err(|e| {error!(e); Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed")})? - { - return Err(Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed")); + .map_err(|e| { + error!(e); + Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed") + })? { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Auth check failed", + )); } info!("Validation successful."); // 7. Persist the event as an outlier. - db.rooms + services() + .rooms .add_pdu_outlier(&incoming_pdu.event_id, &val)?; info!("Added pdu as outlier."); @@ -342,22 +391,22 @@ impl Service { }) } - #[tracing::instrument(skip(incoming_pdu, val, create_event, db, pub_key_map))] - async fn upgrade_outlier_to_timeline_pdu( + #[tracing::instrument(skip(incoming_pdu, val, create_event, pub_key_map))] + pub async fn upgrade_outlier_to_timeline_pdu( + &self, incoming_pdu: Arc, val: BTreeMap, create_event: &PduEvent, origin: &ServerName, - db: &Database, room_id: &RoomId, pub_key_map: &RwLock>>, ) -> Result>, String> { // Skip the PDU if we already have it as a timeline event - if let Ok(Some(pduid)) = db.rooms.get_pdu_id(&incoming_pdu.event_id) { + if let Ok(Some(pduid)) = services().rooms.get_pdu_id(&incoming_pdu.event_id) { return Ok(Some(pduid)); } - if db + if services() .rooms .is_event_soft_failed(&incoming_pdu.event_id) .map_err(|_| "Failed to ask db for soft fail".to_owned())? 
@@ -387,32 +436,32 @@ impl Service { if incoming_pdu.prev_events.len() == 1 { let prev_event = &*incoming_pdu.prev_events[0]; - let prev_event_sstatehash = db + let prev_event_sstatehash = services() .rooms .pdu_shortstatehash(prev_event) .map_err(|_| "Failed talking to db".to_owned())?; let state = if let Some(shortstatehash) = prev_event_sstatehash { - Some(db.rooms.state_full_ids(shortstatehash).await) + Some(services().rooms.state_full_ids(shortstatehash).await) } else { None }; if let Some(Ok(mut state)) = state { info!("Using cached state"); - let prev_pdu = - db.rooms.get_pdu(prev_event).ok().flatten().ok_or_else(|| { + let prev_pdu = services() + .rooms + .get_pdu(prev_event) + .ok() + .flatten() + .ok_or_else(|| { "Could not find prev event, but we know the state.".to_owned() })?; if let Some(state_key) = &prev_pdu.state_key { - let shortstatekey = db + let shortstatekey = services() .rooms - .get_or_create_shortstatekey( - &prev_pdu.kind.to_string().into(), - state_key, - &db.globals, - ) + .get_or_create_shortstatekey(&prev_pdu.kind.to_string().into(), state_key) .map_err(|_| "Failed to create shortstatekey.".to_owned())?; state.insert(shortstatekey, Arc::from(prev_event)); @@ -427,19 +476,20 @@ impl Service { let mut okay = true; for prev_eventid in &incoming_pdu.prev_events { - let prev_event = if let Ok(Some(pdu)) = db.rooms.get_pdu(prev_eventid) { + let prev_event = if let Ok(Some(pdu)) = services().rooms.get_pdu(prev_eventid) { pdu } else { okay = false; break; }; - let sstatehash = if let Ok(Some(s)) = db.rooms.pdu_shortstatehash(prev_eventid) { - s - } else { - okay = false; - break; - }; + let sstatehash = + if let Ok(Some(s)) = services().rooms.pdu_shortstatehash(prev_eventid) { + s + } else { + okay = false; + break; + }; extremity_sstatehashes.insert(sstatehash, prev_event); } @@ -449,19 +499,18 @@ impl Service { let mut auth_chain_sets = Vec::with_capacity(extremity_sstatehashes.len()); for (sstatehash, prev_event) in extremity_sstatehashes { - let mut leaf_state: BTreeMap<_, _> = db + let mut leaf_state: BTreeMap<_, _> = services() .rooms .state_full_ids(sstatehash) .await .map_err(|_| "Failed to ask db for room state.".to_owned())?; if let Some(state_key) = &prev_event.state_key { - let shortstatekey = db + let shortstatekey = services() .rooms .get_or_create_shortstatekey( &prev_event.kind.to_string().into(), state_key, - &db.globals, ) .map_err(|_| "Failed to create shortstatekey.".to_owned())?; leaf_state.insert(shortstatekey, Arc::from(&*prev_event.event_id)); @@ -472,7 +521,7 @@ impl Service { let mut starting_events = Vec::with_capacity(leaf_state.len()); for (k, id) in leaf_state { - if let Ok((ty, st_key)) = db.rooms.get_statekey_from_short(k) { + if let Ok((ty, st_key)) = services().rooms.get_statekey_from_short(k) { // FIXME: Undo .to_string().into() when StateMap // is updated to use StateEventType state.insert((ty.to_string().into(), st_key), id.clone()); @@ -483,7 +532,10 @@ impl Service { } auth_chain_sets.push( - get_auth_chain(room_id, starting_events, db) + services() + .rooms + .auth_chain + .get_auth_chain(room_id, starting_events, services()) .await .map_err(|_| "Failed to load auth chain.".to_owned())? 
.collect(), @@ -492,15 +544,16 @@ impl Service { fork_states.push(state); } - let lock = db.globals.stateres_mutex.lock(); + let lock = services().globals.stateres_mutex.lock(); - let result = state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| { - let res = db.rooms.get_pdu(id); - if let Err(e) = &res { - error!("LOOK AT ME Failed to fetch event: {}", e); - } - res.ok().flatten() - }); + let result = + state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| { + let res = services().rooms.get_pdu(id); + if let Err(e) = &res { + error!("LOOK AT ME Failed to fetch event: {}", e); + } + res.ok().flatten() + }); drop(lock); state_at_incoming_event = match result { @@ -508,14 +561,15 @@ impl Service { new_state .into_iter() .map(|((event_type, state_key), event_id)| { - let shortstatekey = db + let shortstatekey = services() .rooms .get_or_create_shortstatekey( &event_type.to_string().into(), &state_key, - &db.globals, ) - .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; + .map_err(|_| { + "Failed to get_or_create_shortstatekey".to_owned() + })?; Ok((shortstatekey, event_id)) }) .collect::>()?, @@ -532,10 +586,9 @@ impl Service { info!("Calling /state_ids"); // Call /state_ids to find out what the state at this pdu is. We trust the server's // response to some extend, but we still do a lot of checks on the events - match db + match services() .sending .send_federation_request( - &db.globals, origin, get_room_state_ids::v1::Request { room_id, @@ -546,18 +599,18 @@ impl Service { { Ok(res) => { info!("Fetching state events at event."); - let state_vec = fetch_and_handle_outliers( - db, - origin, - &res.pdu_ids - .iter() - .map(|x| Arc::from(&**x)) - .collect::>(), - create_event, - room_id, - pub_key_map, - ) - .await; + let state_vec = self + .fetch_and_handle_outliers( + origin, + &res.pdu_ids + .iter() + .map(|x| Arc::from(&**x)) + .collect::>(), + create_event, + room_id, + pub_key_map, + ) + .await; let mut state: BTreeMap<_, Arc> = BTreeMap::new(); for (pdu, _) in state_vec { @@ -566,13 +619,9 @@ impl Service { .clone() .ok_or_else(|| "Found non-state pdu in state events.".to_owned())?; - let shortstatekey = db + let shortstatekey = services() .rooms - .get_or_create_shortstatekey( - &pdu.kind.to_string().into(), - &state_key, - &db.globals, - ) + .get_or_create_shortstatekey(&pdu.kind.to_string().into(), &state_key) .map_err(|_| "Failed to create shortstatekey.".to_owned())?; match state.entry(shortstatekey) { @@ -587,7 +636,7 @@ impl Service { } // The original create event must still be in the state - let create_shortstatekey = db + let create_shortstatekey = services() .rooms .get_shortstatekey(&StateEventType::RoomCreate, "") .map_err(|_| "Failed to talk to db.")? 
@@ -618,12 +667,13 @@ impl Service { &incoming_pdu, None::, // TODO: third party invite |k, s| { - db.rooms + services() + .rooms .get_shortstatekey(&k.to_string().into(), s) .ok() .flatten() .and_then(|shortstatekey| state_at_incoming_event.get(&shortstatekey)) - .and_then(|event_id| db.rooms.get_pdu(event_id).ok().flatten()) + .and_then(|event_id| services().rooms.get_pdu(event_id).ok().flatten()) }, ) .map_err(|_e| "Auth check failed.".to_owned())?; @@ -636,7 +686,8 @@ impl Service { // We start looking at current room state now, so lets lock the room let mutex_state = Arc::clone( - db.globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -648,7 +699,7 @@ impl Service { // Now we calculate the set of extremities this room has after the incoming event has been // applied. We start with the previous extremities (aka leaves) info!("Calculating extremities"); - let mut extremities = db + let mut extremities = services() .rooms .get_pdu_leaves(room_id) .map_err(|_| "Failed to load room leaves".to_owned())?; @@ -661,14 +712,16 @@ impl Service { } // Only keep those extremities were not referenced yet - extremities.retain(|id| !matches!(db.rooms.is_event_referenced(room_id, id), Ok(true))); + extremities + .retain(|id| !matches!(services().rooms.is_event_referenced(room_id, id), Ok(true))); info!("Compressing state at event"); let state_ids_compressed = state_at_incoming_event .iter() .map(|(shortstatekey, id)| { - db.rooms - .compress_state_event(*shortstatekey, id, &db.globals) + services() + .rooms + .compress_state_event(*shortstatekey, id) .map_err(|_| "Failed to compress_state_event".to_owned()) }) .collect::>()?; @@ -676,7 +729,7 @@ impl Service { // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it info!("Starting soft fail auth check"); - let auth_events = db + let auth_events = services() .rooms .get_auth_events( room_id, @@ -696,11 +749,10 @@ impl Service { .map_err(|_e| "Auth check failed.".to_owned())?; if soft_fail { - append_incoming_pdu( - db, + self.append_incoming_pdu( &incoming_pdu, val, - extremities.iter().map(Deref::deref), + extremities.iter().map(std::ops::Deref::deref), state_ids_compressed, soft_fail, &state_lock, @@ -712,7 +764,8 @@ impl Service { // Soft fail, we keep the event as an outlier but don't add it to the timeline warn!("Event was soft failed: {:?}", incoming_pdu); - db.rooms + services() + .rooms .mark_event_soft_failed(&incoming_pdu.event_id) .map_err(|_| "Failed to set soft failed flag".to_owned())?; return Err("Event has been soft failed".into()); @@ -720,13 +773,13 @@ impl Service { if incoming_pdu.state_key.is_some() { info!("Loading current room state ids"); - let current_sstatehash = db + let current_sstatehash = services() .rooms .current_shortstatehash(room_id) .map_err(|_| "Failed to load current state hash.".to_owned())? .expect("every room has state"); - let current_state_ids = db + let current_state_ids = services() .rooms .state_full_ids(current_sstatehash) .await @@ -737,14 +790,14 @@ impl Service { info!("Loading extremities"); for id in dbg!(&extremities) { - match db + match services() .rooms .get_pdu(id) .map_err(|_| "Failed to ask db for pdu.".to_owned())? { Some(leaf_pdu) => { extremity_sstatehashes.insert( - db.rooms + services() .pdu_shortstatehash(&leaf_pdu.event_id) .map_err(|_| "Failed to ask db for pdu state hash.".to_owned())? 
.ok_or_else(|| { @@ -777,13 +830,9 @@ impl Service { // We also add state after incoming event to the fork states let mut state_after = state_at_incoming_event.clone(); if let Some(state_key) = &incoming_pdu.state_key { - let shortstatekey = db + let shortstatekey = services() .rooms - .get_or_create_shortstatekey( - &incoming_pdu.kind.to_string().into(), - state_key, - &db.globals, - ) + .get_or_create_shortstatekey(&incoming_pdu.kind.to_string().into(), state_key) .map_err(|_| "Failed to create shortstatekey.".to_owned())?; state_after.insert(shortstatekey, Arc::from(&*incoming_pdu.event_id)); @@ -801,8 +850,9 @@ impl Service { fork_states[0] .iter() .map(|(k, id)| { - db.rooms - .compress_state_event(*k, id, &db.globals) + services() + .rooms + .compress_state_event(*k, id) .map_err(|_| "Failed to compress_state_event.".to_owned()) }) .collect::>()? @@ -814,14 +864,16 @@ impl Service { let mut auth_chain_sets = Vec::new(); for state in &fork_states { auth_chain_sets.push( - get_auth_chain( - room_id, - state.iter().map(|(_, id)| id.clone()).collect(), - db, - ) - .await - .map_err(|_| "Failed to load auth chain.".to_owned())? - .collect(), + services() + .rooms + .auth_chain + .get_auth_chain( + room_id, + state.iter().map(|(_, id)| id.clone()).collect(), + ) + .await + .map_err(|_| "Failed to load auth chain.".to_owned())? + .collect(), ); } @@ -832,7 +884,8 @@ impl Service { .map(|map| { map.into_iter() .filter_map(|(k, id)| { - db.rooms + services() + .rooms .get_statekey_from_short(k) // FIXME: Undo .to_string().into() when StateMap // is updated to use StateEventType @@ -846,13 +899,13 @@ impl Service { info!("Resolving state"); - let lock = db.globals.stateres_mutex.lock(); + let lock = services().globals.stateres_mutex.lock(); let state = match state_res::resolve( room_version_id, &fork_states, auth_chain_sets, |id| { - let res = db.rooms.get_pdu(id); + let res = services().rooms.get_pdu(id); if let Err(e) = &res { error!("LOOK AT ME Failed to fetch event: {}", e); } @@ -872,16 +925,13 @@ impl Service { state .into_iter() .map(|((event_type, state_key), event_id)| { - let shortstatekey = db + let shortstatekey = services() .rooms - .get_or_create_shortstatekey( - &event_type.to_string().into(), - &state_key, - &db.globals, - ) + .get_or_create_shortstatekey(&event_type.to_string().into(), &state_key) .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; - db.rooms - .compress_state_event(shortstatekey, &event_id, &db.globals) + services() + .rooms + .compress_state_event(shortstatekey, &event_id) .map_err(|_| "Failed to compress state event".to_owned()) }) .collect::>()? @@ -890,8 +940,9 @@ impl Service { // Set the new room state to the resolved state if update_state { info!("Forcing new room state"); - db.rooms - .force_state(room_id, new_room_state, db) + services() + .rooms + .force_state(room_id, new_room_state) .map_err(|_| "Failed to set new room state.".to_owned())?; } } @@ -903,19 +954,19 @@ impl Service { // We use the `state_at_event` instead of `state_after` so we accurately // represent the state for this event. 
- let pdu_id = append_incoming_pdu( - db, - &incoming_pdu, - val, - extremities.iter().map(Deref::deref), - state_ids_compressed, - soft_fail, - &state_lock, - ) - .map_err(|e| { - warn!("Failed to add pdu to db: {}", e); - "Failed to add pdu to db.".to_owned() - })?; + let pdu_id = self + .append_incoming_pdu( + &incoming_pdu, + val, + extremities.iter().map(std::ops::Deref::deref), + state_ids_compressed, + soft_fail, + &state_lock, + ) + .map_err(|e| { + warn!("Failed to add pdu to db: {}", e); + "Failed to add pdu to db.".to_owned() + })?; info!("Appended incoming pdu"); @@ -935,15 +986,22 @@ impl Service { /// d. TODO: Ask other servers over federation? #[tracing::instrument(skip_all)] pub(crate) fn fetch_and_handle_outliers<'a>( - db: &'a Database, + &self, origin: &'a ServerName, events: &'a [Arc], create_event: &'a PduEvent, room_id: &'a RoomId, pub_key_map: &'a RwLock>>, - ) -> AsyncRecursiveType<'a, Vec<(Arc, Option>)>> { + ) -> AsyncRecursiveType<'a, Vec<(Arc, Option>)>> + { Box::pin(async move { - let back_off = |id| match db.globals.bad_event_ratelimiter.write().unwrap().entry(id) { + let back_off = |id| match services() + .globals + .bad_event_ratelimiter + .write() + .unwrap() + .entry(id) + { hash_map::Entry::Vacant(e) => { e.insert((Instant::now(), 1)); } @@ -952,10 +1010,16 @@ impl Service { let mut pdus = vec![]; for id in events { - if let Some((time, tries)) = db.globals.bad_event_ratelimiter.read().unwrap().get(&**id) + if let Some((time, tries)) = services() + .globals + .bad_event_ratelimiter + .read() + .unwrap() + .get(&**id) { // Exponential backoff - let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); + let mut min_elapsed_duration = + Duration::from_secs(5 * 60) * (*tries) * (*tries); if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { min_elapsed_duration = Duration::from_secs(60 * 60 * 24); } @@ -969,7 +1033,7 @@ impl Service { // a. Look in the main timeline (pduid_pdu tree) // b. 
Look at outlier pdu tree // (get_pdu_json checks both) - if let Ok(Some(local_pdu)) = db.rooms.get_pdu(id) { + if let Ok(Some(local_pdu)) = services().rooms.get_pdu(id) { trace!("Found {} in db", id); pdus.push((local_pdu, None)); continue; @@ -992,16 +1056,15 @@ impl Service { tokio::task::yield_now().await; } - if let Ok(Some(_)) = db.rooms.get_pdu(&next_id) { + if let Ok(Some(_)) = services().rooms.get_pdu(&next_id) { trace!("Found {} in db", id); continue; } info!("Fetching {} over federation.", next_id); - match db + match services() .sending .send_federation_request( - &db.globals, origin, get_event::v1::Request { event_id: &next_id }, ) @@ -1010,7 +1073,7 @@ impl Service { Ok(res) => { info!("Got {} over federation", next_id); let (calculated_event_id, value) = - match crate::pdu::gen_event_id_canonical_json(&res.pdu, &db) { + match pdu::gen_event_id_canonical_json(&res.pdu) { Ok(t) => t, Err(_) => { back_off((*next_id).to_owned()); @@ -1051,16 +1114,16 @@ impl Service { } for (next_id, value) in events_in_reverse_order.iter().rev() { - match handle_outlier_pdu( - origin, - create_event, - next_id, - room_id, - value.clone(), - db, - pub_key_map, - ) - .await + match self + .handle_outlier_pdu( + origin, + create_event, + next_id, + room_id, + value.clone(), + pub_key_map, + ) + .await { Ok((pdu, json)) => { if next_id == id { @@ -1078,9 +1141,14 @@ impl Service { }) } - - - fn fetch_unknown_prev_events(initial_set: Vec>) -> Vec> { + async fn fetch_unknown_prev_events( + &self, + origin: &ServerName, + create_event: &PduEvent, + room_id: &RoomId, + pub_key_map: &RwLock>>, + initial_set: Vec>, + ) -> Vec<(Arc, HashMap, (Arc, BTreeMap)>)> { let mut graph: HashMap, _> = HashMap::new(); let mut eventid_info = HashMap::new(); let mut todo_outlier_stack: Vec> = initial_set; @@ -1088,16 +1156,16 @@ impl Service { let mut amount = 0; while let Some(prev_event_id) = todo_outlier_stack.pop() { - if let Some((pdu, json_opt)) = fetch_and_handle_outliers( - db, - origin, - &[prev_event_id.clone()], - &create_event, - room_id, - pub_key_map, - ) - .await - .pop() + if let Some((pdu, json_opt)) = self + .fetch_and_handle_outliers( + origin, + &[prev_event_id.clone()], + &create_event, + room_id, + pub_key_map, + ) + .await + .pop() { if amount > 100 { // Max limit reached @@ -1106,9 +1174,13 @@ impl Service { continue; } - if let Some(json) = - json_opt.or_else(|| db.rooms.get_outlier_pdu_json(&prev_event_id).ok().flatten()) - { + if let Some(json) = json_opt.or_else(|| { + services() + .rooms + .get_outlier_pdu_json(&prev_event_id) + .ok() + .flatten() + }) { if pdu.origin_server_ts > first_pdu_in_room.origin_server_ts { amount += 1; for prev_prev in &pdu.prev_events { @@ -1153,6 +1225,6 @@ impl Service { }) .map_err(|_| "Error sorting prev events".to_owned())?; - sorted + (sorted, eventid_info) } } diff --git a/src/service/rooms/lazy_loading/data.rs b/src/service/rooms/lazy_loading/data.rs index 9cf2d8bc..52a683d3 100644 --- a/src/service/rooms/lazy_loading/data.rs +++ b/src/service/rooms/lazy_loading/data.rs @@ -1,3 +1,5 @@ +use ruma::{RoomId, DeviceId, UserId}; + pub trait Data { fn lazy_load_was_sent_before( &self, diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index cf00174b..bdc083a0 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -1,5 +1,8 @@ mod data; +use std::collections::HashSet; + pub use data::Data; +use ruma::{DeviceId, UserId, RoomId}; use crate::service::*; @@ -47,7 +50,7 @@ impl 
Service<_> { room_id: &RoomId, since: u64, ) -> Result<()> { - self.db.lazy_load_confirm_delivery(user_d, device_id, room_id, since) + self.db.lazy_load_confirm_delivery(user_id, device_id, room_id, since) } #[tracing::instrument(skip(self))] @@ -57,6 +60,6 @@ impl Service<_> { device_id: &DeviceId, room_id: &RoomId, ) -> Result<()> { - self.db.lazy_load_reset(user_id, device_id, room_id); + self.db.lazy_load_reset(user_id, device_id, room_id) } } diff --git a/src/service/rooms/metadata/data.rs b/src/service/rooms/metadata/data.rs index 58bd3510..2d718b2d 100644 --- a/src/service/rooms/metadata/data.rs +++ b/src/service/rooms/metadata/data.rs @@ -1,3 +1,5 @@ +use ruma::RoomId; + pub trait Data { fn exists(&self, room_id: &RoomId) -> Result; } diff --git a/src/service/rooms/metadata/mod.rs b/src/service/rooms/metadata/mod.rs index 644cd18f..8417e28e 100644 --- a/src/service/rooms/metadata/mod.rs +++ b/src/service/rooms/metadata/mod.rs @@ -1,5 +1,6 @@ mod data; pub use data::Data; +use ruma::RoomId; use crate::service::*; diff --git a/src/service/rooms/mod.rs b/src/service/rooms/mod.rs index 89598afe..47250340 100644 --- a/src/service/rooms/mod.rs +++ b/src/service/rooms/mod.rs @@ -1,216 +1,37 @@ -mod edus; - -pub use edus::RoomEdus; - -use crate::{ - pdu::{EventHash, PduBuilder}, - utils, Database, Error, PduEvent, Result, -}; -use lru_cache::LruCache; -use regex::Regex; -use ring::digest; -use ruma::{ - api::{client::error::ErrorKind, federation}, - events::{ - direct::DirectEvent, - ignored_user_list::IgnoredUserListEvent, - push_rules::PushRulesEvent, - room::{ - create::RoomCreateEventContent, - member::{MembershipState, RoomMemberEventContent}, - power_levels::RoomPowerLevelsEventContent, - }, - tag::TagEvent, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, RoomEventType, StateEventType, - }, - push::{Action, Ruleset, Tweak}, - serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res::{self, RoomVersion, StateMap}, - uint, DeviceId, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, -}; -use serde::Deserialize; -use serde_json::value::to_raw_value; -use std::{ - borrow::Cow, - collections::{hash_map, BTreeMap, HashMap, HashSet}, - fmt::Debug, - iter, - mem::size_of, - sync::{Arc, Mutex, RwLock}, -}; -use tokio::sync::MutexGuard; -use tracing::{error, warn}; - -use super::{abstraction::Tree, pusher}; - -/// The unique identifier of each state group. -/// -/// This is created when a state group is added to the database by -/// hashing the entire state. -pub type StateHashId = Vec; -pub type CompressedStateEvent = [u8; 2 * size_of::()]; - -pub struct Rooms { - pub edus: RoomEdus, - pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count - pub(super) eventid_pduid: Arc, - pub(super) roomid_pduleaves: Arc, - pub(super) alias_roomid: Arc, - pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count - pub(super) publicroomids: Arc, - - pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount - - /// Participating servers in a room. 
- pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName - pub(super) serverroomids: Arc, // ServerRoomId = ServerName + RoomId - - pub(super) userroomid_joined: Arc, - pub(super) roomuserid_joined: Arc, - pub(super) roomid_joinedcount: Arc, - pub(super) roomid_invitedcount: Arc, - pub(super) roomuseroncejoinedids: Arc, - pub(super) userroomid_invitestate: Arc, // InviteState = Vec> - pub(super) roomuserid_invitecount: Arc, // InviteCount = Count - pub(super) userroomid_leftstate: Arc, - pub(super) roomuserid_leftcount: Arc, - - pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled - - pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId - - pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 - pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 - - /// Remember the current state hash of a room. - pub(super) roomid_shortstatehash: Arc, - pub(super) roomsynctoken_shortstatehash: Arc, - /// Remember the state hash at events in the past. - pub(super) shorteventid_shortstatehash: Arc, - /// StateKey = EventType + StateKey, ShortStateKey = Count - pub(super) statekey_shortstatekey: Arc, - pub(super) shortstatekey_statekey: Arc, - - pub(super) roomid_shortroomid: Arc, - - pub(super) shorteventid_eventid: Arc, - pub(super) eventid_shorteventid: Arc, - - pub(super) statehash_shortstatehash: Arc, - pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) - - pub(super) shorteventid_authchain: Arc, - - /// RoomId + EventId -> outlier PDU. - /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) eventid_outlierpdu: Arc, - pub(super) softfailedeventids: Arc, - - /// RoomId + EventId -> Parent PDU EventId. - pub(super) referencedevents: Arc, - - pub(super) pdu_cache: Mutex, Arc>>, - pub(super) shorteventid_cache: Mutex>>, - pub(super) auth_chain_cache: Mutex, Arc>>>, - pub(super) eventidshort_cache: Mutex, u64>>, - pub(super) statekeyshort_cache: Mutex>, - pub(super) shortstatekey_cache: Mutex>, - pub(super) our_real_users_cache: RwLock, Arc>>>>, - pub(super) appservice_in_room_cache: RwLock, HashMap>>, - pub(super) lazy_load_waiting: - Mutex, Box, Box, u64), HashSet>>>, - pub(super) stateinfo_cache: Mutex< - LruCache< - u64, - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - >, - >, - pub(super) lasttimelinecount_cache: Mutex, u64>>, -} - -impl Rooms { - /// Returns true if a given room version is supported - #[tracing::instrument(skip(self, db))] - pub fn is_supported_version(&self, db: &Database, room_version: &RoomVersionId) -> bool { - db.globals.supported_room_versions().contains(room_version) - } - - /// This fetches auth events from the current state. - #[tracing::instrument(skip(self))] - pub fn get_auth_events( - &self, - room_id: &RoomId, - kind: &RoomEventType, - sender: &UserId, - state_key: Option<&str>, - content: &serde_json::value::RawValue, - ) -> Result>> { - let shortstatehash = - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? 
{ - current_shortstatehash - } else { - return Ok(HashMap::new()); - }; - - let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) - .expect("content is a valid JSON object"); - - let mut sauthevents = auth_events - .into_iter() - .filter_map(|(event_type, state_key)| { - self.get_shortstatekey(&event_type.to_string().into(), &state_key) - .ok() - .flatten() - .map(|s| (s, (event_type, state_key))) - }) - .collect::>(); - - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - Ok(full_state - .into_iter() - .filter_map(|compressed| self.parse_compressed_state_event(compressed).ok()) - .filter_map(|(shortstatekey, event_id)| { - sauthevents.remove(&shortstatekey).map(|k| (k, event_id)) - }) - .filter_map(|(k, event_id)| self.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu))) - .collect()) - } - - /// Generate a new StateHash. - /// - /// A unique hash made from hashing all PDU ids of the state joined with 0xff. - fn calculate_hash(&self, bytes_list: &[&[u8]]) -> StateHashId { - // We only hash the pdu's event ids, not the whole pdu - let bytes = bytes_list.join(&0xff); - let hash = digest::digest(&digest::SHA256, &bytes); - hash.as_ref().into() - } - - #[tracing::instrument(skip(self))] - pub fn iter_ids(&self) -> impl Iterator>> + '_ { - self.roomid_shortroomid.iter().map(|(bytes, _)| { - RoomId::parse( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) - }) - } - - pub fn is_disabled(&self, room_id: &RoomId) -> Result { - Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) - } - +pub mod alias; +pub mod auth_chain; +pub mod directory; +pub mod edus; +pub mod event_handler; +pub mod lazy_loading; +pub mod metadata; +pub mod outlier; +pub mod pdu_metadata; +pub mod search; +pub mod short; +pub mod state; +pub mod state_accessor; +pub mod state_cache; +pub mod state_compressor; +pub mod timeline; +pub mod user; + +pub struct Service { + pub alias: alias::Service, + pub auth_chain: auth_chain::Service, + pub directory: directory::Service, + pub edus: edus::Service, + pub event_handler: event_handler::Service, + pub lazy_loading: lazy_loading::Service, + pub metadata: metadata::Service, + pub outlier: outlier::Service, + pub pdu_metadata: pdu_metadata::Service, + pub search: search::Service, + pub short: short::Service, + pub state: state::Service, + pub state_accessor: state_accessor::Service, + pub state_cache: state_cache::Service, + pub state_compressor: state_compressor::Service, + pub timeline: timeline::Service, + pub user: user::Service, } diff --git a/src/service/rooms/outlier/data.rs b/src/service/rooms/outlier/data.rs index 6b534b95..d579515e 100644 --- a/src/service/rooms/outlier/data.rs +++ b/src/service/rooms/outlier/data.rs @@ -1,3 +1,7 @@ +use ruma::{EventId, signatures::CanonicalJsonObject}; + +use crate::PduEvent; + pub trait Data { fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result>; fn get_outlier_pdu(&self, event_id: &EventId) -> Result>; diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs index c82cb628..ee8b940f 100644 --- a/src/service/rooms/outlier/mod.rs +++ b/src/service/rooms/outlier/mod.rs @@ -1,7 +1,8 @@ mod data; pub use data::Data; +use ruma::{EventId, signatures::CanonicalJsonObject}; -use crate::service::*; +use crate::{service::*, PduEvent}; pub struct 
Service { db: D, diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs index 67787958..531823fe 100644 --- a/src/service/rooms/pdu_metadata/data.rs +++ b/src/service/rooms/pdu_metadata/data.rs @@ -1,3 +1,7 @@ +use std::sync::Arc; + +use ruma::{EventId, RoomId}; + pub trait Data { fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()>; fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result; diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs index 6d6df223..3442b830 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -1,5 +1,8 @@ mod data; +use std::sync::Arc; + pub use data::Data; +use ruma::{RoomId, EventId}; use crate::service::*; diff --git a/src/service/rooms/search/data.rs b/src/service/rooms/search/data.rs index 1601e0de..16287eba 100644 --- a/src/service/rooms/search/data.rs +++ b/src/service/rooms/search/data.rs @@ -1,7 +1,9 @@ +use ruma::RoomId; + pub trait Data { - pub fn index_pdu<'a>(&self, room_id: &RoomId, pdu_id: u64, message_body: String) -> Result<()>; + fn index_pdu<'a>(&self, room_id: &RoomId, pdu_id: u64, message_body: String) -> Result<()>; - pub fn search_pdus<'a>( + fn search_pdus<'a>( &'a self, room_id: &RoomId, search_string: &str, diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index 5478273c..9087deff 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -1,7 +1,6 @@ mod data; pub use data::Data; - -use crate::service::*; +use ruma::RoomId; pub struct Service { db: D, diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index a8e87b91..afde14e2 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -1,7 +1,10 @@ mod data; +use std::sync::Arc; + pub use data::Data; +use ruma::{EventId, events::StateEventType}; -use crate::service::*; +use crate::{service::*, Error, utils}; pub struct Service { db: D, @@ -188,7 +191,6 @@ impl Service<_> { fn get_or_create_shortstatehash( &self, state_hash: &StateHashId, - globals: &super::globals::Globals, ) -> Result<(u64, bool)> { Ok(match self.statehash_shortstatehash.get(state_hash)? { Some(shortstatehash) => ( diff --git a/src/service/rooms/state/data.rs b/src/service/rooms/state/data.rs index 8aa76380..ac8fac21 100644 --- a/src/service/rooms/state/data.rs +++ b/src/service/rooms/state/data.rs @@ -1,14 +1,20 @@ +use std::sync::Arc; +use std::{sync::MutexGuard, collections::HashSet}; +use std::fmt::Debug; + +use ruma::{EventId, RoomId}; + pub trait Data { /// Returns the last state hash key added to the db for the given room. fn get_room_shortstatehash(room_id: &RoomId); /// Update the current state of the room. - fn set_room_state(room_id: &RoomId, new_shortstatehash: u64 + fn set_room_state(room_id: &RoomId, new_shortstatehash: u64, _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex ); /// Associates a state with an event. - fn set_event_state(shorteventid: u64, shortstatehash: u64) -> Result<()> { + fn set_event_state(shorteventid: u64, shortstatehash: u64) -> Result<()>; /// Returns all events we would send as the prev_events of the next event. 
fn get_forward_extremities(room_id: &RoomId) -> Result>>; @@ -18,7 +24,7 @@ pub trait Data { room_id: &RoomId, event_ids: impl IntoIterator + Debug, _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { + ) -> Result<()>; } pub struct StateLock; diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index b513ab53..6c33d521 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -1,7 +1,12 @@ mod data; +use std::collections::HashSet; + pub use data::Data; +use ruma::{RoomId, events::{room::{member::MembershipState, create::RoomCreateEventContent}, AnyStrippedStateEvent, StateEventType}, UserId, EventId, serde::Raw, RoomVersionId}; +use serde::Deserialize; +use tracing::warn; -use crate::service::*; +use crate::{service::*, SERVICE, PduEvent, Error, utils::calculate_hash}; pub struct Service { db: D, @@ -9,22 +14,20 @@ pub struct Service { impl Service<_> { /// Set the room to the given statehash and update caches. - #[tracing::instrument(skip(self, new_state_ids_compressed, db))] pub fn force_state( &self, room_id: &RoomId, shortstatehash: u64, statediffnew: HashSet, statediffremoved: HashSet, - db: &Database, ) -> Result<()> { for event_id in statediffnew.into_iter().filter_map(|new| { - state_compressor::parse_compressed_state_event(new) + SERVICE.rooms.state_compressor.parse_compressed_state_event(new) .ok() .map(|(_, id)| id) }) { - let pdu = match timeline::get_pdu_json(&event_id)? { + let pdu = match SERVICE.rooms.timeline.get_pdu_json(&event_id)? { Some(pdu) => pdu, None => continue, }; @@ -60,12 +63,12 @@ impl Service<_> { Err(_) => continue, }; - room::state_cache::update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; + SERVICE.room.state_cache.update_membership(room_id, &user_id, membership, &pdu.sender, None, false)?; } - room::state_cache::update_joined_count(room_id, db)?; + SERVICE.room.state_cache.update_joined_count(room_id)?; - db.set_room_state(room_id, new_shortstatehash); + self.db.set_room_state(room_id, shortstatehash); Ok(()) } @@ -74,19 +77,18 @@ impl Service<_> { /// /// This adds all current state events (not including the incoming event) /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. 
- #[tracing::instrument(skip(self, state_ids_compressed, globals))] + #[tracing::instrument(skip(self, state_ids_compressed))] pub fn set_event_state( &self, event_id: &EventId, room_id: &RoomId, state_ids_compressed: HashSet, - globals: &super::globals::Globals, ) -> Result<()> { - let shorteventid = short::get_or_create_shorteventid(event_id, globals)?; + let shorteventid = SERVICE.short.get_or_create_shorteventid(event_id)?; - let previous_shortstatehash = db.get_room_shortstatehash(room_id)?; + let previous_shortstatehash = self.db.get_room_shortstatehash(room_id)?; - let state_hash = super::calculate_hash( + let state_hash = calculate_hash( &state_ids_compressed .iter() .map(|s| &s[..]) @@ -94,11 +96,11 @@ impl Service<_> { ); let (shortstatehash, already_existed) = - short::get_or_create_shortstatehash(&state_hash, globals)?; + SERVICE.short.get_or_create_shortstatehash(&state_hash)?; if !already_existed { let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| room::state_compressor.load_shortstatehash_info(p))?; + .map_or_else(|| Ok(Vec::new()), |p| SERVICE.room.state_compressor.load_shortstatehash_info(p))?; let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() { @@ -117,7 +119,7 @@ impl Service<_> { } else { (state_ids_compressed, HashSet::new()) }; - state_compressor::save_state_from_diff( + SERVICE.room.state_compressor.save_state_from_diff( shortstatehash, statediffnew, statediffremoved, @@ -126,7 +128,7 @@ impl Service<_> { )?; } - db.set_event_state(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; + self.db.set_event_state(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; Ok(()) } @@ -135,13 +137,12 @@ impl Service<_> { /// /// This adds all current state events (not including the incoming event) /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, new_pdu, globals))] + #[tracing::instrument(skip(self, new_pdu))] pub fn append_to_state( &self, new_pdu: &PduEvent, - globals: &super::globals::Globals, ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; + let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id)?; let previous_shortstatehash = self.get_room_shortstatehash(&new_pdu.room_id)?; @@ -157,10 +158,9 @@ impl Service<_> { let shortstatekey = self.get_or_create_shortstatekey( &new_pdu.kind.to_string().into(), state_key, - globals, )?; - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; + let new = self.compress_state_event(shortstatekey, &new_pdu.event_id)?; let replaces = states_parents .last() @@ -176,7 +176,7 @@ impl Service<_> { } // TODO: statehash with deterministic inputs - let shortstatehash = globals.next_count()?; + let shortstatehash = SERVICE.globals.next_count()?; let mut statediffnew = HashSet::new(); statediffnew.insert(new); @@ -254,7 +254,23 @@ impl Service<_> { Ok(()) } - pub fn db(&self) -> D { - &self.db + /// Returns the room's version. 
+ #[tracing::instrument(skip(self))] + pub fn get_room_version(&self, room_id: &RoomId) -> Result { + let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; + + let create_event_content: Option = create_event + .as_ref() + .map(|create_event| { + serde_json::from_str(create_event.content.get()).map_err(|e| { + warn!("Invalid create event: {}", e); + Error::bad_database("Invalid create event in db.") + }) + }) + .transpose()?; + let room_version = create_event_content + .map(|create_event| create_event.room_version) + .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; + Ok(room_version) } } diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs index a2b76e46..bf2972f9 100644 --- a/src/service/rooms/state_accessor/data.rs +++ b/src/service/rooms/state_accessor/data.rs @@ -1,3 +1,9 @@ +use std::{sync::Arc, collections::HashMap}; + +use ruma::{EventId, events::StateEventType, RoomId}; + +use crate::PduEvent; + pub trait Data { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 28a49a98..92e5c8e1 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -1,7 +1,10 @@ mod data; +use std::{sync::Arc, collections::{HashMap, BTreeMap}}; + pub use data::Data; +use ruma::{events::StateEventType, RoomId, EventId}; -use crate::service::*; +use crate::{service::*, PduEvent}; pub struct Service { db: D, @@ -42,7 +45,7 @@ impl Service<_> { event_type: &StateEventType, state_key: &str, ) -> Result>> { - self.db.pdu_state_get(event_id) + self.db.pdu_state_get(shortstatehash, event_type, state_key) } /// Returns the state hash for this pdu. diff --git a/src/service/rooms/state_cache/data.rs b/src/service/rooms/state_cache/data.rs index 166d4f6b..f6519196 100644 --- a/src/service/rooms/state_cache/data.rs +++ b/src/service/rooms/state_cache/data.rs @@ -1,3 +1,5 @@ +use ruma::{UserId, RoomId}; + pub trait Data { fn mark_as_once_joined(user_id: &UserId, room_id: &RoomId) -> Result<()>; } diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 778679de..d29501a6 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -1,7 +1,11 @@ mod data; +use std::{collections::HashSet, sync::Arc}; + pub use data::Data; +use regex::Regex; +use ruma::{RoomId, UserId, events::{room::{member::MembershipState, create::RoomCreateEventContent}, AnyStrippedStateEvent, StateEventType, tag::TagEvent, RoomAccountDataEventType, GlobalAccountDataEventType, direct::DirectEvent, ignored_user_list::IgnoredUserListEvent, AnySyncStateEvent}, serde::Raw, ServerName}; -use crate::service::*; +use crate::{service::*, SERVICE, utils, Error}; pub struct Service { db: D, @@ -9,7 +13,7 @@ pub struct Service { impl Service<_> { /// Update current membership data. 
- #[tracing::instrument(skip(self, last_state, db))] + #[tracing::instrument(skip(self, last_state))] pub fn update_membership( &self, room_id: &RoomId, @@ -17,12 +21,11 @@ impl Service<_> { membership: MembershipState, sender: &UserId, last_state: Option>>, - db: &Database, update_joined_count: bool, ) -> Result<()> { // Keep track what remote users exist by adding them as "deactivated" users - if user_id.server_name() != db.globals.server_name() { - db.users.create(user_id, None)?; + if user_id.server_name() != SERVICE.globals.server_name() { + SERVICE.users.create(user_id, None)?; // TODO: displayname, avatar url } @@ -82,7 +85,7 @@ impl Service<_> { user_id, RoomAccountDataEventType::Tag, )? { - db.account_data + SERVICE.account_data .update( Some(room_id), user_id, @@ -94,7 +97,7 @@ impl Service<_> { }; // Copy direct chat flag - if let Some(mut direct_event) = db.account_data.get::( + if let Some(mut direct_event) = SERVICE.account_data.get::( None, user_id, GlobalAccountDataEventType::Direct.to_string().into(), @@ -109,12 +112,11 @@ impl Service<_> { } if room_ids_updated { - db.account_data.update( + SERVICE.account_data.update( None, user_id, GlobalAccountDataEventType::Direct.to_string().into(), &direct_event, - &db.globals, )?; } }; @@ -130,7 +132,7 @@ impl Service<_> { } MembershipState::Invite => { // We want to know if the sender is ignored by the receiver - let is_ignored = db + let is_ignored = SERVICE .account_data .get::( None, // Ignored users are in global account data @@ -186,7 +188,7 @@ impl Service<_> { } #[tracing::instrument(skip(self, room_id, db))] - pub fn update_joined_count(&self, room_id: &RoomId, db: &Database) -> Result<()> { + pub fn update_joined_count(&self, room_id: &RoomId) -> Result<()> { let mut joinedcount = 0_u64; let mut invitedcount = 0_u64; let mut joined_servers = HashSet::new(); @@ -226,11 +228,10 @@ impl Service<_> { Ok(()) } - #[tracing::instrument(skip(self, room_id, db))] + #[tracing::instrument(skip(self, room_id))] pub fn get_our_real_users( &self, room_id: &RoomId, - db: &Database, ) -> Result>>> { let maybe = self .our_real_users_cache @@ -241,7 +242,7 @@ impl Service<_> { if let Some(users) = maybe { Ok(users) } else { - self.update_joined_count(room_id, db)?; + self.update_joined_count(room_id)?; Ok(Arc::clone( self.our_real_users_cache .read() @@ -252,12 +253,11 @@ impl Service<_> { } } - #[tracing::instrument(skip(self, room_id, appservice, db))] + #[tracing::instrument(skip(self, room_id, appservice))] pub fn appservice_in_room( &self, room_id: &RoomId, appservice: &(String, serde_yaml::Value), - db: &Database, ) -> Result { let maybe = self .appservice_in_room_cache @@ -285,7 +285,7 @@ impl Service<_> { .get("sender_localpart") .and_then(|string| string.as_str()) .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() + UserId::parse_with_server_name(string, SERVICE.globals.server_name()).ok() }); let in_room = bridge_user_id diff --git a/src/service/rooms/state_compressor/data.rs b/src/service/rooms/state_compressor/data.rs index 8b855cd2..74a28e7b 100644 --- a/src/service/rooms/state_compressor/data.rs +++ b/src/service/rooms/state_compressor/data.rs @@ -1,4 +1,6 @@ -struct StateDiff { +use crate::service::rooms::CompressedStateEvent; + +pub struct StateDiff { parent: Option, added: Vec, removed: Vec, diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index d6d88e25..3aea4fe6 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ 
b/src/service/rooms/state_compressor/mod.rs @@ -1,7 +1,12 @@ -mod data; +pub mod data; +use std::{mem::size_of, sync::Arc, collections::HashSet}; + pub use data::Data; +use ruma::{EventId, RoomId}; + +use crate::{service::*, utils}; -use crate::service::*; +use self::data::StateDiff; pub struct Service { db: D, @@ -30,9 +35,9 @@ impl Service<_> { return Ok(r.clone()); } - self.db.get_statediff(shortstatehash)?; + let StateDiff { parent, added, removed } = self.db.get_statediff(shortstatehash)?; - if parent != 0_u64 { + if let Some(parent) = parent { let mut response = self.load_shortstatehash_info(parent)?; let mut state = response.last().unwrap().1.clone(); state.extend(added.iter().copied()); @@ -155,7 +160,7 @@ impl Service<_> { if parent_states.is_empty() { // There is no parent layer, create a new state - self.db.save_statediff(shortstatehash, StateDiff { parent: 0, new: statediffnew, removed: statediffremoved })?; + self.db.save_statediff(shortstatehash, StateDiff { parent: None, added: statediffnew, removed: statediffremoved })?; return Ok(()); }; @@ -197,7 +202,7 @@ impl Service<_> { )?; } else { // Diff small enough, we add diff as layer on top of parent - self.db.save_statediff(shortstatehash, StateDiff { parent: parent.0, new: statediffnew, removed: statediffremoved })?; + self.db.save_statediff(shortstatehash, StateDiff { parent: Some(parent.0), added: statediffnew, removed: statediffremoved })?; } Ok(()) diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index 4e5c3796..bf6d8c5e 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -1,3 +1,9 @@ +use std::sync::Arc; + +use ruma::{signatures::CanonicalJsonObject, EventId, UserId, RoomId}; + +use crate::PduEvent; + pub trait Data { fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result; @@ -5,34 +11,37 @@ pub trait Data { fn get_pdu_count(&self, event_id: &EventId) -> Result>; /// Returns the json of a pdu. - pub fn get_pdu_json(&self, event_id: &EventId) -> Result>; + fn get_pdu_json(&self, event_id: &EventId) -> Result>; /// Returns the json of a pdu. - pub fn get_non_outlier_pdu_json( + fn get_non_outlier_pdu_json( + &self, + event_id: &EventId, + ) -> Result>; /// Returns the pdu's id. - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>>; + fn get_pdu_id(&self, event_id: &EventId) -> Result>>; /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result>; + fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result>; /// Returns the pdu. /// /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - pub fn get_pdu(&self, event_id: &EventId) -> Result>>; + fn get_pdu(&self, event_id: &EventId) -> Result>>; /// Returns the pdu. /// /// This does __NOT__ check the outliers `Tree`. - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result>; + fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result>; /// Returns the pdu as a `BTreeMap`. - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result>; + fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result>; /// Returns the `count` of this pdu's id. - pub fn pdu_count(&self, pdu_id: &[u8]) -> Result; + fn pdu_count(&self, pdu_id: &[u8]) -> Result; /// Removes a pdu and creates a new one with the same id. 
fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()>; @@ -40,7 +49,7 @@ pub trait Data { /// Returns an iterator over all events in a room that happened after the event with id `since` /// in chronological order. #[tracing::instrument(skip(self))] - pub fn pdus_since<'a>( + fn pdus_since<'a>( &'a self, user_id: &UserId, room_id: &RoomId, @@ -50,14 +59,14 @@ pub trait Data { /// Returns an iterator over all events and their tokens in a room that happened before the /// event with id `until` in reverse-chronological order. #[tracing::instrument(skip(self))] - pub fn pdus_until<'a>( + fn pdus_until<'a>( &'a self, user_id: &UserId, room_id: &RoomId, until: u64, ) -> Result, PduEvent)>> + 'a>; - pub fn pdus_after<'a>( + fn pdus_after<'a>( &'a self, user_id: &UserId, room_id: &RoomId, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index c6393c68..7b60fe5d 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1,7 +1,17 @@ mod data; +use std::{sync::MutexGuard, iter, collections::HashSet}; +use std::fmt::Debug; + pub use data::Data; +use regex::Regex; +use ruma::signatures::CanonicalJsonValue; +use ruma::{EventId, signatures::CanonicalJsonObject, push::{Action, Tweak}, events::{push_rules::PushRulesEvent, GlobalAccountDataEventType, RoomEventType, room::{member::MembershipState, create::RoomCreateEventContent}, StateEventType}, UserId, RoomAliasId, RoomId, uint, state_res, api::client::error::ErrorKind, serde::to_canonical_value, ServerName}; +use serde::Deserialize; +use serde_json::value::to_raw_value; +use tracing::{warn, error}; -use crate::service::*; +use crate::SERVICE; +use crate::{service::{*, pdu::{PduBuilder, EventHash}}, Error, PduEvent, utils}; pub struct Service { db: D, @@ -126,13 +136,12 @@ impl Service<_> { /// in `append_pdu`. /// /// Returns pdu id - #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] + #[tracing::instrument(skip(self, pdu, pdu_json, leaves))] pub fn append_pdu<'a>( &self, pdu: &PduEvent, mut pdu_json: CanonicalJsonObject, leaves: impl IntoIterator + Debug, - db: &Database, ) -> Result> { let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); @@ -249,7 +258,6 @@ impl Service<_> { &power_levels, &sync_pdu, &pdu.room_id, - db, )? { match action { Action::DontNotify => notify = false, @@ -446,9 +454,8 @@ impl Service<_> { pdu_builder: PduBuilder, sender: &UserId, room_id: &RoomId, - db: &Database, _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex - ) -> (PduEvent, CanonicalJsonObj) { + ) -> (PduEvent, CanonicalJsonObject) { let PduBuilder { event_type, content, @@ -457,14 +464,14 @@ impl Service<_> { redacts, } = pdu_builder; - let prev_events: Vec<_> = db + let prev_events: Vec<_> = SERVICE .rooms .get_pdu_leaves(room_id)? 
.into_iter() .take(20) .collect(); - let create_event = db + let create_event = SERVICE .rooms .room_state_get(room_id, &StateEventType::RoomCreate, "")?; @@ -481,7 +488,7 @@ impl Service<_> { // If there was no create event yet, assume we are creating a room with the default // version right now let room_version_id = create_event_content - .map_or(db.globals.default_room_version(), |create_event| { + .map_or(SERVICE.globals.default_room_version(), |create_event| { create_event.room_version }); let room_version = @@ -575,8 +582,8 @@ impl Service<_> { ); match ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), + SERVICE.globals.server_name().as_str(), + SERVICE.globals.keypair(), &mut pdu_json, &room_version_id, ) { @@ -614,22 +621,21 @@ impl Service<_> { /// Creates a new persisted data unit and adds it to a room. This function takes a /// roomid_mutex_state, meaning that only this function is able to mutate the room state. - #[tracing::instrument(skip(self, db, _mutex_lock))] + #[tracing::instrument(skip(self, _mutex_lock))] pub fn build_and_append_pdu( &self, pdu_builder: PduBuilder, sender: &UserId, room_id: &RoomId, - db: &Database, _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result> { - let (pdu, pdu_json) = create_hash_and_sign_event()?; + let (pdu, pdu_json) = self.create_hash_and_sign_event()?; // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. - let statehashid = self.append_to_state(&pdu, &db.globals)?; + let statehashid = self.append_to_state(&pdu)?; let pdu_id = self.append_pdu( &pdu, @@ -637,7 +643,6 @@ impl Service<_> { // Since this PDU references all pdu_leaves we can update the leaves // of the room iter::once(&*pdu.event_id), - db, )?; // We set the room state after inserting the pdu, so that we never have a moment in time @@ -659,9 +664,9 @@ impl Service<_> { } // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(db.globals.server_name()); + servers.remove(SERVICE.globals.server_name()); - db.sending.send_pdu(servers.into_iter(), &pdu_id)?; + SERVICE.sending.send_pdu(servers.into_iter(), &pdu_id)?; Ok(pdu.event_id) } @@ -670,7 +675,6 @@ impl Service<_> { /// server that sent the event. #[tracing::instrument(skip_all)] fn append_incoming_pdu<'a>( - db: &Database, pdu: &PduEvent, pdu_json: CanonicalJsonObject, new_room_leaves: impl IntoIterator + Clone + Debug, @@ -680,21 +684,20 @@ impl Service<_> { ) -> Result>> { // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. 
- db.rooms.set_event_state( + SERVICE.rooms.set_event_state( &pdu.event_id, &pdu.room_id, state_ids_compressed, - &db.globals, )?; if soft_fail { - db.rooms + SERVICE.rooms .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - db.rooms.replace_pdu_leaves(&pdu.room_id, new_room_leaves)?; + SERVICE.rooms.replace_pdu_leaves(&pdu.room_id, new_room_leaves)?; return Ok(None); } - let pdu_id = db.rooms.append_pdu(pdu, pdu_json, new_room_leaves, db)?; + let pdu_id = SERVICE.rooms.append_pdu(pdu, pdu_json, new_room_leaves)?; Ok(Some(pdu_id)) } @@ -756,4 +759,4 @@ impl Service<_> { // If event does not exist, just noop Ok(()) } - +} diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs index 45fb3551..664f8a0a 100644 --- a/src/service/rooms/user/mod.rs +++ b/src/service/rooms/user/mod.rs @@ -1,5 +1,6 @@ mod data; pub use data::Data; +use ruma::{RoomId, UserId}; use crate::service::*; diff --git a/src/service/transaction_ids/data.rs b/src/service/transaction_ids/data.rs index f1ff5f88..c1b47154 100644 --- a/src/service/transaction_ids/data.rs +++ b/src/service/transaction_ids/data.rs @@ -1,5 +1,5 @@ pub trait Data { - pub fn add_txnid( + fn add_txnid( &self, user_id: &UserId, device_id: Option<&DeviceId>, @@ -7,7 +7,7 @@ pub trait Data { data: &[u8], ) -> Result<()>; - pub fn existing_txnid( + fn existing_txnid( &self, user_id: &UserId, device_id: Option<&DeviceId>, diff --git a/src/service/transaction_ids/mod.rs b/src/service/transaction_ids/mod.rs index d944847e..9b76e13b 100644 --- a/src/service/transaction_ids/mod.rs +++ b/src/service/transaction_ids/mod.rs @@ -1,5 +1,6 @@ mod data; pub use data::Data; +use ruma::{UserId, DeviceId, TransactionId}; use crate::service::*; diff --git a/src/service/uiaa/data.rs b/src/service/uiaa/data.rs index 40e69bda..cc943bff 100644 --- a/src/service/uiaa/data.rs +++ b/src/service/uiaa/data.rs @@ -1,3 +1,5 @@ +use ruma::{api::client::uiaa::UiaaInfo, DeviceId, UserId, signatures::CanonicalJsonValue}; + pub trait Data { fn set_uiaa_request( &self, diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index 593ea5f2..5e1df8f3 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -1,7 +1,9 @@ mod data; pub use data::Data; +use ruma::{api::client::{uiaa::{UiaaInfo, IncomingAuthData, IncomingPassword, AuthType}, error::ErrorKind}, DeviceId, UserId, signatures::CanonicalJsonValue}; +use tracing::error; -use crate::service::*; +use crate::{service::*, utils, Error, SERVICE}; pub struct Service { db: D, @@ -36,8 +38,6 @@ impl Service<_> { device_id: &DeviceId, auth: &IncomingAuthData, uiaainfo: &UiaaInfo, - users: &super::users::Users, - globals: &super::globals::Globals, ) -> Result<(bool, UiaaInfo)> { let mut uiaainfo = auth .session() @@ -66,13 +66,13 @@ impl Service<_> { }; let user_id = - UserId::parse_with_server_name(username.clone(), globals.server_name()) + UserId::parse_with_server_name(username.clone(), SERVICE.globals.server_name()) .map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "User ID is invalid.") })?; // Check if password is correct - if let Some(hash) = users.password_hash(&user_id)? { + if let Some(hash) = SERVICE.users.password_hash(&user_id)? { let hash_matches = argon2::verify_encoded(&hash, password.as_bytes()).unwrap_or(false); diff --git a/src/service/users/data.rs b/src/service/users/data.rs index d99d0328..327e0c69 100644 --- a/src/service/users/data.rs +++ b/src/service/users/data.rs @@ -1,34 +1,27 @@ -pub trait Data { - /// Check if a user has an account on this homeserver. 
- pub fn exists(&self, user_id: &UserId) -> Result; +use std::collections::BTreeMap; - /// Check if account is deactivated - pub fn is_deactivated(&self, user_id: &UserId) -> Result; +use ruma::{UserId, DeviceId, DeviceKeyAlgorithm, DeviceKeyId, serde::Raw, encryption::{OneTimeKey, DeviceKeys, CrossSigningKey}, UInt, events::AnyToDeviceEvent, api::client::{device::Device, filter::IncomingFilterDefinition}, MxcUri}; - /// Check if a user is an admin - pub fn is_admin( - &self, - user_id: &UserId, - rooms: &super::rooms::Rooms, - globals: &super::globals::Globals, - ) -> Result; +trait Data { + /// Check if a user has an account on this homeserver. + fn exists(&self, user_id: &UserId) -> Result; - /// Create a new user account on this homeserver. - pub fn create(&self, user_id: &UserId, password: Option<&str>) -> Result<()>; + /// Check if account is deactivated + fn is_deactivated(&self, user_id: &UserId) -> Result; /// Returns the number of users registered on this server. - pub fn count(&self) -> Result; + fn count(&self) -> Result; /// Find out which user an access token belongs to. - pub fn find_from_token(&self, token: &str) -> Result, String)>>; + fn find_from_token(&self, token: &str) -> Result, String)>>; /// Returns an iterator over all users on this homeserver. - pub fn iter(&self) -> impl Iterator>> + '_; + fn iter(&self) -> impl Iterator>> + '_; /// Returns a list of local users as list of usernames. /// /// A user account is considered `local` if the length of it's password is greater then zero. - pub fn list_local_users(&self) -> Result>; + fn list_local_users(&self) -> Result>; /// Will only return with Some(username) if the password was not empty and the /// username could be successfully parsed. @@ -37,31 +30,31 @@ pub trait Data { fn get_username_with_valid_password(&self, username: &[u8], password: &[u8]) -> Option; /// Returns the password hash for the given user. - pub fn password_hash(&self, user_id: &UserId) -> Result>; + fn password_hash(&self, user_id: &UserId) -> Result>; /// Hash and set the user's password to the Argon2 hash - pub fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()>; + fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()>; /// Returns the displayname of a user on this homeserver. - pub fn displayname(&self, user_id: &UserId) -> Result>; + fn displayname(&self, user_id: &UserId) -> Result>; /// Sets a new displayname or removes it if displayname is None. You still need to nofify all rooms of this change. - pub fn set_displayname(&self, user_id: &UserId, displayname: Option) -> Result<()>; + fn set_displayname(&self, user_id: &UserId, displayname: Option) -> Result<()>; /// Get the avatar_url of a user. - pub fn avatar_url(&self, user_id: &UserId) -> Result>>; + fn avatar_url(&self, user_id: &UserId) -> Result>>; /// Sets a new avatar_url or removes it if avatar_url is None. - pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option>) -> Result<()>; + fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option>) -> Result<()>; /// Get the blurhash of a user. - pub fn blurhash(&self, user_id: &UserId) -> Result>; + fn blurhash(&self, user_id: &UserId) -> Result>; /// Sets a new avatar_url or removes it if avatar_url is None. - pub fn set_blurhash(&self, user_id: &UserId, blurhash: Option) -> Result<()>; + fn set_blurhash(&self, user_id: &UserId, blurhash: Option) -> Result<()>; /// Adds a new device to a user. 
- pub fn create_device( + fn create_device( &self, user_id: &UserId, device_id: &DeviceId, @@ -70,129 +63,118 @@ pub trait Data { ) -> Result<()>; /// Removes a device from a user. - pub fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()>; + fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()>; /// Returns an iterator over all device ids of this user. - pub fn all_device_ids<'a>( + fn all_device_ids<'a>( &'a self, user_id: &UserId, ) -> impl Iterator>> + 'a; /// Replaces the access token of one device. - pub fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()>; + fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()>; - pub fn add_one_time_key( + fn add_one_time_key( &self, user_id: &UserId, device_id: &DeviceId, one_time_key_key: &DeviceKeyId, one_time_key_value: &Raw, - globals: &super::globals::Globals, ) -> Result<()>; - pub fn last_one_time_keys_update(&self, user_id: &UserId) -> Result; + fn last_one_time_keys_update(&self, user_id: &UserId) -> Result; - pub fn take_one_time_key( + fn take_one_time_key( &self, user_id: &UserId, device_id: &DeviceId, key_algorithm: &DeviceKeyAlgorithm, - globals: &super::globals::Globals, ) -> Result, Raw)>>; - pub fn count_one_time_keys( + fn count_one_time_keys( &self, user_id: &UserId, device_id: &DeviceId, ) -> Result>; - pub fn add_device_keys( + fn add_device_keys( &self, user_id: &UserId, device_id: &DeviceId, device_keys: &Raw, - rooms: &super::rooms::Rooms, - globals: &super::globals::Globals, ) -> Result<()>; - pub fn add_cross_signing_keys( + fn add_cross_signing_keys( &self, user_id: &UserId, master_key: &Raw, self_signing_key: &Option>, user_signing_key: &Option>, - rooms: &super::rooms::Rooms, - globals: &super::globals::Globals, ) -> Result<()>; - pub fn sign_key( + fn sign_key( &self, target_id: &UserId, key_id: &str, signature: (String, String), sender_id: &UserId, - rooms: &super::rooms::Rooms, - globals: &super::globals::Globals, ) -> Result<()>; - pub fn keys_changed<'a>( + fn keys_changed<'a>( &'a self, user_or_room_id: &str, from: u64, to: Option, ) -> impl Iterator>> + 'a; - pub fn mark_device_key_update( + fn mark_device_key_update( &self, user_id: &UserId, - rooms: &super::rooms::Rooms, - globals: &super::globals::Globals, ) -> Result<()>; - pub fn get_device_keys( + fn get_device_keys( &self, user_id: &UserId, device_id: &DeviceId, ) -> Result>>; - pub fn get_master_key bool>( + fn get_master_key bool>( &self, user_id: &UserId, allowed_signatures: F, ) -> Result>>; - pub fn get_self_signing_key bool>( + fn get_self_signing_key bool>( &self, user_id: &UserId, allowed_signatures: F, ) -> Result>>; - pub fn get_user_signing_key(&self, user_id: &UserId) -> Result>>; + fn get_user_signing_key(&self, user_id: &UserId) -> Result>>; - pub fn add_to_device_event( + fn add_to_device_event( &self, sender: &UserId, target_user_id: &UserId, target_device_id: &DeviceId, event_type: &str, content: serde_json::Value, - globals: &super::globals::Globals, ) -> Result<()>; - pub fn get_to_device_events( + fn get_to_device_events( &self, user_id: &UserId, device_id: &DeviceId, ) -> Result>>; - pub fn remove_to_device_events( + fn remove_to_device_events( &self, user_id: &UserId, device_id: &DeviceId, until: u64, ) -> Result<()>; - pub fn update_device_metadata( + fn update_device_metadata( &self, user_id: &UserId, device_id: &DeviceId, @@ -200,27 +182,27 @@ pub trait Data { ) -> Result<()>; /// Get device metadata. 
- pub fn get_device_metadata( + fn get_device_metadata( &self, user_id: &UserId, device_id: &DeviceId, ) -> Result>; - pub fn get_devicelist_version(&self, user_id: &UserId) -> Result>; + fn get_devicelist_version(&self, user_id: &UserId) -> Result>; - pub fn all_devices_metadata<'a>( + fn all_devices_metadata<'a>( &'a self, user_id: &UserId, ) -> impl Iterator> + 'a; /// Creates a new sync filter. Returns the filter id. - pub fn create_filter( + fn create_filter( &self, user_id: &UserId, filter: &IncomingFilterDefinition, ) -> Result; - pub fn get_filter( + fn get_filter( &self, user_id: &UserId, filter_id: &str, diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 93d6ea52..bfa4b8e5 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -1,7 +1,10 @@ mod data; +use std::{collections::BTreeMap, mem}; + pub use data::Data; +use ruma::{UserId, MxcUri, DeviceId, DeviceKeyId, serde::Raw, encryption::{OneTimeKey, CrossSigningKey, DeviceKeys}, DeviceKeyAlgorithm, UInt, events::AnyToDeviceEvent, api::client::{device::Device, filter::IncomingFilterDefinition}}; -use crate::service::*; +use crate::{service::*, Error}; pub struct Service { db: D, @@ -19,18 +22,24 @@ impl Service<_> { } /// Check if a user is an admin - pub fn is_admin( + fn is_admin( &self, user_id: &UserId, ) -> Result { - self.db.is_admin(user_id) + let admin_room_alias_id = RoomAliasId::parse(format!("#admins:{}", globals.server_name())) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; + let admin_room_id = rooms.id_from_alias(&admin_room_alias_id)?.unwrap(); + + rooms.is_joined(user_id, &admin_room_id) } /// Create a new user account on this homeserver. - pub fn create(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { - self.db.set_password(user_id, password) + fn create(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { + self.db.set_password(user_id, password)?; + Ok(()) } + /// Returns the number of users registered on this server. 
pub fn count(&self) -> Result { self.db.count() @@ -136,7 +145,6 @@ impl Service<_> { device_id: &DeviceId, one_time_key_key: &DeviceKeyId, one_time_key_value: &Raw, - globals: &super::globals::Globals, ) -> Result<()> { self.db.add_one_time_key(user_id, device_id, one_time_key_key, one_time_key_value) } @@ -220,7 +228,7 @@ impl Service<_> { user_id: &UserId, allowed_signatures: F, ) -> Result>> { - self.db.get_master_key(user_id, allow_signatures) + self.db.get_master_key(user_id, allowed_signatures) } pub fn get_self_signing_key bool>( @@ -327,7 +335,7 @@ impl Service<_> { } /// Ensure that a user only sees signatures from themselves and the target user -fn clean_signatures bool>( +pub fn clean_signatures bool>( cross_signing_key: &mut serde_json::Value, user_id: &UserId, allowed_signatures: F, diff --git a/src/utils/utils.rs b/src/utils/mod.rs similarity index 99% rename from src/utils/utils.rs rename to src/utils/mod.rs index 1ad0aa3f..734da2a8 100644 --- a/src/utils/utils.rs +++ b/src/utils/mod.rs @@ -1,3 +1,5 @@ +pub mod error; + use argon2::{Config, Variant}; use cmp::Ordering; use rand::prelude::*; From 4649cd82b514a4590aade5f08f1c3514fe9998f6 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 13:52:05 +0200 Subject: [PATCH 377/445] refactor: prepare database/key_value/globals.rs from service/globals.rs --- src/{service => database/key_value}/globals.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service => database/key_value}/globals.rs (100%) diff --git a/src/service/globals.rs b/src/database/key_value/globals.rs similarity index 100% rename from src/service/globals.rs rename to src/database/key_value/globals.rs From d024d205c03f74053c3a0285dc05557f1e2f8663 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 13:52:06 +0200 Subject: [PATCH 378/445] refactor: prepare service/media/mod.rs from service/media.rs --- src/service/{media.rs => media/mod.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/{media.rs => media/mod.rs} (100%) diff --git a/src/service/media.rs b/src/service/media/mod.rs similarity index 100% rename from src/service/media.rs rename to src/service/media/mod.rs From 5a29511d3406cd6c7c25afebe8285528c7be6b18 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 13:52:06 +0200 Subject: [PATCH 379/445] refactor: prepare service/key_backups/data.rs from service/key_backups.rs --- src/service/{key_backups.rs => key_backups/data.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/{key_backups.rs => key_backups/data.rs} (100%) diff --git a/src/service/key_backups.rs b/src/service/key_backups/data.rs similarity index 100% rename from src/service/key_backups.rs rename to src/service/key_backups/data.rs From c6d1421e81dcf434eec3a3f937a53faee0969fb9 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 13:52:06 +0200 Subject: [PATCH 380/445] refactor: prepare service/key_backups/mod.rs from service/key_backups.rs --- src/service/{key_backups.rs => key_backups/mod.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/{key_backups.rs => key_backups/mod.rs} (100%) diff --git a/src/service/key_backups.rs b/src/service/key_backups/mod.rs similarity index 100% rename from src/service/key_backups.rs rename to src/service/key_backups/mod.rs From e1e87b8d0c1717b6485fcb0ba0970630fcbc5f2d Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 13:52:07 +0200 Subject: [PATCH 
381/445] refactor: prepare service/admin/mod.rs from service/admin.rs --- src/service/{admin.rs => admin/mod.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/{admin.rs => admin/mod.rs} (100%) diff --git a/src/service/admin.rs b/src/service/admin/mod.rs similarity index 100% rename from src/service/admin.rs rename to src/service/admin/mod.rs From efad401751d7ff6d87c2d5aac296326217e9aa28 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 13:52:07 +0200 Subject: [PATCH 382/445] refactor: prepare service/account_data/data.rs from service/account_data.rs --- src/service/{account_data.rs => account_data/data.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/{account_data.rs => account_data/data.rs} (100%) diff --git a/src/service/account_data.rs b/src/service/account_data/data.rs similarity index 100% rename from src/service/account_data.rs rename to src/service/account_data/data.rs From 7946c5f29e0545e9ee65b56503a2524d7a5ffc66 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 13:52:07 +0200 Subject: [PATCH 383/445] refactor: prepare service/account_data/mod.rs from service/account_data.rs --- src/service/{account_data.rs => account_data/mod.rs} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/service/{account_data.rs => account_data/mod.rs} (100%) diff --git a/src/service/account_data.rs b/src/service/account_data/mod.rs similarity index 100% rename from src/service/account_data.rs rename to src/service/account_data/mod.rs From 232978087ae24d0c7c9aff4147dee105b8fbaf17 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 13:52:07 +0200 Subject: [PATCH 384/445] refactor: prepare database/key_value/media.rs from service/media.rs --- src/{service => database/key_value}/media.rs | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename src/{service => database/key_value}/media.rs (100%) diff --git a/src/service/media.rs b/src/database/key_value/media.rs similarity index 100% rename from src/service/media.rs rename to src/database/key_value/media.rs From bd8b616ca01983b5f029095e168146693ffe9dcd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 7 Sep 2022 13:25:51 +0200 Subject: [PATCH 385/445] Fixed more compile time errors --- src/api/client_server/account.rs | 4 +- src/api/client_server/alias.rs | 12 +- src/api/client_server/context.rs | 31 +- src/api/client_server/directory.rs | 15 +- src/api/client_server/keys.rs | 2 +- src/api/client_server/media.rs | 6 +- src/api/client_server/membership.rs | 38 +- src/api/client_server/room.rs | 2 +- src/api/client_server/state.rs | 2 +- src/api/server_server.rs | 2 +- src/database/key_value/account_data.rs | 25 +- src/database/key_value/appservice.rs | 4 +- src/database/key_value/globals.rs | 404 +--- src/database/key_value/key_backups.rs | 49 +- src/database/key_value/media.rs | 347 +--- src/database/key_value/mod.rs | 8 +- src/database/key_value/pusher.rs | 4 +- src/database/key_value/rooms/alias.rs | 8 +- src/database/key_value/rooms/auth_chain.rs | 12 +- src/database/key_value/rooms/directory.rs | 4 +- src/database/key_value/rooms/edus/mod.rs | 4 + src/database/key_value/rooms/edus/presence.rs | 4 +- .../key_value/rooms/edus/read_receipt.rs | 6 +- src/database/key_value/rooms/edus/typing.rs | 4 +- src/database/key_value/rooms/lazy_load.rs | 2 +- src/database/key_value/rooms/metadata.rs | 2 +- src/database/key_value/rooms/mod.rs | 8 +- src/database/key_value/rooms/outlier.rs | 2 +- 
src/database/key_value/rooms/pdu_metadata.rs | 2 +- src/database/key_value/rooms/search.rs | 6 +- src/database/key_value/rooms/short.rs | 4 + src/database/key_value/rooms/state.rs | 4 +- .../key_value/rooms/state_accessor.rs | 2 +- src/database/key_value/rooms/state_cache.rs | 70 +- .../key_value/rooms/state_compressor.rs | 2 +- src/database/key_value/rooms/timeline.rs | 8 +- src/database/key_value/rooms/user.rs | 4 +- src/database/key_value/transaction_ids.rs | 2 +- src/database/key_value/uiaa.rs | 6 +- src/database/key_value/users.rs | 10 +- src/database/mod.rs | 7 +- src/lib.rs | 8 +- src/service/account_data/data.rs | 137 +- src/service/account_data/mod.rs | 21 +- src/service/admin/mod.rs | 1793 +++++++++-------- src/service/appservice/data.rs | 5 +- src/service/appservice/mod.rs | 4 +- src/service/globals/data.rs | 8 + src/service/globals/mod.rs | 44 +- src/service/key_backups/data.rs | 346 +--- src/service/key_backups/mod.rs | 9 +- src/service/media/data.rs | 8 + src/service/media/mod.rs | 195 +- src/service/mod.rs | 27 +- src/service/pdu.rs | 4 +- src/service/pusher/data.rs | 3 +- src/service/pusher/mod.rs | 4 +- src/service/rooms/alias/data.rs | 11 +- src/service/rooms/alias/mod.rs | 6 +- src/service/rooms/auth_chain/data.rs | 5 +- src/service/rooms/auth_chain/mod.rs | 4 +- src/service/rooms/directory/data.rs | 9 +- src/service/rooms/directory/mod.rs | 4 +- src/service/rooms/edus/mod.rs | 4 +- src/service/rooms/edus/presence/data.rs | 1 + src/service/rooms/edus/presence/mod.rs | 4 +- src/service/rooms/edus/read_receipt/data.rs | 5 +- src/service/rooms/edus/read_receipt/mod.rs | 4 +- src/service/rooms/edus/typing/data.rs | 4 +- src/service/rooms/edus/typing/mod.rs | 6 +- src/service/rooms/event_handler/mod.rs | 7 +- src/service/rooms/lazy_loading/data.rs | 1 + src/service/rooms/lazy_loading/mod.rs | 4 +- src/service/rooms/metadata/data.rs | 1 + src/service/rooms/metadata/mod.rs | 4 +- src/service/rooms/mod.rs | 4 +- src/service/rooms/outlier/data.rs | 4 +- src/service/rooms/outlier/mod.rs | 4 +- src/service/rooms/pdu_metadata/data.rs | 1 + src/service/rooms/pdu_metadata/mod.rs | 4 +- src/service/rooms/search/data.rs | 5 +- src/service/rooms/search/mod.rs | 4 +- src/service/rooms/short/data.rs | 2 + src/service/rooms/short/mod.rs | 19 +- src/service/rooms/state/data.rs | 22 +- src/service/rooms/state/mod.rs | 28 +- src/service/rooms/state_accessor/data.rs | 6 +- src/service/rooms/state_accessor/mod.rs | 4 +- src/service/rooms/state_cache/data.rs | 8 +- src/service/rooms/state_cache/mod.rs | 63 +- src/service/rooms/state_compressor/data.rs | 7 +- src/service/rooms/state_compressor/mod.rs | 16 +- src/service/rooms/timeline/data.rs | 10 +- src/service/rooms/timeline/mod.rs | 103 +- src/service/rooms/user/data.rs | 5 +- src/service/rooms/user/mod.rs | 6 +- src/service/{sending.rs => sending/mod.rs} | 112 +- src/service/transaction_ids/data.rs | 3 + src/service/transaction_ids/mod.rs | 6 +- src/service/uiaa/data.rs | 1 + src/service/uiaa/mod.rs | 13 +- src/service/users/data.rs | 12 +- src/service/users/mod.rs | 16 +- 103 files changed, 1597 insertions(+), 2729 deletions(-) create mode 100644 src/database/key_value/rooms/short.rs create mode 100644 src/service/globals/data.rs create mode 100644 src/service/media/data.rs create mode 100644 src/service/rooms/short/data.rs rename src/service/{sending.rs => sending/mod.rs} (88%) diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index 848bfaa7..6af597e1 100644 --- a/src/api/client_server/account.rs +++ 
b/src/api/client_server/account.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; use crate::{ - utils, Error, Result, Ruma, services, + utils, Error, Result, Ruma, services, api::client_server, }; use ruma::{ api::client::{ @@ -381,7 +381,7 @@ pub async fn deactivate_route( } // Make the user leave all rooms before deactivation - services().rooms.leave_all_rooms(&sender_user).await?; + client_server::leave_all_rooms(&sender_user).await?; // Remove devices and mark account as deactivated services().users.deactivate_account(sender_user)?; diff --git a/src/api/client_server/alias.rs b/src/api/client_server/alias.rs index 7aa5fb2c..444cc15f 100644 --- a/src/api/client_server/alias.rs +++ b/src/api/client_server/alias.rs @@ -25,12 +25,12 @@ pub async fn create_alias_route( )); } - if services().rooms.id_from_alias(&body.room_alias)?.is_some() { + if services().rooms.alias.resolve_local_alias(&body.room_alias)?.is_some() { return Err(Error::Conflict("Alias already exists.")); } - services().rooms - .set_alias(&body.room_alias, Some(&body.room_id))?; + services().rooms.alias + .set_alias(&body.room_alias, &body.room_id)?; Ok(create_alias::v3::Response::new()) } @@ -51,7 +51,7 @@ pub async fn delete_alias_route( )); } - services().rooms.set_alias(&body.room_alias, None)?; + services().rooms.alias.remove_alias(&body.room_alias)?; // TODO: update alt_aliases? @@ -88,7 +88,7 @@ pub(crate) async fn get_alias_helper( } let mut room_id = None; - match services().rooms.id_from_alias(room_alias)? { + match services().rooms.alias.resolve_local_alias(room_alias)? { Some(r) => room_id = Some(r), None => { for (_id, registration) in services().appservice.all()? { @@ -115,7 +115,7 @@ pub(crate) async fn get_alias_helper( .await .is_ok() { - room_id = Some(services().rooms.id_from_alias(room_alias)?.ok_or_else(|| { + room_id = Some(services().rooms.alias.resolve_local_alias(room_alias)?.ok_or_else(|| { Error::bad_config("Appservice lied to us. Room does not exist.") })?); break; diff --git a/src/api/client_server/context.rs b/src/api/client_server/context.rs index 3551dcfd..c407c71e 100644 --- a/src/api/client_server/context.rs +++ b/src/api/client_server/context.rs @@ -29,16 +29,18 @@ pub async fn get_context_route( let base_pdu_id = services() .rooms + .timeline .get_pdu_id(&body.event_id)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "Base event id not found.", ))?; - let base_token = services().rooms.pdu_count(&base_pdu_id)?; + let base_token = services().rooms.timeline.pdu_count(&base_pdu_id)?; let base_event = services() .rooms + .timeline .get_pdu_from_id(&base_pdu_id)? .ok_or(Error::BadRequest( ErrorKind::NotFound, @@ -47,14 +49,14 @@ pub async fn get_context_route( let room_id = base_event.room_id.clone(); - if !services().rooms.is_joined(sender_user, &room_id)? { + if !services().rooms.state_cache.is_joined(sender_user, &room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", )); } - if !services().rooms.lazy_load_was_sent_before( + if !services().rooms.lazy_loading.lazy_load_was_sent_before( sender_user, sender_device, &room_id, @@ -68,6 +70,7 @@ pub async fn get_context_route( let events_before: Vec<_> = services() .rooms + .timeline .pdus_until(sender_user, &room_id, base_token)? 
.take( u32::try_from(body.limit).map_err(|_| { @@ -79,7 +82,7 @@ pub async fn get_context_route( .collect(); for (_, event) in &events_before { - if !services().rooms.lazy_load_was_sent_before( + if !services().rooms.lazy_loading.lazy_load_was_sent_before( sender_user, sender_device, &room_id, @@ -92,7 +95,7 @@ pub async fn get_context_route( let start_token = events_before .last() - .and_then(|(pdu_id, _)| services().rooms.pdu_count(pdu_id).ok()) + .and_then(|(pdu_id, _)| services().rooms.timeline.pdu_count(pdu_id).ok()) .map(|count| count.to_string()); let events_before: Vec<_> = events_before @@ -102,6 +105,7 @@ pub async fn get_context_route( let events_after: Vec<_> = services() .rooms + .timeline .pdus_after(sender_user, &room_id, base_token)? .take( u32::try_from(body.limit).map_err(|_| { @@ -113,7 +117,7 @@ pub async fn get_context_route( .collect(); for (_, event) in &events_after { - if !services().rooms.lazy_load_was_sent_before( + if !services().rooms.lazy_loading.lazy_load_was_sent_before( sender_user, sender_device, &room_id, @@ -124,7 +128,7 @@ pub async fn get_context_route( } } - let shortstatehash = match services().rooms.pdu_shortstatehash( + let shortstatehash = match services().rooms.state_accessor.pdu_shortstatehash( events_after .last() .map_or(&*body.event_id, |(_, e)| &*e.event_id), @@ -132,15 +136,16 @@ pub async fn get_context_route( Some(s) => s, None => services() .rooms - .current_shortstatehash(&room_id)? + .state + .get_room_shortstatehash(&room_id)? .expect("All rooms have state"), }; - let state_ids = services().rooms.state_full_ids(shortstatehash).await?; + let state_ids = services().rooms.state_accessor.state_full_ids(shortstatehash).await?; let end_token = events_after .last() - .and_then(|(pdu_id, _)| services().rooms.pdu_count(pdu_id).ok()) + .and_then(|(pdu_id, _)| services().rooms.timeline.pdu_count(pdu_id).ok()) .map(|count| count.to_string()); let events_after: Vec<_> = events_after @@ -151,10 +156,10 @@ pub async fn get_context_route( let mut state = Vec::new(); for (shortstatekey, id) in state_ids { - let (event_type, state_key) = services().rooms.get_statekey_from_short(shortstatekey)?; + let (event_type, state_key) = services().rooms.short.get_statekey_from_short(shortstatekey)?; if event_type != StateEventType::RoomMember { - let pdu = match services().rooms.get_pdu(&id)? { + let pdu = match services().rooms.timeline.get_pdu(&id)? { Some(pdu) => pdu, None => { error!("Pdu in state not found: {}", id); @@ -163,7 +168,7 @@ pub async fn get_context_route( }; state.push(pdu.to_state_event()); } else if !lazy_load_enabled || lazy_loaded.contains(&state_key) { - let pdu = match services().rooms.get_pdu(&id)? { + let pdu = match services().rooms.timeline.get_pdu(&id)? 
{ Some(pdu) => pdu, None => { error!("Pdu in state not found: {}", id); diff --git a/src/api/client_server/directory.rs b/src/api/client_server/directory.rs index 87493fa0..2a60f672 100644 --- a/src/api/client_server/directory.rs +++ b/src/api/client_server/directory.rs @@ -86,10 +86,10 @@ pub async fn set_room_visibility_route( match &body.visibility { room::Visibility::Public => { - services().rooms.set_public(&body.room_id, true)?; + services().rooms.directory.set_public(&body.room_id)?; info!("{} made {} public", sender_user, body.room_id); } - room::Visibility::Private => services().rooms.set_public(&body.room_id, false)?, + room::Visibility::Private => services().rooms.directory.set_not_public(&body.room_id)?, _ => { return Err(Error::BadRequest( ErrorKind::InvalidParam, @@ -108,7 +108,7 @@ pub async fn get_room_visibility_route( body: Ruma, ) -> Result { Ok(get_room_visibility::v3::Response { - visibility: if services().rooms.is_public_room(&body.room_id)? { + visibility: if services().rooms.directory.is_public_room(&body.room_id)? { room::Visibility::Public } else { room::Visibility::Private @@ -176,6 +176,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( let mut all_rooms: Vec<_> = services() .rooms + .directory .public_rooms() .map(|room_id| { let room_id = room_id?; @@ -183,6 +184,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( let chunk = PublicRoomsChunk { canonical_alias: services() .rooms + .state_accessor .room_state_get(&room_id, &StateEventType::RoomCanonicalAlias, "")? .map_or(Ok(None), |s| { serde_json::from_str(s.content.get()) @@ -193,6 +195,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( })?, name: services() .rooms + .state_accessor .room_state_get(&room_id, &StateEventType::RoomName, "")? .map_or(Ok(None), |s| { serde_json::from_str(s.content.get()) @@ -203,6 +206,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( })?, num_joined_members: services() .rooms + .state_cache .room_joined_count(&room_id)? .unwrap_or_else(|| { warn!("Room {} has no member count", room_id); @@ -212,6 +216,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( .expect("user count should not be that big"), topic: services() .rooms + .state_accessor .room_state_get(&room_id, &StateEventType::RoomTopic, "")? .map_or(Ok(None), |s| { serde_json::from_str(s.content.get()) @@ -222,6 +227,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( })?, world_readable: services() .rooms + .state_accessor .room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")? .map_or(Ok(false), |s| { serde_json::from_str(s.content.get()) @@ -236,6 +242,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( })?, guest_can_join: services() .rooms + .state_accessor .room_state_get(&room_id, &StateEventType::RoomGuestAccess, "")? .map_or(Ok(false), |s| { serde_json::from_str(s.content.get()) @@ -248,6 +255,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( })?, avatar_url: services() .rooms + .state_accessor .room_state_get(&room_id, &StateEventType::RoomAvatar, "")? .map(|s| { serde_json::from_str(s.content.get()) @@ -261,6 +269,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( .flatten(), join_rule: services() .rooms + .state_accessor .room_state_get(&room_id, &StateEventType::RoomJoinRules, "")? 
.map(|s| { serde_json::from_str(s.content.get()) diff --git a/src/api/client_server/keys.rs b/src/api/client_server/keys.rs index 698bd1ec..4ce5d4c0 100644 --- a/src/api/client_server/keys.rs +++ b/src/api/client_server/keys.rs @@ -230,7 +230,7 @@ pub async fn get_key_changes_route( .filter_map(|r| r.ok()), ); - for room_id in services().rooms.rooms_joined(sender_user).filter_map(|r| r.ok()) { + for room_id in services().rooms.state_cache.rooms_joined(sender_user).filter_map(|r| r.ok()) { device_list_updates.extend( services().users .keys_changed( diff --git a/src/api/client_server/media.rs b/src/api/client_server/media.rs index f0da0849..d6e8213c 100644 --- a/src/api/client_server/media.rs +++ b/src/api/client_server/media.rs @@ -99,7 +99,7 @@ pub async fn get_content_route( content_disposition, content_type, file, - }) = services().media.get(&mxc).await? + }) = services().media.get(mxc.clone()).await? { Ok(get_content::v3::Response { file, @@ -129,7 +129,7 @@ pub async fn get_content_as_filename_route( content_disposition: _, content_type, file, - }) = services().media.get(&mxc).await? + }) = services().media.get(mxc.clone()).await? { Ok(get_content_as_filename::v3::Response { file, @@ -165,7 +165,7 @@ pub async fn get_content_thumbnail_route( }) = services() .media .get_thumbnail( - &mxc, + mxc.clone(), body.width .try_into() .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?, diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index b000ec1b..d6f820a7 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -30,7 +30,7 @@ use std::{ }; use tracing::{debug, error, warn}; -use crate::{services, PduEvent, service::pdu::{gen_event_id_canonical_json, PduBuilder}, Error, api::{server_server}, utils, Ruma}; +use crate::{Result, services, PduEvent, service::pdu::{gen_event_id_canonical_json, PduBuilder}, Error, api::{server_server, client_server}, utils, Ruma}; use super::get_alias_helper; @@ -48,6 +48,7 @@ pub async fn join_room_by_id_route( let mut servers = Vec::new(); // There is no body.server_name for /roomId/join servers.extend( services().rooms + .state_cache .invite_state(sender_user, &body.room_id)? .unwrap_or_default() .iter() @@ -88,6 +89,7 @@ pub async fn join_room_by_id_or_alias_route( let mut servers = body.server_name.clone(); servers.extend( services().rooms + .state_cache .invite_state(sender_user, &room_id)? 
.unwrap_or_default() .iter() @@ -131,7 +133,7 @@ pub async fn leave_room_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services().rooms.leave_room(sender_user, &body.room_id).await?; + leave_room(sender_user, &body.room_id).await?; Ok(leave_room::v3::Response::new()) } @@ -162,6 +164,7 @@ pub async fn kick_user_route( let mut event: RoomMemberEventContent = serde_json::from_str( services().rooms + .state_accessor .room_state_get( &body.room_id, &StateEventType::RoomMember, @@ -189,7 +192,7 @@ pub async fn kick_user_route( ); let state_lock = mutex_state.lock().await; - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), @@ -219,6 +222,7 @@ pub async fn ban_user_route( let event = services() .rooms + .state_accessor .room_state_get( &body.room_id, &StateEventType::RoomMember, @@ -255,7 +259,7 @@ pub async fn ban_user_route( ); let state_lock = mutex_state.lock().await; - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), @@ -283,6 +287,7 @@ pub async fn unban_user_route( let mut event: RoomMemberEventContent = serde_json::from_str( services().rooms + .state_accessor .room_state_get( &body.room_id, &StateEventType::RoomMember, @@ -309,7 +314,7 @@ pub async fn unban_user_route( ); let state_lock = mutex_state.lock().await; - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), @@ -340,7 +345,7 @@ pub async fn forget_room_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services().rooms.forget(&body.room_id, sender_user)?; + services().rooms.state_cache.forget(&body.room_id, sender_user)?; Ok(forget_room::v3::Response::new()) } @@ -356,6 +361,7 @@ pub async fn joined_rooms_route( Ok(joined_rooms::v3::Response { joined_rooms: services() .rooms + .state_cache .rooms_joined(sender_user) .filter_map(|r| r.ok()) .collect(), @@ -373,7 +379,7 @@ pub async fn get_member_events_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // TODO: check history visibility? - if !services().rooms.is_joined(sender_user, &body.room_id)? { + if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -383,6 +389,7 @@ pub async fn get_member_events_route( Ok(get_member_events::v3::Response { chunk: services() .rooms + .state_accessor .room_state_full(&body.room_id) .await? .iter() @@ -403,7 +410,7 @@ pub async fn joined_members_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !services().rooms.is_joined(sender_user, &body.room_id)? { + if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? 
{ return Err(Error::BadRequest( ErrorKind::Forbidden, "You aren't a member of the room.", @@ -411,7 +418,7 @@ pub async fn joined_members_route( } let mut joined = BTreeMap::new(); - for user_id in services().rooms.room_members(&body.room_id).filter_map(|r| r.ok()) { + for user_id in services().rooms.state_cache.room_members(&body.room_id).filter_map(|r| r.ok()) { let display_name = services().users.displayname(&user_id)?; let avatar_url = services().users.avatar_url(&user_id)?; @@ -446,7 +453,7 @@ async fn join_room_by_id_helper( let state_lock = mutex_state.lock().await; // Ask a remote server if we don't have this room - if !services().rooms.exists(room_id)? { + if !services().rooms.metadata.exists(room_id)? { let mut make_join_response_and_server = Err(Error::BadServerResponse( "No server available to assist in joining.", )); @@ -553,7 +560,7 @@ async fn join_room_by_id_helper( ) .await?; - services().rooms.get_or_create_shortroomid(room_id, &services().globals)?; + services().rooms.short.get_or_create_shortroomid(room_id)?; let parsed_pdu = PduEvent::from_id_val(event_id, join_event.clone()) .map_err(|_| Error::BadServerResponse("Invalid join event PDU."))?; @@ -586,7 +593,7 @@ async fn join_room_by_id_helper( services().rooms.add_pdu_outlier(&event_id, &value)?; if let Some(state_key) = &pdu.state_key { - let shortstatekey = services().rooms.get_or_create_shortstatekey( + let shortstatekey = services().rooms.short.get_or_create_shortstatekey( &pdu.kind.to_string().into(), state_key, )?; @@ -594,7 +601,7 @@ async fn join_room_by_id_helper( } } - let incoming_shortstatekey = services().rooms.get_or_create_shortstatekey( + let incoming_shortstatekey = services().rooms.short.get_or_create_shortstatekey( &parsed_pdu.kind.to_string().into(), parsed_pdu .state_key @@ -606,6 +613,7 @@ async fn join_room_by_id_helper( let create_shortstatekey = services() .rooms + .short .get_shortstatekey(&StateEventType::RoomCreate, "")? .expect("Room exists"); @@ -613,7 +621,7 @@ async fn join_room_by_id_helper( return Err(Error::BadServerResponse("State contained no create event.")); } - services().rooms.force_state( + services().rooms.state.force_state( room_id, state .into_iter() @@ -780,7 +788,7 @@ pub(crate) async fn invite_helper<'a>( redacts: None, }, sender_user, room_id, &state_lock); - let invite_room_state = services().rooms.calculate_invite_state(&pdu)?; + let invite_room_state = services().rooms.state.calculate_invite_state(&pdu)?; drop(state_lock); diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs index 14affc65..f8d06023 100644 --- a/src/api/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -87,7 +87,7 @@ pub async fn create_room_route( Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias.") })?; - if services().rooms.id_from_alias(&alias)?.is_some() { + if services().rooms.alias.resolve_local_alias(&alias)?.is_some() { Err(Error::BadRequest( ErrorKind::RoomInUse, "Room alias already exists.", diff --git a/src/api/client_server/state.rs b/src/api/client_server/state.rs index 4e8d594e..b2dfe2a7 100644 --- a/src/api/client_server/state.rs +++ b/src/api/client_server/state.rs @@ -246,7 +246,7 @@ async fn send_state_event_for_key_helper( if alias.server_name() != services().globals.server_name() || services() .rooms - .id_from_alias(&alias)? + .alias.resolve_local_alias(&alias)? 
.filter(|room| room == room_id) // Make sure it's the right room .is_none() { diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 776777d1..bacc1ac7 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -1842,7 +1842,7 @@ pub async fn get_room_information_route( let room_id = services() .rooms - .id_from_alias(&body.room_alias)? + .alias.resolve_local_alias(&body.room_alias)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "Room alias not found.", diff --git a/src/database/key_value/account_data.rs b/src/database/key_value/account_data.rs index 70ad9f2a..49c9170f 100644 --- a/src/database/key_value/account_data.rs +++ b/src/database/key_value/account_data.rs @@ -1,17 +1,14 @@ -use crate::{utils, Error, Result}; -use ruma::{ - api::client::error::ErrorKind, - events::{AnyEphemeralRoomEvent, RoomAccountDataEventType}, - serde::Raw, - RoomId, UserId, -}; -use serde::{de::DeserializeOwned, Serialize}; -use std::{collections::HashMap, sync::Arc}; - -impl AccountData { +use std::collections::HashMap; + +use ruma::{UserId, DeviceId, signatures::CanonicalJsonValue, api::client::{uiaa::UiaaInfo, error::ErrorKind}, events::{RoomAccountDataEventType, AnyEphemeralRoomEvent}, serde::Raw, RoomId}; +use serde::{Serialize, de::DeserializeOwned}; + +use crate::{Result, database::KeyValueDatabase, service, Error, utils, services}; + +impl service::account_data::Data for KeyValueDatabase { /// Places one event in the account data of the user and removes the previous entry. #[tracing::instrument(skip(self, room_id, user_id, event_type, data))] - pub fn update( + fn update( &self, room_id: Option<&RoomId>, user_id: &UserId, @@ -63,7 +60,7 @@ impl AccountData { /// Searches the account data for a specific kind. #[tracing::instrument(skip(self, room_id, user_id, kind))] - pub fn get( + fn get( &self, room_id: Option<&RoomId>, user_id: &UserId, @@ -96,7 +93,7 @@ impl AccountData { /// Returns all changes to the account data that happened after `since`. 
#[tracing::instrument(skip(self, room_id, user_id, since))] - pub fn changes_since( + fn changes_since( &self, room_id: Option<&RoomId>, user_id: &UserId, diff --git a/src/database/key_value/appservice.rs b/src/database/key_value/appservice.rs index eae2cfbc..edb027e9 100644 --- a/src/database/key_value/appservice.rs +++ b/src/database/key_value/appservice.rs @@ -1,4 +1,4 @@ -use crate::{database::KeyValueDatabase, service, utils, Error}; +use crate::{database::KeyValueDatabase, service, utils, Error, Result}; impl service::appservice::Data for KeyValueDatabase { /// Registers an appservice and returns the ID to the caller @@ -54,7 +54,7 @@ impl service::appservice::Data for KeyValueDatabase { ) } - fn iter_ids(&self) -> Result> + '_> { + fn iter_ids(&self) -> Result>>> { Ok(self.id_appserviceregistrations.iter().map(|(id, _)| { utils::string_from_bytes(&id) .map_err(|_| Error::bad_database("Invalid id bytes in id_appserviceregistrations.")) diff --git a/src/database/key_value/globals.rs b/src/database/key_value/globals.rs index 2b47e5b1..81e6ee1f 100644 --- a/src/database/key_value/globals.rs +++ b/src/database/key_value/globals.rs @@ -1,108 +1,13 @@ -mod data; -pub use data::Data; +use ruma::signatures::Ed25519KeyPair; -use crate::service::*; +use crate::{Result, service, database::KeyValueDatabase, Error, utils}; -use crate::{database::Config, server_server::FedDest, utils, Error, Result}; -use ruma::{ - api::{ - client::sync::sync_events, - federation::discovery::{ServerSigningKeys, VerifyKey}, - }, - DeviceId, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, ServerName, - ServerSigningKeyId, UserId, -}; -use std::{ - collections::{BTreeMap, HashMap}, - fs, - future::Future, - net::{IpAddr, SocketAddr}, - path::PathBuf, - sync::{Arc, Mutex, RwLock}, - time::{Duration, Instant}, -}; -use tokio::sync::{broadcast, watch::Receiver, Mutex as TokioMutex, Semaphore}; -use tracing::error; -use trust_dns_resolver::TokioAsyncResolver; - -use super::abstraction::Tree; - -pub const COUNTER: &[u8] = b"c"; - -type WellKnownMap = HashMap, (FedDest, String)>; -type TlsNameMap = HashMap, u16)>; -type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries -type SyncHandle = ( - Option, // since - Receiver>>, // rx -); - -pub struct Service { - db: D, - - pub actual_destination_cache: Arc>, // actual_destination, host - pub tls_name_override: Arc>, - pub config: Config, - keypair: Arc, - dns_resolver: TokioAsyncResolver, - jwt_decoding_key: Option>, - federation_client: reqwest::Client, - default_client: reqwest::Client, - pub stable_room_versions: Vec, - pub unstable_room_versions: Vec, - pub bad_event_ratelimiter: Arc, RateLimitState>>>, - pub bad_signature_ratelimiter: Arc, RateLimitState>>>, - pub servername_ratelimiter: Arc, Arc>>>, - pub sync_receivers: RwLock, Box), SyncHandle>>, - pub roomid_mutex_insert: RwLock, Arc>>>, - pub roomid_mutex_state: RwLock, Arc>>>, - pub roomid_mutex_federation: RwLock, Arc>>>, // this lock will be held longer - pub roomid_federationhandletime: RwLock, (Box, Instant)>>, - pub stateres_mutex: Arc>, - pub rotate: RotationHandler, -} - -/// Handles "rotation" of long-polling requests. "Rotation" in this context is similar to "rotation" of log files and the like. -/// -/// This is utilized to have sync workers return early and release read locks on the database. 
-pub struct RotationHandler(broadcast::Sender<()>, broadcast::Receiver<()>); - -impl RotationHandler { - pub fn new() -> Self { - let (s, r) = broadcast::channel(1); - Self(s, r) - } - - pub fn watch(&self) -> impl Future { - let mut r = self.0.subscribe(); - - async move { - let _ = r.recv().await; - } - } - - pub fn fire(&self) { - let _ = self.0.send(()); - } -} - -impl Default for RotationHandler { - fn default() -> Self { - Self::new() - } -} - - -impl Service<_> { - pub fn load( - globals: Arc, - server_signingkeys: Arc, - config: Config, - ) -> Result { - let keypair_bytes = globals.get(b"keypair")?.map_or_else( +impl service::globals::Data for KeyValueDatabase { + fn load_keypair(&self) -> Result { + let keypair_bytes = self.globals.get(b"keypair")?.map_or_else( || { let keypair = utils::generate_keypair(); - globals.insert(b"keypair", &keypair)?; + self.globals.insert(b"keypair", &keypair)?; Ok::<_, Error>(keypair) }, |s| Ok(s.to_vec()), @@ -125,302 +30,11 @@ impl Service<_> { .map(|key| (version, key)) }) .and_then(|(version, key)| { - ruma::signatures::Ed25519KeyPair::from_der(key, version) + Ed25519KeyPair::from_der(key, version) .map_err(|_| Error::bad_database("Private or public keys are invalid.")) }); - - let keypair = match keypair { - Ok(k) => k, - Err(e) => { - error!("Keypair invalid. Deleting..."); - globals.remove(b"keypair")?; - return Err(e); - } - }; - - let tls_name_override = Arc::new(RwLock::new(TlsNameMap::new())); - - let jwt_decoding_key = config - .jwt_secret - .as_ref() - .map(|secret| jsonwebtoken::DecodingKey::from_secret(secret.as_bytes()).into_static()); - - let default_client = reqwest_client_builder(&config)?.build()?; - let name_override = Arc::clone(&tls_name_override); - let federation_client = reqwest_client_builder(&config)? 
- .resolve_fn(move |domain| { - let read_guard = name_override.read().unwrap(); - let (override_name, port) = read_guard.get(&domain)?; - let first_name = override_name.get(0)?; - Some(SocketAddr::new(*first_name, *port)) - }) - .build()?; - - // Supported and stable room versions - let stable_room_versions = vec![ - RoomVersionId::V6, - RoomVersionId::V7, - RoomVersionId::V8, - RoomVersionId::V9, - ]; - // Experimental, partially supported room versions - let unstable_room_versions = vec![RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5]; - - let mut s = Self { - globals, - config, - keypair: Arc::new(keypair), - dns_resolver: TokioAsyncResolver::tokio_from_system_conf().map_err(|e| { - error!( - "Failed to set up trust dns resolver with system config: {}", - e - ); - Error::bad_config("Failed to set up trust dns resolver with system config.") - })?, - actual_destination_cache: Arc::new(RwLock::new(WellKnownMap::new())), - tls_name_override, - federation_client, - default_client, - server_signingkeys, - jwt_decoding_key, - stable_room_versions, - unstable_room_versions, - bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())), - bad_signature_ratelimiter: Arc::new(RwLock::new(HashMap::new())), - servername_ratelimiter: Arc::new(RwLock::new(HashMap::new())), - roomid_mutex_state: RwLock::new(HashMap::new()), - roomid_mutex_insert: RwLock::new(HashMap::new()), - roomid_mutex_federation: RwLock::new(HashMap::new()), - roomid_federationhandletime: RwLock::new(HashMap::new()), - stateres_mutex: Arc::new(Mutex::new(())), - sync_receivers: RwLock::new(HashMap::new()), - rotate: RotationHandler::new(), - }; - - fs::create_dir_all(s.get_media_folder())?; - - if !s - .supported_room_versions() - .contains(&s.config.default_room_version) - { - error!("Room version in config isn't supported, falling back to Version 6"); - s.config.default_room_version = RoomVersionId::V6; - }; - - Ok(s) - } - - /// Returns this server's keypair. - pub fn keypair(&self) -> &ruma::signatures::Ed25519KeyPair { - &self.keypair - } - - /// Returns a reqwest client which can be used to send requests - pub fn default_client(&self) -> reqwest::Client { - // Client is cheap to clone (Arc wrapper) and avoids lifetime issues - self.default_client.clone() } - - /// Returns a client used for resolving .well-knowns - pub fn federation_client(&self) -> reqwest::Client { - // Client is cheap to clone (Arc wrapper) and avoids lifetime issues - self.federation_client.clone() - } - - #[tracing::instrument(skip(self))] - pub fn next_count(&self) -> Result { - utils::u64_from_bytes(&self.globals.increment(COUNTER)?) - .map_err(|_| Error::bad_database("Count has invalid bytes.")) - } - - #[tracing::instrument(skip(self))] - pub fn current_count(&self) -> Result { - self.globals.get(COUNTER)?.map_or(Ok(0_u64), |bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Count has invalid bytes.")) - }) - } - - pub fn server_name(&self) -> &ServerName { - self.config.server_name.as_ref() - } - - pub fn max_request_size(&self) -> u32 { - self.config.max_request_size - } - - pub fn allow_registration(&self) -> bool { - self.config.allow_registration - } - - pub fn allow_encryption(&self) -> bool { - self.config.allow_encryption - } - - pub fn allow_federation(&self) -> bool { - self.config.allow_federation - } - - pub fn allow_room_creation(&self) -> bool { - self.config.allow_room_creation + fn remove_keypair(&self) -> Result<()> { + self.globals.remove(b"keypair")? 
} - - pub fn allow_unstable_room_versions(&self) -> bool { - self.config.allow_unstable_room_versions - } - - pub fn default_room_version(&self) -> RoomVersionId { - self.config.default_room_version.clone() - } - - pub fn trusted_servers(&self) -> &[Box] { - &self.config.trusted_servers - } - - pub fn dns_resolver(&self) -> &TokioAsyncResolver { - &self.dns_resolver - } - - pub fn jwt_decoding_key(&self) -> Option<&jsonwebtoken::DecodingKey<'_>> { - self.jwt_decoding_key.as_ref() - } - - pub fn turn_password(&self) -> &String { - &self.config.turn_password - } - - pub fn turn_ttl(&self) -> u64 { - self.config.turn_ttl - } - - pub fn turn_uris(&self) -> &[String] { - &self.config.turn_uris - } - - pub fn turn_username(&self) -> &String { - &self.config.turn_username - } - - pub fn turn_secret(&self) -> &String { - &self.config.turn_secret - } - - pub fn emergency_password(&self) -> &Option { - &self.config.emergency_password - } - - pub fn supported_room_versions(&self) -> Vec { - let mut room_versions: Vec = vec![]; - room_versions.extend(self.stable_room_versions.clone()); - if self.allow_unstable_room_versions() { - room_versions.extend(self.unstable_room_versions.clone()); - }; - room_versions - } - - /// TODO: the key valid until timestamp is only honored in room version > 4 - /// Remove the outdated keys and insert the new ones. - /// - /// This doesn't actually check that the keys provided are newer than the old set. - pub fn add_signing_key( - &self, - origin: &ServerName, - new_keys: ServerSigningKeys, - ) -> Result, VerifyKey>> { - // Not atomic, but this is not critical - let signingkeys = self.server_signingkeys.get(origin.as_bytes())?; - - let mut keys = signingkeys - .and_then(|keys| serde_json::from_slice(&keys).ok()) - .unwrap_or_else(|| { - // Just insert "now", it doesn't matter - ServerSigningKeys::new(origin.to_owned(), MilliSecondsSinceUnixEpoch::now()) - }); - - let ServerSigningKeys { - verify_keys, - old_verify_keys, - .. - } = new_keys; - - keys.verify_keys.extend(verify_keys.into_iter()); - keys.old_verify_keys.extend(old_verify_keys.into_iter()); - - self.server_signingkeys.insert( - origin.as_bytes(), - &serde_json::to_vec(&keys).expect("serversigningkeys can be serialized"), - )?; - - let mut tree = keys.verify_keys; - tree.extend( - keys.old_verify_keys - .into_iter() - .map(|old| (old.0, VerifyKey::new(old.1.key))), - ); - - Ok(tree) - } - - /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server. - pub fn signing_keys_for( - &self, - origin: &ServerName, - ) -> Result, VerifyKey>> { - let signingkeys = self - .server_signingkeys - .get(origin.as_bytes())? 
- .and_then(|bytes| serde_json::from_slice(&bytes).ok()) - .map(|keys: ServerSigningKeys| { - let mut tree = keys.verify_keys; - tree.extend( - keys.old_verify_keys - .into_iter() - .map(|old| (old.0, VerifyKey::new(old.1.key))), - ); - tree - }) - .unwrap_or_else(BTreeMap::new); - - Ok(signingkeys) - } - - pub fn database_version(&self) -> Result { - self.globals.get(b"version")?.map_or(Ok(0), |version| { - utils::u64_from_bytes(&version) - .map_err(|_| Error::bad_database("Database version id is invalid.")) - }) - } - - pub fn bump_database_version(&self, new_version: u64) -> Result<()> { - self.globals - .insert(b"version", &new_version.to_be_bytes())?; - Ok(()) - } - - pub fn get_media_folder(&self) -> PathBuf { - let mut r = PathBuf::new(); - r.push(self.config.database_path.clone()); - r.push("media"); - r - } - - pub fn get_media_file(&self, key: &[u8]) -> PathBuf { - let mut r = PathBuf::new(); - r.push(self.config.database_path.clone()); - r.push("media"); - r.push(base64::encode_config(key, base64::URL_SAFE_NO_PAD)); - r - } -} - -fn reqwest_client_builder(config: &Config) -> Result { - let mut reqwest_client_builder = reqwest::Client::builder() - .connect_timeout(Duration::from_secs(30)) - .timeout(Duration::from_secs(60 * 3)); - - if let Some(proxy) = config.proxy.to_proxy()? { - reqwest_client_builder = reqwest_client_builder.proxy(proxy); - } - - Ok(reqwest_client_builder) } diff --git a/src/database/key_value/key_backups.rs b/src/database/key_value/key_backups.rs index be1d6b18..8171451c 100644 --- a/src/database/key_value/key_backups.rs +++ b/src/database/key_value/key_backups.rs @@ -1,16 +1,11 @@ -use crate::{utils, Error, Result, services}; -use ruma::{ - api::client::{ - backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, - error::ErrorKind, - }, - serde::Raw, - RoomId, UserId, -}; -use std::{collections::BTreeMap, sync::Arc}; - -impl KeyBackups { - pub fn create_backup( +use std::collections::BTreeMap; + +use ruma::{UserId, serde::Raw, api::client::{backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, error::ErrorKind}, RoomId}; + +use crate::{Result, service, database::KeyValueDatabase, services, Error, utils}; + +impl service::key_backups::Data for KeyValueDatabase { + fn create_backup( &self, user_id: &UserId, backup_metadata: &Raw, @@ -30,7 +25,7 @@ impl KeyBackups { Ok(version) } - pub fn delete_backup(&self, user_id: &UserId, version: &str) -> Result<()> { + fn delete_backup(&self, user_id: &UserId, version: &str) -> Result<()> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(version.as_bytes()); @@ -47,7 +42,7 @@ impl KeyBackups { Ok(()) } - pub fn update_backup( + fn update_backup( &self, user_id: &UserId, version: &str, @@ -71,7 +66,7 @@ impl KeyBackups { Ok(version.to_owned()) } - pub fn get_latest_backup_version(&self, user_id: &UserId) -> Result> { + fn get_latest_backup_version(&self, user_id: &UserId) -> Result> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); let mut last_possible_key = prefix.clone(); @@ -92,7 +87,7 @@ impl KeyBackups { .transpose() } - pub fn get_latest_backup( + fn get_latest_backup( &self, user_id: &UserId, ) -> Result)>> { @@ -123,7 +118,7 @@ impl KeyBackups { .transpose() } - pub fn get_backup( + fn get_backup( &self, user_id: &UserId, version: &str, @@ -140,7 +135,7 @@ impl KeyBackups { }) } - pub fn add_key( + fn add_key( &self, user_id: &UserId, version: &str, @@ -173,7 +168,7 @@ impl KeyBackups { Ok(()) } - pub fn count_keys(&self, user_id: &UserId, version: &str) -> 
Result { + fn count_keys(&self, user_id: &UserId, version: &str) -> Result { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(version.as_bytes()); @@ -181,7 +176,7 @@ impl KeyBackups { Ok(self.backupkeyid_backup.scan_prefix(prefix).count()) } - pub fn get_etag(&self, user_id: &UserId, version: &str) -> Result { + fn get_etag(&self, user_id: &UserId, version: &str) -> Result { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(version.as_bytes()); @@ -196,7 +191,7 @@ impl KeyBackups { .to_string()) } - pub fn get_all( + fn get_all( &self, user_id: &UserId, version: &str, @@ -252,7 +247,7 @@ impl KeyBackups { Ok(rooms) } - pub fn get_room( + fn get_room( &self, user_id: &UserId, version: &str, @@ -289,7 +284,7 @@ impl KeyBackups { .collect()) } - pub fn get_session( + fn get_session( &self, user_id: &UserId, version: &str, @@ -314,7 +309,7 @@ impl KeyBackups { .transpose() } - pub fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()> { + fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(version.as_bytes()); @@ -327,7 +322,7 @@ impl KeyBackups { Ok(()) } - pub fn delete_room_keys( + fn delete_room_keys( &self, user_id: &UserId, version: &str, @@ -347,7 +342,7 @@ impl KeyBackups { Ok(()) } - pub fn delete_room_key( + fn delete_room_key( &self, user_id: &UserId, version: &str, diff --git a/src/database/key_value/media.rs b/src/database/key_value/media.rs index 1bdf6d47..90a5c590 100644 --- a/src/database/key_value/media.rs +++ b/src/database/key_value/media.rs @@ -1,72 +1,7 @@ -use image::{imageops::FilterType, GenericImageView}; +use crate::{database::KeyValueDatabase, service, Error, utils, Result}; -use super::abstraction::Tree; -use crate::{utils, Error, Result}; -use std::{mem, sync::Arc}; -use tokio::{ - fs::File, - io::{AsyncReadExt, AsyncWriteExt}, -}; - -pub struct FileMeta { - pub content_disposition: Option, - pub content_type: Option, - pub file: Vec, -} - -pub struct Media { - pub(super) mediaid_file: Arc, // MediaId = MXC + WidthHeight + ContentDisposition + ContentType -} - -impl Media { - /// Uploads a file. - pub async fn create( - &self, - mxc: String, - globals: &Globals, - content_disposition: &Option<&str>, - content_type: &Option<&str>, - file: &[u8], - ) -> Result<()> { - let mut key = mxc.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(&0_u32.to_be_bytes()); // Width = 0 if it's not a thumbnail - key.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail - key.push(0xff); - key.extend_from_slice( - content_disposition - .as_ref() - .map(|f| f.as_bytes()) - .unwrap_or_default(), - ); - key.push(0xff); - key.extend_from_slice( - content_type - .as_ref() - .map(|c| c.as_bytes()) - .unwrap_or_default(), - ); - - let path = globals.get_media_file(&key); - let mut f = File::create(path).await?; - f.write_all(file).await?; - - self.mediaid_file.insert(&key, &[])?; - Ok(()) - } - - /// Uploads or replaces a file thumbnail. 
- #[allow(clippy::too_many_arguments)] - pub async fn upload_thumbnail( - &self, - mxc: String, - globals: &Globals, - content_disposition: &Option, - content_type: &Option, - width: u32, - height: u32, - file: &[u8], - ) -> Result<()> { +impl service::media::Data for KeyValueDatabase { + fn create_file_metadata(&self, mxc: String, width: u32, height: u32, content_disposition: &Option<&str>, content_type: &Option<&str>) -> Result> { let mut key = mxc.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(&width.to_be_bytes()); @@ -86,272 +21,46 @@ impl Media { .unwrap_or_default(), ); - let path = globals.get_media_file(&key); - let mut f = File::create(path).await?; - f.write_all(file).await?; - self.mediaid_file.insert(&key, &[])?; - Ok(()) + Ok(key) } - /// Downloads a file. - pub async fn get(&self, globals: &Globals, mxc: &str) -> Result> { + fn search_file_metadata(&self, mxc: String, width: u32, height: u32) -> Result<(Option, Option, Vec)> { let mut prefix = mxc.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(&0_u32.to_be_bytes()); // Width = 0 if it's not a thumbnail prefix.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail prefix.push(0xff); - let first = self.mediaid_file.scan_prefix(prefix).next(); - if let Some((key, _)) = first { - let path = globals.get_media_file(&key); - let mut file = Vec::new(); - File::open(path).await?.read_to_end(&mut file).await?; - let mut parts = key.rsplit(|&b| b == 0xff); - - let content_type = parts - .next() - .map(|bytes| { - utils::string_from_bytes(bytes).map_err(|_| { - Error::bad_database("Content type in mediaid_file is invalid unicode.") - }) - }) - .transpose()?; - - let content_disposition_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?; - - let content_disposition = if content_disposition_bytes.is_empty() { - None - } else { - Some( - utils::string_from_bytes(content_disposition_bytes).map_err(|_| { - Error::bad_database( - "Content Disposition in mediaid_file is invalid unicode.", - ) - })?, - ) - }; - - Ok(Some(FileMeta { - content_disposition, - content_type, - file, - })) - } else { - Ok(None) - } - } - - /// Returns width, height of the thumbnail and whether it should be cropped. Returns None when - /// the server should send the original file. - pub fn thumbnail_properties(&self, width: u32, height: u32) -> Option<(u32, u32, bool)> { - match (width, height) { - (0..=32, 0..=32) => Some((32, 32, true)), - (0..=96, 0..=96) => Some((96, 96, true)), - (0..=320, 0..=240) => Some((320, 240, false)), - (0..=640, 0..=480) => Some((640, 480, false)), - (0..=800, 0..=600) => Some((800, 600, false)), - _ => None, - } - } - - /// Downloads a file's thumbnail. - /// - /// Here's an example on how it works: - /// - /// - Client requests an image with width=567, height=567 - /// - Server rounds that up to (800, 600), so it doesn't have to save too many thumbnails - /// - Server rounds that up again to (958, 600) to fix the aspect ratio (only for width,height>96) - /// - Server creates the thumbnail and sends it to the user - /// - /// For width,height <= 96 the server uses another thumbnailing algorithm which crops the image afterwards. 
- pub async fn get_thumbnail( - &self, - mxc: &str, - globals: &Globals, - width: u32, - height: u32, - ) -> Result> { - let (width, height, crop) = self - .thumbnail_properties(width, height) - .unwrap_or((0, 0, false)); // 0, 0 because that's the original file - - let mut main_prefix = mxc.as_bytes().to_vec(); - main_prefix.push(0xff); - - let mut thumbnail_prefix = main_prefix.clone(); - thumbnail_prefix.extend_from_slice(&width.to_be_bytes()); - thumbnail_prefix.extend_from_slice(&height.to_be_bytes()); - thumbnail_prefix.push(0xff); - - let mut original_prefix = main_prefix; - original_prefix.extend_from_slice(&0_u32.to_be_bytes()); // Width = 0 if it's not a thumbnail - original_prefix.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail - original_prefix.push(0xff); - - let first_thumbnailprefix = self.mediaid_file.scan_prefix(thumbnail_prefix).next(); - let first_originalprefix = self.mediaid_file.scan_prefix(original_prefix).next(); - if let Some((key, _)) = first_thumbnailprefix { - // Using saved thumbnail - let path = globals.get_media_file(&key); - let mut file = Vec::new(); - File::open(path).await?.read_to_end(&mut file).await?; - let mut parts = key.rsplit(|&b| b == 0xff); - - let content_type = parts - .next() - .map(|bytes| { - utils::string_from_bytes(bytes).map_err(|_| { - Error::bad_database("Content type in mediaid_file is invalid unicode.") - }) - }) - .transpose()?; - - let content_disposition_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?; - - let content_disposition = if content_disposition_bytes.is_empty() { - None - } else { - Some( - utils::string_from_bytes(content_disposition_bytes).map_err(|_| { - Error::bad_database("Content Disposition in db is invalid.") - })?, - ) - }; - - Ok(Some(FileMeta { - content_disposition, - content_type, - file: file.to_vec(), - })) - } else if let Some((key, _)) = first_originalprefix { - // Generate a thumbnail - let path = globals.get_media_file(&key); - let mut file = Vec::new(); - File::open(path).await?.read_to_end(&mut file).await?; + let (key, _) = self.mediaid_file.scan_prefix(prefix).next().ok_or(Error::NotFound)?; - let mut parts = key.rsplit(|&b| b == 0xff); + let mut parts = key.rsplit(|&b| b == 0xff); - let content_type = parts - .next() - .map(|bytes| { - utils::string_from_bytes(bytes).map_err(|_| { - Error::bad_database("Content type in mediaid_file is invalid unicode.") - }) + let content_type = parts + .next() + .map(|bytes| { + utils::string_from_bytes(bytes).map_err(|_| { + Error::bad_database("Content type in mediaid_file is invalid unicode.") }) - .transpose()?; - - let content_disposition_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?; - - let content_disposition = if content_disposition_bytes.is_empty() { - None - } else { - Some( - utils::string_from_bytes(content_disposition_bytes).map_err(|_| { - Error::bad_database( - "Content Disposition in mediaid_file is invalid unicode.", - ) - })?, - ) - }; - - if let Ok(image) = image::load_from_memory(&file) { - let original_width = image.width(); - let original_height = image.height(); - if width > original_width || height > original_height { - return Ok(Some(FileMeta { - content_disposition, - content_type, - file: file.to_vec(), - })); - } - - let thumbnail = if crop { - image.resize_to_fill(width, height, FilterType::CatmullRom) - } else { - let (exact_width, exact_height) = { - // Copied from image::dynimage::resize_dimensions - let ratio 
= u64::from(original_width) * u64::from(height); - let nratio = u64::from(width) * u64::from(original_height); - - let use_width = nratio <= ratio; - let intermediate = if use_width { - u64::from(original_height) * u64::from(width) - / u64::from(original_width) - } else { - u64::from(original_width) * u64::from(height) - / u64::from(original_height) - }; - if use_width { - if intermediate <= u64::from(::std::u32::MAX) { - (width, intermediate as u32) - } else { - ( - (u64::from(width) * u64::from(::std::u32::MAX) / intermediate) - as u32, - ::std::u32::MAX, - ) - } - } else if intermediate <= u64::from(::std::u32::MAX) { - (intermediate as u32, height) - } else { - ( - ::std::u32::MAX, - (u64::from(height) * u64::from(::std::u32::MAX) / intermediate) - as u32, - ) - } - }; - - image.thumbnail_exact(exact_width, exact_height) - }; - - let mut thumbnail_bytes = Vec::new(); - thumbnail.write_to(&mut thumbnail_bytes, image::ImageOutputFormat::Png)?; - - // Save thumbnail in database so we don't have to generate it again next time - let mut thumbnail_key = key.to_vec(); - let width_index = thumbnail_key - .iter() - .position(|&b| b == 0xff) - .ok_or_else(|| Error::bad_database("Media in db is invalid."))? - + 1; - let mut widthheight = width.to_be_bytes().to_vec(); - widthheight.extend_from_slice(&height.to_be_bytes()); - - thumbnail_key.splice( - width_index..width_index + 2 * mem::size_of::(), - widthheight, - ); - - let path = globals.get_media_file(&thumbnail_key); - let mut f = File::create(path).await?; - f.write_all(&thumbnail_bytes).await?; + }) + .transpose()?; - self.mediaid_file.insert(&thumbnail_key, &[])?; + let content_disposition_bytes = parts + .next() + .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?; - Ok(Some(FileMeta { - content_disposition, - content_type, - file: thumbnail_bytes.to_vec(), - })) - } else { - // Couldn't parse file to generate thumbnail, send original - Ok(Some(FileMeta { - content_disposition, - content_type, - file: file.to_vec(), - })) - } + let content_disposition = if content_disposition_bytes.is_empty() { + None } else { - Ok(None) - } + Some( + utils::string_from_bytes(content_disposition_bytes).map_err(|_| { + Error::bad_database( + "Content Disposition in mediaid_file is invalid unicode.", + ) + })?, + ) + }; + Ok((content_disposition, content_type, key)) } } diff --git a/src/database/key_value/mod.rs b/src/database/key_value/mod.rs index 189571f6..efb85509 100644 --- a/src/database/key_value/mod.rs +++ b/src/database/key_value/mod.rs @@ -1,9 +1,9 @@ -//mod account_data; +mod account_data; //mod admin; mod appservice; -//mod globals; -//mod key_backups; -//mod media; +mod globals; +mod key_backups; +mod media; //mod pdu; mod pusher; mod rooms; diff --git a/src/database/key_value/pusher.rs b/src/database/key_value/pusher.rs index b77170db..35c84638 100644 --- a/src/database/key_value/pusher.rs +++ b/src/database/key_value/pusher.rs @@ -1,6 +1,6 @@ use ruma::{UserId, api::client::push::{set_pusher, get_pushers}}; -use crate::{service, database::KeyValueDatabase, Error}; +use crate::{service, database::KeyValueDatabase, Error, Result}; impl service::pusher::Data for KeyValueDatabase { fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> { @@ -51,7 +51,7 @@ impl service::pusher::Data for KeyValueDatabase { fn get_pusher_senderkeys<'a>( &'a self, sender: &UserId, - ) -> impl Iterator> + 'a { + ) -> Box>> { let mut prefix = sender.as_bytes().to_vec(); prefix.push(0xff); diff --git 
a/src/database/key_value/rooms/alias.rs b/src/database/key_value/rooms/alias.rs index a9236a75..c762defa 100644 --- a/src/database/key_value/rooms/alias.rs +++ b/src/database/key_value/rooms/alias.rs @@ -1,12 +1,12 @@ use ruma::{RoomId, RoomAliasId, api::client::error::ErrorKind}; -use crate::{service, database::KeyValueDatabase, utils, Error, services}; +use crate::{service, database::KeyValueDatabase, utils, Error, services, Result}; impl service::rooms::alias::Data for KeyValueDatabase { fn set_alias( &self, alias: &RoomAliasId, - room_id: Option<&RoomId> + room_id: &RoomId ) -> Result<()> { self.alias_roomid .insert(alias.alias().as_bytes(), room_id.as_bytes())?; @@ -41,7 +41,7 @@ impl service::rooms::alias::Data for KeyValueDatabase { fn resolve_local_alias( &self, alias: &RoomAliasId - ) -> Result<()> { + ) -> Result>> { self.alias_roomid .get(alias.alias().as_bytes())? .map(|bytes| { @@ -56,7 +56,7 @@ impl service::rooms::alias::Data for KeyValueDatabase { fn local_aliases_for_room( &self, room_id: &RoomId, - ) -> Result<()> { + ) -> Result>> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/key_value/rooms/auth_chain.rs b/src/database/key_value/rooms/auth_chain.rs index 57dbb147..585d5626 100644 --- a/src/database/key_value/rooms/auth_chain.rs +++ b/src/database/key_value/rooms/auth_chain.rs @@ -1,5 +1,9 @@ -impl service::room::auth_chain::Data for KeyValueDatabase { - fn get_cached_eventid_authchain<'a>() -> Result> { +use std::{collections::HashSet, mem::size_of}; + +use crate::{service, database::KeyValueDatabase, Result, utils}; + +impl service::rooms::auth_chain::Data for KeyValueDatabase { + fn get_cached_eventid_authchain(&self, shorteventid: u64) -> Result> { self.shorteventid_authchain .get(&shorteventid.to_be_bytes())? 
.map(|chain| { @@ -12,8 +16,8 @@ impl service::room::auth_chain::Data for KeyValueDatabase { }) } - fn cache_eventid_authchain<'a>(shorteventid: u64, auth_chain: &HashSet) -> Result<()> { - shorteventid_authchain.insert( + fn cache_eventid_authchain(&self, shorteventid: u64, auth_chain: &HashSet) -> Result<()> { + self.shorteventid_authchain.insert( &shorteventid.to_be_bytes(), &auth_chain .iter() diff --git a/src/database/key_value/rooms/directory.rs b/src/database/key_value/rooms/directory.rs index 44a580c3..c48afa9a 100644 --- a/src/database/key_value/rooms/directory.rs +++ b/src/database/key_value/rooms/directory.rs @@ -1,6 +1,6 @@ use ruma::RoomId; -use crate::{service, database::KeyValueDatabase, utils, Error}; +use crate::{service, database::KeyValueDatabase, utils, Error, Result}; impl service::rooms::directory::Data for KeyValueDatabase { fn set_public(&self, room_id: &RoomId) -> Result<()> { @@ -15,7 +15,7 @@ impl service::rooms::directory::Data for KeyValueDatabase { Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) } - fn public_rooms(&self) -> impl Iterator>> + '_ { + fn public_rooms(&self) -> Box>>> { self.publicroomids.iter().map(|(bytes, _)| { RoomId::parse( utils::string_from_bytes(&bytes).map_err(|_| { diff --git a/src/database/key_value/rooms/edus/mod.rs b/src/database/key_value/rooms/edus/mod.rs index 9ffd33da..b5007f89 100644 --- a/src/database/key_value/rooms/edus/mod.rs +++ b/src/database/key_value/rooms/edus/mod.rs @@ -1,3 +1,7 @@ mod presence; mod typing; mod read_receipt; + +use crate::{service, database::KeyValueDatabase}; + +impl service::rooms::edus::Data for KeyValueDatabase {} diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs index 9f3977db..fbbbff55 100644 --- a/src/database/key_value/rooms/edus/presence.rs +++ b/src/database/key_value/rooms/edus/presence.rs @@ -2,7 +2,7 @@ use std::collections::HashMap; use ruma::{UserId, RoomId, events::presence::PresenceEvent, presence::PresenceState, UInt}; -use crate::{service, database::KeyValueDatabase, utils, Error, services}; +use crate::{service, database::KeyValueDatabase, utils, Error, services, Result}; impl service::rooms::edus::presence::Data for KeyValueDatabase { fn update_presence( @@ -56,8 +56,8 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { fn get_presence_event( &self, - user_id: &UserId, room_id: &RoomId, + user_id: &UserId, count: u64, ) -> Result> { let mut presence_id = room_id.as_bytes().to_vec(); diff --git a/src/database/key_value/rooms/edus/read_receipt.rs b/src/database/key_value/rooms/edus/read_receipt.rs index 68aea165..42d250f7 100644 --- a/src/database/key_value/rooms/edus/read_receipt.rs +++ b/src/database/key_value/rooms/edus/read_receipt.rs @@ -2,7 +2,7 @@ use std::mem; use ruma::{UserId, RoomId, events::receipt::ReceiptEvent, serde::Raw, signatures::CanonicalJsonObject}; -use crate::{database::KeyValueDatabase, service, utils, Error, services}; +use crate::{database::KeyValueDatabase, service, utils, Error, services, Result}; impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { fn readreceipt_update( @@ -50,13 +50,13 @@ impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { &'a self, room_id: &RoomId, since: u64, - ) -> impl Iterator< + ) -> Box, u64, Raw, )>, - > + 'a { + >> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); let prefix2 = prefix.clone(); diff --git a/src/database/key_value/rooms/edus/typing.rs b/src/database/key_value/rooms/edus/typing.rs 
index 905bffc8..b7d35968 100644 --- a/src/database/key_value/rooms/edus/typing.rs +++ b/src/database/key_value/rooms/edus/typing.rs @@ -2,7 +2,7 @@ use std::collections::HashSet; use ruma::{UserId, RoomId}; -use crate::{database::KeyValueDatabase, service, utils, Error, services}; +use crate::{database::KeyValueDatabase, service, utils, Error, services, Result}; impl service::rooms::edus::typing::Data for KeyValueDatabase { fn typing_add( @@ -79,7 +79,7 @@ impl service::rooms::edus::typing::Data for KeyValueDatabase { fn typings_all( &self, room_id: &RoomId, - ) -> Result> { + ) -> Result>> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/key_value/rooms/lazy_load.rs b/src/database/key_value/rooms/lazy_load.rs index c230cbf7..aaf14dd3 100644 --- a/src/database/key_value/rooms/lazy_load.rs +++ b/src/database/key_value/rooms/lazy_load.rs @@ -1,6 +1,6 @@ use ruma::{UserId, DeviceId, RoomId}; -use crate::{service, database::KeyValueDatabase}; +use crate::{service, database::KeyValueDatabase, Result}; impl service::rooms::lazy_loading::Data for KeyValueDatabase { fn lazy_load_was_sent_before( diff --git a/src/database/key_value/rooms/metadata.rs b/src/database/key_value/rooms/metadata.rs index b4cba2c6..0509cbb8 100644 --- a/src/database/key_value/rooms/metadata.rs +++ b/src/database/key_value/rooms/metadata.rs @@ -1,6 +1,6 @@ use ruma::RoomId; -use crate::{service, database::KeyValueDatabase}; +use crate::{service, database::KeyValueDatabase, Result}; impl service::rooms::metadata::Data for KeyValueDatabase { fn exists(&self, room_id: &RoomId) -> Result { diff --git a/src/database/key_value/rooms/mod.rs b/src/database/key_value/rooms/mod.rs index adb810ba..406943ed 100644 --- a/src/database/key_value/rooms/mod.rs +++ b/src/database/key_value/rooms/mod.rs @@ -1,16 +1,20 @@ mod alias; +mod auth_chain; mod directory; mod edus; -//mod event_handler; mod lazy_load; mod metadata; mod outlier; mod pdu_metadata; mod search; -//mod short; +mod short; mod state; mod state_accessor; mod state_cache; mod state_compressor; mod timeline; mod user; + +use crate::{database::KeyValueDatabase, service}; + +impl service::rooms::Data for KeyValueDatabase {} diff --git a/src/database/key_value/rooms/outlier.rs b/src/database/key_value/rooms/outlier.rs index 08299a0c..aa975449 100644 --- a/src/database/key_value/rooms/outlier.rs +++ b/src/database/key_value/rooms/outlier.rs @@ -1,6 +1,6 @@ use ruma::{EventId, signatures::CanonicalJsonObject}; -use crate::{service, database::KeyValueDatabase, PduEvent, Error}; +use crate::{service, database::KeyValueDatabase, PduEvent, Error, Result}; impl service::rooms::outlier::Data for KeyValueDatabase { fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { diff --git a/src/database/key_value/rooms/pdu_metadata.rs b/src/database/key_value/rooms/pdu_metadata.rs index 602f3f6c..f3ac414f 100644 --- a/src/database/key_value/rooms/pdu_metadata.rs +++ b/src/database/key_value/rooms/pdu_metadata.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use ruma::{RoomId, EventId}; -use crate::{service, database::KeyValueDatabase}; +use crate::{service, database::KeyValueDatabase, Result}; impl service::rooms::pdu_metadata::Data for KeyValueDatabase { fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { diff --git a/src/database/key_value/rooms/search.rs b/src/database/key_value/rooms/search.rs index 44663ff3..15937f6d 100644 --- a/src/database/key_value/rooms/search.rs +++ b/src/database/key_value/rooms/search.rs @@ -2,10 
+2,10 @@ use std::mem::size_of; use ruma::RoomId; -use crate::{service, database::KeyValueDatabase, utils}; +use crate::{service, database::KeyValueDatabase, utils, Result}; impl service::rooms::search::Data for KeyValueDatabase { - fn index_pdu<'a>(&self, room_id: &RoomId, pdu_id: u64, message_body: String) -> Result<()> { + fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: u64, message_body: String) -> Result<()> { let mut batch = message_body .split_terminator(|c: char| !c.is_alphanumeric()) .filter(|s| !s.is_empty()) @@ -26,7 +26,7 @@ impl service::rooms::search::Data for KeyValueDatabase { &'a self, room_id: &RoomId, search_string: &str, - ) -> Result> + 'a, Vec)>> { + ) -> Result>>, Vec)>> { let prefix = self .get_shortroomid(room_id)? .expect("room exists") diff --git a/src/database/key_value/rooms/short.rs b/src/database/key_value/rooms/short.rs new file mode 100644 index 00000000..91296385 --- /dev/null +++ b/src/database/key_value/rooms/short.rs @@ -0,0 +1,4 @@ +use crate::{database::KeyValueDatabase, service}; + +impl service::rooms::short::Data for KeyValueDatabase { +} diff --git a/src/database/key_value/rooms/state.rs b/src/database/key_value/rooms/state.rs index 192dbb83..405939dd 100644 --- a/src/database/key_value/rooms/state.rs +++ b/src/database/key_value/rooms/state.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use std::{sync::MutexGuard, collections::HashSet}; use std::fmt::Debug; -use crate::{service, database::KeyValueDatabase, utils, Error}; +use crate::{service, database::KeyValueDatabase, utils, Error, Result}; impl service::rooms::state::Data for KeyValueDatabase { fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { @@ -24,7 +24,7 @@ impl service::rooms::state::Data for KeyValueDatabase { Ok(()) } - fn set_event_state(&self, shorteventid: Vec, shortstatehash: Vec) -> Result<()> { + fn set_event_state(&self, shorteventid: u64, shortstatehash: u64) -> Result<()> { self.shorteventid_shortstatehash .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; Ok(()) diff --git a/src/database/key_value/rooms/state_accessor.rs b/src/database/key_value/rooms/state_accessor.rs index ea15afc0..037b98fc 100644 --- a/src/database/key_value/rooms/state_accessor.rs +++ b/src/database/key_value/rooms/state_accessor.rs @@ -1,6 +1,6 @@ use std::{collections::{BTreeMap, HashMap}, sync::Arc}; -use crate::{database::KeyValueDatabase, service, PduEvent, Error, utils}; +use crate::{database::KeyValueDatabase, service, PduEvent, Error, utils, Result}; use async_trait::async_trait; use ruma::{EventId, events::StateEventType, RoomId}; diff --git a/src/database/key_value/rooms/state_cache.rs b/src/database/key_value/rooms/state_cache.rs index 567dc809..5f054858 100644 --- a/src/database/key_value/rooms/state_cache.rs +++ b/src/database/key_value/rooms/state_cache.rs @@ -1,6 +1,6 @@ -use ruma::{UserId, RoomId}; +use ruma::{UserId, RoomId, events::{AnyStrippedStateEvent, AnySyncStateEvent}, serde::Raw}; -use crate::{service, database::KeyValueDatabase}; +use crate::{service, database::KeyValueDatabase, services, Result}; impl service::rooms::state_cache::Data for KeyValueDatabase { fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { @@ -9,4 +9,70 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { userroom_id.extend_from_slice(room_id.as_bytes()); self.roomuseroncejoinedids.insert(&userroom_id, &[]) } + + fn mark_as_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { + let mut roomuser_id = room_id.as_bytes().to_vec(); 
+ roomuser_id.push(0xff); + roomuser_id.extend_from_slice(user_id.as_bytes()); + + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + + self.userroomid_joined.insert(&userroom_id, &[])?; + self.roomuserid_joined.insert(&roomuser_id, &[])?; + self.userroomid_invitestate.remove(&userroom_id)?; + self.roomuserid_invitecount.remove(&roomuser_id)?; + self.userroomid_leftstate.remove(&userroom_id)?; + self.roomuserid_leftcount.remove(&roomuser_id)?; + + Ok(()) + } + + fn mark_as_invited(&self, user_id: &UserId, room_id: &RoomId, last_state: Option>>) -> Result<()> { + let mut roomuser_id = room_id.as_bytes().to_vec(); + roomuser_id.push(0xff); + roomuser_id.extend_from_slice(user_id.as_bytes()); + + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + + self.userroomid_invitestate.insert( + &userroom_id, + &serde_json::to_vec(&last_state.unwrap_or_default()) + .expect("state to bytes always works"), + )?; + self.roomuserid_invitecount + .insert(&roomuser_id, &services().globals.next_count()?.to_be_bytes())?; + self.userroomid_joined.remove(&userroom_id)?; + self.roomuserid_joined.remove(&roomuser_id)?; + self.userroomid_leftstate.remove(&userroom_id)?; + self.roomuserid_leftcount.remove(&roomuser_id)?; + + Ok(()) + } + + fn mark_as_left(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { + let mut roomuser_id = room_id.as_bytes().to_vec(); + roomuser_id.push(0xff); + roomuser_id.extend_from_slice(user_id.as_bytes()); + + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + + self.userroomid_leftstate.insert( + &userroom_id, + &serde_json::to_vec(&Vec::>::new()).unwrap(), + )?; // TODO + self.roomuserid_leftcount + .insert(&roomuser_id, &services().globals.next_count()?.to_be_bytes())?; + self.userroomid_joined.remove(&userroom_id)?; + self.roomuserid_joined.remove(&roomuser_id)?; + self.userroomid_invitestate.remove(&userroom_id)?; + self.roomuserid_invitecount.remove(&roomuser_id)?; + + Ok(()) + } } diff --git a/src/database/key_value/rooms/state_compressor.rs b/src/database/key_value/rooms/state_compressor.rs index 09e35660..23a7122b 100644 --- a/src/database/key_value/rooms/state_compressor.rs +++ b/src/database/key_value/rooms/state_compressor.rs @@ -1,6 +1,6 @@ use std::{collections::HashSet, mem::size_of}; -use crate::{service::{self, rooms::state_compressor::data::StateDiff}, database::KeyValueDatabase, Error, utils}; +use crate::{service::{self, rooms::state_compressor::data::StateDiff}, database::KeyValueDatabase, Error, utils, Result}; impl service::rooms::state_compressor::Data for KeyValueDatabase { fn get_statediff(&self, shortstatehash: u64) -> Result { diff --git a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs index cf93df12..c42509e0 100644 --- a/src/database/key_value/rooms/timeline.rs +++ b/src/database/key_value/rooms/timeline.rs @@ -3,7 +3,7 @@ use std::{collections::hash_map, mem::size_of, sync::Arc}; use ruma::{UserId, RoomId, api::client::error::ErrorKind, EventId, signatures::CanonicalJsonObject}; use tracing::error; -use crate::{service, database::KeyValueDatabase, utils, Error, PduEvent}; +use crate::{service, database::KeyValueDatabase, utils, Error, PduEvent, Result}; impl service::rooms::timeline::Data for KeyValueDatabase { fn last_timeline_count(&self, sender_user: &UserId, room_id: 
&RoomId) -> Result { @@ -190,7 +190,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { user_id: &UserId, room_id: &RoomId, since: u64, - ) -> Result, PduEvent)>> + 'a> { + ) -> Result, PduEvent)>>>> { let prefix = self .get_shortroomid(room_id)? .expect("room exists") @@ -224,7 +224,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { user_id: &UserId, room_id: &RoomId, until: u64, - ) -> Result, PduEvent)>> + 'a> { + ) -> Result, PduEvent)>>>> { // Create the first part of the full pdu id let prefix = self .get_shortroomid(room_id)? @@ -258,7 +258,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { user_id: &UserId, room_id: &RoomId, from: u64, - ) -> Result, PduEvent)>> + 'a> { + ) -> Result, PduEvent)>>>> { // Create the first part of the full pdu id let prefix = self .get_shortroomid(room_id)? diff --git a/src/database/key_value/rooms/user.rs b/src/database/key_value/rooms/user.rs index 2fc3b9f4..d49bc1d7 100644 --- a/src/database/key_value/rooms/user.rs +++ b/src/database/key_value/rooms/user.rs @@ -1,6 +1,6 @@ use ruma::{UserId, RoomId}; -use crate::{service, database::KeyValueDatabase, utils, Error}; +use crate::{service, database::KeyValueDatabase, utils, Error, Result}; impl service::rooms::user::Data for KeyValueDatabase { fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { @@ -78,7 +78,7 @@ impl service::rooms::user::Data for KeyValueDatabase { fn get_shared_rooms<'a>( &'a self, users: Vec>, - ) -> Result>> + 'a> { + ) -> Result>>>> { let iterators = users.into_iter().map(move |user_id| { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/key_value/transaction_ids.rs b/src/database/key_value/transaction_ids.rs index 6652a627..a63b3c5d 100644 --- a/src/database/key_value/transaction_ids.rs +++ b/src/database/key_value/transaction_ids.rs @@ -1,6 +1,6 @@ use ruma::{UserId, DeviceId, TransactionId}; -use crate::{service, database::KeyValueDatabase}; +use crate::{service, database::KeyValueDatabase, Result}; impl service::transaction_ids::Data for KeyValueDatabase { fn add_txnid( diff --git a/src/database/key_value/uiaa.rs b/src/database/key_value/uiaa.rs index b1960bd5..cf242dec 100644 --- a/src/database/key_value/uiaa.rs +++ b/src/database/key_value/uiaa.rs @@ -1,8 +1,6 @@ -use std::io::ErrorKind; +use ruma::{UserId, DeviceId, signatures::CanonicalJsonValue, api::client::{uiaa::UiaaInfo, error::ErrorKind}}; -use ruma::{UserId, DeviceId, signatures::CanonicalJsonValue, api::client::uiaa::UiaaInfo}; - -use crate::{database::KeyValueDatabase, service, Error}; +use crate::{database::KeyValueDatabase, service, Error, Result}; impl service::uiaa::Data for KeyValueDatabase { fn set_uiaa_request( diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs index ea844903..82e3bac6 100644 --- a/src/database/key_value/users.rs +++ b/src/database/key_value/users.rs @@ -3,7 +3,7 @@ use std::{mem::size_of, collections::BTreeMap}; use ruma::{api::client::{filter::IncomingFilterDefinition, error::ErrorKind, device::Device}, UserId, RoomAliasId, MxcUri, DeviceId, MilliSecondsSinceUnixEpoch, DeviceKeyId, encryption::{OneTimeKey, CrossSigningKey, DeviceKeys}, serde::Raw, events::{AnyToDeviceEvent, StateEventType}, DeviceKeyAlgorithm, UInt}; use tracing::warn; -use crate::{service::{self, users::clean_signatures}, database::KeyValueDatabase, Error, utils, services}; +use crate::{service::{self, users::clean_signatures}, database::KeyValueDatabase, Error, utils, services, 
Result}; impl service::users::Data for KeyValueDatabase { /// Check if a user has an account on this homeserver. @@ -56,7 +56,7 @@ impl service::users::Data for KeyValueDatabase { } /// Returns an iterator over all users on this homeserver. - fn iter(&self) -> impl Iterator>> + '_ { + fn iter(&self) -> Box>>> { self.userid_password.iter().map(|(bytes, _)| { UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("User ID in userid_password is invalid unicode.") @@ -270,7 +270,7 @@ impl service::users::Data for KeyValueDatabase { fn all_device_ids<'a>( &'a self, user_id: &UserId, - ) -> impl Iterator>> + 'a { + ) -> Box>>> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); // All devices have metadata @@ -608,7 +608,7 @@ impl service::users::Data for KeyValueDatabase { user_or_room_id: &str, from: u64, to: Option, - ) -> impl Iterator>> + 'a { + ) -> Box>>> { let mut prefix = user_or_room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -878,7 +878,7 @@ impl service::users::Data for KeyValueDatabase { fn all_devices_metadata<'a>( &'a self, user_id: &UserId, - ) -> impl Iterator> + 'a { + ) -> Box>> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); diff --git a/src/database/mod.rs b/src/database/mod.rs index 12758af2..4ea619a8 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -1,7 +1,7 @@ pub mod abstraction; pub mod key_value; -use crate::{utils, Config, Error, Result, service::{users, globals, uiaa, rooms, account_data, media, key_backups, transaction_ids, sending, admin::{self, create_admin_room}, appservice, pusher}}; +use crate::{utils, Config, Error, Result, service::{users, globals, uiaa, rooms, account_data, media, key_backups, transaction_ids, sending, appservice, pusher}}; use abstraction::KeyValueDatabaseEngine; use directories::ProjectDirs; use futures_util::{stream::FuturesUnordered, StreamExt}; @@ -253,7 +253,7 @@ impl KeyValueDatabase { let (admin_sender, admin_receiver) = mpsc::unbounded_channel(); let (sending_sender, sending_receiver) = mpsc::unbounded_channel(); - let db = Arc::new(TokioRwLock::from(Self { + let db = Self { _db: builder.clone(), userid_password: builder.open_tree("userid_password")?, userid_displayname: builder.open_tree("userid_displayname")?, @@ -345,10 +345,9 @@ impl KeyValueDatabase { senderkey_pusher: builder.open_tree("senderkey_pusher")?, global: builder.open_tree("global")?, server_signingkeys: builder.open_tree("server_signingkeys")?, - })); + }; // TODO: do this after constructing the db - let guard = db.read().await; // Matrix resource ownership is based on the server name; changing it // requires recreating the database from scratch. 
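The key construction seen throughout these key_value hunks (`prefix.push(0xff)` before appending the second identifier) follows one convention: two IDs joined by a 0xff separator byte, which never occurs in UTF-8 text, so prefix scans such as "all rooms of this user" stay unambiguous. A minimal sketch of that convention, using a hypothetical helper rather than Conduit's own code:

```rust
// Illustrative only: composite key "user_id 0xff room_id".
fn userroom_key(user_id: &str, room_id: &str) -> Vec<u8> {
    let mut key = user_id.as_bytes().to_vec();
    key.push(0xff); // separator byte that cannot appear inside either ID
    key.extend_from_slice(room_id.as_bytes());
    key
}

fn main() {
    let key = userroom_key("@alice:example.org", "!room:example.org");
    // A prefix scan over `@alice:example.org` + 0xff yields all of Alice's
    // per-room entries in such a tree.
    assert!(key.starts_with(b"@alice:example.org\xff"));
}
```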
diff --git a/src/lib.rs b/src/lib.rs index 0d058df3..c6e65697 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -13,7 +13,7 @@ mod service; pub mod api; mod utils; -use std::cell::Cell; +use std::{cell::Cell, sync::RwLock}; pub use config::Config; pub use utils::error::{Error, Result}; @@ -22,13 +22,13 @@ pub use api::ruma_wrapper::{Ruma, RumaResponse}; use crate::database::KeyValueDatabase; -pub static SERVICES: Cell> = Cell::new(None); +pub static SERVICES: RwLock> = RwLock::new(None); enum ServicesEnum { Rocksdb(Services) } -pub fn services() -> Services { - SERVICES.get().unwrap() +pub fn services() -> Services { + SERVICES.read().unwrap() } diff --git a/src/service/account_data/data.rs b/src/service/account_data/data.rs index 70ad9f2a..0f8e0bf5 100644 --- a/src/service/account_data/data.rs +++ b/src/service/account_data/data.rs @@ -1,145 +1,32 @@ -use crate::{utils, Error, Result}; -use ruma::{ - api::client::error::ErrorKind, - events::{AnyEphemeralRoomEvent, RoomAccountDataEventType}, - serde::Raw, - RoomId, UserId, -}; -use serde::{de::DeserializeOwned, Serialize}; -use std::{collections::HashMap, sync::Arc}; +use std::collections::HashMap; -impl AccountData { +use ruma::{UserId, RoomId, events::{RoomAccountDataEventType, AnyEphemeralRoomEvent}, serde::Raw}; +use serde::{Serialize, de::DeserializeOwned}; +use crate::Result; + +pub trait Data { /// Places one event in the account data of the user and removes the previous entry. - #[tracing::instrument(skip(self, room_id, user_id, event_type, data))] - pub fn update( + fn update( &self, room_id: Option<&RoomId>, user_id: &UserId, event_type: RoomAccountDataEventType, data: &T, - ) -> Result<()> { - let mut prefix = room_id - .map(|r| r.to_string()) - .unwrap_or_default() - .as_bytes() - .to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(user_id.as_bytes()); - prefix.push(0xff); - - let mut roomuserdataid = prefix.clone(); - roomuserdataid.extend_from_slice(&services().globals.next_count()?.to_be_bytes()); - roomuserdataid.push(0xff); - roomuserdataid.extend_from_slice(event_type.to_string().as_bytes()); - - let mut key = prefix; - key.extend_from_slice(event_type.to_string().as_bytes()); - - let json = serde_json::to_value(data).expect("all types here can be serialized"); // TODO: maybe add error handling - if json.get("type").is_none() || json.get("content").is_none() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Account data doesn't have all required fields.", - )); - } - - self.roomuserdataid_accountdata.insert( - &roomuserdataid, - &serde_json::to_vec(&json).expect("to_vec always works on json values"), - )?; - - let prev = self.roomusertype_roomuserdataid.get(&key)?; - - self.roomusertype_roomuserdataid - .insert(&key, &roomuserdataid)?; - - // Remove old entry - if let Some(prev) = prev { - self.roomuserdataid_accountdata.remove(&prev)?; - } - - Ok(()) - } + ) -> Result<()>; /// Searches the account data for a specific kind. - #[tracing::instrument(skip(self, room_id, user_id, kind))] - pub fn get( + fn get( &self, room_id: Option<&RoomId>, user_id: &UserId, kind: RoomAccountDataEventType, - ) -> Result> { - let mut key = room_id - .map(|r| r.to_string()) - .unwrap_or_default() - .as_bytes() - .to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(kind.to_string().as_bytes()); - - self.roomusertype_roomuserdataid - .get(&key)? 
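The lib.rs hunk above swaps the `SERVICES` static from a `Cell` to an `RwLock` and makes `services()` return a read guard. The generic parameters are missing from the text here, so the following is only a simplified sketch under the assumption of something along the lines of `RwLock<Option<Services>>`, with an owned struct standing in for Conduit's real `Services`:

```rust
use std::sync::{RwLock, RwLockReadGuard};

#[derive(Default)]
pub struct Services {
    pub server_name: String,
}

// Installed once at startup, read everywhere else.
pub static SERVICES: RwLock<Option<Services>> = RwLock::new(None);

// Read-only accessor; panics if called before the services were installed.
pub fn services() -> RwLockReadGuard<'static, Option<Services>> {
    SERVICES.read().unwrap()
}

fn main() {
    *SERVICES.write().unwrap() = Some(Services {
        server_name: "example.org".to_owned(),
    });
    let guard = services();
    assert_eq!(
        guard.as_ref().expect("services initialized").server_name,
        "example.org"
    );
}
```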
- .and_then(|roomuserdataid| { - self.roomuserdataid_accountdata - .get(&roomuserdataid) - .transpose() - }) - .transpose()? - .map(|data| { - serde_json::from_slice(&data) - .map_err(|_| Error::bad_database("could not deserialize")) - }) - .transpose() - } + ) -> Result>; /// Returns all changes to the account data that happened after `since`. - #[tracing::instrument(skip(self, room_id, user_id, since))] - pub fn changes_since( + fn changes_since( &self, room_id: Option<&RoomId>, user_id: &UserId, since: u64, - ) -> Result>> { - let mut userdata = HashMap::new(); - - let mut prefix = room_id - .map(|r| r.to_string()) - .unwrap_or_default() - .as_bytes() - .to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(user_id.as_bytes()); - prefix.push(0xff); - - // Skip the data that's exactly at since, because we sent that last time - let mut first_possible = prefix.clone(); - first_possible.extend_from_slice(&(since + 1).to_be_bytes()); - - for r in self - .roomuserdataid_accountdata - .iter_from(&first_possible, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(|(k, v)| { - Ok::<_, Error>(( - RoomAccountDataEventType::try_from( - utils::string_from_bytes(k.rsplit(|&b| b == 0xff).next().ok_or_else( - || Error::bad_database("RoomUserData ID in db is invalid."), - )?) - .map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?, - ) - .map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?, - serde_json::from_slice::>(&v).map_err(|_| { - Error::bad_database("Database contains invalid account data.") - })?, - )) - }) - { - let (kind, data) = r?; - userdata.insert(kind, data); - } - - Ok(userdata) - } + ) -> Result>>; } diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index 70ad9f2a..7a399223 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -1,14 +1,27 @@ -use crate::{utils, Error, Result}; +mod data; + +pub use data::Data; + use ruma::{ - api::client::error::ErrorKind, + api::client::{ + error::ErrorKind, + }, events::{AnyEphemeralRoomEvent, RoomAccountDataEventType}, serde::Raw, - RoomId, UserId, + signatures::CanonicalJsonValue, + DeviceId, RoomId, UserId, }; use serde::{de::DeserializeOwned, Serialize}; use std::{collections::HashMap, sync::Arc}; +use tracing::error; + +use crate::{service::*, services, utils, Error, Result}; + +pub struct Service { + db: D, +} -impl AccountData { +impl Service { /// Places one event in the account data of the user and removes the previous entry. 
#[tracing::instrument(skip(self, room_id, user_id, event_type, data))] pub fn update( diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index ded0adb9..dad4ceba 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -28,165 +28,10 @@ use ruma::{ use serde_json::value::to_raw_value; use tokio::sync::{mpsc, MutexGuard, RwLock, RwLockReadGuard}; -use crate::{services, Error, api::{server_server, client_server::AUTO_GEN_PASSWORD_LENGTH}, PduEvent, utils::{HtmlEscape, self}}; +use crate::{Result, services, Error, api::{server_server, client_server::AUTO_GEN_PASSWORD_LENGTH}, PduEvent, utils::{HtmlEscape, self}}; use super::pdu::PduBuilder; -#[derive(Debug)] -pub enum AdminRoomEvent { - ProcessMessage(String), - SendMessage(RoomMessageEventContent), -} - -#[derive(Clone)] -pub struct Admin { - pub sender: mpsc::UnboundedSender, -} - -impl Admin { - pub fn start_handler( - &self, - mut receiver: mpsc::UnboundedReceiver, - ) { - tokio::spawn(async move { - // TODO: Use futures when we have long admin commands - //let mut futures = FuturesUnordered::new(); - - let conduit_user = UserId::parse(format!("@conduit:{}", services().globals.server_name())) - .expect("@conduit:server_name is valid"); - - let conduit_room = services() - .rooms - .id_from_alias( - format!("#admins:{}", services().globals.server_name()) - .as_str() - .try_into() - .expect("#admins:server_name is a valid room alias"), - ) - .expect("Database data for admin room alias must be valid") - .expect("Admin room must exist"); - - let send_message = |message: RoomMessageEventContent, - mutex_lock: &MutexGuard<'_, ()>| { - services() - .rooms - .build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMessage, - content: to_raw_value(&message) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: None, - redacts: None, - }, - &conduit_user, - &conduit_room, - mutex_lock, - ) - .unwrap(); - }; - - loop { - tokio::select! 
{ - Some(event) = receiver.recv() => { - let message_content = match event { - AdminRoomEvent::SendMessage(content) => content, - AdminRoomEvent::ProcessMessage(room_message) => process_admin_message(room_message).await - }; - - let mutex_state = Arc::clone( - services().globals - .roomid_mutex_state - .write() - .unwrap() - .entry(conduit_room.clone()) - .or_default(), - ); - - let state_lock = mutex_state.lock().await; - - send_message(message_content, &state_lock); - - drop(state_lock); - } - } - } - }); - } - - pub fn process_message(&self, room_message: String) { - self.sender - .send(AdminRoomEvent::ProcessMessage(room_message)) - .unwrap(); - } - - pub fn send_message(&self, message_content: RoomMessageEventContent) { - self.sender - .send(AdminRoomEvent::SendMessage(message_content)) - .unwrap(); - } -} - -// Parse and process a message from the admin room -async fn process_admin_message(room_message: String) -> RoomMessageEventContent { - let mut lines = room_message.lines(); - let command_line = lines.next().expect("each string has at least one line"); - let body: Vec<_> = lines.collect(); - - let admin_command = match parse_admin_command(&command_line) { - Ok(command) => command, - Err(error) => { - let server_name = services().globals.server_name(); - let message = error - .to_string() - .replace("server.name", server_name.as_str()); - let html_message = usage_to_html(&message, server_name); - - return RoomMessageEventContent::text_html(message, html_message); - } - }; - - match process_admin_command(admin_command, body).await { - Ok(reply_message) => reply_message, - Err(error) => { - let markdown_message = format!( - "Encountered an error while handling the command:\n\ - ```\n{}\n```", - error, - ); - let html_message = format!( - "Encountered an error while handling the command:\n\ -
                <pre>\n{}\n</pre>
                ", - error, - ); - - RoomMessageEventContent::text_html(markdown_message, html_message) - } - } -} - -// Parse chat messages from the admin room into an AdminCommand object -fn parse_admin_command(command_line: &str) -> std::result::Result { - // Note: argv[0] is `@conduit:servername:`, which is treated as the main command - let mut argv: Vec<_> = command_line.split_whitespace().collect(); - - // Replace `help command` with `command --help` - // Clap has a help subcommand, but it omits the long help description. - if argv.len() > 1 && argv[1] == "help" { - argv.remove(1); - argv.push("--help"); - } - - // Backwards compatibility with `register_appservice`-style commands - let command_with_dashes; - if argv.len() > 1 && argv[1].contains("_") { - command_with_dashes = argv[1].replace("_", "-"); - argv[1] = &command_with_dashes; - } - - AdminCommand::try_parse_from(argv).map_err(|error| error.to_string()) -} - #[derive(Parser)] #[clap(name = "@conduit:server.name:", version = env!("CARGO_PKG_VERSION"))] enum AdminCommand { @@ -308,801 +153,959 @@ enum AdminCommand { EnableRoom { room_id: Box }, } -async fn process_admin_command( - command: AdminCommand, - body: Vec<&str>, -) -> Result { - let reply_message_content = match command { - AdminCommand::RegisterAppservice => { - if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" { - let appservice_config = body[1..body.len() - 1].join("\n"); - let parsed_config = serde_yaml::from_str::(&appservice_config); - match parsed_config { - Ok(yaml) => match services().appservice.register_appservice(yaml) { - Ok(id) => RoomMessageEventContent::text_plain(format!( - "Appservice registered with ID: {}.", - id - )), + +#[derive(Debug)] +pub enum AdminRoomEvent { + ProcessMessage(String), + SendMessage(RoomMessageEventContent), +} + +#[derive(Clone)] +pub struct Service { + pub sender: mpsc::UnboundedSender, +} + +impl Service { + pub fn start_handler( + &self, + mut receiver: mpsc::UnboundedReceiver, + ) { + tokio::spawn(async move { + // TODO: Use futures when we have long admin commands + //let mut futures = FuturesUnordered::new(); + + let conduit_user = UserId::parse(format!("@conduit:{}", services().globals.server_name())) + .expect("@conduit:server_name is valid"); + + let conduit_room = services() + .rooms + .id_from_alias( + format!("#admins:{}", services().globals.server_name()) + .as_str() + .try_into() + .expect("#admins:server_name is a valid room alias"), + ) + .expect("Database data for admin room alias must be valid") + .expect("Admin room must exist"); + + let send_message = |message: RoomMessageEventContent, + mutex_lock: &MutexGuard<'_, ()>| { + services() + .rooms + .build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomMessage, + content: to_raw_value(&message) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: None, + redacts: None, + }, + &conduit_user, + &conduit_room, + mutex_lock, + ) + .unwrap(); + }; + + loop { + tokio::select! 
{ + Some(event) = receiver.recv() => { + let message_content = match event { + AdminRoomEvent::SendMessage(content) => content, + AdminRoomEvent::ProcessMessage(room_message) => process_admin_message(room_message).await + }; + + let mutex_state = Arc::clone( + services().globals + .roomid_mutex_state + .write() + .unwrap() + .entry(conduit_room.clone()) + .or_default(), + ); + + let state_lock = mutex_state.lock().await; + + send_message(message_content, &state_lock); + + drop(state_lock); + } + } + } + }); + } + + pub fn process_message(&self, room_message: String) { + self.sender + .send(AdminRoomEvent::ProcessMessage(room_message)) + .unwrap(); + } + + pub fn send_message(&self, message_content: RoomMessageEventContent) { + self.sender + .send(AdminRoomEvent::SendMessage(message_content)) + .unwrap(); + } + + // Parse and process a message from the admin room + async fn process_admin_message(&self, room_message: String) -> RoomMessageEventContent { + let mut lines = room_message.lines(); + let command_line = lines.next().expect("each string has at least one line"); + let body: Vec<_> = lines.collect(); + + let admin_command = match parse_admin_command(&command_line) { + Ok(command) => command, + Err(error) => { + let server_name = services().globals.server_name(); + let message = error + .to_string() + .replace("server.name", server_name.as_str()); + let html_message = usage_to_html(&message, server_name); + + return RoomMessageEventContent::text_html(message, html_message); + } + }; + + match process_admin_command(admin_command, body).await { + Ok(reply_message) => reply_message, + Err(error) => { + let markdown_message = format!( + "Encountered an error while handling the command:\n\ + ```\n{}\n```", + error, + ); + let html_message = format!( + "Encountered an error while handling the command:\n\ +
                <pre>\n{}\n</pre>
                ", + error, + ); + + RoomMessageEventContent::text_html(markdown_message, html_message) + } + } + } + + // Parse chat messages from the admin room into an AdminCommand object + fn parse_admin_command(&self, command_line: &str) -> std::result::Result { + // Note: argv[0] is `@conduit:servername:`, which is treated as the main command + let mut argv: Vec<_> = command_line.split_whitespace().collect(); + + // Replace `help command` with `command --help` + // Clap has a help subcommand, but it omits the long help description. + if argv.len() > 1 && argv[1] == "help" { + argv.remove(1); + argv.push("--help"); + } + + // Backwards compatibility with `register_appservice`-style commands + let command_with_dashes; + if argv.len() > 1 && argv[1].contains("_") { + command_with_dashes = argv[1].replace("_", "-"); + argv[1] = &command_with_dashes; + } + + AdminCommand::try_parse_from(argv).map_err(|error| error.to_string()) + } + + async fn process_admin_command( + &self, + command: AdminCommand, + body: Vec<&str>, + ) -> Result { + let reply_message_content = match command { + AdminCommand::RegisterAppservice => { + if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" { + let appservice_config = body[1..body.len() - 1].join("\n"); + let parsed_config = serde_yaml::from_str::(&appservice_config); + match parsed_config { + Ok(yaml) => match services().appservice.register_appservice(yaml) { + Ok(id) => RoomMessageEventContent::text_plain(format!( + "Appservice registered with ID: {}.", + id + )), + Err(e) => RoomMessageEventContent::text_plain(format!( + "Failed to register appservice: {}", + e + )), + }, Err(e) => RoomMessageEventContent::text_plain(format!( - "Failed to register appservice: {}", + "Could not parse appservice config: {}", e )), - }, - Err(e) => RoomMessageEventContent::text_plain(format!( - "Could not parse appservice config: {}", - e - )), + } + } else { + RoomMessageEventContent::text_plain( + "Expected code block in command body. Add --help for details.", + ) } - } else { - RoomMessageEventContent::text_plain( - "Expected code block in command body. 
Add --help for details.", - ) } - } - AdminCommand::UnregisterAppservice { - appservice_identifier, - } => match services().appservice.unregister_appservice(&appservice_identifier) { - Ok(()) => RoomMessageEventContent::text_plain("Appservice unregistered."), - Err(e) => RoomMessageEventContent::text_plain(format!( - "Failed to unregister appservice: {}", - e - )), - }, - AdminCommand::ListAppservices => { - if let Ok(appservices) = services().appservice.iter_ids().map(|ids| ids.collect::>()) { - let count = appservices.len(); + AdminCommand::UnregisterAppservice { + appservice_identifier, + } => match services().appservice.unregister_appservice(&appservice_identifier) { + Ok(()) => RoomMessageEventContent::text_plain("Appservice unregistered."), + Err(e) => RoomMessageEventContent::text_plain(format!( + "Failed to unregister appservice: {}", + e + )), + }, + AdminCommand::ListAppservices => { + if let Ok(appservices) = services().appservice.iter_ids().map(|ids| ids.collect::>()) { + let count = appservices.len(); + let output = format!( + "Appservices ({}): {}", + count, + appservices + .into_iter() + .filter_map(|r| r.ok()) + .collect::>() + .join(", ") + ); + RoomMessageEventContent::text_plain(output) + } else { + RoomMessageEventContent::text_plain("Failed to get appservices.") + } + } + AdminCommand::ListRooms => { + let room_ids = services().rooms.iter_ids(); let output = format!( - "Appservices ({}): {}", - count, - appservices - .into_iter() + "Rooms:\n{}", + room_ids .filter_map(|r| r.ok()) + .map(|id| id.to_string() + + "\tMembers: " + + &services() + .rooms + .room_joined_count(&id) + .ok() + .flatten() + .unwrap_or(0) + .to_string()) .collect::>() - .join(", ") + .join("\n") ); RoomMessageEventContent::text_plain(output) - } else { - RoomMessageEventContent::text_plain("Failed to get appservices.") } - } - AdminCommand::ListRooms => { - let room_ids = services().rooms.iter_ids(); - let output = format!( - "Rooms:\n{}", - room_ids - .filter_map(|r| r.ok()) - .map(|id| id.to_string() - + "\tMembers: " - + &services() - .rooms - .room_joined_count(&id) - .ok() - .flatten() - .unwrap_or(0) - .to_string()) - .collect::>() - .join("\n") - ); - RoomMessageEventContent::text_plain(output) - } - AdminCommand::ListLocalUsers => match services().users.list_local_users() { - Ok(users) => { - let mut msg: String = format!("Found {} local user account(s):\n", users.len()); - msg += &users.join("\n"); + AdminCommand::ListLocalUsers => match services().users.list_local_users() { + Ok(users) => { + let mut msg: String = format!("Found {} local user account(s):\n", users.len()); + msg += &users.join("\n"); + RoomMessageEventContent::text_plain(&msg) + } + Err(e) => RoomMessageEventContent::text_plain(e.to_string()), + }, + AdminCommand::IncomingFederation => { + let map = services().globals.roomid_federationhandletime.read().unwrap(); + let mut msg: String = format!("Handling {} incoming pdus:\n", map.len()); + + for (r, (e, i)) in map.iter() { + let elapsed = i.elapsed(); + msg += &format!( + "{} {}: {}m{}s\n", + r, + e, + elapsed.as_secs() / 60, + elapsed.as_secs() % 60 + ); + } RoomMessageEventContent::text_plain(&msg) } - Err(e) => RoomMessageEventContent::text_plain(e.to_string()), - }, - AdminCommand::IncomingFederation => { - let map = services().globals.roomid_federationhandletime.read().unwrap(); - let mut msg: String = format!("Handling {} incoming pdus:\n", map.len()); - - for (r, (e, i)) in map.iter() { - let elapsed = i.elapsed(); - msg += &format!( - "{} {}: {}m{}s\n", - r, - e, - 
elapsed.as_secs() / 60, - elapsed.as_secs() % 60 - ); - } - RoomMessageEventContent::text_plain(&msg) - } - AdminCommand::GetAuthChain { event_id } => { - let event_id = Arc::::from(event_id); - if let Some(event) = services().rooms.get_pdu_json(&event_id)? { - let room_id_str = event - .get("room_id") - .and_then(|val| val.as_str()) - .ok_or_else(|| Error::bad_database("Invalid event in database"))?; - - let room_id = <&RoomId>::try_from(room_id_str).map_err(|_| { - Error::bad_database("Invalid room id field in event in database") - })?; - let start = Instant::now(); - let count = server_server::get_auth_chain(room_id, vec![event_id]) - .await? - .count(); - let elapsed = start.elapsed(); - RoomMessageEventContent::text_plain(format!( - "Loaded auth chain with length {} in {:?}", - count, elapsed - )) - } else { - RoomMessageEventContent::text_plain("Event not found.") + AdminCommand::GetAuthChain { event_id } => { + let event_id = Arc::::from(event_id); + if let Some(event) = services().rooms.get_pdu_json(&event_id)? { + let room_id_str = event + .get("room_id") + .and_then(|val| val.as_str()) + .ok_or_else(|| Error::bad_database("Invalid event in database"))?; + + let room_id = <&RoomId>::try_from(room_id_str).map_err(|_| { + Error::bad_database("Invalid room id field in event in database") + })?; + let start = Instant::now(); + let count = server_server::get_auth_chain(room_id, vec![event_id]) + .await? + .count(); + let elapsed = start.elapsed(); + RoomMessageEventContent::text_plain(format!( + "Loaded auth chain with length {} in {:?}", + count, elapsed + )) + } else { + RoomMessageEventContent::text_plain("Event not found.") + } } - } - AdminCommand::ParsePdu => { - if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" { - let string = body[1..body.len() - 1].join("\n"); - match serde_json::from_str(&string) { - Ok(value) => { - match ruma::signatures::reference_hash(&value, &RoomVersionId::V6) { - Ok(hash) => { - let event_id = EventId::parse(format!("${}", hash)); - - match serde_json::from_value::( - serde_json::to_value(value).expect("value is json"), - ) { - Ok(pdu) => RoomMessageEventContent::text_plain(format!( - "EventId: {:?}\n{:#?}", - event_id, pdu - )), - Err(e) => RoomMessageEventContent::text_plain(format!( - "EventId: {:?}\nCould not parse event: {}", - event_id, e - )), + AdminCommand::ParsePdu => { + if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" { + let string = body[1..body.len() - 1].join("\n"); + match serde_json::from_str(&string) { + Ok(value) => { + match ruma::signatures::reference_hash(&value, &RoomVersionId::V6) { + Ok(hash) => { + let event_id = EventId::parse(format!("${}", hash)); + + match serde_json::from_value::( + serde_json::to_value(value).expect("value is json"), + ) { + Ok(pdu) => RoomMessageEventContent::text_plain(format!( + "EventId: {:?}\n{:#?}", + event_id, pdu + )), + Err(e) => RoomMessageEventContent::text_plain(format!( + "EventId: {:?}\nCould not parse event: {}", + event_id, e + )), + } } + Err(e) => RoomMessageEventContent::text_plain(format!( + "Could not parse PDU JSON: {:?}", + e + )), } - Err(e) => RoomMessageEventContent::text_plain(format!( - "Could not parse PDU JSON: {:?}", - e - )), } + Err(e) => RoomMessageEventContent::text_plain(format!( + "Invalid json in command body: {}", + e + )), } - Err(e) => RoomMessageEventContent::text_plain(format!( - "Invalid json in command body: {}", - e - )), + } else { + RoomMessageEventContent::text_plain("Expected code 
block in command body.") } - } else { - RoomMessageEventContent::text_plain("Expected code block in command body.") - } - } - AdminCommand::GetPdu { event_id } => { - let mut outlier = false; - let mut pdu_json = services().rooms.get_non_outlier_pdu_json(&event_id)?; - if pdu_json.is_none() { - outlier = true; - pdu_json = services().rooms.get_pdu_json(&event_id)?; } - match pdu_json { - Some(json) => { - let json_text = - serde_json::to_string_pretty(&json).expect("canonical json is valid json"); - RoomMessageEventContent::text_html( - format!( - "{}\n```json\n{}\n```", - if outlier { - "PDU is outlier" - } else { - "PDU was accepted" - }, - json_text - ), - format!( - "

                {}

                \n
                {}\n
                \n", - if outlier { - "PDU is outlier" - } else { - "PDU was accepted" - }, - HtmlEscape(&json_text) - ), - ) + AdminCommand::GetPdu { event_id } => { + let mut outlier = false; + let mut pdu_json = services().rooms.get_non_outlier_pdu_json(&event_id)?; + if pdu_json.is_none() { + outlier = true; + pdu_json = services().rooms.get_pdu_json(&event_id)?; } - None => RoomMessageEventContent::text_plain("PDU not found."), - } - } - AdminCommand::DatabaseMemoryUsage => match services()._db.memory_usage() { - Ok(response) => RoomMessageEventContent::text_plain(response), - Err(e) => RoomMessageEventContent::text_plain(format!( - "Failed to get database memory usage: {}", - e - )), - }, - AdminCommand::ShowConfig => { - // Construct and send the response - RoomMessageEventContent::text_plain(format!("{}", services().globals.config)) - } - AdminCommand::ResetPassword { username } => { - let user_id = match UserId::parse_with_server_name( - username.as_str().to_lowercase(), - services().globals.server_name(), - ) { - Ok(id) => id, - Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "The supplied username is not a valid username: {}", - e - ))) + match pdu_json { + Some(json) => { + let json_text = + serde_json::to_string_pretty(&json).expect("canonical json is valid json"); + RoomMessageEventContent::text_html( + format!( + "{}\n```json\n{}\n```", + if outlier { + "PDU is outlier" + } else { + "PDU was accepted" + }, + json_text + ), + format!( + "

                {}

                \n
                {}\n
                \n", + if outlier { + "PDU is outlier" + } else { + "PDU was accepted" + }, + HtmlEscape(&json_text) + ), + ) + } + None => RoomMessageEventContent::text_plain("PDU not found."), } - }; - - // Check if the specified user is valid - if !services().users.exists(&user_id)? - || services().users.is_deactivated(&user_id)? - || user_id - == UserId::parse_with_server_name("conduit", services().globals.server_name()) - .expect("conduit user exists") - { - return Ok(RoomMessageEventContent::text_plain( - "The specified user does not exist or is deactivated!", - )); } - - let new_password = utils::random_string(AUTO_GEN_PASSWORD_LENGTH); - - match services().users.set_password(&user_id, Some(new_password.as_str())) { - Ok(()) => RoomMessageEventContent::text_plain(format!( - "Successfully reset the password for user {}: {}", - user_id, new_password - )), + AdminCommand::DatabaseMemoryUsage => match services()._db.memory_usage() { + Ok(response) => RoomMessageEventContent::text_plain(response), Err(e) => RoomMessageEventContent::text_plain(format!( - "Couldn't reset the password for user {}: {}", - user_id, e + "Failed to get database memory usage: {}", + e )), + }, + AdminCommand::ShowConfig => { + // Construct and send the response + RoomMessageEventContent::text_plain(format!("{}", services().globals.config)) } - } - AdminCommand::CreateUser { username, password } => { - let password = password.unwrap_or(utils::random_string(AUTO_GEN_PASSWORD_LENGTH)); - // Validate user id - let user_id = match UserId::parse_with_server_name( - username.as_str().to_lowercase(), - services().globals.server_name(), - ) { - Ok(id) => id, - Err(e) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "The supplied username is not a valid username: {}", - e - ))) + AdminCommand::ResetPassword { username } => { + let user_id = match UserId::parse_with_server_name( + username.as_str().to_lowercase(), + services().globals.server_name(), + ) { + Ok(id) => id, + Err(e) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "The supplied username is not a valid username: {}", + e + ))) + } + }; + + // Check if the specified user is valid + if !services().users.exists(&user_id)? + || services().users.is_deactivated(&user_id)? + || user_id + == UserId::parse_with_server_name("conduit", services().globals.server_name()) + .expect("conduit user exists") + { + return Ok(RoomMessageEventContent::text_plain( + "The specified user does not exist or is deactivated!", + )); + } + + let new_password = utils::random_string(AUTO_GEN_PASSWORD_LENGTH); + + match services().users.set_password(&user_id, Some(new_password.as_str())) { + Ok(()) => RoomMessageEventContent::text_plain(format!( + "Successfully reset the password for user {}: {}", + user_id, new_password + )), + Err(e) => RoomMessageEventContent::text_plain(format!( + "Couldn't reset the password for user {}: {}", + user_id, e + )), } - }; - if user_id.is_historical() { - return Ok(RoomMessageEventContent::text_plain(format!( - "userid {user_id} is not allowed due to historical" - ))); - } - if services().users.exists(&user_id)? 
{ - return Ok(RoomMessageEventContent::text_plain(format!( - "userid {user_id} already exists" - ))); } - // Create user - services().users.create(&user_id, Some(password.as_str()))?; - - // Default to pretty displayname - let displayname = format!("{} ⚡️", user_id.localpart()); - services().users - .set_displayname(&user_id, Some(displayname.clone()))?; - - // Initial account data - services().account_data.update( - None, - &user_id, - ruma::events::GlobalAccountDataEventType::PushRules - .to_string() - .into(), - &ruma::events::push_rules::PushRulesEvent { - content: ruma::events::push_rules::PushRulesEventContent { - global: ruma::push::Ruleset::server_default(&user_id), + AdminCommand::CreateUser { username, password } => { + let password = password.unwrap_or(utils::random_string(AUTO_GEN_PASSWORD_LENGTH)); + // Validate user id + let user_id = match UserId::parse_with_server_name( + username.as_str().to_lowercase(), + services().globals.server_name(), + ) { + Ok(id) => id, + Err(e) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "The supplied username is not a valid username: {}", + e + ))) + } + }; + if user_id.is_historical() { + return Ok(RoomMessageEventContent::text_plain(format!( + "userid {user_id} is not allowed due to historical" + ))); + } + if services().users.exists(&user_id)? { + return Ok(RoomMessageEventContent::text_plain(format!( + "userid {user_id} already exists" + ))); + } + // Create user + services().users.create(&user_id, Some(password.as_str()))?; + + // Default to pretty displayname + let displayname = format!("{} ⚡️", user_id.localpart()); + services().users + .set_displayname(&user_id, Some(displayname.clone()))?; + + // Initial account data + services().account_data.update( + None, + &user_id, + ruma::events::GlobalAccountDataEventType::PushRules + .to_string() + .into(), + &ruma::events::push_rules::PushRulesEvent { + content: ruma::events::push_rules::PushRulesEventContent { + global: ruma::push::Ruleset::server_default(&user_id), + }, }, - }, - )?; + )?; - // we dont add a device since we're not the user, just the creator + // we dont add a device since we're not the user, just the creator - // Inhibit login does not work for guests - RoomMessageEventContent::text_plain(format!( - "Created user with user_id: {user_id} and password: {password}" - )) - } - AdminCommand::DisableRoom { room_id } => { - services().rooms.disabledroomids.insert(room_id.as_bytes(), &[])?; - RoomMessageEventContent::text_plain("Room disabled.") - } - AdminCommand::EnableRoom { room_id } => { - services().rooms.disabledroomids.remove(room_id.as_bytes())?; - RoomMessageEventContent::text_plain("Room enabled.") - } - AdminCommand::DeactivateUser { - leave_rooms, - user_id, - } => { - let user_id = Arc::::from(user_id); - if services().users.exists(&user_id)? { + // Inhibit login does not work for guests RoomMessageEventContent::text_plain(format!( - "Making {} leave all rooms before deactivation...", - user_id - )); + "Created user with user_id: {user_id} and password: {password}" + )) + } + AdminCommand::DisableRoom { room_id } => { + services().rooms.disabledroomids.insert(room_id.as_bytes(), &[])?; + RoomMessageEventContent::text_plain("Room disabled.") + } + AdminCommand::EnableRoom { room_id } => { + services().rooms.disabledroomids.remove(room_id.as_bytes())?; + RoomMessageEventContent::text_plain("Room enabled.") + } + AdminCommand::DeactivateUser { + leave_rooms, + user_id, + } => { + let user_id = Arc::::from(user_id); + if services().users.exists(&user_id)? 
{ + RoomMessageEventContent::text_plain(format!( + "Making {} leave all rooms before deactivation...", + user_id + )); - services().users.deactivate_account(&user_id)?; + services().users.deactivate_account(&user_id)?; - if leave_rooms { - services().rooms.leave_all_rooms(&user_id).await?; - } + if leave_rooms { + services().rooms.leave_all_rooms(&user_id).await?; + } - RoomMessageEventContent::text_plain(format!( - "User {} has been deactivated", - user_id - )) - } else { - RoomMessageEventContent::text_plain(format!( - "User {} doesn't exist on this server", - user_id - )) + RoomMessageEventContent::text_plain(format!( + "User {} has been deactivated", + user_id + )) + } else { + RoomMessageEventContent::text_plain(format!( + "User {} doesn't exist on this server", + user_id + )) + } } - } - AdminCommand::DeactivateAll { leave_rooms, force } => { - if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" { - let usernames = body.clone().drain(1..body.len() - 1).collect::>(); - - let mut user_ids: Vec<&UserId> = Vec::new(); - - for &username in &usernames { - match <&UserId>::try_from(username) { - Ok(user_id) => user_ids.push(user_id), - Err(_) => { - return Ok(RoomMessageEventContent::text_plain(format!( - "{} is not a valid username", - username - ))) + AdminCommand::DeactivateAll { leave_rooms, force } => { + if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" { + let usernames = body.clone().drain(1..body.len() - 1).collect::>(); + + let mut user_ids: Vec<&UserId> = Vec::new(); + + for &username in &usernames { + match <&UserId>::try_from(username) { + Ok(user_id) => user_ids.push(user_id), + Err(_) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "{} is not a valid username", + username + ))) + } } } - } - let mut deactivation_count = 0; - let mut admins = Vec::new(); + let mut deactivation_count = 0; + let mut admins = Vec::new(); + + if !force { + user_ids.retain(|&user_id| { + match services().users.is_admin(user_id) { + Ok(is_admin) => match is_admin { + true => { + admins.push(user_id.localpart()); + false + } + false => true, + }, + Err(_) => false, + } + }) + } - if !force { - user_ids.retain(|&user_id| { - match services().users.is_admin(user_id) { - Ok(is_admin) => match is_admin { - true => { - admins.push(user_id.localpart()); - false - } - false => true, - }, - Err(_) => false, + for &user_id in &user_ids { + match services().users.deactivate_account(user_id) { + Ok(_) => deactivation_count += 1, + Err(_) => {} } - }) - } - - for &user_id in &user_ids { - match services().users.deactivate_account(user_id) { - Ok(_) => deactivation_count += 1, - Err(_) => {} } - } - if leave_rooms { - for &user_id in &user_ids { - let _ = services().rooms.leave_all_rooms(user_id).await; + if leave_rooms { + for &user_id in &user_ids { + let _ = services().rooms.leave_all_rooms(user_id).await; + } } - } - if admins.is_empty() { - RoomMessageEventContent::text_plain(format!( - "Deactivated {} accounts.", - deactivation_count - )) + if admins.is_empty() { + RoomMessageEventContent::text_plain(format!( + "Deactivated {} accounts.", + deactivation_count + )) + } else { + RoomMessageEventContent::text_plain(format!("Deactivated {} accounts.\nSkipped admin accounts: {:?}. Use --force to deactivate admin accounts", deactivation_count, admins.join(", "))) + } } else { - RoomMessageEventContent::text_plain(format!("Deactivated {} accounts.\nSkipped admin accounts: {:?}. 
Use --force to deactivate admin accounts", deactivation_count, admins.join(", "))) + RoomMessageEventContent::text_plain( + "Expected code block in command body. Add --help for details.", + ) } - } else { - RoomMessageEventContent::text_plain( - "Expected code block in command body. Add --help for details.", - ) } - } - }; + }; - Ok(reply_message_content) -} + Ok(reply_message_content) + } -// Utility to turn clap's `--help` text to HTML. -fn usage_to_html(text: &str, server_name: &ServerName) -> String { - // Replace `@conduit:servername:-subcmdname` with `@conduit:servername: subcmdname` - let text = text.replace( - &format!("@conduit:{}:-", server_name), - &format!("@conduit:{}: ", server_name), - ); - - // For the conduit admin room, subcommands become main commands - let text = text.replace("SUBCOMMAND", "COMMAND"); - let text = text.replace("subcommand", "command"); - - // Escape option names (e.g. ``) since they look like HTML tags - let text = text.replace("<", "<").replace(">", ">"); - - // Italicize the first line (command name and version text) - let re = Regex::new("^(.*?)\n").expect("Regex compilation should not fail"); - let text = re.replace_all(&text, "$1\n"); - - // Unmerge wrapped lines - let text = text.replace("\n ", " "); - - // Wrap option names in backticks. The lines look like: - // -V, --version Prints version information - // And are converted to: - // -V, --version: Prints version information - // (?m) enables multi-line mode for ^ and $ - let re = Regex::new("(?m)^ (([a-zA-Z_&;-]+(, )?)+) +(.*)$") - .expect("Regex compilation should not fail"); - let text = re.replace_all(&text, "$1: $4"); - - // Look for a `[commandbody]` tag. If it exists, use all lines below it that - // start with a `#` in the USAGE section. - let mut text_lines: Vec<&str> = text.lines().collect(); - let mut command_body = String::new(); - - if let Some(line_index) = text_lines.iter().position(|line| *line == "[commandbody]") { - text_lines.remove(line_index); - - while text_lines - .get(line_index) - .map(|line| line.starts_with("#")) - .unwrap_or(false) - { - command_body += if text_lines[line_index].starts_with("# ") { - &text_lines[line_index][2..] - } else { - &text_lines[line_index][1..] - }; - command_body += "[nobr]\n"; + // Utility to turn clap's `--help` text to HTML. + fn usage_to_html(&self, text: &str, server_name: &ServerName) -> String { + // Replace `@conduit:servername:-subcmdname` with `@conduit:servername: subcmdname` + let text = text.replace( + &format!("@conduit:{}:-", server_name), + &format!("@conduit:{}: ", server_name), + ); + + // For the conduit admin room, subcommands become main commands + let text = text.replace("SUBCOMMAND", "COMMAND"); + let text = text.replace("subcommand", "command"); + + // Escape option names (e.g. ``) since they look like HTML tags + let text = text.replace("<", "<").replace(">", ">"); + + // Italicize the first line (command name and version text) + let re = Regex::new("^(.*?)\n").expect("Regex compilation should not fail"); + let text = re.replace_all(&text, "$1\n"); + + // Unmerge wrapped lines + let text = text.replace("\n ", " "); + + // Wrap option names in backticks. 
The lines look like: + // -V, --version Prints version information + // And are converted to: + // -V, --version: Prints version information + // (?m) enables multi-line mode for ^ and $ + let re = Regex::new("(?m)^ (([a-zA-Z_&;-]+(, )?)+) +(.*)$") + .expect("Regex compilation should not fail"); + let text = re.replace_all(&text, "$1: $4"); + + // Look for a `[commandbody]` tag. If it exists, use all lines below it that + // start with a `#` in the USAGE section. + let mut text_lines: Vec<&str> = text.lines().collect(); + let mut command_body = String::new(); + + if let Some(line_index) = text_lines.iter().position(|line| *line == "[commandbody]") { text_lines.remove(line_index); - } - } - let text = text_lines.join("\n"); + while text_lines + .get(line_index) + .map(|line| line.starts_with("#")) + .unwrap_or(false) + { + command_body += if text_lines[line_index].starts_with("# ") { + &text_lines[line_index][2..] + } else { + &text_lines[line_index][1..] + }; + command_body += "[nobr]\n"; + text_lines.remove(line_index); + } + } - // Improve the usage section - let text = if command_body.is_empty() { - // Wrap the usage line in code tags - let re = Regex::new("(?m)^USAGE:\n (@conduit:.*)$") - .expect("Regex compilation should not fail"); - re.replace_all(&text, "USAGE:\n$1").to_string() - } else { - // Wrap the usage line in a code block, and add a yaml block example - // This makes the usage of e.g. `register-appservice` more accurate - let re = - Regex::new("(?m)^USAGE:\n (.*?)\n\n").expect("Regex compilation should not fail"); - re.replace_all(&text, "USAGE:\n
                <pre>$1[nobr]\n[commandbodyblock]</pre>
                ") - .replace("[commandbodyblock]", &command_body) - }; - - // Add HTML line-breaks - let text = text - .replace("\n\n\n", "\n\n") - .replace("\n", "
                \n") - .replace("[nobr]
                ", ""); - - text -} + let text = text_lines.join("\n"); + + // Improve the usage section + let text = if command_body.is_empty() { + // Wrap the usage line in code tags + let re = Regex::new("(?m)^USAGE:\n (@conduit:.*)$") + .expect("Regex compilation should not fail"); + re.replace_all(&text, "USAGE:\n$1").to_string() + } else { + // Wrap the usage line in a code block, and add a yaml block example + // This makes the usage of e.g. `register-appservice` more accurate + let re = + Regex::new("(?m)^USAGE:\n (.*?)\n\n").expect("Regex compilation should not fail"); + re.replace_all(&text, "USAGE:\n
                <pre>$1[nobr]\n[commandbodyblock]</pre>
                ") + .replace("[commandbodyblock]", &command_body) + }; + + // Add HTML line-breaks + let text = text + .replace("\n\n\n", "\n\n") + .replace("\n", "
                \n") + .replace("[nobr]
                ", ""); + + text + } -/// Create the admin room. -/// -/// Users in this room are considered admins by conduit, and the room can be -/// used to issue admin commands by talking to the server user inside it. -pub(crate) async fn create_admin_room() -> Result<()> { - let room_id = RoomId::new(services().globals.server_name()); - - services().rooms.get_or_create_shortroomid(&room_id)?; - - let mutex_state = Arc::clone( - services().globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.clone()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - // Create a user for the server - let conduit_user = UserId::parse_with_server_name("conduit", services().globals.server_name()) - .expect("@conduit:server_name is valid"); - - services().users.create(&conduit_user, None)?; - - let mut content = RoomCreateEventContent::new(conduit_user.clone()); - content.federate = true; - content.predecessor = None; - content.room_version = RoomVersionId::V6; - - // 1. The room create event - services().rooms.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomCreate, - content: to_raw_value(&content).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; - - // 2. Make conduit bot join - services().rooms.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Join, - displayname: None, - avatar_url: None, - is_direct: None, - third_party_invite: None, - blurhash: None, - reason: None, - join_authorized_via_users_server: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(conduit_user.to_string()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; - - // 3. Power levels - let mut users = BTreeMap::new(); - users.insert(conduit_user.clone(), 100.into()); - - services().rooms.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomPowerLevels, - content: to_raw_value(&RoomPowerLevelsEventContent { - users, - ..Default::default() - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; - - // 4.1 Join Rules - services().rooms.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomJoinRules, - content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite)) + /// Create the admin room. + /// + /// Users in this room are considered admins by conduit, and the room can be + /// used to issue admin commands by talking to the server user inside it. + pub(crate) async fn create_admin_room(&self) -> Result<()> { + let room_id = RoomId::new(services().globals.server_name()); + + services().rooms.get_or_create_shortroomid(&room_id)?; + + let mutex_state = Arc::clone( + services().globals + .roomid_mutex_state + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + // Create a user for the server + let conduit_user = UserId::parse_with_server_name("conduit", services().globals.server_name()) + .expect("@conduit:server_name is valid"); + + services().users.create(&conduit_user, None)?; + + let mut content = RoomCreateEventContent::new(conduit_user.clone()); + content.federate = true; + content.predecessor = None; + content.room_version = RoomVersionId::V6; + + // 1. 
The room create event + services().rooms.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomCreate, + content: to_raw_value(&content).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; + + // 2. Make conduit bot join + services().rooms.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Join, + displayname: None, + avatar_url: None, + is_direct: None, + third_party_invite: None, + blurhash: None, + reason: None, + join_authorized_via_users_server: None, + }) .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; - - // 4.2 History Visibility - services().rooms.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomHistoryVisibility, - content: to_raw_value(&RoomHistoryVisibilityEventContent::new( - HistoryVisibility::Shared, - )) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; - - // 4.3 Guest Access - services().rooms.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomGuestAccess, - content: to_raw_value(&RoomGuestAccessEventContent::new(GuestAccess::Forbidden)) + unsigned: None, + state_key: Some(conduit_user.to_string()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; + + // 3. Power levels + let mut users = BTreeMap::new(); + users.insert(conduit_user.clone(), 100.into()); + + services().rooms.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomPowerLevels, + content: to_raw_value(&RoomPowerLevelsEventContent { + users, + ..Default::default() + }) .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; - - // 5. 
Events implied by name and topic - let room_name = RoomName::parse(format!("{} Admin Room", services().globals.server_name())) - .expect("Room name is valid"); - services().rooms.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomName, - content: to_raw_value(&RoomNameEventContent::new(Some(room_name))) + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; + + // 4.1 Join Rules + services().rooms.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomJoinRules, + content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite)) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; + + // 4.2 History Visibility + services().rooms.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomHistoryVisibility, + content: to_raw_value(&RoomHistoryVisibilityEventContent::new( + HistoryVisibility::Shared, + )) .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; - - services().rooms.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomTopic, - content: to_raw_value(&RoomTopicEventContent { - topic: format!("Manage {}", services().globals.server_name()), - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; - - // 6. Room alias - let alias: Box = format!("#admins:{}", services().globals.server_name()) - .try_into() - .expect("#admins:server_name is a valid alias name"); - - services().rooms.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomCanonicalAlias, - content: to_raw_value(&RoomCanonicalAliasEventContent { - alias: Some(alias.clone()), - alt_aliases: Vec::new(), - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; - - services().rooms.set_alias(&alias, Some(&room_id))?; - - Ok(()) -} - -/// Invite the user to the conduit admin room. -/// -/// In conduit, this is equivalent to granting admin privileges. -pub(crate) async fn make_user_admin( - user_id: &UserId, - displayname: String, -) -> Result<()> { - let admin_room_alias: Box = format!("#admins:{}", services().globals.server_name()) - .try_into() - .expect("#admins:server_name is a valid alias name"); - let room_id = services() - .rooms - .id_from_alias(&admin_room_alias)? 
- .expect("Admin room must exist"); - - let mutex_state = Arc::clone( - services().globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.clone()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - // Use the server user to grant the new admin's power level - let conduit_user = UserId::parse_with_server_name("conduit", services().globals.server_name()) - .expect("@conduit:server_name is valid"); - - // Invite and join the real user - services().rooms.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Invite, - displayname: None, - avatar_url: None, - is_direct: None, - third_party_invite: None, - blurhash: None, - reason: None, - join_authorized_via_users_server: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; - services().rooms.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Join, - displayname: Some(displayname), - avatar_url: None, - is_direct: None, - third_party_invite: None, - blurhash: None, - reason: None, - join_authorized_via_users_server: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - &user_id, - &room_id, - &state_lock, - )?; - - // Set power level - let mut users = BTreeMap::new(); - users.insert(conduit_user.to_owned(), 100.into()); - users.insert(user_id.to_owned(), 100.into()); - - services().rooms.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomPowerLevels, - content: to_raw_value(&RoomPowerLevelsEventContent { - users, - ..Default::default() - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; - - // Send welcome message - services().rooms.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMessage, - content: to_raw_value(&RoomMessageEventContent::text_html( - format!("## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nFor a list of available commands, send the following message in this room: `@conduit:{}: --help`\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`", services().globals.server_name()).to_owned(), - format!("

                <h2>Thank you for trying out Conduit!</h2>\n<p>Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Website: https://conduit.rs<br>Git and Documentation: https://gitlab.com/famedly/conduit<br>Report issues: https://gitlab.com/famedly/conduit/-/issues</p>\n</blockquote>\n<p>For a list of available commands, send the following message in this room: <code>@conduit:{}: --help</code></p>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>Conduit room (Ask questions and get notified on updates):<br><code>/join #conduit:fachschaften.org</code></p>\n<p>Conduit lounge (Off-topic, only Conduit users are allowed to join)<br><code>/join #conduit-lounge:conduit.rs</code></p>
                \n", services().globals.server_name()).to_owned(), - )) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: None, - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; + + // 4.3 Guest Access + services().rooms.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomGuestAccess, + content: to_raw_value(&RoomGuestAccessEventContent::new(GuestAccess::Forbidden)) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; + + // 5. Events implied by name and topic + let room_name = RoomName::parse(format!("{} Admin Room", services().globals.server_name())) + .expect("Room name is valid"); + services().rooms.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomName, + content: to_raw_value(&RoomNameEventContent::new(Some(room_name))) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; + + services().rooms.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomTopic, + content: to_raw_value(&RoomTopicEventContent { + topic: format!("Manage {}", services().globals.server_name()), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; + + // 6. Room alias + let alias: Box = format!("#admins:{}", services().globals.server_name()) + .try_into() + .expect("#admins:server_name is a valid alias name"); + + services().rooms.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomCanonicalAlias, + content: to_raw_value(&RoomCanonicalAliasEventContent { + alias: Some(alias.clone()), + alt_aliases: Vec::new(), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; + + services().rooms.set_alias(&alias, Some(&room_id))?; + + Ok(()) + } - Ok(()) + /// Invite the user to the conduit admin room. + /// + /// In conduit, this is equivalent to granting admin privileges. + pub(crate) async fn make_user_admin( + &self, + user_id: &UserId, + displayname: String, + ) -> Result<()> { + let admin_room_alias: Box = format!("#admins:{}", services().globals.server_name()) + .try_into() + .expect("#admins:server_name is a valid alias name"); + let room_id = services() + .rooms + .id_from_alias(&admin_room_alias)? 
+ .expect("Admin room must exist"); + + let mutex_state = Arc::clone( + services().globals + .roomid_mutex_state + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + // Use the server user to grant the new admin's power level + let conduit_user = UserId::parse_with_server_name("conduit", services().globals.server_name()) + .expect("@conduit:server_name is valid"); + + // Invite and join the real user + services().rooms.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Invite, + displayname: None, + avatar_url: None, + is_direct: None, + third_party_invite: None, + blurhash: None, + reason: None, + join_authorized_via_users_server: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; + services().rooms.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Join, + displayname: Some(displayname), + avatar_url: None, + is_direct: None, + third_party_invite: None, + blurhash: None, + reason: None, + join_authorized_via_users_server: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + &user_id, + &room_id, + &state_lock, + )?; + + // Set power level + let mut users = BTreeMap::new(); + users.insert(conduit_user.to_owned(), 100.into()); + users.insert(user_id.to_owned(), 100.into()); + + services().rooms.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomPowerLevels, + content: to_raw_value(&RoomPowerLevelsEventContent { + users, + ..Default::default() + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; + + // Send welcome message + services().rooms.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomMessage, + content: to_raw_value(&RoomMessageEventContent::text_html( + format!("## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nFor a list of available commands, send the following message in this room: `@conduit:{}: --help`\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`", services().globals.server_name()).to_owned(), + format!("

                <h2>Thank you for trying out Conduit!</h2>\n<p>Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Website: https://conduit.rs<br>Git and Documentation: https://gitlab.com/famedly/conduit<br>Report issues: https://gitlab.com/famedly/conduit/-/issues</p>\n</blockquote>\n<p>For a list of available commands, send the following message in this room: <code>@conduit:{}: --help</code></p>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>Conduit room (Ask questions and get notified on updates):<br><code>/join #conduit:fachschaften.org</code></p>\n<p>Conduit lounge (Off-topic, only Conduit users are allowed to join)<br><code>/join #conduit-lounge:conduit.rs</code></p>
                \n", services().globals.server_name()).to_owned(), + )) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: None, + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; + + Ok(()) + } } diff --git a/src/service/appservice/data.rs b/src/service/appservice/data.rs index eed84d59..cd48e85d 100644 --- a/src/service/appservice/data.rs +++ b/src/service/appservice/data.rs @@ -1,5 +1,6 @@ +use crate::Result; + pub trait Data { - type Iter: Iterator; /// Registers an appservice and returns the ID to the caller fn register_appservice(&self, yaml: serde_yaml::Value) -> Result; @@ -12,7 +13,7 @@ pub trait Data { fn get_registration(&self, id: &str) -> Result>; - fn iter_ids(&self) -> Result>>; + fn iter_ids(&self) -> Result>>>; fn all(&self) -> Result>; } diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index ec4ffc56..63fa3afe 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -1,13 +1,13 @@ mod data; pub use data::Data; -use crate::service::*; +use crate::Result; pub struct Service { db: D, } -impl Service<_> { +impl Service { /// Registers an appservice and returns the ID to the caller pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result { self.db.register_appservice(yaml) diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs new file mode 100644 index 00000000..f36ab61b --- /dev/null +++ b/src/service/globals/data.rs @@ -0,0 +1,8 @@ +use ruma::signatures::Ed25519KeyPair; + +use crate::Result; + +pub trait Data { + fn load_keypair(&self) -> Result; + fn remove_keypair(&self) -> Result<()>; +} diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 2b47e5b1..556ca71c 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -3,7 +3,7 @@ pub use data::Data; use crate::service::*; -use crate::{database::Config, server_server::FedDest, utils, Error, Result}; +use crate::{Config, utils, Error, Result}; use ruma::{ api::{ client::sync::sync_events, @@ -25,8 +25,6 @@ use tokio::sync::{broadcast, watch::Receiver, Mutex as TokioMutex, Semaphore}; use tracing::error; use trust_dns_resolver::TokioAsyncResolver; -use super::abstraction::Tree; - pub const COUNTER: &[u8] = b"c"; type WellKnownMap = HashMap, (FedDest, String)>; @@ -93,47 +91,18 @@ impl Default for RotationHandler { } -impl Service<_> { +impl Service { pub fn load( - globals: Arc, - server_signingkeys: Arc, + db: D, config: Config, ) -> Result { - let keypair_bytes = globals.get(b"keypair")?.map_or_else( - || { - let keypair = utils::generate_keypair(); - globals.insert(b"keypair", &keypair)?; - Ok::<_, Error>(keypair) - }, - |s| Ok(s.to_vec()), - )?; - - let mut parts = keypair_bytes.splitn(2, |&b| b == 0xff); - - let keypair = utils::string_from_bytes( - // 1. version - parts - .next() - .expect("splitn always returns at least one element"), - ) - .map_err(|_| Error::bad_database("Invalid version bytes in keypair.")) - .and_then(|version| { - // 2. key - parts - .next() - .ok_or_else(|| Error::bad_database("Invalid keypair format in database.")) - .map(|key| (version, key)) - }) - .and_then(|(version, key)| { - ruma::signatures::Ed25519KeyPair::from_der(key, version) - .map_err(|_| Error::bad_database("Private or public keys are invalid.")) - }); + let keypair = db.load_keypair(); let keypair = match keypair { Ok(k) => k, Err(e) => { error!("Keypair invalid. 
Deleting..."); - globals.remove(b"keypair")?; + db.remove_keypair(); return Err(e); } }; @@ -167,7 +136,7 @@ impl Service<_> { let unstable_room_versions = vec![RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5]; let mut s = Self { - globals, + db, config, keypair: Arc::new(keypair), dns_resolver: TokioAsyncResolver::tokio_from_system_conf().map_err(|e| { @@ -181,7 +150,6 @@ impl Service<_> { tls_name_override, federation_client, default_client, - server_signingkeys, jwt_decoding_key, stable_room_versions, unstable_room_versions, diff --git a/src/service/key_backups/data.rs b/src/service/key_backups/data.rs index be1d6b18..6f6359eb 100644 --- a/src/service/key_backups/data.rs +++ b/src/service/key_backups/data.rs @@ -1,371 +1,85 @@ -use crate::{utils, Error, Result, services}; -use ruma::{ - api::client::{ - backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, - error::ErrorKind, - }, - serde::Raw, - RoomId, UserId, -}; -use std::{collections::BTreeMap, sync::Arc}; +use std::collections::BTreeMap; -impl KeyBackups { - pub fn create_backup( +use ruma::{api::client::backup::{BackupAlgorithm, RoomKeyBackup, KeyBackupData}, serde::Raw, UserId, RoomId}; +use crate::Result; + +pub trait Data { + fn create_backup( &self, user_id: &UserId, backup_metadata: &Raw, - ) -> Result { - let version = services().globals.next_count()?.to_string(); - - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); - - self.backupid_algorithm.insert( - &key, - &serde_json::to_vec(backup_metadata).expect("BackupAlgorithm::to_vec always works"), - )?; - self.backupid_etag - .insert(&key, &services().globals.next_count()?.to_be_bytes())?; - Ok(version) - } - - pub fn delete_backup(&self, user_id: &UserId, version: &str) -> Result<()> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); - - self.backupid_algorithm.remove(&key)?; - self.backupid_etag.remove(&key)?; + ) -> Result; - key.push(0xff); + fn delete_backup(&self, user_id: &UserId, version: &str) -> Result<()>; - for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) { - self.backupkeyid_backup.remove(&outdated_key)?; - } - - Ok(()) - } - - pub fn update_backup( + fn update_backup( &self, user_id: &UserId, version: &str, backup_metadata: &Raw, - ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); - - if self.backupid_algorithm.get(&key)?.is_none() { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Tried to update nonexistent backup.", - )); - } - - self.backupid_algorithm - .insert(&key, backup_metadata.json().get().as_bytes())?; - self.backupid_etag - .insert(&key, &services().globals.next_count()?.to_be_bytes())?; - Ok(version.to_owned()) - } - - pub fn get_latest_backup_version(&self, user_id: &UserId) -> Result> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); + ) -> Result; - self.backupid_algorithm - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|(key, _)| { - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("backupid_algorithm key is invalid.")) - }) - .transpose() - } + fn get_latest_backup_version(&self, user_id: &UserId) -> Result>; - pub fn get_latest_backup( 
+ fn get_latest_backup( &self, user_id: &UserId, - ) -> Result)>> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); + ) -> Result)>>; - self.backupid_algorithm - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|(key, value)| { - let version = utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("backupid_algorithm key is invalid."))?; - - Ok(( - version, - serde_json::from_slice(&value).map_err(|_| { - Error::bad_database("Algorithm in backupid_algorithm is invalid.") - })?, - )) - }) - .transpose() - } - - pub fn get_backup( + fn get_backup( &self, user_id: &UserId, version: &str, - ) -> Result>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); - - self.backupid_algorithm - .get(&key)? - .map_or(Ok(None), |bytes| { - serde_json::from_slice(&bytes) - .map_err(|_| Error::bad_database("Algorithm in backupid_algorithm is invalid.")) - }) - } + ) -> Result>>; - pub fn add_key( + fn add_key( &self, user_id: &UserId, version: &str, room_id: &RoomId, session_id: &str, key_data: &Raw, - ) -> Result<()> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); - - if self.backupid_algorithm.get(&key)?.is_none() { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Tried to update nonexistent backup.", - )); - } - - self.backupid_etag - .insert(&key, &services().globals.next_count()?.to_be_bytes())?; - - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(session_id.as_bytes()); - - self.backupkeyid_backup - .insert(&key, key_data.json().get().as_bytes())?; - - Ok(()) - } - - pub fn count_keys(&self, user_id: &UserId, version: &str) -> Result { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(version.as_bytes()); + ) -> Result<()>; - Ok(self.backupkeyid_backup.scan_prefix(prefix).count()) - } + fn count_keys(&self, user_id: &UserId, version: &str) -> Result; - pub fn get_etag(&self, user_id: &UserId, version: &str) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); + fn get_etag(&self, user_id: &UserId, version: &str) -> Result; - Ok(utils::u64_from_bytes( - &self - .backupid_etag - .get(&key)? - .ok_or_else(|| Error::bad_database("Backup has no etag."))?, - ) - .map_err(|_| Error::bad_database("etag in backupid_etag invalid."))? - .to_string()) - } - - pub fn get_all( + fn get_all( &self, user_id: &UserId, version: &str, - ) -> Result, RoomKeyBackup>> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(version.as_bytes()); - prefix.push(0xff); - - let mut rooms = BTreeMap::, RoomKeyBackup>::new(); - - for result in self - .backupkeyid_backup - .scan_prefix(prefix) - .map(|(key, value)| { - let mut parts = key.rsplit(|&b| b == 0xff); - - let session_id = - utils::string_from_bytes(parts.next().ok_or_else(|| { - Error::bad_database("backupkeyid_backup key is invalid.") - })?) 
- .map_err(|_| { - Error::bad_database("backupkeyid_backup session_id is invalid.") - })?; - - let room_id = RoomId::parse( - utils::string_from_bytes(parts.next().ok_or_else(|| { - Error::bad_database("backupkeyid_backup key is invalid.") - })?) - .map_err(|_| Error::bad_database("backupkeyid_backup room_id is invalid."))?, - ) - .map_err(|_| { - Error::bad_database("backupkeyid_backup room_id is invalid room id.") - })?; + ) -> Result, RoomKeyBackup>>; - let key_data = serde_json::from_slice(&value).map_err(|_| { - Error::bad_database("KeyBackupData in backupkeyid_backup is invalid.") - })?; - - Ok::<_, Error>((room_id, session_id, key_data)) - }) - { - let (room_id, session_id, key_data) = result?; - rooms - .entry(room_id) - .or_insert_with(|| RoomKeyBackup { - sessions: BTreeMap::new(), - }) - .sessions - .insert(session_id, key_data); - } - - Ok(rooms) - } - - pub fn get_room( + fn get_room( &self, user_id: &UserId, version: &str, room_id: &RoomId, - ) -> Result>> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(version.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - Ok(self - .backupkeyid_backup - .scan_prefix(prefix) - .map(|(key, value)| { - let mut parts = key.rsplit(|&b| b == 0xff); - - let session_id = - utils::string_from_bytes(parts.next().ok_or_else(|| { - Error::bad_database("backupkeyid_backup key is invalid.") - })?) - .map_err(|_| { - Error::bad_database("backupkeyid_backup session_id is invalid.") - })?; + ) -> Result>>; - let key_data = serde_json::from_slice(&value).map_err(|_| { - Error::bad_database("KeyBackupData in backupkeyid_backup is invalid.") - })?; - - Ok::<_, Error>((session_id, key_data)) - }) - .filter_map(|r| r.ok()) - .collect()) - } - - pub fn get_session( + fn get_session( &self, user_id: &UserId, version: &str, room_id: &RoomId, session_id: &str, - ) -> Result>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(session_id.as_bytes()); - - self.backupkeyid_backup - .get(&key)? 
- .map(|value| { - serde_json::from_slice(&value).map_err(|_| { - Error::bad_database("KeyBackupData in backupkeyid_backup is invalid.") - }) - }) - .transpose() - } - - pub fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); - key.push(0xff); - - for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) { - self.backupkeyid_backup.remove(&outdated_key)?; - } + ) -> Result>>; - Ok(()) - } + fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()>; - pub fn delete_room_keys( + fn delete_room_keys( &self, user_id: &UserId, version: &str, room_id: &RoomId, - ) -> Result<()> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); + ) -> Result<()>; - for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) { - self.backupkeyid_backup.remove(&outdated_key)?; - } - - Ok(()) - } - - pub fn delete_room_key( + fn delete_room_key( &self, user_id: &UserId, version: &str, room_id: &RoomId, session_id: &str, - ) -> Result<()> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(session_id.as_bytes()); - - for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) { - self.backupkeyid_backup.remove(&outdated_key)?; - } - - Ok(()) - } + ) -> Result<()>; } diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs index be1d6b18..8e842d4e 100644 --- a/src/service/key_backups/mod.rs +++ b/src/service/key_backups/mod.rs @@ -1,3 +1,6 @@ +mod data; +pub use data::Data; + use crate::{utils, Error, Result, services}; use ruma::{ api::client::{ @@ -9,7 +12,11 @@ use ruma::{ }; use std::{collections::BTreeMap, sync::Arc}; -impl KeyBackups { +pub struct Service { + db: D, +} + +impl Service { pub fn create_backup( &self, user_id: &UserId, diff --git a/src/service/media/data.rs b/src/service/media/data.rs new file mode 100644 index 00000000..94975de7 --- /dev/null +++ b/src/service/media/data.rs @@ -0,0 +1,8 @@ +use crate::Result; + +pub trait Data { + fn create_file_metadata(&self, mxc: String, width: u32, height: u32, content_disposition: &Option<&str>, content_type: &Option<&str>) -> Result>; + + /// Returns content_disposition, content_type and the metadata key. + fn search_file_metadata(&self, mxc: String, width: u32, height: u32) -> Result<(Option, Option, Vec)>; +} diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index 1bdf6d47..a5aca036 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -1,7 +1,8 @@ -use image::{imageops::FilterType, GenericImageView}; +mod data; +pub use data::Data; -use super::abstraction::Tree; -use crate::{utils, Error, Result}; +use image::{imageops::FilterType, GenericImageView}; +use crate::{utils, Error, Result, services}; use std::{mem, sync::Arc}; use tokio::{ fs::File, @@ -14,44 +15,25 @@ pub struct FileMeta { pub file: Vec, } -pub struct Media { - pub(super) mediaid_file: Arc, // MediaId = MXC + WidthHeight + ContentDisposition + ContentType +pub struct Service { + db: D, } -impl Media { +impl Service { /// Uploads a file. 
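// Illustrative sketch (not from this patch): one way a key-value backend could
// implement the new media `Data::create_file_metadata`, keeping the key layout the
// removed inline code used (MXC ++ 0xff ++ width ++ height ++ 0xff ++
// content_disposition ++ 0xff ++ content_type). `MediaDb` and its `mediaid_file`
// map are hypothetical stand-ins for the real database tree.
struct MediaDb {
    mediaid_file: std::collections::BTreeMap<Vec<u8>, Vec<u8>>,
}

impl MediaDb {
    fn create_file_metadata(
        &mut self,
        mxc: String,
        width: u32,
        height: u32,
        content_disposition: &Option<&str>,
        content_type: &Option<&str>,
    ) -> Vec<u8> {
        let mut key = mxc.as_bytes().to_vec();
        key.push(0xff);
        key.extend_from_slice(&width.to_be_bytes()); // width = 0 if not a thumbnail
        key.extend_from_slice(&height.to_be_bytes()); // height = 0 if not a thumbnail
        key.push(0xff);
        key.extend_from_slice(
            content_disposition
                .as_ref()
                .map(|f| f.as_bytes())
                .unwrap_or_default(),
        );
        key.push(0xff);
        key.extend_from_slice(
            content_type
                .as_ref()
                .map(|c| c.as_bytes())
                .unwrap_or_default(),
        );
        // The value is empty; the key itself carries the metadata.
        self.mediaid_file.insert(key.clone(), Vec::new());
        key
    }
}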
pub async fn create( &self, mxc: String, - globals: &Globals, content_disposition: &Option<&str>, content_type: &Option<&str>, file: &[u8], ) -> Result<()> { - let mut key = mxc.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(&0_u32.to_be_bytes()); // Width = 0 if it's not a thumbnail - key.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail - key.push(0xff); - key.extend_from_slice( - content_disposition - .as_ref() - .map(|f| f.as_bytes()) - .unwrap_or_default(), - ); - key.push(0xff); - key.extend_from_slice( - content_type - .as_ref() - .map(|c| c.as_bytes()) - .unwrap_or_default(), - ); + // Width, Height = 0 if it's not a thumbnail + let key = self.db.create_file_metadata(mxc, 0, 0, content_disposition, content_type); - let path = globals.get_media_file(&key); + let path = services().globals.get_media_file(&key); let mut f = File::create(path).await?; f.write_all(file).await?; - - self.mediaid_file.insert(&key, &[])?; Ok(()) } @@ -60,80 +42,28 @@ impl Media { pub async fn upload_thumbnail( &self, mxc: String, - globals: &Globals, content_disposition: &Option, content_type: &Option, width: u32, height: u32, file: &[u8], ) -> Result<()> { - let mut key = mxc.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(&width.to_be_bytes()); - key.extend_from_slice(&height.to_be_bytes()); - key.push(0xff); - key.extend_from_slice( - content_disposition - .as_ref() - .map(|f| f.as_bytes()) - .unwrap_or_default(), - ); - key.push(0xff); - key.extend_from_slice( - content_type - .as_ref() - .map(|c| c.as_bytes()) - .unwrap_or_default(), - ); + let key = self.db.create_file_metadata(mxc, width, height, content_disposition, content_type); - let path = globals.get_media_file(&key); + let path = services().globals.get_media_file(&key); let mut f = File::create(path).await?; f.write_all(file).await?; - self.mediaid_file.insert(&key, &[])?; - Ok(()) } /// Downloads a file. - pub async fn get(&self, globals: &Globals, mxc: &str) -> Result> { - let mut prefix = mxc.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(&0_u32.to_be_bytes()); // Width = 0 if it's not a thumbnail - prefix.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail - prefix.push(0xff); - - let first = self.mediaid_file.scan_prefix(prefix).next(); - if let Some((key, _)) = first { - let path = globals.get_media_file(&key); + pub async fn get(&self, mxc: String) -> Result> { + if let Ok((content_disposition, content_type, key)) = self.db.search_file_metadata(mxc, 0, 0) { + let path = services().globals.get_media_file(&key); let mut file = Vec::new(); File::open(path).await?.read_to_end(&mut file).await?; - let mut parts = key.rsplit(|&b| b == 0xff); - - let content_type = parts - .next() - .map(|bytes| { - utils::string_from_bytes(bytes).map_err(|_| { - Error::bad_database("Content type in mediaid_file is invalid unicode.") - }) - }) - .transpose()?; - let content_disposition_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?; - - let content_disposition = if content_disposition_bytes.is_empty() { - None - } else { - Some( - utils::string_from_bytes(content_disposition_bytes).map_err(|_| { - Error::bad_database( - "Content Disposition in mediaid_file is invalid unicode.", - ) - })?, - ) - }; Ok(Some(FileMeta { content_disposition, @@ -170,8 +100,7 @@ impl Media { /// For width,height <= 96 the server uses another thumbnailing algorithm which crops the image afterwards. 
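// Illustrative sketch (not from this patch): `thumbnail_properties` is called below but
// not shown in this hunk. It maps the requested dimensions onto a fixed set of
// server-side thumbnail sizes plus a crop flag; the exact table here is an assumption
// based on the Matrix-recommended thumbnail sizes.
fn thumbnail_properties(width: u32, height: u32) -> Option<(u32, u32, bool)> {
    match (width, height) {
        (0..=32, 0..=32) => Some((32, 32, true)),      // small sizes are cropped
        (0..=96, 0..=96) => Some((96, 96, true)),
        (0..=320, 0..=240) => Some((320, 240, false)), // larger sizes are scaled
        (0..=640, 0..=480) => Some((640, 480, false)),
        (0..=800, 0..=600) => Some((800, 600, false)),
        _ => None,                                     // None = serve the original file
    }
}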
pub async fn get_thumbnail( &self, - mxc: &str, - globals: &Globals, + mxc: String, width: u32, height: u32, ) -> Result> { @@ -179,89 +108,23 @@ impl Media { .thumbnail_properties(width, height) .unwrap_or((0, 0, false)); // 0, 0 because that's the original file - let mut main_prefix = mxc.as_bytes().to_vec(); - main_prefix.push(0xff); - - let mut thumbnail_prefix = main_prefix.clone(); - thumbnail_prefix.extend_from_slice(&width.to_be_bytes()); - thumbnail_prefix.extend_from_slice(&height.to_be_bytes()); - thumbnail_prefix.push(0xff); - - let mut original_prefix = main_prefix; - original_prefix.extend_from_slice(&0_u32.to_be_bytes()); // Width = 0 if it's not a thumbnail - original_prefix.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail - original_prefix.push(0xff); - - let first_thumbnailprefix = self.mediaid_file.scan_prefix(thumbnail_prefix).next(); - let first_originalprefix = self.mediaid_file.scan_prefix(original_prefix).next(); - if let Some((key, _)) = first_thumbnailprefix { + if let Ok((content_disposition, content_type, key)) = self.db.search_file_metadata(mxc, width, height) { // Using saved thumbnail - let path = globals.get_media_file(&key); + let path = services().globals.get_media_file(&key); let mut file = Vec::new(); File::open(path).await?.read_to_end(&mut file).await?; - let mut parts = key.rsplit(|&b| b == 0xff); - - let content_type = parts - .next() - .map(|bytes| { - utils::string_from_bytes(bytes).map_err(|_| { - Error::bad_database("Content type in mediaid_file is invalid unicode.") - }) - }) - .transpose()?; - - let content_disposition_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?; - - let content_disposition = if content_disposition_bytes.is_empty() { - None - } else { - Some( - utils::string_from_bytes(content_disposition_bytes).map_err(|_| { - Error::bad_database("Content Disposition in db is invalid.") - })?, - ) - }; Ok(Some(FileMeta { content_disposition, content_type, file: file.to_vec(), })) - } else if let Some((key, _)) = first_originalprefix { + } else if let Ok((content_disposition, content_type, key)) = self.db.search_file_metadata(mxc, 0, 0) { // Generate a thumbnail - let path = globals.get_media_file(&key); + let path = services().globals.get_media_file(&key); let mut file = Vec::new(); File::open(path).await?.read_to_end(&mut file).await?; - let mut parts = key.rsplit(|&b| b == 0xff); - - let content_type = parts - .next() - .map(|bytes| { - utils::string_from_bytes(bytes).map_err(|_| { - Error::bad_database("Content type in mediaid_file is invalid unicode.") - }) - }) - .transpose()?; - - let content_disposition_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?; - - let content_disposition = if content_disposition_bytes.is_empty() { - None - } else { - Some( - utils::string_from_bytes(content_disposition_bytes).map_err(|_| { - Error::bad_database( - "Content Disposition in mediaid_file is invalid unicode.", - ) - })?, - ) - }; - if let Ok(image) = image::load_from_memory(&file) { let original_width = image.width(); let original_height = image.height(); @@ -317,26 +180,12 @@ impl Media { thumbnail.write_to(&mut thumbnail_bytes, image::ImageOutputFormat::Png)?; // Save thumbnail in database so we don't have to generate it again next time - let mut thumbnail_key = key.to_vec(); - let width_index = thumbnail_key - .iter() - .position(|&b| b == 0xff) - .ok_or_else(|| Error::bad_database("Media in db is invalid."))? 
- + 1; - let mut widthheight = width.to_be_bytes().to_vec(); - widthheight.extend_from_slice(&height.to_be_bytes()); + let thumbnail_key = self.db.create_file_metadata(mxc, width, height, content_disposition, content_type)?; - thumbnail_key.splice( - width_index..width_index + 2 * mem::size_of::(), - widthheight, - ); - - let path = globals.get_media_file(&thumbnail_key); + let path = services().globals.get_media_file(&thumbnail_key); let mut f = File::create(path).await?; f.write_all(&thumbnail_bytes).await?; - self.mediaid_file.insert(&thumbnail_key, &[])?; - Ok(Some(FileMeta { content_disposition, content_type, diff --git a/src/service/mod.rs b/src/service/mod.rs index 80239cbf..4364c72e 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -1,28 +1,29 @@ -pub mod pdu; -pub mod appservice; -pub mod pusher; -pub mod rooms; -pub mod transaction_ids; -pub mod uiaa; -pub mod users; pub mod account_data; pub mod admin; +pub mod appservice; pub mod globals; pub mod key_backups; pub mod media; +pub mod pdu; +pub mod pusher; +pub mod rooms; pub mod sending; +pub mod transaction_ids; +pub mod uiaa; +pub mod users; -pub struct Services { +pub struct Services +{ pub appservice: appservice::Service, pub pusher: pusher::Service, pub rooms: rooms::Service, pub transaction_ids: transaction_ids::Service, pub uiaa: uiaa::Service, pub users: users::Service, - //pub account_data: account_data::Service, - //pub admin: admin::Service, + pub account_data: account_data::Service, + pub admin: admin::Service, pub globals: globals::Service, - //pub key_backups: key_backups::Service, - //pub media: media::Service, - //pub sending: sending::Service, + pub key_backups: key_backups::Service, + pub media: media::Service, + pub sending: sending::Service, } diff --git a/src/service/pdu.rs b/src/service/pdu.rs index 47e21a60..2ed79f2c 100644 --- a/src/service/pdu.rs +++ b/src/service/pdu.rs @@ -1,4 +1,4 @@ -use crate::{Database, Error, services}; +use crate::{Error, services}; use ruma::{ events::{ room::member::RoomMemberEventContent, AnyEphemeralRoomEvent, AnyRoomEvent, AnyStateEvent, @@ -357,7 +357,7 @@ pub(crate) fn gen_event_id_canonical_json( Ok((event_id, value)) } -/// Build the start of a PDU in order to add it to the `Database`. +/// Build the start of a PDU in order to add it to the Database. 
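// Illustrative sketch (not from this patch): how `PduBuilder` is filled in elsewhere in
// this series, e.g. for a state event. Only `event_type` and `content` carry data here;
// state events set `state_key` (the empty string for singleton state), and `redacts`
// stays `None` unless the event redacts another one. The topic string is a placeholder.
let _pdu = PduBuilder {
    event_type: RoomEventType::RoomTopic,
    content: to_raw_value(&RoomTopicEventContent {
        topic: "Manage the homeserver".to_owned(),
    })
    .expect("event is valid, we just created it"),
    unsigned: None,
    state_key: Some("".to_owned()),
    redacts: None,
};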
#[derive(Debug, Deserialize)] pub struct PduBuilder { #[serde(rename = "type")] diff --git a/src/service/pusher/data.rs b/src/service/pusher/data.rs index ef2b8193..3951da79 100644 --- a/src/service/pusher/data.rs +++ b/src/service/pusher/data.rs @@ -1,4 +1,5 @@ use ruma::{UserId, api::client::push::{set_pusher, get_pushers}}; +use crate::Result; pub trait Data { fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()>; @@ -10,5 +11,5 @@ pub trait Data { fn get_pusher_senderkeys<'a>( &'a self, sender: &UserId, - ) -> impl Iterator> + 'a; + ) -> Box>>; } diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 87e91a14..66a8ae36 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -1,7 +1,7 @@ mod data; pub use data::Data; -use crate::{services, Error, PduEvent}; +use crate::{services, Error, PduEvent, Result}; use bytes::BytesMut; use ruma::{ api::{ @@ -27,7 +27,7 @@ pub struct Service { db: D, } -impl Service<_> { +impl Service { pub fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> { self.db.set_pusher(sender, pusher) } diff --git a/src/service/rooms/alias/data.rs b/src/service/rooms/alias/data.rs index 655f32aa..c5d45e36 100644 --- a/src/service/rooms/alias/data.rs +++ b/src/service/rooms/alias/data.rs @@ -1,24 +1,29 @@ use ruma::{RoomId, RoomAliasId}; +use crate::Result; pub trait Data { /// Creates or updates the alias to the given room id. fn set_alias( + &self, alias: &RoomAliasId, room_id: &RoomId ) -> Result<()>; /// Forgets about an alias. Returns an error if the alias did not exist. fn remove_alias( + &self, alias: &RoomAliasId, ) -> Result<()>; /// Looks up the roomid for the given alias. fn resolve_local_alias( + &self, alias: &RoomAliasId, - ) -> Result<()>; + ) -> Result>>; /// Returns all local aliases that point to the given room fn local_aliases_for_room( - alias: &RoomAliasId, - ) -> Result<()>; + &self, + room_id: &RoomId, + ) -> Result>>; } diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index f46609aa..abe299d4 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -1,12 +1,14 @@ mod data; pub use data::Data; + use ruma::{RoomAliasId, RoomId}; +use crate::Result; pub struct Service { db: D, } -impl Service<_> { +impl Service { #[tracing::instrument(skip(self))] pub fn set_alias( &self, @@ -26,7 +28,7 @@ impl Service<_> { #[tracing::instrument(skip(self))] pub fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result>> { - self.db.resolve_local_alias(alias: &RoomAliasId) + self.db.resolve_local_alias(alias) } #[tracing::instrument(skip(self))] diff --git a/src/service/rooms/auth_chain/data.rs b/src/service/rooms/auth_chain/data.rs index 88c86fad..5177d6d6 100644 --- a/src/service/rooms/auth_chain/data.rs +++ b/src/service/rooms/auth_chain/data.rs @@ -1,6 +1,7 @@ use std::collections::HashSet; +use crate::Result; pub trait Data { - fn get_cached_eventid_authchain<'a>() -> Result>; - fn cache_eventid_authchain<'a>(shorteventid: u64, auth_chain: &HashSet) -> Result>; + fn get_cached_eventid_authchain(&self, shorteventid: u64) -> Result>; + fn cache_eventid_authchain(&self, shorteventid: u64, auth_chain: &HashSet) -> Result<()>; } diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index e17c10a1..113d2e81 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -3,13 +3,13 @@ use std::{sync::Arc, collections::HashSet}; pub use data::Data; -use 
crate::service::*; +use crate::Result; pub struct Service { db: D, } -impl Service<_> { +impl Service { #[tracing::instrument(skip(self))] pub fn get_cached_eventid_authchain<'a>( &'a self, diff --git a/src/service/rooms/directory/data.rs b/src/service/rooms/directory/data.rs index e28cdd12..13767217 100644 --- a/src/service/rooms/directory/data.rs +++ b/src/service/rooms/directory/data.rs @@ -1,15 +1,16 @@ use ruma::RoomId; +use crate::Result; pub trait Data { /// Adds the room to the public room directory - fn set_public(room_id: &RoomId) -> Result<()>; + fn set_public(&self, room_id: &RoomId) -> Result<()>; /// Removes the room from the public room directory. - fn set_not_public(room_id: &RoomId) -> Result<()>; + fn set_not_public(&self, room_id: &RoomId) -> Result<()>; /// Returns true if the room is in the public room directory. - fn is_public_room(room_id: &RoomId) -> Result; + fn is_public_room(&self, room_id: &RoomId) -> Result; /// Returns the unsorted public room directory - fn public_rooms() -> impl Iterator>> + '_; + fn public_rooms(&self) -> Box>>>; } diff --git a/src/service/rooms/directory/mod.rs b/src/service/rooms/directory/mod.rs index cb9cda86..68535057 100644 --- a/src/service/rooms/directory/mod.rs +++ b/src/service/rooms/directory/mod.rs @@ -2,13 +2,13 @@ mod data; pub use data::Data; use ruma::RoomId; -use crate::service::*; +use crate::Result; pub struct Service { db: D, } -impl Service<_> { +impl Service { #[tracing::instrument(skip(self))] pub fn set_public(&self, room_id: &RoomId) -> Result<()> { self.db.set_public(room_id) diff --git a/src/service/rooms/edus/mod.rs b/src/service/rooms/edus/mod.rs index 5566fb2c..a5ce37f1 100644 --- a/src/service/rooms/edus/mod.rs +++ b/src/service/rooms/edus/mod.rs @@ -2,7 +2,9 @@ pub mod presence; pub mod read_receipt; pub mod typing; -pub struct Service { +pub trait Data: presence::Data + read_receipt::Data + typing::Data {} + +pub struct Service { presence: presence::Service, read_receipt: read_receipt::Service, typing: typing::Service, diff --git a/src/service/rooms/edus/presence/data.rs b/src/service/rooms/edus/presence/data.rs index 8e3c672f..ca0e2410 100644 --- a/src/service/rooms/edus/presence/data.rs +++ b/src/service/rooms/edus/presence/data.rs @@ -1,6 +1,7 @@ use std::collections::HashMap; use ruma::{UserId, RoomId, events::presence::PresenceEvent}; +use crate::Result; pub trait Data { /// Adds a presence event which will be saved until a new event replaces it. diff --git a/src/service/rooms/edus/presence/mod.rs b/src/service/rooms/edus/presence/mod.rs index 5a988d4f..646cf549 100644 --- a/src/service/rooms/edus/presence/mod.rs +++ b/src/service/rooms/edus/presence/mod.rs @@ -4,13 +4,13 @@ use std::collections::HashMap; pub use data::Data; use ruma::{RoomId, UserId, events::presence::PresenceEvent}; -use crate::service::*; +use crate::Result; pub struct Service { db: D, } -impl Service<_> { +impl Service { /// Adds a presence event which will be saved until a new event replaces it. /// /// Note: This method takes a RoomId because presence updates are always bound to rooms to diff --git a/src/service/rooms/edus/read_receipt/data.rs b/src/service/rooms/edus/read_receipt/data.rs index 32b091f2..e8ed9656 100644 --- a/src/service/rooms/edus/read_receipt/data.rs +++ b/src/service/rooms/edus/read_receipt/data.rs @@ -1,4 +1,5 @@ use ruma::{RoomId, events::receipt::ReceiptEvent, UserId, serde::Raw}; +use crate::Result; pub trait Data { /// Replaces the previous read receipt. 
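// Illustrative sketch (not from this patch): the `Data` traits in this series return
// `Box<dyn Iterator<...>>` rather than `impl Iterator<...>` because `impl Trait` is not
// allowed as a return type in trait methods, and boxing keeps the traits implementable
// by any storage backend. `MemoryDirectory` is a hypothetical example implementation.
trait DirectoryData {
    fn public_rooms(&self) -> Box<dyn Iterator<Item = String> + '_>;
}

struct MemoryDirectory {
    rooms: Vec<String>,
}

impl DirectoryData for MemoryDirectory {
    fn public_rooms(&self) -> Box<dyn Iterator<Item = String> + '_> {
        // A real backend would walk its database tree instead of cloning a Vec.
        Box::new(self.rooms.iter().cloned())
    }
}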
@@ -14,13 +15,13 @@ pub trait Data { &self, room_id: &RoomId, since: u64, - ) -> impl Iterator< + ) -> Box, u64, Raw, )>, - >; + >>; /// Sets a private read marker at `count`. fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()>; diff --git a/src/service/rooms/edus/read_receipt/mod.rs b/src/service/rooms/edus/read_receipt/mod.rs index 744fece1..3f0b1476 100644 --- a/src/service/rooms/edus/read_receipt/mod.rs +++ b/src/service/rooms/edus/read_receipt/mod.rs @@ -1,12 +1,14 @@ mod data; pub use data::Data; + use ruma::{RoomId, UserId, events::receipt::ReceiptEvent, serde::Raw}; +use crate::Result; pub struct Service { db: D, } -impl Service<_> { +impl Service { /// Replaces the previous read receipt. pub fn readreceipt_update( &self, diff --git a/src/service/rooms/edus/typing/data.rs b/src/service/rooms/edus/typing/data.rs index 0c773135..ec0be466 100644 --- a/src/service/rooms/edus/typing/data.rs +++ b/src/service/rooms/edus/typing/data.rs @@ -1,5 +1,5 @@ use std::collections::HashSet; - +use crate::Result; use ruma::{UserId, RoomId}; pub trait Data { @@ -14,5 +14,5 @@ pub trait Data { fn last_typing_update(&self, room_id: &RoomId) -> Result; /// Returns all user ids currently typing. - fn typings_all(&self, room_id: &RoomId) -> Result>; + fn typings_all(&self, room_id: &RoomId) -> Result>>; } diff --git a/src/service/rooms/edus/typing/mod.rs b/src/service/rooms/edus/typing/mod.rs index 68b9fd83..00cfdecb 100644 --- a/src/service/rooms/edus/typing/mod.rs +++ b/src/service/rooms/edus/typing/mod.rs @@ -1,14 +1,14 @@ mod data; pub use data::Data; -use ruma::{UserId, RoomId}; +use ruma::{UserId, RoomId, events::SyncEphemeralRoomEvent}; -use crate::service::*; +use crate::Result; pub struct Service { db: D, } -impl Service<_> { +impl Service { /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is /// called. pub fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()> { diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 71529570..c9b041c2 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -250,7 +250,7 @@ impl Service { // We go through all the signatures we see on the value and fetch the corresponding signing // keys - self.fetch_required_signing_keys(&value, pub_key_map, db) + self.fetch_required_signing_keys(&value, pub_key_map) .await?; // 2. Check signatures, otherwise drop @@ -1153,6 +1153,11 @@ impl Service { let mut eventid_info = HashMap::new(); let mut todo_outlier_stack: Vec> = initial_set; + let first_pdu_in_room = services() + .rooms + .first_pdu_in_room(room_id)? 
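`readreceipts_since` above follows the pattern used for most EDU storage: every update is recorded under a monotonically increasing count, and sync pulls everything strictly newer than the last count it saw. A stand-in sketch with plain strings instead of the real ruma types:

use std::collections::BTreeMap;
use std::ops::Bound;

pub struct ReceiptStore {
    // count -> (user id, serialized receipt event)
    by_count: BTreeMap<u64, (String, String)>,
}

impl ReceiptStore {
    pub fn readreceipts_since<'a>(
        &'a self,
        since: u64,
    ) -> impl Iterator<Item = (u64, &'a (String, String))> + 'a {
        self.by_count
            .range((Bound::Excluded(since), Bound::Unbounded))
            .map(|(count, value)| (*count, value))
    }
}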
+ .ok_or_else(|| Error::bad_database("Failed to find first pdu in db."))?; + let mut amount = 0; while let Some(prev_event_id) = todo_outlier_stack.pop() { diff --git a/src/service/rooms/lazy_loading/data.rs b/src/service/rooms/lazy_loading/data.rs index 52a683d3..5fefd3f8 100644 --- a/src/service/rooms/lazy_loading/data.rs +++ b/src/service/rooms/lazy_loading/data.rs @@ -1,4 +1,5 @@ use ruma::{RoomId, DeviceId, UserId}; +use crate::Result; pub trait Data { fn lazy_load_was_sent_before( diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index bdc083a0..283d45af 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -4,13 +4,13 @@ use std::collections::HashSet; pub use data::Data; use ruma::{DeviceId, UserId, RoomId}; -use crate::service::*; +use crate::Result; pub struct Service { db: D, } -impl Service<_> { +impl Service { #[tracing::instrument(skip(self))] pub fn lazy_load_was_sent_before( &self, diff --git a/src/service/rooms/metadata/data.rs b/src/service/rooms/metadata/data.rs index 2d718b2d..9b1ce079 100644 --- a/src/service/rooms/metadata/data.rs +++ b/src/service/rooms/metadata/data.rs @@ -1,4 +1,5 @@ use ruma::RoomId; +use crate::Result; pub trait Data { fn exists(&self, room_id: &RoomId) -> Result; diff --git a/src/service/rooms/metadata/mod.rs b/src/service/rooms/metadata/mod.rs index 8417e28e..1bdb78d6 100644 --- a/src/service/rooms/metadata/mod.rs +++ b/src/service/rooms/metadata/mod.rs @@ -2,13 +2,13 @@ mod data; pub use data::Data; use ruma::RoomId; -use crate::service::*; +use crate::Result; pub struct Service { db: D, } -impl Service<_> { +impl Service { /// Checks if a room exists. #[tracing::instrument(skip(self))] pub fn exists(&self, room_id: &RoomId) -> Result { diff --git a/src/service/rooms/mod.rs b/src/service/rooms/mod.rs index 47250340..4da42236 100644 --- a/src/service/rooms/mod.rs +++ b/src/service/rooms/mod.rs @@ -16,7 +16,9 @@ pub mod state_compressor; pub mod timeline; pub mod user; -pub struct Service { +pub trait Data: alias::Data + auth_chain::Data + directory::Data + edus::Data + lazy_loading::Data + metadata::Data + outlier::Data + pdu_metadata::Data + search::Data + short::Data + state::Data + state_accessor::Data + state_cache::Data + state_compressor::Data + timeline::Data + user::Data {} + +pub struct Service { pub alias: alias::Service, pub auth_chain: auth_chain::Service, pub directory: directory::Service, diff --git a/src/service/rooms/outlier/data.rs b/src/service/rooms/outlier/data.rs index d579515e..17d0f7b4 100644 --- a/src/service/rooms/outlier/data.rs +++ b/src/service/rooms/outlier/data.rs @@ -1,6 +1,6 @@ -use ruma::{EventId, signatures::CanonicalJsonObject}; +use ruma::{signatures::CanonicalJsonObject, EventId}; -use crate::PduEvent; +use crate::{PduEvent, Result}; pub trait Data { fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result>; diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs index ee8b940f..a495db8f 100644 --- a/src/service/rooms/outlier/mod.rs +++ b/src/service/rooms/outlier/mod.rs @@ -2,13 +2,13 @@ mod data; pub use data::Data; use ruma::{EventId, signatures::CanonicalJsonObject}; -use crate::{service::*, PduEvent}; +use crate::{Result, PduEvent}; pub struct Service { db: D, } -impl Service<_> { +impl Service { /// Returns the pdu from the outlier tree. 
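The new umbrella trait in rooms/mod.rs bundles every per-module storage trait into one bound. A condensed sketch of the pattern, using two stand-in sub-traits instead of the sixteen real ones; the blanket impl is my own illustration of how a single backend type can satisfy it:

pub trait AliasData {
    fn resolve_local_alias(&self, alias: &str) -> Option<String>;
}

pub trait TimelineData {
    fn last_timeline_count(&self, room_id: &str) -> u64;
}

// One trait to require them all, so the rooms service only needs one backend.
pub trait Data: AliasData + TimelineData {}

// Any type that implements every sub-trait automatically qualifies.
impl<T: AliasData + TimelineData> Data for T {}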
pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { self.db.get_outlier_pdu_json(event_id) diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs index 531823fe..fb839023 100644 --- a/src/service/rooms/pdu_metadata/data.rs +++ b/src/service/rooms/pdu_metadata/data.rs @@ -1,6 +1,7 @@ use std::sync::Arc; use ruma::{EventId, RoomId}; +use crate::Result; pub trait Data { fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()>; diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs index 3442b830..c57c1a28 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -4,13 +4,13 @@ use std::sync::Arc; pub use data::Data; use ruma::{RoomId, EventId}; -use crate::service::*; +use crate::Result; pub struct Service { db: D, } -impl Service<_> { +impl Service { #[tracing::instrument(skip(self, room_id, event_ids))] pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { self.db.mark_as_referenced(room_id, event_ids) diff --git a/src/service/rooms/search/data.rs b/src/service/rooms/search/data.rs index 16287eba..c0fd2a37 100644 --- a/src/service/rooms/search/data.rs +++ b/src/service/rooms/search/data.rs @@ -1,11 +1,12 @@ use ruma::RoomId; +use crate::Result; pub trait Data { - fn index_pdu<'a>(&self, room_id: &RoomId, pdu_id: u64, message_body: String) -> Result<()>; + fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: u64, message_body: String) -> Result<()>; fn search_pdus<'a>( &'a self, room_id: &RoomId, search_string: &str, - ) -> Result> + 'a, Vec)>>; + ) -> Result>>, Vec)>>; } diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index 9087deff..b7023f32 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -1,12 +1,14 @@ mod data; pub use data::Data; + +use crate::Result; use ruma::RoomId; pub struct Service { db: D, } -impl Service<_> { +impl Service { #[tracing::instrument(skip(self))] pub fn search_pdus<'a>( &'a self, diff --git a/src/service/rooms/short/data.rs b/src/service/rooms/short/data.rs new file mode 100644 index 00000000..3b1c3117 --- /dev/null +++ b/src/service/rooms/short/data.rs @@ -0,0 +1,2 @@ +pub trait Data { +} diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index afde14e2..1eb891e6 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -2,19 +2,18 @@ mod data; use std::sync::Arc; pub use data::Data; -use ruma::{EventId, events::StateEventType}; +use ruma::{EventId, events::StateEventType, RoomId}; -use crate::{service::*, Error, utils}; +use crate::{Result, Error, utils, services}; pub struct Service { db: D, } -impl Service<_> { +impl Service { pub fn get_or_create_shorteventid( &self, event_id: &EventId, - globals: &super::globals::Globals, ) -> Result { if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { return Ok(*short); @@ -24,7 +23,7 @@ impl Service<_> { Some(shorteventid) => utils::u64_from_bytes(&shorteventid) .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, None => { - let shorteventid = globals.next_count()?; + let shorteventid = services().globals.next_count()?; self.eventid_shorteventid .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; self.shorteventid_eventid @@ -82,7 +81,6 @@ impl Service<_> { &self, event_type: &StateEventType, state_key: &str, - globals: &super::globals::Globals, ) -> Result { if let 
Some(short) = self .statekeyshort_cache @@ -101,7 +99,7 @@ impl Service<_> { Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, None => { - let shortstatekey = globals.next_count()?; + let shortstatekey = services().globals.next_count()?; self.statekey_shortstatekey .insert(&statekey, &shortstatekey.to_be_bytes())?; self.shortstatekey_statekey @@ -190,7 +188,7 @@ impl Service<_> { /// Returns (shortstatehash, already_existed) fn get_or_create_shortstatehash( &self, - state_hash: &StateHashId, + state_hash: &[u8], ) -> Result<(u64, bool)> { Ok(match self.statehash_shortstatehash.get(state_hash)? { Some(shortstatehash) => ( @@ -199,7 +197,7 @@ impl Service<_> { true, ), None => { - let shortstatehash = globals.next_count()?; + let shortstatehash = services().globals.next_count()?; self.statehash_shortstatehash .insert(state_hash, &shortstatehash.to_be_bytes())?; (shortstatehash, false) @@ -220,13 +218,12 @@ impl Service<_> { pub fn get_or_create_shortroomid( &self, room_id: &RoomId, - globals: &super::globals::Globals, ) -> Result { Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? { Some(short) => utils::u64_from_bytes(&short) .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, None => { - let short = globals.next_count()?; + let short = services().globals.next_count()?; self.roomid_shortroomid .insert(room_id.as_bytes(), &short.to_be_bytes())?; short diff --git a/src/service/rooms/state/data.rs b/src/service/rooms/state/data.rs index ac8fac21..fd0de282 100644 --- a/src/service/rooms/state/data.rs +++ b/src/service/rooms/state/data.rs @@ -1,30 +1,28 @@ use std::sync::Arc; use std::{sync::MutexGuard, collections::HashSet}; use std::fmt::Debug; - +use crate::Result; use ruma::{EventId, RoomId}; pub trait Data { /// Returns the last state hash key added to the db for the given room. - fn get_room_shortstatehash(room_id: &RoomId); + fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result>; /// Update the current state of the room. - fn set_room_state(room_id: &RoomId, new_shortstatehash: u64, - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex - ); + fn set_room_state(&self, room_id: &RoomId, new_shortstatehash: u64, + _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + ) -> Result<()>; /// Associates a state with an event. - fn set_event_state(shorteventid: u64, shortstatehash: u64) -> Result<()>; + fn set_event_state(&self, shorteventid: u64, shortstatehash: u64) -> Result<()>; /// Returns all events we would send as the prev_events of the next event. - fn get_forward_extremities(room_id: &RoomId) -> Result>>; + fn get_forward_extremities(&self, room_id: &RoomId) -> Result>>; /// Replace the forward extremities of the room. 
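All of the `get_or_create_short*` helpers above share one shape: check a cache, then the database, and otherwise allocate the next value from the global counter and record the mapping in both directions. A self-contained sketch, with a `HashMap` and a local counter standing in for the database trees and `services().globals.next_count()`:

use std::collections::HashMap;

pub struct ShortIdMap {
    forward: HashMap<String, u64>,
    backward: HashMap<u64, String>,
    next: u64,
}

impl ShortIdMap {
    pub fn get_or_create(&mut self, key: &str) -> u64 {
        if let Some(short) = self.forward.get(key) {
            return *short;
        }
        // Allocate a fresh short id and store the mapping both ways.
        self.next += 1;
        let short = self.next;
        self.forward.insert(key.to_owned(), short);
        self.backward.insert(short, key.to_owned());
        short
    }

    pub fn lookup(&self, short: u64) -> Option<&str> {
        self.backward.get(&short).map(|k| k.as_str())
    }
}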
- fn set_forward_extremities( + fn set_forward_extremities<'a>(&self, room_id: &RoomId, - event_ids: impl IntoIterator + Debug, - _mutex_lock: &MutexGuard<'_, StateLock>, // Take mutex guard to make sure users get the room state mutex + event_ids: impl IntoIterator + Debug, + _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()>; } - -pub struct StateLock; diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 6c33d521..e6b5ce20 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -6,13 +6,15 @@ use ruma::{RoomId, events::{room::{member::MembershipState, create::RoomCreateEv use serde::Deserialize; use tracing::warn; -use crate::{service::*, SERVICE, PduEvent, Error, utils::calculate_hash}; +use crate::{Result, services, PduEvent, Error, utils::calculate_hash}; + +use super::state_compressor::CompressedStateEvent; pub struct Service { db: D, } -impl Service<_> { +impl Service { /// Set the room to the given statehash and update caches. pub fn force_state( &self, @@ -23,11 +25,11 @@ impl Service<_> { ) -> Result<()> { for event_id in statediffnew.into_iter().filter_map(|new| { - SERVICE.rooms.state_compressor.parse_compressed_state_event(new) + services().rooms.state_compressor.parse_compressed_state_event(new) .ok() .map(|(_, id)| id) }) { - let pdu = match SERVICE.rooms.timeline.get_pdu_json(&event_id)? { + let pdu = match services().rooms.timeline.get_pdu_json(&event_id)? { Some(pdu) => pdu, None => continue, }; @@ -63,10 +65,10 @@ impl Service<_> { Err(_) => continue, }; - SERVICE.room.state_cache.update_membership(room_id, &user_id, membership, &pdu.sender, None, false)?; + services().room.state_cache.update_membership(room_id, &user_id, membership, &pdu.sender, None, false)?; } - SERVICE.room.state_cache.update_joined_count(room_id)?; + services().room.state_cache.update_joined_count(room_id)?; self.db.set_room_state(room_id, shortstatehash); @@ -84,7 +86,7 @@ impl Service<_> { room_id: &RoomId, state_ids_compressed: HashSet, ) -> Result<()> { - let shorteventid = SERVICE.short.get_or_create_shorteventid(event_id)?; + let shorteventid = services().short.get_or_create_shorteventid(event_id)?; let previous_shortstatehash = self.db.get_room_shortstatehash(room_id)?; @@ -96,11 +98,11 @@ impl Service<_> { ); let (shortstatehash, already_existed) = - SERVICE.short.get_or_create_shortstatehash(&state_hash)?; + services().short.get_or_create_shortstatehash(&state_hash)?; if !already_existed { let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| SERVICE.room.state_compressor.load_shortstatehash_info(p))?; + .map_or_else(|| Ok(Vec::new()), |p| services().room.state_compressor.load_shortstatehash_info(p))?; let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() { @@ -119,7 +121,7 @@ impl Service<_> { } else { (state_ids_compressed, HashSet::new()) }; - SERVICE.room.state_compressor.save_state_from_diff( + services().room.state_compressor.save_state_from_diff( shortstatehash, statediffnew, statediffremoved, @@ -176,7 +178,7 @@ impl Service<_> { } // TODO: statehash with deterministic inputs - let shortstatehash = SERVICE.globals.next_count()?; + let shortstatehash = services().globals.next_count()?; let mut statediffnew = HashSet::new(); statediffnew.insert(new); @@ -273,4 +275,8 @@ impl Service<_> { .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; Ok(room_version) } + + pub fn 
get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { + self.db.get_room_shortstatehash(room_id) + } } diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs index bf2972f9..48031e49 100644 --- a/src/service/rooms/state_accessor/data.rs +++ b/src/service/rooms/state_accessor/data.rs @@ -1,9 +1,11 @@ -use std::{sync::Arc, collections::HashMap}; +use std::{sync::Arc, collections::{HashMap, BTreeMap}}; +use async_trait::async_trait; use ruma::{EventId, events::StateEventType, RoomId}; -use crate::PduEvent; +use crate::{Result, PduEvent}; +#[async_trait] pub trait Data { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 92e5c8e1..5d6886d9 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -4,13 +4,13 @@ use std::{sync::Arc, collections::{HashMap, BTreeMap}}; pub use data::Data; use ruma::{events::StateEventType, RoomId, EventId}; -use crate::{service::*, PduEvent}; +use crate::{Result, PduEvent}; pub struct Service { db: D, } -impl Service<_> { +impl Service { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. #[tracing::instrument(skip(self))] diff --git a/src/service/rooms/state_cache/data.rs b/src/service/rooms/state_cache/data.rs index f6519196..b45b2ea0 100644 --- a/src/service/rooms/state_cache/data.rs +++ b/src/service/rooms/state_cache/data.rs @@ -1,5 +1,9 @@ -use ruma::{UserId, RoomId}; +use ruma::{UserId, RoomId, serde::Raw, events::AnyStrippedStateEvent}; +use crate::Result; pub trait Data { - fn mark_as_once_joined(user_id: &UserId, room_id: &RoomId) -> Result<()>; + fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; + fn mark_as_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; + fn mark_as_invited(&self, user_id: &UserId, room_id: &RoomId, last_state: Option>>) -> Result<()>; + fn mark_as_left(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; } diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index d29501a6..c3b4eb91 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -5,13 +5,13 @@ pub use data::Data; use regex::Regex; use ruma::{RoomId, UserId, events::{room::{member::MembershipState, create::RoomCreateEventContent}, AnyStrippedStateEvent, StateEventType, tag::TagEvent, RoomAccountDataEventType, GlobalAccountDataEventType, direct::DirectEvent, ignored_user_list::IgnoredUserListEvent, AnySyncStateEvent}, serde::Raw, ServerName}; -use crate::{service::*, SERVICE, utils, Error}; +use crate::{Result, services, utils, Error}; pub struct Service { db: D, } -impl Service<_> { +impl Service { /// Update current membership data. 
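Most of the churn in these hunks is replacing explicit `db`/`globals` parameters with the global `services()` accessor. The sketch below shows one way such an accessor can be wired with the standard library; the struct contents and the actual mechanism used by the real code are assumptions here, not taken from this patch.

use std::sync::OnceLock;

// Illustrative stand-in for the real Services struct.
pub struct Services {
    pub server_name: String,
}

static SERVICES: OnceLock<Services> = OnceLock::new();

pub fn init(services: Services) {
    // Called once at startup, before any request handler runs.
    SERVICES
        .set(services)
        .ok()
        .expect("services must only be initialized once");
}

pub fn services() -> &'static Services {
    SERVICES
        .get()
        .expect("services() called before initialization")
}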
#[tracing::instrument(skip(self, last_state))] pub fn update_membership( @@ -24,8 +24,8 @@ impl Service<_> { update_joined_count: bool, ) -> Result<()> { // Keep track what remote users exist by adding them as "deactivated" users - if user_id.server_name() != SERVICE.globals.server_name() { - SERVICE.users.create(user_id, None)?; + if user_id.server_name() != services().globals.server_name() { + services().users.create(user_id, None)?; // TODO: displayname, avatar url } @@ -37,10 +37,6 @@ impl Service<_> { serverroom_id.push(0xff); serverroom_id.extend_from_slice(room_id.as_bytes()); - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - match &membership { MembershipState::Join => { // Check if the user never joined this room @@ -80,24 +76,23 @@ impl Service<_> { // .ok(); // Copy old tags to new room - if let Some(tag_event) = db.account_data.get::( + if let Some(tag_event) = services().account_data.get::( Some(&predecessor.room_id), user_id, RoomAccountDataEventType::Tag, )? { - SERVICE.account_data + services().account_data .update( Some(room_id), user_id, RoomAccountDataEventType::Tag, &tag_event, - &db.globals, ) .ok(); }; // Copy direct chat flag - if let Some(mut direct_event) = SERVICE.account_data.get::( + if let Some(mut direct_event) = services().account_data.get::( None, user_id, GlobalAccountDataEventType::Direct.to_string().into(), @@ -112,7 +107,7 @@ impl Service<_> { } if room_ids_updated { - SERVICE.account_data.update( + services().account_data.update( None, user_id, GlobalAccountDataEventType::Direct.to_string().into(), @@ -123,16 +118,11 @@ impl Service<_> { } } - self.userroomid_joined.insert(&userroom_id, &[])?; - self.roomuserid_joined.insert(&roomuser_id, &[])?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; + self.db.mark_as_joined(user_id, room_id)?; } MembershipState::Invite => { // We want to know if the sender is ignored by the receiver - let is_ignored = SERVICE + let is_ignored = services() .account_data .get::( None, // Ignored users are in global account data @@ -153,41 +143,22 @@ impl Service<_> { return Ok(()); } - self.userroomid_invitestate.insert( - &userroom_id, - &serde_json::to_vec(&last_state.unwrap_or_default()) - .expect("state to bytes always works"), - )?; - self.roomuserid_invitecount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; + self.db.mark_as_invited(user_id, room_id, last_state)?; } MembershipState::Leave | MembershipState::Ban => { - self.userroomid_leftstate.insert( - &userroom_id, - &serde_json::to_vec(&Vec::>::new()).unwrap(), - )?; // TODO - self.roomuserid_leftcount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; + self.db.mark_as_left(user_id, room_id)?; } _ => {} } if update_joined_count { - self.update_joined_count(room_id, db)?; + self.update_joined_count(room_id)?; } Ok(()) } - #[tracing::instrument(skip(self, room_id, db))] + #[tracing::instrument(skip(self, 
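After this change `update_membership` no longer pokes the individual trees itself; it decides what happened and delegates to `mark_as_joined`, `mark_as_invited`, or `mark_as_left` on the storage trait. A stripped-down sketch of that split, with string ids and a stub trait instead of the real types:

pub enum Membership {
    Join,
    Invite,
    Leave,
    Ban,
}

pub trait StateCacheData {
    fn mark_as_joined(&self, user_id: &str, room_id: &str);
    fn mark_as_invited(&self, user_id: &str, room_id: &str);
    fn mark_as_left(&self, user_id: &str, room_id: &str);
}

pub fn apply_membership(db: &dyn StateCacheData, user_id: &str, room_id: &str, m: Membership) {
    match m {
        Membership::Join => db.mark_as_joined(user_id, room_id),
        Membership::Invite => db.mark_as_invited(user_id, room_id),
        // A ban is stored like a leave; only the event content differs.
        Membership::Leave | Membership::Ban => db.mark_as_left(user_id, room_id),
    }
}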
room_id))] pub fn update_joined_count(&self, room_id: &RoomId) -> Result<()> { let mut joinedcount = 0_u64; let mut invitedcount = 0_u64; @@ -196,8 +167,8 @@ impl Service<_> { for joined in self.room_members(room_id).filter_map(|r| r.ok()) { joined_servers.insert(joined.server_name().to_owned()); - if joined.server_name() == db.globals.server_name() - && !db.users.is_deactivated(&joined).unwrap_or(true) + if joined.server_name() == services().globals.server_name() + && !services().users.is_deactivated(&joined).unwrap_or(true) { real_users.insert(joined); } @@ -285,7 +256,7 @@ impl Service<_> { .get("sender_localpart") .and_then(|string| string.as_str()) .and_then(|string| { - UserId::parse_with_server_name(string, SERVICE.globals.server_name()).ok() + UserId::parse_with_server_name(string, services().globals.server_name()).ok() }); let in_room = bridge_user_id diff --git a/src/service/rooms/state_compressor/data.rs b/src/service/rooms/state_compressor/data.rs index 74a28e7b..17689364 100644 --- a/src/service/rooms/state_compressor/data.rs +++ b/src/service/rooms/state_compressor/data.rs @@ -1,4 +1,5 @@ -use crate::service::rooms::CompressedStateEvent; +use super::CompressedStateEvent; +use crate::Result; pub struct StateDiff { parent: Option, @@ -7,6 +8,6 @@ pub struct StateDiff { } pub trait Data { - fn get_statediff(shortstatehash: u64) -> Result; - fn save_statediff(shortstatehash: u64, diff: StateDiff) -> Result<()>; + fn get_statediff(&self, shortstatehash: u64) -> Result; + fn save_statediff(&self, shortstatehash: u64, diff: StateDiff) -> Result<()>; } diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index 3aea4fe6..619e4cf5 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -4,7 +4,7 @@ use std::{mem::size_of, sync::Arc, collections::HashSet}; pub use data::Data; use ruma::{EventId, RoomId}; -use crate::{service::*, utils}; +use crate::{Result, utils, services}; use self::data::StateDiff; @@ -12,7 +12,9 @@ pub struct Service { db: D, } -impl Service<_> { +pub type CompressedStateEvent = [u8; 2 * size_of::()]; + +impl Service { /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. #[tracing::instrument(skip(self))] pub fn load_shortstatehash_info( @@ -62,12 +64,11 @@ impl Service<_> { &self, shortstatekey: u64, event_id: &EventId, - globals: &super::globals::Globals, ) -> Result { let mut v = shortstatekey.to_be_bytes().to_vec(); v.extend_from_slice( &self - .get_or_create_shorteventid(event_id, globals)? + .get_or_create_shorteventid(event_id)? 
.to_be_bytes(), ); Ok(v.try_into().expect("we checked the size above")) @@ -210,15 +211,16 @@ impl Service<_> { /// Returns the new shortstatehash pub fn save_state( + &self, room_id: &RoomId, new_state_ids_compressed: HashSet, ) -> Result<(u64, HashSet, // added HashSet)> // removed { - let previous_shortstatehash = self.d.current_shortstatehash(room_id)?; + let previous_shortstatehash = self.db.current_shortstatehash(room_id)?; - let state_hash = self.calculate_hash( + let state_hash = utils::calculate_hash( &new_state_ids_compressed .iter() .map(|bytes| &bytes[..]) @@ -226,7 +228,7 @@ impl Service<_> { ); let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; + services().rooms.short.get_or_create_shortstatehash(&state_hash)?; if Some(new_shortstatehash) == previous_shortstatehash { return Ok(()); diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index bf6d8c5e..85bedc69 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -2,7 +2,7 @@ use std::sync::Arc; use ruma::{signatures::CanonicalJsonObject, EventId, UserId, RoomId}; -use crate::PduEvent; +use crate::{Result, PduEvent}; pub trait Data { fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result; @@ -48,28 +48,26 @@ pub trait Data { /// Returns an iterator over all events in a room that happened after the event with id `since` /// in chronological order. - #[tracing::instrument(skip(self))] fn pdus_since<'a>( &'a self, user_id: &UserId, room_id: &RoomId, since: u64, - ) -> Result, PduEvent)>> + 'a>; + ) -> Result, PduEvent)>>>>; /// Returns an iterator over all events and their tokens in a room that happened before the /// event with id `until` in reverse-chronological order. - #[tracing::instrument(skip(self))] fn pdus_until<'a>( &'a self, user_id: &UserId, room_id: &RoomId, until: u64, - ) -> Result, PduEvent)>> + 'a>; + ) -> Result, PduEvent)>>>>; fn pdus_after<'a>( &'a self, user_id: &UserId, room_id: &RoomId, from: u64, - ) -> Result, PduEvent)>> + 'a>; + ) -> Result, PduEvent)>>>>; } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 7b60fe5d..09f66ddf 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1,23 +1,29 @@ mod data; +use std::borrow::Cow; +use std::sync::Arc; use std::{sync::MutexGuard, iter, collections::HashSet}; use std::fmt::Debug; pub use data::Data; use regex::Regex; +use ruma::events::room::power_levels::RoomPowerLevelsEventContent; +use ruma::push::Ruleset; use ruma::signatures::CanonicalJsonValue; +use ruma::state_res::RoomVersion; use ruma::{EventId, signatures::CanonicalJsonObject, push::{Action, Tweak}, events::{push_rules::PushRulesEvent, GlobalAccountDataEventType, RoomEventType, room::{member::MembershipState, create::RoomCreateEventContent}, StateEventType}, UserId, RoomAliasId, RoomId, uint, state_res, api::client::error::ErrorKind, serde::to_canonical_value, ServerName}; use serde::Deserialize; use serde_json::value::to_raw_value; use tracing::{warn, error}; -use crate::SERVICE; -use crate::{service::{*, pdu::{PduBuilder, EventHash}}, Error, PduEvent, utils}; +use crate::{services, Result, service::pdu::{PduBuilder, EventHash}, Error, PduEvent, utils}; + +use super::state_compressor::CompressedStateEvent; pub struct Service { db: D, } -impl Service<_> { +impl Service { /* /// Checks if a room exists. 
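`compress_state_event` above packs a state entry into a fixed-width key: the big-endian short state key followed by the big-endian short event id. A standalone version of that encoding and its inverse, as a sketch outside the service:

use std::convert::TryInto;

pub type CompressedStateEvent = [u8; 2 * std::mem::size_of::<u64>()];

pub fn compress(shortstatekey: u64, shorteventid: u64) -> CompressedStateEvent {
    let mut out = [0u8; 16];
    out[..8].copy_from_slice(&shortstatekey.to_be_bytes());
    out[8..].copy_from_slice(&shorteventid.to_be_bytes());
    out
}

pub fn decompress(value: &CompressedStateEvent) -> (u64, u64) {
    let shortstatekey = u64::from_be_bytes(value[..8].try_into().expect("8 bytes"));
    let shorteventid = u64::from_be_bytes(value[8..].try_into().expect("8 bytes"));
    (shortstatekey, shorteventid)
}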
#[tracing::instrument(skip(self))] @@ -44,7 +50,7 @@ impl Service<_> { #[tracing::instrument(skip(self))] pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { - self.db.last_timeline_count(sender_user: &UserId, room_id: &RoomId) + self.db.last_timeline_count(sender_user, room_id) } // TODO Is this the same as the function above? @@ -127,7 +133,7 @@ impl Service<_> { /// Removes a pdu and creates a new one with the same id. #[tracing::instrument(skip(self))] fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - self.db.pdu_count(pdu_id, pdu: &PduEvent) + self.db.replace_pdu(pdu_id, pdu) } /// Creates a new persisted data unit and adds it to a room. @@ -177,7 +183,7 @@ impl Service<_> { self.replace_pdu_leaves(&pdu.room_id, leaves)?; let mutex_insert = Arc::clone( - db.globals + services().globals .roomid_mutex_insert .write() .unwrap() @@ -186,14 +192,14 @@ impl Service<_> { ); let insert_lock = mutex_insert.lock().unwrap(); - let count1 = db.globals.next_count()?; + let count1 = services().globals.next_count()?; // Mark as read first so the sending client doesn't get a notification even if appending // fails self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; + .private_read_set(&pdu.room_id, &pdu.sender, count1)?; self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; - let count2 = db.globals.next_count()?; + let count2 = services().globals.next_count()?; let mut pdu_id = shortroomid.to_be_bytes().to_vec(); pdu_id.extend_from_slice(&count2.to_be_bytes()); @@ -218,7 +224,7 @@ impl Service<_> { drop(insert_lock); // See if the event matches any known pushers - let power_levels: RoomPowerLevelsEventContent = db + let power_levels: RoomPowerLevelsEventContent = services() .rooms .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? 
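The `pdu_id` built above is simply the room's short id followed by a fresh global count, both big-endian, so ids sort by room first and by insertion order within a room. The same layout as a standalone helper:

pub fn make_pdu_id(shortroomid: u64, count: u64) -> Vec<u8> {
    // 8 bytes of short room id followed by 8 bytes of count.
    let mut pdu_id = shortroomid.to_be_bytes().to_vec();
    pdu_id.extend_from_slice(&count.to_be_bytes());
    pdu_id
}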
.map(|ev| { @@ -233,13 +239,13 @@ impl Service<_> { let mut notifies = Vec::new(); let mut highlights = Vec::new(); - for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { + for user in self.get_our_real_users(&pdu.room_id)?.iter() { // Don't notify the user of their own events if user == &pdu.sender { continue; } - let rules_for_user = db + let rules_for_user = services() .account_data .get( None, @@ -252,7 +258,7 @@ impl Service<_> { let mut highlight = false; let mut notify = false; - for action in pusher::get_actions( + for action in services().pusher.get_actions( user, &rules_for_user, &power_levels, @@ -282,8 +288,8 @@ impl Service<_> { highlights.push(userroom_id); } - for senderkey in db.pusher.get_pusher_senderkeys(user) { - db.sending.send_push_pdu(&*pdu_id, senderkey)?; + for senderkey in services().pusher.get_pusher_senderkeys(user) { + services().sending.send_push_pdu(&*pdu_id, senderkey)?; } } @@ -328,7 +334,6 @@ impl Service<_> { content.membership, &pdu.sender, invite_state, - db, true, )?; } @@ -344,34 +349,34 @@ impl Service<_> { .map_err(|_| Error::bad_database("Invalid content in pdu."))?; if let Some(body) = content.body { - DB.rooms.search.index_pdu(room_id, pdu_id, body)?; + services().rooms.search.index_pdu(shortroomid, pdu_id, body)?; - let admin_room = self.id_from_alias( + let admin_room = self.alias.resolve_local_alias( <&RoomAliasId>::try_from( - format!("#admins:{}", db.globals.server_name()).as_str(), + format!("#admins:{}", services().globals.server_name()).as_str(), ) .expect("#admins:server_name is a valid room alias"), )?; - let server_user = format!("@conduit:{}", db.globals.server_name()); + let server_user = format!("@conduit:{}", services().globals.server_name()); let to_conduit = body.starts_with(&format!("{}: ", server_user)); // This will evaluate to false if the emergency password is set up so that // the administrator can execute commands as conduit let from_conduit = - pdu.sender == server_user && db.globals.emergency_password().is_none(); + pdu.sender == server_user && services().globals.emergency_password().is_none(); if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - db.admin.process_message(body.to_string()); + services().admin.process_message(body.to_string()); } } } _ => {} } - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + for appservice in services().appservice.all()? { + if self.appservice_in_room(&pdu.room_id, &appservice)? 
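The loop above walks every local member of the room except the sender, evaluates that user's push rules, and records who should be notified or highlighted before any pushes are queued. A condensed sketch of that control flow, with the rule evaluation abstracted into a closure and a simplified action enum:

pub enum Action {
    Notify,
    Highlight,
}

pub fn collect_notifications<'a>(
    sender: &str,
    room_members: impl Iterator<Item = &'a str>,
    actions_for: impl Fn(&str) -> Vec<Action>,
) -> (Vec<String>, Vec<String>) {
    let mut notifies = Vec::new();
    let mut highlights = Vec::new();

    for user in room_members {
        if user == sender {
            continue; // users are not notified about their own events
        }

        let mut notify = false;
        let mut highlight = false;
        for action in actions_for(user) {
            match action {
                Action::Notify => notify = true,
                Action::Highlight => highlight = true,
            }
        }

        if notify {
            notifies.push(user.to_owned());
        }
        if highlight {
            highlights.push(user.to_owned());
        }
    }

    (notifies, highlights)
}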
{ + services().sending.send_pdu_appservice(&appservice.0, &pdu_id)?; continue; } @@ -388,11 +393,11 @@ impl Service<_> { .get("sender_localpart") .and_then(|string| string.as_str()) .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() + UserId::parse_with_server_name(string, services().globals.server_name()).ok() }) { if state_key_uid == &appservice_uid { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + services().sending.send_pdu_appservice(&appservice.0, &pdu_id)?; continue; } } @@ -431,16 +436,16 @@ impl Service<_> { .map_or(false, |state_key| users.is_match(state_key)) }; let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) + self.room_aliases(&pdu.room_id) .filter_map(|r| r.ok()) .any(|room_alias| aliases.is_match(room_alias.as_str())) }; if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) + || rooms.map_or(false, |rooms| rooms.contains(&pdu.room_id.as_str().into())) || users.iter().any(matching_users) { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + services().sending.send_pdu_appservice(&appservice.0, &pdu_id)?; } } } @@ -464,14 +469,14 @@ impl Service<_> { redacts, } = pdu_builder; - let prev_events: Vec<_> = SERVICE + let prev_events: Vec<_> = services() .rooms .get_pdu_leaves(room_id)? .into_iter() .take(20) .collect(); - let create_event = SERVICE + let create_event = services() .rooms .room_state_get(room_id, &StateEventType::RoomCreate, "")?; @@ -488,7 +493,7 @@ impl Service<_> { // If there was no create event yet, assume we are creating a room with the default // version right now let room_version_id = create_event_content - .map_or(SERVICE.globals.default_room_version(), |create_event| { + .map_or(services().globals.default_room_version(), |create_event| { create_event.room_version }); let room_version = @@ -500,7 +505,7 @@ impl Service<_> { // Our depth is the maximum depth of prev_events + 1 let depth = prev_events .iter() - .filter_map(|event_id| Some(db.rooms.get_pdu(event_id).ok()??.depth)) + .filter_map(|event_id| Some(services().rooms.get_pdu(event_id).ok()??.depth)) .max() .unwrap_or_else(|| uint!(0)) + uint!(1); @@ -525,7 +530,7 @@ impl Service<_> { let pdu = PduEvent { event_id: ruma::event_id!("$thiswillbefilledinlater").into(), room_id: room_id.to_owned(), - sender: sender_user.to_owned(), + sender: sender.to_owned(), origin_server_ts: utils::millis_since_unix_epoch() .try_into() .expect("time is valid"), @@ -577,13 +582,13 @@ impl Service<_> { // Add origin because synapse likes that (and it's required in the spec) pdu_json.insert( "origin".to_owned(), - to_canonical_value(db.globals.server_name()) + to_canonical_value(services().globals.server_name()) .expect("server name is a valid CanonicalJsonValue"), ); match ruma::signatures::hash_and_sign_event( - SERVICE.globals.server_name().as_str(), - SERVICE.globals.keypair(), + services().globals.server_name().as_str(), + services().globals.keypair(), &mut pdu_json, &room_version_id, ) { @@ -616,22 +621,20 @@ impl Service<_> { ); // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; + let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id)?; } /// Creates a new persisted data unit and adds it to a room. This function takes a /// roomid_mutex_state, meaning that only this function is able to mutate the room state. 
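When a new event is built, the current forward extremities (capped at 20) become its `prev_events`, and its depth is one more than the deepest of those, as in the hunk above. The arithmetic in isolation, with a minimal stand-in PDU type:

pub struct Pdu {
    pub depth: u64,
}

/// Depth of a new event, given the events it will reference as prev_events.
pub fn next_depth<'a>(prev_events: impl IntoIterator<Item = &'a Pdu>) -> u64 {
    prev_events
        .into_iter()
        .map(|pdu| pdu.depth)
        .max()
        .unwrap_or(0)
        + 1
}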
- #[tracing::instrument(skip(self, _mutex_lock))] + #[tracing::instrument(skip(self, state_lock))] pub fn build_and_append_pdu( &self, pdu_builder: PduBuilder, sender: &UserId, room_id: &RoomId, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result> { - - let (pdu, pdu_json) = self.create_hash_and_sign_event()?; - + let (pdu, pdu_json) = self.create_hash_and_sign_event(pdu_builder, sender, room_id, &state_lock); // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. @@ -664,9 +667,9 @@ impl Service<_> { } // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above - servers.remove(SERVICE.globals.server_name()); + servers.remove(services().globals.server_name()); - SERVICE.sending.send_pdu(servers.into_iter(), &pdu_id)?; + services().sending.send_pdu(servers.into_iter(), &pdu_id)?; Ok(pdu.event_id) } @@ -684,20 +687,20 @@ impl Service<_> { ) -> Result>> { // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. - SERVICE.rooms.set_event_state( + services().rooms.set_event_state( &pdu.event_id, &pdu.room_id, state_ids_compressed, )?; if soft_fail { - SERVICE.rooms + services().rooms .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - SERVICE.rooms.replace_pdu_leaves(&pdu.room_id, new_room_leaves)?; + services().rooms.replace_pdu_leaves(&pdu.room_id, new_room_leaves)?; return Ok(None); } - let pdu_id = SERVICE.rooms.append_pdu(pdu, pdu_json, new_room_leaves)?; + let pdu_id = services().rooms.append_pdu(pdu, pdu_json, new_room_leaves)?; Ok(Some(pdu_id)) } diff --git a/src/service/rooms/user/data.rs b/src/service/rooms/user/data.rs index 47a44eef..a5657bc1 100644 --- a/src/service/rooms/user/data.rs +++ b/src/service/rooms/user/data.rs @@ -1,3 +1,6 @@ +use ruma::{UserId, RoomId}; +use crate::Result; + pub trait Data { fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; @@ -17,5 +20,5 @@ pub trait Data { fn get_shared_rooms<'a>( &'a self, users: Vec>, - ) -> Result>> + 'a>; + ) -> Result>>>>; } diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs index 664f8a0a..729887c3 100644 --- a/src/service/rooms/user/mod.rs +++ b/src/service/rooms/user/mod.rs @@ -2,13 +2,13 @@ mod data; pub use data::Data; use ruma::{RoomId, UserId}; -use crate::service::*; +use crate::Result; pub struct Service { db: D, } -impl Service<_> { +impl Service { pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { self.db.reset_notification_counts(user_id, room_id) } @@ -27,7 +27,7 @@ impl Service<_> { token: u64, shortstatehash: u64, ) -> Result<()> { - self.db.associate_token_shortstatehash(user_id, room_id) + self.db.associate_token_shortstatehash(room_id, token, shortstatehash) } pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { diff --git a/src/service/sending.rs b/src/service/sending/mod.rs similarity index 88% rename from src/service/sending.rs rename to src/service/sending/mod.rs index 4c830d6f..8ab557f6 100644 --- a/src/service/sending.rs +++ b/src/service/sending/mod.rs @@ -6,7 +6,7 @@ use std::{ }; use crate::{ - appservice_server, database::pusher, server_server, 
utils, Database, Error, PduEvent, Result, + utils, Error, PduEvent, Result, services, api::{server_server, appservice_server}, }; use federation::transactions::send_transaction_message; use futures_util::{stream::FuturesUnordered, StreamExt}; @@ -34,8 +34,6 @@ use tokio::{ }; use tracing::{error, warn}; -use super::abstraction::Tree; - #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum OutgoingKind { Appservice(String), @@ -77,11 +75,8 @@ pub enum SendingEventType { Edu(Vec), } -pub struct Sending { +pub struct Service { /// The state for a given state hash. - pub(super) servername_educount: Arc, // EduCount: Count of last EDU sync - pub(super) servernameevent_data: Arc, // ServernameEvent = (+ / $)SenderKey / ServerName / UserId + PduId / Id (for edus), Data = EDU content - pub(super) servercurrentevent_data: Arc, // ServerCurrentEvents = (+ / $)ServerName / UserId + PduId / Id (for edus), Data = EDU content pub(super) maximum_requests: Arc, pub sender: mpsc::UnboundedSender<(Vec, Vec)>, } @@ -92,10 +87,9 @@ enum TransactionStatus { Retrying(u32), // number of times failed } -impl Sending { +impl Service { pub fn start_handler( &self, - db: Arc>, mut receiver: mpsc::UnboundedReceiver<(Vec, Vec)>, ) { tokio::spawn(async move { @@ -106,9 +100,7 @@ impl Sending { // Retry requests we could not finish yet let mut initial_transactions = HashMap::>::new(); - let guard = db.read().await; - - for (key, outgoing_kind, event) in guard + for (key, outgoing_kind, event) in services() .sending .servercurrentevent_data .iter() @@ -127,22 +119,19 @@ impl Sending { "Dropping some current events: {:?} {:?} {:?}", key, outgoing_kind, event ); - guard.sending.servercurrentevent_data.remove(&key).unwrap(); + services().sending.servercurrentevent_data.remove(&key).unwrap(); continue; } entry.push(event); } - drop(guard); - for (outgoing_kind, events) in initial_transactions { current_transaction_status .insert(outgoing_kind.get_prefix(), TransactionStatus::Running); futures.push(Self::handle_events( outgoing_kind.clone(), events, - Arc::clone(&db), )); } @@ -151,17 +140,15 @@ impl Sending { Some(response) = futures.next() => { match response { Ok(outgoing_kind) => { - let guard = db.read().await; - let prefix = outgoing_kind.get_prefix(); - for (key, _) in guard.sending.servercurrentevent_data + for (key, _) in services().sending.servercurrentevent_data .scan_prefix(prefix.clone()) { - guard.sending.servercurrentevent_data.remove(&key).unwrap(); + services().sending.servercurrentevent_data.remove(&key).unwrap(); } // Find events that have been added since starting the last request - let new_events: Vec<_> = guard.sending.servernameevent_data + let new_events: Vec<_> = services().sending.servernameevent_data .scan_prefix(prefix.clone()) .filter_map(|(k, v)| { Self::parse_servercurrentevent(&k, v).ok().map(|ev| (ev, k)) @@ -175,17 +162,14 @@ impl Sending { // Insert pdus we found for (e, key) in &new_events { let value = if let SendingEventType::Edu(value) = &e.1 { &**value } else { &[] }; - guard.sending.servercurrentevent_data.insert(key, value).unwrap(); - guard.sending.servernameevent_data.remove(key).unwrap(); + services().sending.servercurrentevent_data.insert(key, value).unwrap(); + services().sending.servernameevent_data.remove(key).unwrap(); } - drop(guard); - futures.push( Self::handle_events( outgoing_kind.clone(), new_events.into_iter().map(|(event, _)| event.1).collect(), - Arc::clone(&db), ) ); } else { @@ -206,15 +190,12 @@ impl Sending { }, Some((key, value)) = receiver.recv() => { if let 
Ok((outgoing_kind, event)) = Self::parse_servercurrentevent(&key, value) { - let guard = db.read().await; - if let Ok(Some(events)) = Self::select_events( &outgoing_kind, vec![(event, key)], &mut current_transaction_status, - &guard ) { - futures.push(Self::handle_events(outgoing_kind, events, Arc::clone(&db))); + futures.push(Self::handle_events(outgoing_kind, events)); } } } @@ -223,12 +204,11 @@ impl Sending { }); } - #[tracing::instrument(skip(outgoing_kind, new_events, current_transaction_status, db))] + #[tracing::instrument(skip(outgoing_kind, new_events, current_transaction_status))] fn select_events( outgoing_kind: &OutgoingKind, new_events: Vec<(SendingEventType, Vec)>, // Events we want to send: event and full key current_transaction_status: &mut HashMap, TransactionStatus>, - db: &Database, ) -> Result>> { let mut retry = false; let mut allow = true; @@ -266,7 +246,7 @@ impl Sending { if retry { // We retry the previous transaction - for (key, value) in db.sending.servercurrentevent_data.scan_prefix(prefix) { + for (key, value) in services().sending.servercurrentevent_data.scan_prefix(prefix) { if let Ok((_, e)) = Self::parse_servercurrentevent(&key, value) { events.push(e); } @@ -278,22 +258,22 @@ impl Sending { } else { &[][..] }; - db.sending + services().sending .servercurrentevent_data .insert(&full_key, value)?; // If it was a PDU we have to unqueue it // TODO: don't try to unqueue EDUs - db.sending.servernameevent_data.remove(&full_key)?; + services().sending.servernameevent_data.remove(&full_key)?; events.push(e); } if let OutgoingKind::Normal(server_name) = outgoing_kind { - if let Ok((select_edus, last_count)) = Self::select_edus(db, server_name) { + if let Ok((select_edus, last_count)) = Self::select_edus(server_name) { events.extend(select_edus.into_iter().map(SendingEventType::Edu)); - db.sending + services().sending .servername_educount .insert(server_name.as_bytes(), &last_count.to_be_bytes())?; } @@ -303,10 +283,10 @@ impl Sending { Ok(Some(events)) } - #[tracing::instrument(skip(db, server))] - pub fn select_edus(db: &Database, server: &ServerName) -> Result<(Vec>, u64)> { + #[tracing::instrument(skip(server))] + pub fn select_edus(server: &ServerName) -> Result<(Vec>, u64)> { // u64: count of last edu - let since = db + let since = services() .sending .servername_educount .get(server.as_bytes())? 
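`select_events` above gates work per destination: while a transaction for a server is still running, newly queued events wait, and failed destinations are only retried according to their failure count. A heavily simplified sketch of that bookkeeping (the real logic keeps more state per destination than shown here):

use std::collections::HashMap;

#[derive(Clone, Copy)]
pub enum TransactionStatus {
    Running,
    Retrying(u32), // number of times this destination has failed so far
}

/// Returns true if a new transaction should be started for `destination`.
pub fn should_dispatch(
    status: &mut HashMap<String, TransactionStatus>,
    destination: &str,
) -> bool {
    match status.get(destination) {
        // Something is already in flight; the new events stay queued.
        Some(TransactionStatus::Running) => false,
        // Idle or previously failed: mark as running and send now.
        _ => {
            status.insert(destination.to_owned(), TransactionStatus::Running);
            true
        }
    }
}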
@@ -318,25 +298,25 @@ impl Sending { let mut max_edu_count = since; let mut device_list_changes = HashSet::new(); - 'outer: for room_id in db.rooms.server_rooms(server) { + 'outer: for room_id in services().rooms.server_rooms(server) { let room_id = room_id?; // Look for device list updates in this room device_list_changes.extend( - db.users + services().users .keys_changed(&room_id.to_string(), since, None) .filter_map(|r| r.ok()) - .filter(|user_id| user_id.server_name() == db.globals.server_name()), + .filter(|user_id| user_id.server_name() == services().globals.server_name()), ); // Look for read receipts in this room - for r in db.rooms.edus.readreceipts_since(&room_id, since) { + for r in services().rooms.edus.readreceipts_since(&room_id, since) { let (user_id, count, read_receipt) = r?; if count > max_edu_count { max_edu_count = count; } - if user_id.server_name() != db.globals.server_name() { + if user_id.server_name() != services().globals.server_name() { continue; } @@ -496,14 +476,11 @@ impl Sending { Ok(()) } - #[tracing::instrument(skip(db, events, kind))] + #[tracing::instrument(skip(events, kind))] async fn handle_events( kind: OutgoingKind, events: Vec, - db: Arc>, ) -> Result { - let db = db.read().await; - match &kind { OutgoingKind::Appservice(id) => { let mut pdu_jsons = Vec::new(); @@ -511,7 +488,7 @@ impl Sending { for event in &events { match event { SendingEventType::Pdu(pdu_id) => { - pdu_jsons.push(db.rooms + pdu_jsons.push(services().rooms .get_pdu_from_id(pdu_id) .map_err(|e| (kind.clone(), e))? .ok_or_else(|| { @@ -530,11 +507,10 @@ impl Sending { } } - let permit = db.sending.maximum_requests.acquire().await; + let permit = services().sending.maximum_requests.acquire().await; let response = appservice_server::send_request( - &db.globals, - db.appservice + services().appservice .get_registration(&id) .map_err(|e| (kind.clone(), e))? .ok_or_else(|| { @@ -576,7 +552,7 @@ impl Sending { match event { SendingEventType::Pdu(pdu_id) => { pdus.push( - db.rooms + services().rooms .get_pdu_from_id(pdu_id) .map_err(|e| (kind.clone(), e))? .ok_or_else(|| { @@ -624,7 +600,7 @@ impl Sending { senderkey.push(0xff); senderkey.extend_from_slice(pushkey); - let pusher = match db + let pusher = match services() .pusher .get_pusher(&senderkey) .map_err(|e| (OutgoingKind::Push(user.clone(), pushkey.clone()), e))? @@ -633,7 +609,7 @@ impl Sending { None => continue, }; - let rules_for_user = db + let rules_for_user = services() .account_data .get( None, @@ -644,22 +620,21 @@ impl Sending { .map(|ev: PushRulesEvent| ev.content.global) .unwrap_or_else(|| push::Ruleset::server_default(&userid)); - let unread: UInt = db + let unread: UInt = services() .rooms .notification_count(&userid, &pdu.room_id) .map_err(|e| (kind.clone(), e))? .try_into() .expect("notifiation count can't go that high"); - let permit = db.sending.maximum_requests.acquire().await; + let permit = services().sending.maximum_requests.acquire().await; - let _response = pusher::send_push_notice( + let _response = services().pusher.send_push_notice( &userid, unread, &pusher, rules_for_user, &pdu, - &db, ) .await .map(|_response| kind.clone()) @@ -678,7 +653,7 @@ impl Sending { SendingEventType::Pdu(pdu_id) => { // TODO: check room version and remove event_id if needed let raw = PduEvent::convert_to_outgoing_federation_event( - db.rooms + services().rooms .get_pdu_json_from_id(pdu_id) .map_err(|e| (OutgoingKind::Normal(server.clone()), e))? 
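`select_edus` above bounds its work with the per-server EDU count: it walks the rooms that server shares with us, collects device-list changes and read receipts whose count is newer than the one recorded after the previous send, and remembers the highest count it saw. The bookkeeping reduced to plain data, as an illustrative sketch:

/// `rooms` holds, per room, the (count, serialized EDU) pairs known for it.
/// Returns the EDUs newer than `since` plus the new high-water mark to store.
pub fn select_edus(rooms: &[Vec<(u64, String)>], since: u64) -> (Vec<String>, u64) {
    let mut max_edu_count = since;
    let mut edus = Vec::new();

    for room in rooms {
        for (count, edu) in room {
            if *count <= since {
                continue;
            }
            max_edu_count = max_edu_count.max(*count);
            edus.push(edu.clone());
        }
    }

    (edus, max_edu_count)
}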
.ok_or_else(|| { @@ -700,13 +675,12 @@ impl Sending { } } - let permit = db.sending.maximum_requests.acquire().await; + let permit = services().sending.maximum_requests.acquire().await; let response = server_server::send_request( - &db.globals, &*server, send_transaction_message::v1::Request { - origin: db.globals.server_name(), + origin: services().globals.server_name(), pdus: &pdu_jsons, edus: &edu_jsons, origin_server_ts: MilliSecondsSinceUnixEpoch::now(), @@ -809,10 +783,9 @@ impl Sending { }) } - #[tracing::instrument(skip(self, globals, destination, request))] + #[tracing::instrument(skip(self, destination, request))] pub async fn send_federation_request( &self, - globals: &crate::database::globals::Globals, destination: &ServerName, request: T, ) -> Result @@ -820,16 +793,15 @@ impl Sending { T: Debug, { let permit = self.maximum_requests.acquire().await; - let response = server_server::send_request(globals, destination, request).await; + let response = server_server::send_request(destination, request).await; drop(permit); response } - #[tracing::instrument(skip(self, globals, registration, request))] + #[tracing::instrument(skip(self, registration, request))] pub async fn send_appservice_request( &self, - globals: &crate::database::globals::Globals, registration: serde_yaml::Value, request: T, ) -> Result @@ -837,7 +809,7 @@ impl Sending { T: Debug, { let permit = self.maximum_requests.acquire().await; - let response = appservice_server::send_request(globals, registration, request).await; + let response = appservice_server::send_request(registration, request).await; drop(permit); response diff --git a/src/service/transaction_ids/data.rs b/src/service/transaction_ids/data.rs index c1b47154..6e71dd46 100644 --- a/src/service/transaction_ids/data.rs +++ b/src/service/transaction_ids/data.rs @@ -1,3 +1,6 @@ +use ruma::{DeviceId, UserId, TransactionId}; +use crate::Result; + pub trait Data { fn add_txnid( &self, diff --git a/src/service/transaction_ids/mod.rs b/src/service/transaction_ids/mod.rs index 9b76e13b..ea923722 100644 --- a/src/service/transaction_ids/mod.rs +++ b/src/service/transaction_ids/mod.rs @@ -1,14 +1,14 @@ mod data; pub use data::Data; -use ruma::{UserId, DeviceId, TransactionId}; -use crate::service::*; +use ruma::{UserId, DeviceId, TransactionId}; +use crate::Result; pub struct Service { db: D, } -impl Service<_> { +impl Service { pub fn add_txnid( &self, user_id: &UserId, diff --git a/src/service/uiaa/data.rs b/src/service/uiaa/data.rs index cc943bff..d7fa79d2 100644 --- a/src/service/uiaa/data.rs +++ b/src/service/uiaa/data.rs @@ -1,4 +1,5 @@ use ruma::{api::client::uiaa::UiaaInfo, DeviceId, UserId, signatures::CanonicalJsonValue}; +use crate::Result; pub trait Data { fn set_uiaa_request( diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index 5e1df8f3..ffdbf356 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -1,15 +1,16 @@ mod data; pub use data::Data; -use ruma::{api::client::{uiaa::{UiaaInfo, IncomingAuthData, IncomingPassword, AuthType}, error::ErrorKind}, DeviceId, UserId, signatures::CanonicalJsonValue}; + +use ruma::{api::client::{uiaa::{UiaaInfo, IncomingAuthData, IncomingPassword, AuthType, IncomingUserIdentifier}, error::ErrorKind}, DeviceId, UserId, signatures::CanonicalJsonValue}; use tracing::error; -use crate::{service::*, utils, Error, SERVICE}; +use crate::{Result, utils, Error, services, api::client_server::SESSION_ID_LENGTH}; pub struct Service { db: D, } -impl Service<_> { +impl Service { /// Creates a new Uiaa 
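Every outgoing appservice, push, and federation request above is wrapped in a permit from `maximum_requests`, which caps how many requests are in flight at once. A sketch of that throttle, assuming tokio's `Semaphore` (which is what the `acquire().await` pattern suggests):

use std::sync::Arc;
use tokio::sync::Semaphore;

pub async fn send_limited<F, T>(maximum_requests: &Arc<Semaphore>, request: F) -> T
where
    F: std::future::Future<Output = T>,
{
    // Wait until one of the limited slots is free.
    let permit = maximum_requests
        .acquire()
        .await
        .expect("semaphore is never closed");

    let response = request.await;

    // Free the slot for the next request.
    drop(permit);
    response
}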
session. Make sure the session token is unique. pub fn create( &self, @@ -56,7 +57,7 @@ impl Service<_> { .. }) => { let username = match identifier { - UserIdOrLocalpart(username) => username, + IncomingUserIdentifier::UserIdOrLocalpart(username) => username, _ => { return Err(Error::BadRequest( ErrorKind::Unrecognized, @@ -66,13 +67,13 @@ impl Service<_> { }; let user_id = - UserId::parse_with_server_name(username.clone(), SERVICE.globals.server_name()) + UserId::parse_with_server_name(username.clone(), services().globals.server_name()) .map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "User ID is invalid.") })?; // Check if password is correct - if let Some(hash) = SERVICE.users.password_hash(&user_id)? { + if let Some(hash) = services().users.password_hash(&user_id)? { let hash_matches = argon2::verify_encoded(&hash, password.as_bytes()).unwrap_or(false); diff --git a/src/service/users/data.rs b/src/service/users/data.rs index 327e0c69..3f87589c 100644 --- a/src/service/users/data.rs +++ b/src/service/users/data.rs @@ -1,8 +1,8 @@ use std::collections::BTreeMap; - +use crate::Result; use ruma::{UserId, DeviceId, DeviceKeyAlgorithm, DeviceKeyId, serde::Raw, encryption::{OneTimeKey, DeviceKeys, CrossSigningKey}, UInt, events::AnyToDeviceEvent, api::client::{device::Device, filter::IncomingFilterDefinition}, MxcUri}; -trait Data { +pub trait Data { /// Check if a user has an account on this homeserver. fn exists(&self, user_id: &UserId) -> Result; @@ -16,7 +16,7 @@ trait Data { fn find_from_token(&self, token: &str) -> Result, String)>>; /// Returns an iterator over all users on this homeserver. - fn iter(&self) -> impl Iterator>> + '_; + fn iter(&self) -> Box>>>; /// Returns a list of local users as list of usernames. /// @@ -69,7 +69,7 @@ trait Data { fn all_device_ids<'a>( &'a self, user_id: &UserId, - ) -> impl Iterator>> + 'a; + ) -> Box>>>; /// Replaces the access token of one device. fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()>; @@ -125,7 +125,7 @@ trait Data { user_or_room_id: &str, from: u64, to: Option, - ) -> impl Iterator>> + 'a; + ) -> Box>>>; fn mark_device_key_update( &self, @@ -193,7 +193,7 @@ trait Data { fn all_devices_metadata<'a>( &'a self, user_id: &UserId, - ) -> impl Iterator> + 'a; + ) -> Box>>; /// Creates a new sync filter. Returns the filter id. fn create_filter( diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index bfa4b8e5..dfe6c7fb 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -2,15 +2,15 @@ mod data; use std::{collections::BTreeMap, mem}; pub use data::Data; -use ruma::{UserId, MxcUri, DeviceId, DeviceKeyId, serde::Raw, encryption::{OneTimeKey, CrossSigningKey, DeviceKeys}, DeviceKeyAlgorithm, UInt, events::AnyToDeviceEvent, api::client::{device::Device, filter::IncomingFilterDefinition}}; +use ruma::{UserId, MxcUri, DeviceId, DeviceKeyId, serde::Raw, encryption::{OneTimeKey, CrossSigningKey, DeviceKeys}, DeviceKeyAlgorithm, UInt, events::AnyToDeviceEvent, api::client::{device::Device, filter::IncomingFilterDefinition, error::ErrorKind}, RoomAliasId}; -use crate::{service::*, Error}; +use crate::{Result, Error, services}; pub struct Service { db: D, } -impl Service<_> { +impl Service { /// Check if a user has an account on this homeserver. 
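The password stage of UIAA ultimately reduces to the `argon2::verify_encoded` call visible above (provided by the `rust-argon2` crate): the stored value is an encoded hash, and both a mismatch and a malformed hash count as a failed login.

/// Returns true only if `password` matches the stored encoded argon2 hash.
pub fn password_matches(stored_hash: &str, password: &str) -> bool {
    argon2::verify_encoded(stored_hash, password.as_bytes()).unwrap_or(false)
}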
pub fn exists(&self, user_id: &UserId) -> Result { self.db.exists(user_id) @@ -22,19 +22,19 @@ impl Service<_> { } /// Check if a user is an admin - fn is_admin( + pub fn is_admin( &self, user_id: &UserId, ) -> Result { - let admin_room_alias_id = RoomAliasId::parse(format!("#admins:{}", globals.server_name())) + let admin_room_alias_id = RoomAliasId::parse(format!("#admins:{}", services().globals.server_name())) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; - let admin_room_id = rooms.id_from_alias(&admin_room_alias_id)?.unwrap(); + let admin_room_id = services().rooms.alias.resolve_local_alias(&admin_room_alias_id)?.unwrap(); - rooms.is_joined(user_id, &admin_room_id) + services().rooms.state_cache.is_joined(user_id, &admin_room_id) } /// Create a new user account on this homeserver. - fn create(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { + pub fn create(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { self.db.set_password(user_id, password)?; Ok(()) } From 8708cd3b633d88d260982563f2e2826bc8b12038 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 5 Oct 2022 09:34:25 +0200 Subject: [PATCH 386/445] 431 errors left --- src/api/client_server/media.rs | 2 +- src/api/client_server/membership.rs | 61 ++- src/api/client_server/message.rs | 19 +- src/api/client_server/presence.rs | 7 +- src/api/client_server/profile.rs | 11 +- src/api/client_server/read_marker.rs | 17 +- src/api/client_server/redact.rs | 2 +- src/api/client_server/report.rs | 2 +- src/api/client_server/room.rs | 53 +-- src/api/client_server/search.rs | 5 +- src/api/client_server/state.rs | 18 +- src/api/client_server/sync.rs | 79 ++-- src/api/client_server/typing.rs | 6 +- src/api/client_server/user_directory.rs | 6 +- src/api/client_server/voip.rs | 2 +- src/api/server_server.rs | 379 +++---------------- src/database/key_value/appservice.rs | 6 +- src/database/key_value/rooms/directory.rs | 4 +- src/database/mod.rs | 2 +- src/lib.rs | 4 +- src/service/account_data/mod.rs | 110 +----- src/service/admin/mod.rs | 52 +-- src/service/appservice/data.rs | 2 +- src/service/globals/mod.rs | 3 +- src/service/key_backups/mod.rs | 280 +------------- src/service/media/mod.rs | 8 +- src/service/pusher/mod.rs | 2 + src/service/rooms/auth_chain/mod.rs | 2 +- src/service/rooms/edus/mod.rs | 6 +- src/service/rooms/event_handler/mod.rs | 441 +++++++++++++++++----- src/service/rooms/state/mod.rs | 10 +- src/service/rooms/timeline/mod.rs | 12 +- 32 files changed, 640 insertions(+), 973 deletions(-) diff --git a/src/api/client_server/media.rs b/src/api/client_server/media.rs index d6e8213c..316e284b 100644 --- a/src/api/client_server/media.rs +++ b/src/api/client_server/media.rs @@ -196,7 +196,7 @@ pub async fn get_content_thumbnail_route( .upload_thumbnail( mxc, &None, - &get_thumbnail_response.content_type, + &get_thumbnail_response.content_type.as_deref(), body.width.try_into().expect("all UInts are valid u32s"), body.height.try_into().expect("all UInts are valid u32s"), &get_thumbnail_response.file, diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index d6f820a7..98931f25 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -481,7 +481,7 @@ async fn join_room_by_id_helper( let (make_join_response, remote_server) = make_join_response_and_server?; let room_version = match make_join_response.room_version { - Some(room_version) if 
services().rooms.is_supported_version(&room_version) => room_version, + Some(room_version) if services().rooms.metadata.is_supported_version(&room_version) => room_version, _ => return Err(Error::BadServerResponse("Room version is not supported")), }; @@ -591,7 +591,7 @@ async fn join_room_by_id_helper( Error::BadServerResponse("Invalid PDU in send_join response.") })?; - services().rooms.add_pdu_outlier(&event_id, &value)?; + services().rooms.outlier.add_pdu_outlier(&event_id, &value)?; if let Some(state_key) = &pdu.state_key { let shortstatekey = services().rooms.short.get_or_create_shortstatekey( &pdu.kind.to_string().into(), @@ -621,14 +621,6 @@ async fn join_room_by_id_helper( return Err(Error::BadServerResponse("State contained no create event.")); } - services().rooms.state.force_state( - room_id, - state - .into_iter() - .map(|(k, id)| services().rooms.compress_state_event(k, &id)) - .collect::>()?, - )?; - for result in send_join_response .room_state .auth_chain @@ -640,14 +632,21 @@ async fn join_room_by_id_helper( Err(_) => continue, }; - services().rooms.add_pdu_outlier(&event_id, &value)?; + services().rooms.outlier.add_pdu_outlier(&event_id, &value)?; } + let shortstatehash = services().rooms.state.set_event_state( + event_id, + room_id, + state + .into_iter() + .map(|(k, id)| services().rooms.state_compressor.compress_state_event(k, &id)) + .collect::>()?, + )?; + // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. - let statehashid = services().rooms.append_to_state(&parsed_pdu)?; - - services().rooms.append_pdu( + services().rooms.timeline.append_pdu( &parsed_pdu, join_event, iter::once(&*parsed_pdu.event_id), @@ -655,7 +654,9 @@ async fn join_room_by_id_helper( // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist - services().rooms.set_room_state(room_id, statehashid)?; + services().rooms.state.set_room_state(room_id, shortstatehash)?; + + let statehashid = services().rooms.state.append_to_state(&parsed_pdu)?; } else { let event = RoomMemberEventContent { membership: MembershipState::Join, @@ -668,7 +669,7 @@ async fn join_room_by_id_helper( join_authorized_via_users_server: None, }; - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), @@ -678,7 +679,6 @@ async fn join_room_by_id_helper( }, sender_user, room_id, - services(), &state_lock, )?; } @@ -786,7 +786,7 @@ pub(crate) async fn invite_helper<'a>( unsigned: None, state_key: Some(user_id.to_string()), redacts: None, - }, sender_user, room_id, &state_lock); + }, sender_user, room_id, &state_lock)?; let invite_room_state = services().rooms.state.calculate_invite_state(&pdu)?; @@ -811,7 +811,7 @@ pub(crate) async fn invite_helper<'a>( create_invite::v2::Request { room_id, event_id: expected_event_id, - room_version: &services().state.get_room_version(&room_id)?, + room_version: &services().rooms.state.get_room_version(&room_id)?, event: &PduEvent::convert_to_outgoing_federation_event(pdu_json.clone()), invite_room_state: &invite_room_state, }, @@ -846,7 +846,7 @@ pub(crate) async fn invite_helper<'a>( ) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; - let pdu_id = services().rooms.event_handler.handle_incoming_pdu( + let 
pdu_id: Vec = services().rooms.event_handler.handle_incoming_pdu( &origin, &event_id, room_id, @@ -854,13 +854,7 @@ pub(crate) async fn invite_helper<'a>( true, &pub_key_map, ) - .await - .map_err(|_| { - Error::BadRequest( - ErrorKind::InvalidParam, - "Error while handling incoming PDU.", - ) - })? + .await? .ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Could not accept incoming PDU as timeline event.", @@ -868,6 +862,7 @@ pub(crate) async fn invite_helper<'a>( let servers = services() .rooms + .state_cache .room_servers(room_id) .filter_map(|r| r.ok()) .filter(|server| &**server != services().globals.server_name()); @@ -877,7 +872,7 @@ pub(crate) async fn invite_helper<'a>( return Ok(()); } - if !services().rooms.is_joined(sender_user, &room_id)? { + if !services().rooms.state_cache.is_joined(sender_user, &room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -894,7 +889,7 @@ pub(crate) async fn invite_helper<'a>( ); let state_lock = mutex_state.lock().await; - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { @@ -926,8 +921,9 @@ pub(crate) async fn invite_helper<'a>( pub async fn leave_all_rooms(user_id: &UserId) -> Result<()> { let all_rooms = services() .rooms + .state_cache .rooms_joined(user_id) - .chain(services().rooms.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) + .chain(services().rooms.state_cache.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) .collect::>(); for room_id in all_rooms { @@ -955,7 +951,7 @@ pub async fn leave_room( let last_state = services().rooms.state_cache .invite_state(user_id, room_id)? - .map_or_else(|| services().rooms.left_state(user_id, room_id), |s| Ok(Some(s)))?; + .map_or_else(|| services().rooms.state_cache.left_state(user_id, room_id), |s| Ok(Some(s)))?; // We always drop the invite, we can't rely on other servers services().rooms.state_cache.update_membership( @@ -978,7 +974,7 @@ pub async fn leave_room( let state_lock = mutex_state.lock().await; let mut event: RoomMemberEventContent = serde_json::from_str( - services().rooms.state.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? + services().rooms.state_accessor.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? .ok_or(Error::BadRequest( ErrorKind::BadState, "Cannot leave a room you are not a member of.", @@ -1017,6 +1013,7 @@ async fn remote_leave_room( let invite_state = services() .rooms + .state_cache .invite_state(user_id, room_id)? 
.ok_or(Error::BadRequest( ErrorKind::BadState, diff --git a/src/api/client_server/message.rs b/src/api/client_server/message.rs index 861f9c13..bfdc2fdb 100644 --- a/src/api/client_server/message.rs +++ b/src/api/client_server/message.rs @@ -68,7 +68,7 @@ pub async fn send_message_event_route( let mut unsigned = BTreeMap::new(); unsigned.insert("transaction_id".to_owned(), body.txn_id.to_string().into()); - let event_id = services().rooms.build_and_append_pdu( + let event_id = services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: body.event_type.to_string().into(), content: serde_json::from_str(body.body.body.json().get()) @@ -108,7 +108,7 @@ pub async fn get_message_events_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); - if !services().rooms.is_joined(sender_user, &body.room_id)? { + if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -129,7 +129,7 @@ pub async fn get_message_events_route( let to = body.to.as_ref().map(|t| t.parse()); services().rooms - .lazy_load_confirm_delivery(sender_user, sender_device, &body.room_id, from)?; + .lazy_loading.lazy_load_confirm_delivery(sender_user, sender_device, &body.room_id, from)?; // Use limit or else 10 let limit = body.limit.try_into().map_or(10_usize, |l: u32| l as usize); @@ -144,12 +144,13 @@ pub async fn get_message_events_route( get_message_events::v3::Direction::Forward => { let events_after: Vec<_> = services() .rooms + .timeline .pdus_after(sender_user, &body.room_id, from)? .take(limit) .filter_map(|r| r.ok()) // Filter out buggy events .filter_map(|(pdu_id, pdu)| { services().rooms - .pdu_count(&pdu_id) + .timeline.pdu_count(&pdu_id) .map(|pdu_count| (pdu_count, pdu)) .ok() }) @@ -157,7 +158,7 @@ pub async fn get_message_events_route( .collect(); for (_, event) in &events_after { - if !services().rooms.lazy_load_was_sent_before( + if !services().rooms.lazy_loading.lazy_load_was_sent_before( sender_user, sender_device, &body.room_id, @@ -181,11 +182,13 @@ pub async fn get_message_events_route( get_message_events::v3::Direction::Backward => { let events_before: Vec<_> = services() .rooms + .timeline .pdus_until(sender_user, &body.room_id, from)? .take(limit) .filter_map(|r| r.ok()) // Filter out buggy events .filter_map(|(pdu_id, pdu)| { services().rooms + .timeline .pdu_count(&pdu_id) .map(|pdu_count| (pdu_count, pdu)) .ok() @@ -194,7 +197,7 @@ pub async fn get_message_events_route( .collect(); for (_, event) in &events_before { - if !services().rooms.lazy_load_was_sent_before( + if !services().rooms.lazy_loading.lazy_load_was_sent_before( sender_user, sender_device, &body.room_id, @@ -220,7 +223,7 @@ pub async fn get_message_events_route( resp.state = Vec::new(); for ll_id in &lazy_loaded { if let Some(member_event) = - services().rooms + services().rooms.state_accessor .room_state_get(&body.room_id, &StateEventType::RoomMember, ll_id.as_str())? 
{ resp.state.push(member_event.to_state_event()); @@ -228,7 +231,7 @@ pub async fn get_message_events_route( } if let Some(next_token) = next_token { - services().rooms.lazy_load_mark_sent( + services().rooms.lazy_loading.lazy_load_mark_sent( sender_user, sender_device, &body.room_id, diff --git a/src/api/client_server/presence.rs b/src/api/client_server/presence.rs index bc220b80..6a915e44 100644 --- a/src/api/client_server/presence.rs +++ b/src/api/client_server/presence.rs @@ -10,10 +10,10 @@ pub async fn set_presence_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - for room_id in services().rooms.rooms_joined(sender_user) { + for room_id in services().rooms.state_cache.rooms_joined(sender_user) { let room_id = room_id?; - services().rooms.edus.update_presence( + services().rooms.edus.presence.update_presence( sender_user, &room_id, ruma::events::presence::PresenceEvent { @@ -51,13 +51,14 @@ pub async fn get_presence_route( for room_id in services() .rooms - .get_shared_rooms(vec![sender_user.clone(), body.user_id.clone()])? + .user.get_shared_rooms(vec![sender_user.clone(), body.user_id.clone()])? { let room_id = room_id?; if let Some(presence) = services() .rooms .edus + .presence .get_last_presence_event(sender_user, &room_id)? { presence_event = Some(presence); diff --git a/src/api/client_server/profile.rs b/src/api/client_server/profile.rs index 7a87bcd1..3e1d736f 100644 --- a/src/api/client_server/profile.rs +++ b/src/api/client_server/profile.rs @@ -30,6 +30,7 @@ pub async fn set_displayname_route( // Send a new membership event and presence update into all joined rooms let all_rooms_joined: Vec<_> = services() .rooms + .state_cache .rooms_joined(sender_user) .filter_map(|r| r.ok()) .map(|room_id| { @@ -40,6 +41,7 @@ pub async fn set_displayname_route( displayname: body.displayname.clone(), ..serde_json::from_str( services().rooms + .state_accessor .room_state_get( &room_id, &StateEventType::RoomMember, @@ -80,10 +82,11 @@ pub async fn set_displayname_route( let _ = services() .rooms + .timeline .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock); // Presence update - services().rooms.edus.update_presence( + services().rooms.edus.presence.update_presence( sender_user, &room_id, ruma::events::presence::PresenceEvent { @@ -155,6 +158,7 @@ pub async fn set_avatar_url_route( // Send a new membership event and presence update into all joined rooms let all_joined_rooms: Vec<_> = services() .rooms + .state_cache .rooms_joined(sender_user) .filter_map(|r| r.ok()) .map(|room_id| { @@ -165,6 +169,7 @@ pub async fn set_avatar_url_route( avatar_url: body.avatar_url.clone(), ..serde_json::from_str( services().rooms + .state_accessor .room_state_get( &room_id, &StateEventType::RoomMember, @@ -205,10 +210,11 @@ pub async fn set_avatar_url_route( let _ = services() .rooms + .timeline .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock); // Presence update - services().rooms.edus.update_presence( + services().rooms.edus.presence.update_presence( sender_user, &room_id, ruma::events::presence::PresenceEvent { @@ -226,7 +232,6 @@ pub async fn set_avatar_url_route( }, sender: sender_user.clone(), }, - &services().globals, )?; } diff --git a/src/api/client_server/read_marker.rs b/src/api/client_server/read_marker.rs index 284ae65e..eda57d57 100644 --- a/src/api/client_server/read_marker.rs +++ b/src/api/client_server/read_marker.rs @@ -31,15 +31,15 @@ pub async fn set_read_marker_route( )?; if let Some(event) = 
&body.read_receipt { - services().rooms.edus.private_read_set( + services().rooms.edus.read_receipt.private_read_set( &body.room_id, sender_user, - services().rooms.get_pdu_count(event)?.ok_or(Error::BadRequest( + services().rooms.timeline.get_pdu_count(event)?.ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Event does not exist.", ))?, )?; - services().rooms + services().rooms.user .reset_notification_counts(sender_user, &body.room_id)?; let mut user_receipts = BTreeMap::new(); @@ -56,7 +56,7 @@ pub async fn set_read_marker_route( let mut receipt_content = BTreeMap::new(); receipt_content.insert(event.to_owned(), receipts); - services().rooms.edus.readreceipt_update( + services().rooms.edus.read_receipt.readreceipt_update( sender_user, &body.room_id, ruma::events::receipt::ReceiptEvent { @@ -77,17 +77,18 @@ pub async fn create_receipt_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services().rooms.edus.private_read_set( + services().rooms.edus.read_receipt.private_read_set( &body.room_id, sender_user, services().rooms + .timeline .get_pdu_count(&body.event_id)? .ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Event does not exist.", ))?, )?; - services().rooms + services().rooms.user .reset_notification_counts(sender_user, &body.room_id)?; let mut user_receipts = BTreeMap::new(); @@ -103,7 +104,7 @@ pub async fn create_receipt_route( let mut receipt_content = BTreeMap::new(); receipt_content.insert(body.event_id.to_owned(), receipts); - services().rooms.edus.readreceipt_update( + services().rooms.edus.read_receipt.readreceipt_update( sender_user, &body.room_id, ruma::events::receipt::ReceiptEvent { @@ -112,7 +113,5 @@ pub async fn create_receipt_route( }, )?; - services().flush()?; - Ok(create_receipt::v3::Response {}) } diff --git a/src/api/client_server/redact.rs b/src/api/client_server/redact.rs index d6699bcf..57e442ab 100644 --- a/src/api/client_server/redact.rs +++ b/src/api/client_server/redact.rs @@ -29,7 +29,7 @@ pub async fn redact_event_route( ); let state_lock = mutex_state.lock().await; - let event_id = services().rooms.build_and_append_pdu( + let event_id = services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomRedaction, content: to_raw_value(&RoomRedactionEventContent { diff --git a/src/api/client_server/report.rs b/src/api/client_server/report.rs index 2c2a5493..efcc4348 100644 --- a/src/api/client_server/report.rs +++ b/src/api/client_server/report.rs @@ -14,7 +14,7 @@ pub async fn report_event_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let pdu = match services().rooms.get_pdu(&body.event_id)? { + let pdu = match services().rooms.timeline.get_pdu(&body.event_id)? { Some(pdu) => pdu, _ => { return Err(Error::BadRequest( diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs index f8d06023..a7fa9520 100644 --- a/src/api/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -54,7 +54,7 @@ pub async fn create_room_route( let room_id = RoomId::new(services().globals.server_name()); - services().rooms.get_or_create_shortroomid(&room_id)?; + services().rooms.short.get_or_create_shortroomid(&room_id)?; let mutex_state = Arc::clone( services().globals @@ -162,7 +162,7 @@ pub async fn create_room_route( } // 1. 
The room create event - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomCreate, content: to_raw_value(&content).expect("event is valid, we just created it"), @@ -176,7 +176,7 @@ pub async fn create_room_route( )?; // 2. Let the room creator join - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { @@ -237,7 +237,7 @@ pub async fn create_room_route( } } - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomPowerLevels, content: to_raw_value(&power_levels_content) @@ -253,7 +253,7 @@ pub async fn create_room_route( // 4. Canonical room alias if let Some(room_alias_id) = &alias { - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomCanonicalAlias, content: to_raw_value(&RoomCanonicalAliasEventContent { @@ -274,7 +274,7 @@ pub async fn create_room_route( // 5. Events set by preset // 5.1 Join Rules - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomJoinRules, content: to_raw_value(&RoomJoinRulesEventContent::new(match preset { @@ -293,7 +293,7 @@ pub async fn create_room_route( )?; // 5.2 History Visibility - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomHistoryVisibility, content: to_raw_value(&RoomHistoryVisibilityEventContent::new( @@ -310,7 +310,7 @@ pub async fn create_room_route( )?; // 5.3 Guest Access - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomGuestAccess, content: to_raw_value(&RoomGuestAccessEventContent::new(match preset { @@ -344,12 +344,12 @@ pub async fn create_room_route( } services().rooms - .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock)?; + .timeline.build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock)?; } // 7. Events implied by name and topic if let Some(name) = &body.name { - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomName, content: to_raw_value(&RoomNameEventContent::new(Some(name.clone()))) @@ -365,7 +365,7 @@ pub async fn create_room_route( } if let Some(topic) = &body.topic { - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomTopic, content: to_raw_value(&RoomTopicEventContent { @@ -390,11 +390,11 @@ pub async fn create_room_route( // Homeserver specific stuff if let Some(alias) = alias { - services().rooms.set_alias(&alias, Some(&room_id))?; + services().rooms.alias.set_alias(&alias, &room_id)?; } if body.visibility == room::Visibility::Public { - services().rooms.set_public(&room_id, true)?; + services().rooms.directory.set_public(&room_id)?; } info!("{} created a room", sender_user); @@ -412,7 +412,7 @@ pub async fn get_room_event_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !services().rooms.is_joined(sender_user, &body.room_id)? { + if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? 
{ return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -422,6 +422,7 @@ pub async fn get_room_event_route( Ok(get_room_event::v3::Response { event: services() .rooms + .timeline .get_pdu(&body.event_id)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))? .to_room_event(), @@ -438,7 +439,7 @@ pub async fn get_room_aliases_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !services().rooms.is_joined(sender_user, &body.room_id)? { + if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -448,7 +449,7 @@ pub async fn get_room_aliases_route( Ok(aliases::v3::Response { aliases: services() .rooms - .room_aliases(&body.room_id) + .alias.local_aliases_for_room(&body.room_id) .filter_map(|a| a.ok()) .collect(), }) @@ -479,7 +480,7 @@ pub async fn upgrade_room_route( // Create a replacement room let replacement_room = RoomId::new(services().globals.server_name()); services().rooms - .get_or_create_shortroomid(&replacement_room)?; + .short.get_or_create_shortroomid(&replacement_room)?; let mutex_state = Arc::clone( services().globals @@ -493,7 +494,7 @@ pub async fn upgrade_room_route( // Send a m.room.tombstone event to the old room to indicate that it is not intended to be used any further // Fail if the sender does not have the required permissions - let tombstone_event_id = services().rooms.build_and_append_pdu( + let tombstone_event_id = services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomTombstone, content: to_raw_value(&RoomTombstoneEventContent { @@ -525,6 +526,7 @@ pub async fn upgrade_room_route( // Get the old room creation event let mut create_event_content = serde_json::from_str::( services().rooms + .state_accessor .room_state_get(&body.room_id, &StateEventType::RoomCreate, "")? .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? .content @@ -572,7 +574,7 @@ pub async fn upgrade_room_route( )); } - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomCreate, content: to_raw_value(&create_event_content) @@ -587,7 +589,7 @@ pub async fn upgrade_room_route( )?; // Join the new room - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { @@ -625,12 +627,12 @@ pub async fn upgrade_room_route( // Replicate transferable state events to the new room for event_type in transferable_state_events { - let event_content = match services().rooms.room_state_get(&body.room_id, &event_type, "")? { + let event_content = match services().rooms.state_accessor.room_state_get(&body.room_id, &event_type, "")? { Some(v) => v.content.clone(), None => continue, // Skipping missing events. 
}; - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: event_type.to_string().into(), content: event_content, @@ -645,14 +647,15 @@ pub async fn upgrade_room_route( } // Moves any local aliases to the new room - for alias in services().rooms.room_aliases(&body.room_id).filter_map(|r| r.ok()) { + for alias in services().rooms.alias.local_aliases_for_room(&body.room_id).filter_map(|r| r.ok()) { services().rooms - .set_alias(&alias, Some(&replacement_room))?; + .alias.set_alias(&alias, &replacement_room)?; } // Get the old room power levels let mut power_levels_event_content: RoomPowerLevelsEventContent = serde_json::from_str( services().rooms + .state_accessor .room_state_get(&body.room_id, &StateEventType::RoomPowerLevels, "")? .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? .content @@ -666,7 +669,7 @@ pub async fn upgrade_room_route( power_levels_event_content.invite = new_level; // Modify the power levels in the old room to prevent sending of events and inviting new users - let _ = services().rooms.build_and_append_pdu( + let _ = services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomPowerLevels, content: to_raw_value(&power_levels_event_content) diff --git a/src/api/client_server/search.rs b/src/api/client_server/search.rs index b7eecd5a..f648649b 100644 --- a/src/api/client_server/search.rs +++ b/src/api/client_server/search.rs @@ -24,6 +24,7 @@ pub async fn search_events_route( let room_ids = filter.rooms.clone().unwrap_or_else(|| { services().rooms + .state_cache .rooms_joined(sender_user) .filter_map(|r| r.ok()) .collect() @@ -34,7 +35,7 @@ pub async fn search_events_route( let mut searches = Vec::new(); for room_id in room_ids { - if !services().rooms.is_joined(sender_user, &room_id)? { + if !services().rooms.state_cache.is_joined(sender_user, &room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -43,6 +44,7 @@ pub async fn search_events_route( if let Some(search) = services() .rooms + .search .search_pdus(&room_id, &search_criteria.search_term)? { searches.push(search.0.peekable()); @@ -86,6 +88,7 @@ pub async fn search_events_route( rank: None, result: services() .rooms + .timeline .get_pdu_from_id(result)? .map(|pdu| pdu.to_room_event()), }) diff --git a/src/api/client_server/state.rs b/src/api/client_server/state.rs index b2dfe2a7..ece74536 100644 --- a/src/api/client_server/state.rs +++ b/src/api/client_server/state.rs @@ -90,9 +90,10 @@ pub async fn get_state_events_route( #[allow(clippy::blocks_in_if_conditions)] // Users not in the room should not be able to access the state unless history_visibility is // WorldReadable - if !services().rooms.is_joined(sender_user, &body.room_id)? + if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? && !matches!( services().rooms + .state_accessor .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? .map(|event| { serde_json::from_str(event.content.get()) @@ -115,6 +116,7 @@ pub async fn get_state_events_route( Ok(get_state_events::v3::Response { room_state: services() .rooms + .state_accessor .room_state_full(&body.room_id) .await? 
.values() @@ -136,10 +138,10 @@ pub async fn get_state_events_for_key_route( #[allow(clippy::blocks_in_if_conditions)] // Users not in the room should not be able to access the state unless history_visibility is // WorldReadable - if !services().rooms.is_joined(sender_user, &body.room_id)? + if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? && !matches!( services().rooms - .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? + .state_accessor.room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? .map(|event| { serde_json::from_str(event.content.get()) .map(|e: RoomHistoryVisibilityEventContent| e.history_visibility) @@ -160,7 +162,7 @@ pub async fn get_state_events_for_key_route( let event = services() .rooms - .room_state_get(&body.room_id, &body.event_type, &body.state_key)? + .state_accessor.room_state_get(&body.room_id, &body.event_type, &body.state_key)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "State event not found.", @@ -185,10 +187,10 @@ pub async fn get_state_events_for_empty_key_route( #[allow(clippy::blocks_in_if_conditions)] // Users not in the room should not be able to access the state unless history_visibility is // WorldReadable - if !services().rooms.is_joined(sender_user, &body.room_id)? + if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? && !matches!( services().rooms - .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? + .state_accessor.room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? .map(|event| { serde_json::from_str(event.content.get()) .map(|e: RoomHistoryVisibilityEventContent| e.history_visibility) @@ -209,7 +211,7 @@ pub async fn get_state_events_for_empty_key_route( let event = services() .rooms - .room_state_get(&body.room_id, &body.event_type, "")? + .state_accessor.room_state_get(&body.room_id, &body.event_type, "")? .ok_or(Error::BadRequest( ErrorKind::NotFound, "State event not found.", @@ -269,7 +271,7 @@ async fn send_state_event_for_key_helper( ); let state_lock = mutex_state.lock().await; - let event_id = services().rooms.build_and_append_pdu( + let event_id = services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: event_type.to_string().into(), content: serde_json::from_str(json.json().get()).expect("content is valid json"), diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index cc4ebf6e..e38ea600 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -172,7 +172,7 @@ async fn sync_helper( }; // TODO: match body.set_presence { - services().rooms.edus.ping_presence(&sender_user)?; + services().rooms.edus.presence.ping_presence(&sender_user)?; // Setup watchers, so if there's no response, we can wait for them let watcher = services().watch(&sender_user, &sender_device); @@ -216,7 +216,7 @@ async fn sync_helper( .filter_map(|r| r.ok()), ); - let all_joined_rooms = services().rooms.rooms_joined(&sender_user).collect::>(); + let all_joined_rooms = services().rooms.state_cache.rooms_joined(&sender_user).collect::>(); for room_id in all_joined_rooms { let room_id = room_id?; @@ -237,9 +237,10 @@ async fn sync_helper( let timeline_pdus; let limited; - if services().rooms.last_timeline_count(&sender_user, &room_id)? > since { + if services().rooms.timeline.last_timeline_count(&sender_user, &room_id)? > since { let mut non_timeline_pdus = services() .rooms + .timeline .pdus_until(&sender_user, &room_id, u64::MAX)? 
.filter_map(|r| { // Filter out buggy events @@ -250,6 +251,7 @@ async fn sync_helper( }) .take_while(|(pduid, _)| { services().rooms + .timeline .pdu_count(pduid) .map_or(false, |count| count > since) }); @@ -275,6 +277,7 @@ async fn sync_helper( || services() .rooms .edus + .read_receipt .last_privateread_update(&sender_user, &room_id)? > since; @@ -283,24 +286,24 @@ async fn sync_helper( timeline_users.insert(event.sender.as_str().to_owned()); } - services().rooms + services().rooms.lazy_loading .lazy_load_confirm_delivery(&sender_user, &sender_device, &room_id, since)?; // Database queries: - let current_shortstatehash = if let Some(s) = services().rooms.current_shortstatehash(&room_id)? { + let current_shortstatehash = if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? { s } else { error!("Room {} has no state", room_id); continue; }; - let since_shortstatehash = services().rooms.get_token_shortstatehash(&room_id, since)?; + let since_shortstatehash = services().rooms.user.get_token_shortstatehash(&room_id, since)?; // Calculates joined_member_count, invited_member_count and heroes let calculate_counts = || { - let joined_member_count = services().rooms.room_joined_count(&room_id)?.unwrap_or(0); - let invited_member_count = services().rooms.room_invited_count(&room_id)?.unwrap_or(0); + let joined_member_count = services().rooms.state_cache.room_joined_count(&room_id)?.unwrap_or(0); + let invited_member_count = services().rooms.state_cache.room_invited_count(&room_id)?.unwrap_or(0); // Recalculate heroes (first 5 members) let mut heroes = Vec::new(); @@ -311,7 +314,7 @@ async fn sync_helper( for hero in services() .rooms - .all_pdus(&sender_user, &room_id)? + .timeline.all_pdus(&sender_user, &room_id)? .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus .filter(|(_, pdu)| pdu.kind == RoomEventType::RoomMember) .map(|(_, pdu)| { @@ -329,8 +332,8 @@ async fn sync_helper( if matches!( content.membership, MembershipState::Join | MembershipState::Invite - ) && (services().rooms.is_joined(&user_id, &room_id)? - || services().rooms.is_invited(&user_id, &room_id)?) + ) && (services().rooms.state_cache.is_joined(&user_id, &room_id)? + || services().rooms.state_cache.is_invited(&user_id, &room_id)?) { Ok::<_, Error>(Some(state_key.clone())) } else { @@ -371,17 +374,17 @@ async fn sync_helper( let (joined_member_count, invited_member_count, heroes) = calculate_counts()?; - let current_state_ids = services().rooms.state_full_ids(current_shortstatehash).await?; + let current_state_ids = services().rooms.state_accessor.state_full_ids(current_shortstatehash).await?; let mut state_events = Vec::new(); let mut lazy_loaded = HashSet::new(); let mut i = 0; for (shortstatekey, id) in current_state_ids { - let (event_type, state_key) = services().rooms.get_statekey_from_short(shortstatekey)?; + let (event_type, state_key) = services().rooms.short.get_statekey_from_short(shortstatekey)?; if event_type != StateEventType::RoomMember { - let pdu = match services().rooms.get_pdu(&id)? { + let pdu = match services().rooms.timeline.get_pdu(&id)? { Some(pdu) => pdu, None => { error!("Pdu in state not found: {}", id); @@ -398,7 +401,7 @@ async fn sync_helper( || body.full_state || timeline_users.contains(&state_key) { - let pdu = match services().rooms.get_pdu(&id)? { + let pdu = match services().rooms.timeline.get_pdu(&id)? 
{ Some(pdu) => pdu, None => { error!("Pdu in state not found: {}", id); @@ -420,12 +423,12 @@ async fn sync_helper( } // Reset lazy loading because this is an initial sync - services().rooms + services().rooms.lazy_loading .lazy_load_reset(&sender_user, &sender_device, &room_id)?; // The state_events above should contain all timeline_users, let's mark them as lazy // loaded. - services().rooms.lazy_load_mark_sent( + services().rooms.lazy_loading.lazy_load_mark_sent( &sender_user, &sender_device, &room_id, @@ -449,6 +452,7 @@ async fn sync_helper( let since_sender_member: Option = services() .rooms + .state_accessor .state_get( since_shortstatehash, &StateEventType::RoomMember, @@ -467,12 +471,12 @@ async fn sync_helper( let mut lazy_loaded = HashSet::new(); if since_shortstatehash != current_shortstatehash { - let current_state_ids = services().rooms.state_full_ids(current_shortstatehash).await?; - let since_state_ids = services().rooms.state_full_ids(since_shortstatehash).await?; + let current_state_ids = services().rooms.state_accessor.state_full_ids(current_shortstatehash).await?; + let since_state_ids = services().rooms.state_accessor.state_full_ids(since_shortstatehash).await?; for (key, id) in current_state_ids { if body.full_state || since_state_ids.get(&key) != Some(&id) { - let pdu = match services().rooms.get_pdu(&id)? { + let pdu = match services().rooms.timeline.get_pdu(&id)? { Some(pdu) => pdu, None => { error!("Pdu in state not found: {}", id); @@ -505,14 +509,14 @@ async fn sync_helper( continue; } - if !services().rooms.lazy_load_was_sent_before( + if !services().rooms.lazy_loading.lazy_load_was_sent_before( &sender_user, &sender_device, &room_id, &event.sender, )? || lazy_load_send_redundant { - if let Some(member_event) = services().rooms.room_state_get( + if let Some(member_event) = services().rooms.state_accessor.room_state_get( &room_id, &StateEventType::RoomMember, event.sender.as_str(), @@ -523,7 +527,7 @@ async fn sync_helper( } } - services().rooms.lazy_load_mark_sent( + services().rooms.lazy_loading.lazy_load_mark_sent( &sender_user, &sender_device, &room_id, @@ -533,11 +537,12 @@ async fn sync_helper( let encrypted_room = services() .rooms - .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")? + .state_accessor.state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")? .is_some(); let since_encryption = services().rooms + .state_accessor .state_get(since_shortstatehash, &StateEventType::RoomEncryption, "")?; // Calculations: @@ -588,6 +593,7 @@ async fn sync_helper( // If the user is in a new encrypted room, give them all joined users device_list_updates.extend( services().rooms + .state_cache .room_members(&room_id) .flatten() .filter(|user_id| { @@ -627,6 +633,7 @@ async fn sync_helper( let notification_count = if send_notification_counts { Some( services().rooms + .user .notification_count(&sender_user, &room_id)? .try_into() .expect("notification count can't go that high"), @@ -638,6 +645,7 @@ async fn sync_helper( let highlight_count = if send_notification_counts { Some( services().rooms + .user .highlight_count(&sender_user, &room_id)? 
.try_into() .expect("highlight count can't go that high"), @@ -649,7 +657,7 @@ async fn sync_helper( let prev_batch = timeline_pdus .first() .map_or(Ok::<_, Error>(None), |(pdu_id, _)| { - Ok(Some(services().rooms.pdu_count(pdu_id)?.to_string())) + Ok(Some(services().rooms.timeline.pdu_count(pdu_id)?.to_string())) })?; let room_events: Vec<_> = timeline_pdus @@ -660,15 +668,16 @@ async fn sync_helper( let mut edus: Vec<_> = services() .rooms .edus + .read_receipt .readreceipts_since(&room_id, since) .filter_map(|r| r.ok()) // Filter out buggy events .map(|(_, _, v)| v) .collect(); - if services().rooms.edus.last_typing_update(&room_id, &services().globals)? > since { + if services().rooms.edus.typing.last_typing_update(&room_id)? > since { edus.push( serde_json::from_str( - &serde_json::to_string(&services().rooms.edus.typings_all(&room_id)?) + &serde_json::to_string(&services().rooms.edus.typing.typings_all(&room_id)?) .expect("event is valid, we just created it"), ) .expect("event is valid, we just created it"), @@ -676,7 +685,7 @@ async fn sync_helper( } // Save the state after this sync so we can send the correct state diff next sync - services().rooms + services().rooms.user .associate_token_shortstatehash(&room_id, next_batch, current_shortstatehash)?; let joined_room = JoinedRoom { @@ -723,6 +732,7 @@ async fn sync_helper( for (user_id, presence) in services().rooms .edus + .presence .presence_since(&room_id, since)? { match presence_updates.entry(user_id) { @@ -755,7 +765,7 @@ async fn sync_helper( } let mut left_rooms = BTreeMap::new(); - let all_left_rooms: Vec<_> = services().rooms.rooms_left(&sender_user).collect(); + let all_left_rooms: Vec<_> = services().rooms.state_cache.rooms_left(&sender_user).collect(); for result in all_left_rooms { let (room_id, left_state_events) = result?; @@ -773,7 +783,7 @@ async fn sync_helper( drop(insert_lock); } - let left_count = services().rooms.get_left_count(&room_id, &sender_user)?; + let left_count = services().rooms.state_cache.get_left_count(&room_id, &sender_user)?; // Left before last sync if Some(since) >= left_count { @@ -797,7 +807,7 @@ async fn sync_helper( } let mut invited_rooms = BTreeMap::new(); - let all_invited_rooms: Vec<_> = services().rooms.rooms_invited(&sender_user).collect(); + let all_invited_rooms: Vec<_> = services().rooms.state_cache.rooms_invited(&sender_user).collect(); for result in all_invited_rooms { let (room_id, invite_state_events) = result?; @@ -815,7 +825,7 @@ async fn sync_helper( drop(insert_lock); } - let invite_count = services().rooms.get_invite_count(&room_id, &sender_user)?; + let invite_count = services().rooms.state_cache.get_invite_count(&room_id, &sender_user)?; // Invited before last sync if Some(since) >= invite_count { @@ -835,12 +845,13 @@ async fn sync_helper( for user_id in left_encrypted_users { let still_share_encrypted_room = services() .rooms + .user .get_shared_rooms(vec![sender_user.clone(), user_id.clone()])? .filter_map(|r| r.ok()) .filter_map(|other_room_id| { Some( services().rooms - .room_state_get(&other_room_id, &StateEventType::RoomEncryption, "") + .state_accessor.room_state_get(&other_room_id, &StateEventType::RoomEncryption, "") .ok()? .is_some(), ) @@ -925,12 +936,14 @@ fn share_encrypted_room( ) -> Result { Ok(services() .rooms + .user .get_shared_rooms(vec![sender_user.to_owned(), user_id.to_owned()])? 
.filter_map(|r| r.ok()) .filter(|room_id| room_id != ignore_room) .filter_map(|other_room_id| { Some( services().rooms + .state_accessor .room_state_get(&other_room_id, &StateEventType::RoomEncryption, "") .ok()? .is_some(), diff --git a/src/api/client_server/typing.rs b/src/api/client_server/typing.rs index afd5d6b3..abb669b1 100644 --- a/src/api/client_server/typing.rs +++ b/src/api/client_server/typing.rs @@ -11,7 +11,7 @@ pub async fn create_typing_event_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !services().rooms.is_joined(sender_user, &body.room_id)? { + if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "You are not in this room.", @@ -19,14 +19,14 @@ pub async fn create_typing_event_route( } if let Typing::Yes(duration) = body.state { - services().rooms.edus.typing_add( + services().rooms.edus.typing.typing_add( sender_user, &body.room_id, duration.as_millis() as u64 + utils::millis_since_unix_epoch(), )?; } else { services().rooms - .edus + .edus.typing .typing_remove(sender_user, &body.room_id)?; } diff --git a/src/api/client_server/user_directory.rs b/src/api/client_server/user_directory.rs index 60b4e2fa..c94a283e 100644 --- a/src/api/client_server/user_directory.rs +++ b/src/api/client_server/user_directory.rs @@ -50,11 +50,11 @@ pub async fn search_users_route( let user_is_in_public_rooms = services().rooms - .rooms_joined(&user_id) + .state_cache.rooms_joined(&user_id) .filter_map(|r| r.ok()) .any(|room| { services().rooms - .room_state_get(&room, &StateEventType::RoomJoinRules, "") + .state_accessor.room_state_get(&room, &StateEventType::RoomJoinRules, "") .map_or(false, |event| { event.map_or(false, |event| { serde_json::from_str(event.content.get()) @@ -71,7 +71,7 @@ pub async fn search_users_route( let user_is_in_shared_rooms = services() .rooms - .get_shared_rooms(vec![sender_user.clone(), user_id.clone()]) + .user.get_shared_rooms(vec![sender_user.clone(), user_id.clone()]) .ok()? 
.next() .is_some(); diff --git a/src/api/client_server/voip.rs b/src/api/client_server/voip.rs index 2a804f97..9917979c 100644 --- a/src/api/client_server/voip.rs +++ b/src/api/client_server/voip.rs @@ -14,7 +14,7 @@ pub async fn turn_server_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let turn_secret = services().globals.turn_secret(); + let turn_secret = services().globals.turn_secret().clone(); let (username, password) = if !turn_secret.is_empty() { let expiry = SecondsSinceUnixEpoch::from_system_time( diff --git a/src/api/server_server.rs b/src/api/server_server.rs index bacc1ac7..9aa2beb9 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -669,7 +669,7 @@ pub async fn send_transaction_message_route( } }; - acl_check(&sender_servername, &room_id)?; + services().rooms.event_handler.acl_check(&sender_servername, &room_id)?; let mutex = Arc::clone( services().globals @@ -727,7 +727,7 @@ pub async fn send_transaction_message_route( .event_ids .iter() .filter_map(|id| { - services().rooms.get_pdu_count(id).ok().flatten().map(|r| (id, r)) + services().rooms.timeline.get_pdu_count(id).ok().flatten().map(|r| (id, r)) }) .max_by_key(|(_, count)| *count) { @@ -744,7 +744,7 @@ pub async fn send_transaction_message_route( content: ReceiptEventContent(receipt_content), room_id: room_id.clone(), }; - services().rooms.edus.readreceipt_update( + services().rooms.edus.read_receipt.readreceipt_update( &user_id, &room_id, event, @@ -757,15 +757,15 @@ pub async fn send_transaction_message_route( } } Edu::Typing(typing) => { - if services().rooms.is_joined(&typing.user_id, &typing.room_id)? { + if services().rooms.state_cache.is_joined(&typing.user_id, &typing.room_id)? { if typing.typing { - services().rooms.edus.typing_add( + services().rooms.edus.typing.typing_add( &typing.user_id, &typing.room_id, 3000 + utils::millis_since_unix_epoch(), )?; } else { - services().rooms.edus.typing_remove( + services().rooms.edus.typing.typing_remove( &typing.user_id, &typing.room_id, )?; @@ -1031,7 +1031,7 @@ pub(crate) async fn get_auth_chain<'a>( let mut i = 0; for id in starting_events { - let short = services().rooms.get_or_create_shorteventid(&id)?; + let short = services().rooms.short.get_or_create_shorteventid(&id)?; let bucket_id = (short % NUM_BUCKETS as u64) as usize; buckets[bucket_id].insert((short, id.clone())); i += 1; @@ -1050,7 +1050,7 @@ pub(crate) async fn get_auth_chain<'a>( } let chunk_key: Vec = chunk.iter().map(|(short, _)| short).copied().collect(); - if let Some(cached) = services().rooms.get_auth_chain_from_cache(&chunk_key)? { + if let Some(cached) = services().rooms.auth_chain.get_auth_chain_from_cache(&chunk_key)? { hits += 1; full_auth_chain.extend(cached.iter().copied()); continue; @@ -1062,13 +1062,14 @@ pub(crate) async fn get_auth_chain<'a>( let mut misses2 = 0; let mut i = 0; for (sevent_id, event_id) in chunk { - if let Some(cached) = services().rooms.get_auth_chain_from_cache(&[sevent_id])? { + if let Some(cached) = services().rooms.auth_chain.get_auth_chain_from_cache(&[sevent_id])? 
{ hits2 += 1; chunk_cache.extend(cached.iter().copied()); } else { misses2 += 1; let auth_chain = Arc::new(get_auth_chain_inner(room_id, &event_id)?); services().rooms + .auth_chain .cache_auth_chain(vec![sevent_id], Arc::clone(&auth_chain))?; println!( "cache missed event {} with auth chain len {}", @@ -1091,7 +1092,7 @@ pub(crate) async fn get_auth_chain<'a>( ); let chunk_cache = Arc::new(chunk_cache); services().rooms - .cache_auth_chain(chunk_key, Arc::clone(&chunk_cache))?; + .auth_chain.cache_auth_chain(chunk_key, Arc::clone(&chunk_cache))?; full_auth_chain.extend(chunk_cache.iter()); } @@ -1104,7 +1105,7 @@ pub(crate) async fn get_auth_chain<'a>( Ok(full_auth_chain .into_iter() - .filter_map(move |sid| services().rooms.get_eventid_from_short(sid).ok())) + .filter_map(move |sid| services().rooms.short.get_eventid_from_short(sid).ok())) } #[tracing::instrument(skip(event_id))] @@ -1116,14 +1117,14 @@ fn get_auth_chain_inner( let mut found = HashSet::new(); while let Some(event_id) = todo.pop() { - match services().rooms.get_pdu(&event_id) { + match services().rooms.timeline.get_pdu(&event_id) { Ok(Some(pdu)) => { if pdu.room_id != room_id { return Err(Error::BadRequest(ErrorKind::Forbidden, "Evil event in db")); } for auth_event in &pdu.auth_events { let sauthevent = services() - .rooms + .rooms.short .get_or_create_shorteventid(auth_event)?; if !found.contains(&sauthevent) { @@ -1162,7 +1163,7 @@ pub async fn get_event_route( .expect("server is authenticated"); let event = services() - .rooms + .rooms.timeline .get_pdu_json(&body.event_id)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; @@ -1174,7 +1175,7 @@ pub async fn get_event_route( let room_id = <&RoomId>::try_from(room_id_str) .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - if !services().rooms.server_in_room(sender_servername, room_id)? { + if !services().rooms.state_cache.server_in_room(sender_servername, room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "Server is not in room", @@ -1203,21 +1204,21 @@ pub async fn get_missing_events_route( .as_ref() .expect("server is authenticated"); - if !services().rooms.server_in_room(sender_servername, &body.room_id)? { + if !services().rooms.state_cache.server_in_room(sender_servername, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "Server is not in room", )); } - acl_check(sender_servername, &body.room_id)?; + services().rooms.event_handler.acl_check(&sender_servername, &body.room_id)?; let mut queued_events = body.latest_events.clone(); let mut events = Vec::new(); let mut i = 0; while i < queued_events.len() && events.len() < u64::from(body.limit) as usize { - if let Some(pdu) = services().rooms.get_pdu_json(&queued_events[i])? { + if let Some(pdu) = services().rooms.timeline.get_pdu_json(&queued_events[i])? { let room_id_str = pdu .get("room_id") .and_then(|val| val.as_str()) @@ -1275,17 +1276,17 @@ pub async fn get_event_authorization_route( .as_ref() .expect("server is authenticated"); - if !services().rooms.server_in_room(sender_servername, &body.room_id)? { + if !services().rooms.state_cache.server_in_room(sender_servername, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "Server is not in room.", )); } - acl_check(sender_servername, &body.room_id)?; + services().rooms.event_handler.acl_check(&sender_servername, &body.room_id)?; let event = services() - .rooms + .rooms.timeline .get_pdu_json(&body.event_id)? 
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; @@ -1301,7 +1302,7 @@ pub async fn get_event_authorization_route( Ok(get_event_authorization::v1::Response { auth_chain: auth_chain_ids - .filter_map(|id| services().rooms.get_pdu_json(&id).ok()?) + .filter_map(|id| services().rooms.timeline.get_pdu_json(&id).ok()?) .map(PduEvent::convert_to_outgoing_federation_event) .collect(), }) @@ -1322,17 +1323,17 @@ pub async fn get_room_state_route( .as_ref() .expect("server is authenticated"); - if !services().rooms.server_in_room(sender_servername, &body.room_id)? { + if !services().rooms.state_cache.server_in_room(sender_servername, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "Server is not in room.", )); } - acl_check(sender_servername, &body.room_id)?; + services().rooms.event_handler.acl_check(&sender_servername, &body.room_id)?; let shortstatehash = services() - .rooms + .rooms.state_accessor .pdu_shortstatehash(&body.event_id)? .ok_or(Error::BadRequest( ErrorKind::NotFound, @@ -1340,13 +1341,13 @@ pub async fn get_room_state_route( ))?; let pdus = services() - .rooms + .rooms.state_accessor .state_full_ids(shortstatehash) .await? .into_iter() .map(|(_, id)| { PduEvent::convert_to_outgoing_federation_event( - services().rooms.get_pdu_json(&id).unwrap().unwrap(), + services().rooms.timeline.get_pdu_json(&id).unwrap().unwrap(), ) }) .collect(); @@ -1357,7 +1358,7 @@ pub async fn get_room_state_route( Ok(get_room_state::v1::Response { auth_chain: auth_chain_ids .map(|id| { - services().rooms.get_pdu_json(&id).map(|maybe_json| { + services().rooms.timeline.get_pdu_json(&id).map(|maybe_json| { PduEvent::convert_to_outgoing_federation_event(maybe_json.unwrap()) }) }) @@ -1382,17 +1383,17 @@ pub async fn get_room_state_ids_route( .as_ref() .expect("server is authenticated"); - if !services().rooms.server_in_room(sender_servername, &body.room_id)? { + if !services().rooms.state_cache.server_in_room(sender_servername, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "Server is not in room.", )); } - acl_check(sender_servername, &body.room_id)?; + services().rooms.event_handler.acl_check(&sender_servername, &body.room_id)?; let shortstatehash = services() - .rooms + .rooms.state_accessor .pdu_shortstatehash(&body.event_id)? .ok_or(Error::BadRequest( ErrorKind::NotFound, @@ -1400,7 +1401,7 @@ pub async fn get_room_state_ids_route( ))?; let pdu_ids = services() - .rooms + .rooms.state_accessor .state_full_ids(shortstatehash) .await? .into_iter() @@ -1426,7 +1427,7 @@ pub async fn create_join_event_template_route( return Err(Error::bad_config("Federation is disabled.")); } - if !services().rooms.exists(&body.room_id)? { + if !services().rooms.metadata.exists(&body.room_id)? 
{ return Err(Error::BadRequest( ErrorKind::NotFound, "Room is unknown to this server.", @@ -1438,7 +1439,7 @@ pub async fn create_join_event_template_route( .as_ref() .expect("server is authenticated"); - acl_check(sender_servername, &body.room_id)?; + services().rooms.event_handler.acl_check(&sender_servername, &body.room_id)?; let mutex_state = Arc::clone( services().globals @@ -1452,7 +1453,7 @@ pub async fn create_join_event_template_route( // TODO: Conduit does not implement restricted join rules yet, we always reject let join_rules_event = - services().rooms + services().rooms.state_accessor .room_state_get(&body.room_id, &StateEventType::RoomJoinRules, "")?; let join_rules_event_content: Option = join_rules_event @@ -1477,8 +1478,8 @@ pub async fn create_join_event_template_route( } } - let room_version_id = services().rooms.state.get_room_version(&body.room_id); - if !body.ver.contains(room_version_id) { + let room_version_id = services().rooms.state.get_room_version(&body.room_id)?; + if !body.ver.contains(&room_version_id) { return Err(Error::BadRequest( ErrorKind::IncompatibleRoomVersion { room_version: room_version_id, @@ -1505,7 +1506,7 @@ pub async fn create_join_event_template_route( unsigned: None, state_key: Some(body.user_id.to_string()), redacts: None, - }, &body.user_id, &body.room_id, &state_lock); + }, &body.user_id, &body.room_id, &state_lock)?; drop(state_lock); @@ -1524,18 +1525,18 @@ async fn create_join_event( return Err(Error::bad_config("Federation is disabled.")); } - if !services().rooms.exists(room_id)? { + if !services().rooms.metadata.exists(room_id)? { return Err(Error::BadRequest( ErrorKind::NotFound, "Room is unknown to this server.", )); } - acl_check(sender_servername, room_id)?; + services().rooms.event_handler.acl_check(&sender_servername, room_id)?; // TODO: Conduit does not implement restricted join rules yet, we always reject let join_rules_event = services() - .rooms + .rooms.state_accessor .room_state_get(room_id, &StateEventType::RoomJoinRules, "")?; let join_rules_event_content: Option = join_rules_event @@ -1562,8 +1563,8 @@ async fn create_join_event( // We need to return the state prior to joining, let's keep a reference to that here let shortstatehash = services() - .rooms - .current_shortstatehash(room_id)? + .rooms.state + .get_room_shortstatehash(room_id)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "Pdu state not found.", @@ -1602,22 +1603,15 @@ async fn create_join_event( .or_default(), ); let mutex_lock = mutex.lock().await; - let pdu_id = services().rooms.event_handler.handle_incoming_pdu(&origin, &event_id, room_id, value, true, &pub_key_map) - .await - .map_err(|e| { - warn!("Error while handling incoming send join PDU: {}", e); - Error::BadRequest( - ErrorKind::InvalidParam, - "Error while handling incoming PDU.", - ) - })? + let pdu_id: Vec = services().rooms.event_handler.handle_incoming_pdu(&origin, &event_id, room_id, value, true, &pub_key_map) + .await? 
.ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Could not accept incoming PDU as timeline event.", ))?; drop(mutex_lock); - let state_ids = services().rooms.state_full_ids(shortstatehash).await?; + let state_ids = services().rooms.state_accessor.state_full_ids(shortstatehash).await?; let auth_chain_ids = get_auth_chain( room_id, state_ids.iter().map(|(_, id)| id.clone()).collect(), @@ -1626,6 +1620,7 @@ async fn create_join_event( let servers = services() .rooms + .state_cache .room_servers(room_id) .filter_map(|r| r.ok()) .filter(|server| &**server != services().globals.server_name()); @@ -1634,12 +1629,12 @@ async fn create_join_event( Ok(RoomState { auth_chain: auth_chain_ids - .filter_map(|id| services().rooms.get_pdu_json(&id).ok().flatten()) + .filter_map(|id| services().rooms.timeline.get_pdu_json(&id).ok().flatten()) .map(PduEvent::convert_to_outgoing_federation_event) .collect(), state: state_ids .iter() - .filter_map(|(_, id)| services().rooms.get_pdu_json(id).ok().flatten()) + .filter_map(|(_, id)| services().rooms.timeline.get_pdu_json(id).ok().flatten()) .map(PduEvent::convert_to_outgoing_federation_event) .collect(), }) @@ -1692,7 +1687,7 @@ pub async fn create_invite_route( .as_ref() .expect("server is authenticated"); - acl_check(sender_servername, &body.room_id)?; + services().rooms.event_handler.acl_check(&sender_servername, &body.room_id)?; if !services().rooms.is_supported_version(&body.room_version) { return Err(Error::BadRequest( @@ -1767,8 +1762,8 @@ pub async fn create_invite_route( invite_state.push(pdu.to_stripped_state_event()); // If the room already exists, the remote server will notify us about the join via /send - if !services().rooms.exists(&pdu.room_id)? { - services().rooms.update_membership( + if !services().rooms.metadata.exists(&pdu.room_id)? { + services().rooms.state_cache.update_membership( &body.room_id, &invited_user, MembershipState::Invite, @@ -1931,274 +1926,6 @@ pub async fn claim_keys_route( }) } -#[tracing::instrument(skip_all)] -pub(crate) async fn fetch_required_signing_keys( - event: &BTreeMap, - pub_key_map: &RwLock>>, -) -> Result<()> { - let signatures = event - .get("signatures") - .ok_or(Error::BadServerResponse( - "No signatures in server response pdu.", - ))? - .as_object() - .ok_or(Error::BadServerResponse( - "Invalid signatures object in server response pdu.", - ))?; - - // We go through all the signatures we see on the value and fetch the corresponding signing - // keys - for (signature_server, signature) in signatures { - let signature_object = signature.as_object().ok_or(Error::BadServerResponse( - "Invalid signatures content object in server response pdu.", - ))?; - - let signature_ids = signature_object.keys().cloned().collect::>(); - - let fetch_res = fetch_signing_keys( - signature_server.as_str().try_into().map_err(|_| { - Error::BadServerResponse("Invalid servername in signatures of server response pdu.") - })?, - signature_ids, - ) - .await; - - let keys = match fetch_res { - Ok(keys) => keys, - Err(_) => { - warn!("Signature verification failed: Could not fetch signing key.",); - continue; - } - }; - - pub_key_map - .write() - .map_err(|_| Error::bad_database("RwLock is poisoned."))? - .insert(signature_server.clone(), keys); - } - - Ok(()) -} - -// Gets a list of servers for which we don't have the signing key yet. We go over -// the PDUs and either cache the key or add it to the list that needs to be retrieved. 
-fn get_server_keys_from_cache( - pdu: &RawJsonValue, - servers: &mut BTreeMap, BTreeMap, QueryCriteria>>, - room_version: &RoomVersionId, - pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap>>, -) -> Result<()> { - let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { - error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); - Error::BadServerResponse("Invalid PDU in server response") - })?; - - let event_id = format!( - "${}", - ruma::signatures::reference_hash(&value, room_version) - .expect("ruma can calculate reference hashes") - ); - let event_id = <&EventId>::try_from(event_id.as_str()) - .expect("ruma's reference hashes are valid event ids"); - - if let Some((time, tries)) = services() - .globals - .bad_event_ratelimiter - .read() - .unwrap() - .get(event_id) - { - // Exponential backoff - let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); - if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { - min_elapsed_duration = Duration::from_secs(60 * 60 * 24); - } - - if time.elapsed() < min_elapsed_duration { - debug!("Backing off from {}", event_id); - return Err(Error::BadServerResponse("bad event, still backing off")); - } - } - - let signatures = value - .get("signatures") - .ok_or(Error::BadServerResponse( - "No signatures in server response pdu.", - ))? - .as_object() - .ok_or(Error::BadServerResponse( - "Invalid signatures object in server response pdu.", - ))?; - - for (signature_server, signature) in signatures { - let signature_object = signature.as_object().ok_or(Error::BadServerResponse( - "Invalid signatures content object in server response pdu.", - ))?; - - let signature_ids = signature_object.keys().cloned().collect::>(); - - let contains_all_ids = - |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); - - let origin = <&ServerName>::try_from(signature_server.as_str()).map_err(|_| { - Error::BadServerResponse("Invalid servername in signatures of server response pdu.") - })?; - - if servers.contains_key(origin) || pub_key_map.contains_key(origin.as_str()) { - continue; - } - - trace!("Loading signing keys for {}", origin); - - let result: BTreeMap<_, _> = services() - .globals - .signing_keys_for(origin)? 
- .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect(); - - if !contains_all_ids(&result) { - trace!("Signing key not loaded for {}", origin); - servers.insert(origin.to_owned(), BTreeMap::new()); - } - - pub_key_map.insert(origin.to_string(), result); - } - - Ok(()) -} - -pub(crate) async fn fetch_join_signing_keys( - event: &create_join_event::v2::Response, - room_version: &RoomVersionId, - pub_key_map: &RwLock>>, -) -> Result<()> { - let mut servers: BTreeMap, BTreeMap, QueryCriteria>> = - BTreeMap::new(); - - { - let mut pkm = pub_key_map - .write() - .map_err(|_| Error::bad_database("RwLock is poisoned."))?; - - // Try to fetch keys, failure is okay - // Servers we couldn't find in the cache will be added to `servers` - for pdu in &event.room_state.state { - let _ = get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm); - } - for pdu in &event.room_state.auth_chain { - let _ = get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm); - } - - drop(pkm); - } - - if servers.is_empty() { - // We had all keys locally - return Ok(()); - } - - for server in services().globals.trusted_servers() { - trace!("Asking batch signing keys from trusted server {}", server); - if let Ok(keys) = services() - .sending - .send_federation_request( - server, - get_remote_server_keys_batch::v2::Request { - server_keys: servers.clone(), - }, - ) - .await - { - trace!("Got signing keys: {:?}", keys); - let mut pkm = pub_key_map - .write() - .map_err(|_| Error::bad_database("RwLock is poisoned."))?; - for k in keys.server_keys { - let k = k.deserialize().unwrap(); - - // TODO: Check signature from trusted server? - servers.remove(&k.server_name); - - let result = services() - .globals - .add_signing_key(&k.server_name, k.clone())? - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect::>(); - - pkm.insert(k.server_name.to_string(), result); - } - } - - if servers.is_empty() { - return Ok(()); - } - } - - let mut futures: FuturesUnordered<_> = servers - .into_iter() - .map(|(server, _)| async move { - ( - services().sending - .send_federation_request( - &server, - get_server_keys::v2::Request::new(), - ) - .await, - server, - ) - }) - .collect(); - - while let Some(result) = futures.next().await { - if let (Ok(get_keys_response), origin) = result { - let result: BTreeMap<_, _> = services() - .globals - .add_signing_key(&origin, get_keys_response.server_key.deserialize().unwrap())? - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect(); - - pub_key_map - .write() - .map_err(|_| Error::bad_database("RwLock is poisoned."))? - .insert(origin.to_string(), result); - } - } - - Ok(()) -} - -/// Returns Ok if the acl allows the server -fn acl_check(server_name: &ServerName, room_id: &RoomId) -> Result<()> { - let acl_event = match services() - .rooms - .room_state_get(room_id, &StateEventType::RoomServerAcl, "")? 
- { - Some(acl) => acl, - None => return Ok(()), - }; - - let acl_event_content: RoomServerAclEventContent = - match serde_json::from_str(acl_event.content.get()) { - Ok(content) => content, - Err(_) => { - warn!("Invalid ACL event"); - return Ok(()); - } - }; - - if acl_event_content.is_allowed(server_name) { - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::Forbidden, - "Server was denied by ACL", - )) - } -} - #[cfg(test)] mod tests { use super::{add_port_to_hostname, get_ip_with_port, FedDest}; diff --git a/src/database/key_value/appservice.rs b/src/database/key_value/appservice.rs index edb027e9..f427ba71 100644 --- a/src/database/key_value/appservice.rs +++ b/src/database/key_value/appservice.rs @@ -54,11 +54,11 @@ impl service::appservice::Data for KeyValueDatabase { ) } - fn iter_ids(&self) -> Result>>> { - Ok(self.id_appserviceregistrations.iter().map(|(id, _)| { + fn iter_ids<'a>(&'a self) -> Result> + 'a>> { + Ok(Box::new(self.id_appserviceregistrations.iter().map(|(id, _)| { utils::string_from_bytes(&id) .map_err(|_| Error::bad_database("Invalid id bytes in id_appserviceregistrations.")) - })) + }))) } fn all(&self) -> Result> { diff --git a/src/database/key_value/rooms/directory.rs b/src/database/key_value/rooms/directory.rs index c48afa9a..727004e7 100644 --- a/src/database/key_value/rooms/directory.rs +++ b/src/database/key_value/rooms/directory.rs @@ -16,13 +16,13 @@ impl service::rooms::directory::Data for KeyValueDatabase { } fn public_rooms(&self) -> Box>>> { - self.publicroomids.iter().map(|(bytes, _)| { + Box::new(self.publicroomids.iter().map(|(bytes, _)| { RoomId::parse( utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("Room ID in publicroomids is invalid unicode.") })?, ) .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) + })) } } diff --git a/src/database/mod.rs b/src/database/mod.rs index 4ea619a8..22bfef06 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -513,7 +513,7 @@ impl KeyValueDatabase { let states_parents = last_roomsstatehash.map_or_else( || Ok(Vec::new()), |&last_roomsstatehash| { - db.rooms.load_shortstatehash_info(dbg!(last_roomsstatehash)) + db.rooms.state_accessor.load_shortstatehash_info(dbg!(last_roomsstatehash)) }, )?; diff --git a/src/lib.rs b/src/lib.rs index c6e65697..72399003 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -28,7 +28,7 @@ enum ServicesEnum { Rocksdb(Services) } -pub fn services() -> Services { - SERVICES.read().unwrap() +pub fn services<'a>() -> &'a Services { + &SERVICES.read().unwrap() } diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index 7a399223..c56c69d2 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -31,80 +31,18 @@ impl Service { event_type: RoomAccountDataEventType, data: &T, ) -> Result<()> { - let mut prefix = room_id - .map(|r| r.to_string()) - .unwrap_or_default() - .as_bytes() - .to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(user_id.as_bytes()); - prefix.push(0xff); - - let mut roomuserdataid = prefix.clone(); - roomuserdataid.extend_from_slice(&services().globals.next_count()?.to_be_bytes()); - roomuserdataid.push(0xff); - roomuserdataid.extend_from_slice(event_type.to_string().as_bytes()); - - let mut key = prefix; - key.extend_from_slice(event_type.to_string().as_bytes()); - - let json = serde_json::to_value(data).expect("all types here can be serialized"); // TODO: maybe add error handling - if json.get("type").is_none() || json.get("content").is_none() { - return 
Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Account data doesn't have all required fields.", - )); - } - - self.roomuserdataid_accountdata.insert( - &roomuserdataid, - &serde_json::to_vec(&json).expect("to_vec always works on json values"), - )?; - - let prev = self.roomusertype_roomuserdataid.get(&key)?; - - self.roomusertype_roomuserdataid - .insert(&key, &roomuserdataid)?; - - // Remove old entry - if let Some(prev) = prev { - self.roomuserdataid_accountdata.remove(&prev)?; - } - - Ok(()) + self.db.update(room_id, user_id, event_type, data) } /// Searches the account data for a specific kind. - #[tracing::instrument(skip(self, room_id, user_id, kind))] + #[tracing::instrument(skip(self, room_id, user_id, event_type))] pub fn get( &self, room_id: Option<&RoomId>, user_id: &UserId, - kind: RoomAccountDataEventType, + event_type: RoomAccountDataEventType, ) -> Result> { - let mut key = room_id - .map(|r| r.to_string()) - .unwrap_or_default() - .as_bytes() - .to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(kind.to_string().as_bytes()); - - self.roomusertype_roomuserdataid - .get(&key)? - .and_then(|roomuserdataid| { - self.roomuserdataid_accountdata - .get(&roomuserdataid) - .transpose() - }) - .transpose()? - .map(|data| { - serde_json::from_slice(&data) - .map_err(|_| Error::bad_database("could not deserialize")) - }) - .transpose() + self.db.get(room_id, user_id, event_type) } /// Returns all changes to the account data that happened after `since`. @@ -115,44 +53,6 @@ impl Service { user_id: &UserId, since: u64, ) -> Result>> { - let mut userdata = HashMap::new(); - - let mut prefix = room_id - .map(|r| r.to_string()) - .unwrap_or_default() - .as_bytes() - .to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(user_id.as_bytes()); - prefix.push(0xff); - - // Skip the data that's exactly at since, because we sent that last time - let mut first_possible = prefix.clone(); - first_possible.extend_from_slice(&(since + 1).to_be_bytes()); - - for r in self - .roomuserdataid_accountdata - .iter_from(&first_possible, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(|(k, v)| { - Ok::<_, Error>(( - RoomAccountDataEventType::try_from( - utils::string_from_bytes(k.rsplit(|&b| b == 0xff).next().ok_or_else( - || Error::bad_database("RoomUserData ID in db is invalid."), - )?) 
- .map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?, - ) - .map_err(|_| Error::bad_database("RoomUserData ID in db is invalid."))?, - serde_json::from_slice::>(&v).map_err(|_| { - Error::bad_database("Database contains invalid account data.") - })?, - )) - }) - { - let (kind, data) = r?; - userdata.insert(kind, data); - } - - Ok(userdata) + self.db.changes_since(room_id, user_id, since) } } diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index dad4ceba..48f828fc 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -192,7 +192,7 @@ impl Service { mutex_lock: &MutexGuard<'_, ()>| { services() .rooms - .build_and_append_pdu( + .timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMessage, content: to_raw_value(&message) @@ -213,7 +213,7 @@ impl Service { Some(event) = receiver.recv() => { let message_content = match event { AdminRoomEvent::SendMessage(content) => content, - AdminRoomEvent::ProcessMessage(room_message) => process_admin_message(room_message).await + AdminRoomEvent::ProcessMessage(room_message) => self.process_admin_message(room_message).await }; let mutex_state = Arc::clone( @@ -254,20 +254,20 @@ impl Service { let command_line = lines.next().expect("each string has at least one line"); let body: Vec<_> = lines.collect(); - let admin_command = match parse_admin_command(&command_line) { + let admin_command = match self.parse_admin_command(&command_line) { Ok(command) => command, Err(error) => { let server_name = services().globals.server_name(); let message = error .to_string() .replace("server.name", server_name.as_str()); - let html_message = usage_to_html(&message, server_name); + let html_message = self.usage_to_html(&message, server_name); return RoomMessageEventContent::text_html(message, html_message); } }; - match process_admin_command(admin_command, body).await { + match self.process_admin_command(admin_command, body).await { Ok(reply_message) => reply_message, Err(error) => { let markdown_message = format!( @@ -367,6 +367,8 @@ impl Service { } } AdminCommand::ListRooms => { + todo!(); + /* let room_ids = services().rooms.iter_ids(); let output = format!( "Rooms:\n{}", @@ -385,6 +387,7 @@ impl Service { .join("\n") ); RoomMessageEventContent::text_plain(output) + */ } AdminCommand::ListLocalUsers => match services().users.list_local_users() { Ok(users) => { @@ -412,7 +415,7 @@ impl Service { } AdminCommand::GetAuthChain { event_id } => { let event_id = Arc::::from(event_id); - if let Some(event) = services().rooms.get_pdu_json(&event_id)? { + if let Some(event) = services().rooms.timeline.get_pdu_json(&event_id)? 
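                // Note (editorial): the auth chain can only be walked for events this server
                // already has in its timeline store; unknown events fall through to "Event not found."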
{ let room_id_str = event .get("room_id") .and_then(|val| val.as_str()) @@ -473,10 +476,10 @@ impl Service { } AdminCommand::GetPdu { event_id } => { let mut outlier = false; - let mut pdu_json = services().rooms.get_non_outlier_pdu_json(&event_id)?; + let mut pdu_json = services().rooms.timeline.get_non_outlier_pdu_json(&event_id)?; if pdu_json.is_none() { outlier = true; - pdu_json = services().rooms.get_pdu_json(&event_id)?; + pdu_json = services().rooms.timeline.get_pdu_json(&event_id)?; } match pdu_json { Some(json) => { @@ -506,7 +509,7 @@ impl Service { None => RoomMessageEventContent::text_plain("PDU not found."), } } - AdminCommand::DatabaseMemoryUsage => match services()._db.memory_usage() { + AdminCommand::DatabaseMemoryUsage => match services().globals.db.memory_usage() { Ok(response) => RoomMessageEventContent::text_plain(response), Err(e) => RoomMessageEventContent::text_plain(format!( "Failed to get database memory usage: {}", @@ -825,7 +828,7 @@ impl Service { content.room_version = RoomVersionId::V6; // 1. The room create event - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomCreate, content: to_raw_value(&content).expect("event is valid, we just created it"), @@ -839,7 +842,7 @@ impl Service { )?; // 2. Make conduit bot join - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { @@ -866,7 +869,7 @@ impl Service { let mut users = BTreeMap::new(); users.insert(conduit_user.clone(), 100.into()); - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomPowerLevels, content: to_raw_value(&RoomPowerLevelsEventContent { @@ -884,7 +887,7 @@ impl Service { )?; // 4.1 Join Rules - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomJoinRules, content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite)) @@ -899,7 +902,7 @@ impl Service { )?; // 4.2 History Visibility - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomHistoryVisibility, content: to_raw_value(&RoomHistoryVisibilityEventContent::new( @@ -916,7 +919,7 @@ impl Service { )?; // 4.3 Guest Access - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomGuestAccess, content: to_raw_value(&RoomGuestAccessEventContent::new(GuestAccess::Forbidden)) @@ -933,7 +936,7 @@ impl Service { // 5. 
Events implied by name and topic let room_name = RoomName::parse(format!("{} Admin Room", services().globals.server_name())) .expect("Room name is valid"); - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomName, content: to_raw_value(&RoomNameEventContent::new(Some(room_name))) @@ -947,7 +950,7 @@ impl Service { &state_lock, )?; - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomTopic, content: to_raw_value(&RoomTopicEventContent { @@ -968,7 +971,7 @@ impl Service { .try_into() .expect("#admins:server_name is a valid alias name"); - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomCanonicalAlias, content: to_raw_value(&RoomCanonicalAliasEventContent { @@ -985,7 +988,7 @@ impl Service { &state_lock, )?; - services().rooms.set_alias(&alias, Some(&room_id))?; + services().rooms.alias.set_alias(&alias, &room_id)?; Ok(()) } @@ -1003,7 +1006,8 @@ impl Service { .expect("#admins:server_name is a valid alias name"); let room_id = services() .rooms - .id_from_alias(&admin_room_alias)? + .alias + .resolve_local_alias(&admin_room_alias)? .expect("Admin room must exist"); let mutex_state = Arc::clone( @@ -1021,7 +1025,7 @@ impl Service { .expect("@conduit:server_name is valid"); // Invite and join the real user - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { @@ -1043,7 +1047,7 @@ impl Service { &room_id, &state_lock, )?; - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { @@ -1071,7 +1075,7 @@ impl Service { users.insert(conduit_user.to_owned(), 100.into()); users.insert(user_id.to_owned(), 100.into()); - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomPowerLevels, content: to_raw_value(&RoomPowerLevelsEventContent { @@ -1089,7 +1093,7 @@ impl Service { )?; // Send welcome message - services().rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMessage, content: to_raw_value(&RoomMessageEventContent::text_html( diff --git a/src/service/appservice/data.rs b/src/service/appservice/data.rs index cd48e85d..a70bf9c1 100644 --- a/src/service/appservice/data.rs +++ b/src/service/appservice/data.rs @@ -13,7 +13,7 @@ pub trait Data { fn get_registration(&self, id: &str) -> Result>; - fn iter_ids(&self) -> Result>>>; + fn iter_ids<'a>(&'a self) -> Result> + 'a>>; fn all(&self) -> Result>; } diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 556ca71c..6cfeab81 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -1,6 +1,7 @@ mod data; pub use data::Data; +use crate::api::server_server::FedDest; use crate::service::*; use crate::{Config, utils, Error, Result}; @@ -36,7 +37,7 @@ type SyncHandle = ( ); pub struct Service { - db: D, + pub db: D, pub actual_destination_cache: Arc>, // actual_destination, host pub tls_name_override: Arc>, diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs index 8e842d4e..ce867fb5 100644 --- a/src/service/key_backups/mod.rs +++ 
b/src/service/key_backups/mod.rs @@ -22,36 +22,11 @@ impl Service { user_id: &UserId, backup_metadata: &Raw, ) -> Result { - let version = services().globals.next_count()?.to_string(); - - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); - - self.backupid_algorithm.insert( - &key, - &serde_json::to_vec(backup_metadata).expect("BackupAlgorithm::to_vec always works"), - )?; - self.backupid_etag - .insert(&key, &services().globals.next_count()?.to_be_bytes())?; - Ok(version) + self.db.create_backup(user_id, backup_metadata) } pub fn delete_backup(&self, user_id: &UserId, version: &str) -> Result<()> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); - - self.backupid_algorithm.remove(&key)?; - self.backupid_etag.remove(&key)?; - - key.push(0xff); - - for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) { - self.backupkeyid_backup.remove(&outdated_key)?; - } - - Ok(()) + self.db.delete_backup(user_id, version) } pub fn update_backup( @@ -60,74 +35,18 @@ impl Service { version: &str, backup_metadata: &Raw, ) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); - - if self.backupid_algorithm.get(&key)?.is_none() { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Tried to update nonexistent backup.", - )); - } - - self.backupid_algorithm - .insert(&key, backup_metadata.json().get().as_bytes())?; - self.backupid_etag - .insert(&key, &services().globals.next_count()?.to_be_bytes())?; - Ok(version.to_owned()) + self.db.update_backup(user_id, version, backup_metadata) } pub fn get_latest_backup_version(&self, user_id: &UserId) -> Result> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.backupid_algorithm - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|(key, _)| { - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("backupid_algorithm key is invalid.")) - }) - .transpose() + self.db.get_latest_backup_version(user_id) } pub fn get_latest_backup( &self, user_id: &UserId, ) -> Result)>> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.backupid_algorithm - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|(key, value)| { - let version = utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("backupid_algorithm key is invalid."))?; - - Ok(( - version, - serde_json::from_slice(&value).map_err(|_| { - Error::bad_database("Algorithm in backupid_algorithm is invalid.") - })?, - )) - }) - .transpose() + self.db.get_latest_backup(user_id) } pub fn get_backup( @@ -135,16 +54,7 @@ impl Service { user_id: &UserId, version: &str, ) -> Result>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); - - self.backupid_algorithm - .get(&key)? 
- .map_or(Ok(None), |bytes| { - serde_json::from_slice(&bytes) - .map_err(|_| Error::bad_database("Algorithm in backupid_algorithm is invalid.")) - }) + self.db.get_backup(user_id, version) } pub fn add_key( @@ -155,52 +65,15 @@ impl Service { session_id: &str, key_data: &Raw, ) -> Result<()> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); - - if self.backupid_algorithm.get(&key)?.is_none() { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Tried to update nonexistent backup.", - )); - } - - self.backupid_etag - .insert(&key, &services().globals.next_count()?.to_be_bytes())?; - - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(session_id.as_bytes()); - - self.backupkeyid_backup - .insert(&key, key_data.json().get().as_bytes())?; - - Ok(()) + self.db.add_key(user_id, version, room_id, session_id, key_data) } pub fn count_keys(&self, user_id: &UserId, version: &str) -> Result { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(version.as_bytes()); - - Ok(self.backupkeyid_backup.scan_prefix(prefix).count()) + self.db.count_keys(user_id, version) } pub fn get_etag(&self, user_id: &UserId, version: &str) -> Result { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); - - Ok(utils::u64_from_bytes( - &self - .backupid_etag - .get(&key)? - .ok_or_else(|| Error::bad_database("Backup has no etag."))?, - ) - .map_err(|_| Error::bad_database("etag in backupid_etag invalid."))? - .to_string()) + self.db.get_etag(user_id, version) } pub fn get_all( @@ -208,55 +81,7 @@ impl Service { user_id: &UserId, version: &str, ) -> Result, RoomKeyBackup>> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(version.as_bytes()); - prefix.push(0xff); - - let mut rooms = BTreeMap::, RoomKeyBackup>::new(); - - for result in self - .backupkeyid_backup - .scan_prefix(prefix) - .map(|(key, value)| { - let mut parts = key.rsplit(|&b| b == 0xff); - - let session_id = - utils::string_from_bytes(parts.next().ok_or_else(|| { - Error::bad_database("backupkeyid_backup key is invalid.") - })?) - .map_err(|_| { - Error::bad_database("backupkeyid_backup session_id is invalid.") - })?; - - let room_id = RoomId::parse( - utils::string_from_bytes(parts.next().ok_or_else(|| { - Error::bad_database("backupkeyid_backup key is invalid.") - })?) 
- .map_err(|_| Error::bad_database("backupkeyid_backup room_id is invalid."))?, - ) - .map_err(|_| { - Error::bad_database("backupkeyid_backup room_id is invalid room id.") - })?; - - let key_data = serde_json::from_slice(&value).map_err(|_| { - Error::bad_database("KeyBackupData in backupkeyid_backup is invalid.") - })?; - - Ok::<_, Error>((room_id, session_id, key_data)) - }) - { - let (room_id, session_id, key_data) = result?; - rooms - .entry(room_id) - .or_insert_with(|| RoomKeyBackup { - sessions: BTreeMap::new(), - }) - .sessions - .insert(session_id, key_data); - } - - Ok(rooms) + self.db.get_all(user_id, version) } pub fn get_room( @@ -265,35 +90,7 @@ impl Service { version: &str, room_id: &RoomId, ) -> Result>> { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(version.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); - - Ok(self - .backupkeyid_backup - .scan_prefix(prefix) - .map(|(key, value)| { - let mut parts = key.rsplit(|&b| b == 0xff); - - let session_id = - utils::string_from_bytes(parts.next().ok_or_else(|| { - Error::bad_database("backupkeyid_backup key is invalid.") - })?) - .map_err(|_| { - Error::bad_database("backupkeyid_backup session_id is invalid.") - })?; - - let key_data = serde_json::from_slice(&value).map_err(|_| { - Error::bad_database("KeyBackupData in backupkeyid_backup is invalid.") - })?; - - Ok::<_, Error>((session_id, key_data)) - }) - .filter_map(|r| r.ok()) - .collect()) + self.db.get_room(user_id, version, room_id) } pub fn get_session( @@ -303,35 +100,11 @@ impl Service { room_id: &RoomId, session_id: &str, ) -> Result>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(session_id.as_bytes()); - - self.backupkeyid_backup - .get(&key)? 
- .map(|value| { - serde_json::from_slice(&value).map_err(|_| { - Error::bad_database("KeyBackupData in backupkeyid_backup is invalid.") - }) - }) - .transpose() + self.db.get_session(user_id, version, room_id, session_id) } pub fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); - key.push(0xff); - - for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) { - self.backupkeyid_backup.remove(&outdated_key)?; - } - - Ok(()) + self.db.delete_all_keys(user_id, version) } pub fn delete_room_keys( @@ -340,18 +113,7 @@ impl Service { version: &str, room_id: &RoomId, ) -> Result<()> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - - for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) { - self.backupkeyid_backup.remove(&outdated_key)?; - } - - Ok(()) + self.db.delete_room_keys(user_id, version, room_id) } pub fn delete_room_key( @@ -361,18 +123,6 @@ impl Service { room_id: &RoomId, session_id: &str, ) -> Result<()> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(version.as_bytes()); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(session_id.as_bytes()); - - for (outdated_key, _) in self.backupkeyid_backup.scan_prefix(key) { - self.backupkeyid_backup.remove(&outdated_key)?; - } - - Ok(()) + self.db.delete_room_key(user_id, version, room_id, session_id) } } diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index a5aca036..5037809c 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -29,7 +29,7 @@ impl Service { file: &[u8], ) -> Result<()> { // Width, Height = 0 if it's not a thumbnail - let key = self.db.create_file_metadata(mxc, 0, 0, content_disposition, content_type); + let key = self.db.create_file_metadata(mxc, 0, 0, content_disposition, content_type)?; let path = services().globals.get_media_file(&key); let mut f = File::create(path).await?; @@ -42,13 +42,13 @@ impl Service { pub async fn upload_thumbnail( &self, mxc: String, - content_disposition: &Option, - content_type: &Option, + content_disposition: &Option<&str>, + content_type: &Option<&str>, width: u32, height: u32, file: &[u8], ) -> Result<()> { - let key = self.db.create_file_metadata(mxc, width, height, content_disposition, content_type); + let key = self.db.create_file_metadata(mxc, width, height, content_disposition, content_type)?; let path = services().globals.get_media_file(&key); let mut f = File::create(path).await?; diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 66a8ae36..64c7f1fa 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -138,6 +138,7 @@ impl Service { let power_levels: RoomPowerLevelsEventContent = services() .rooms + .state_accessor .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? .map(|ev| { serde_json::from_str(ev.content.get()) @@ -274,6 +275,7 @@ impl Service { let room_name = if let Some(room_name_pdu) = services().rooms + .state_accessor .room_state_get(&event.room_id, &StateEventType::RoomName, "")? 
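            // Note (editorial): the room's m.room.name state event, if any, is resolved here so the
            // push notification can carry a readable room name.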
{ serde_json::from_str::(room_name_pdu.content.get()) diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index 113d2e81..9ea4763e 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -21,7 +21,7 @@ impl Service { } // We only save auth chains for single events in the db - if key.len == 1 { + if key.len() == 1 { // Check DB cache if let Some(chain) = self.db.get_cached_eventid_authchain(key[0]) { diff --git a/src/service/rooms/edus/mod.rs b/src/service/rooms/edus/mod.rs index a5ce37f1..dbe1b6e8 100644 --- a/src/service/rooms/edus/mod.rs +++ b/src/service/rooms/edus/mod.rs @@ -5,7 +5,7 @@ pub mod typing; pub trait Data: presence::Data + read_receipt::Data + typing::Data {} pub struct Service { - presence: presence::Service, - read_receipt: read_receipt::Service, - typing: typing::Service, + pub presence: presence::Service, + pub read_receipt: read_receipt::Service, + pub typing: typing::Service, } diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index c9b041c2..8a8725b8 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -8,22 +8,23 @@ use std::{ time::{Duration, Instant}, }; -use futures_util::Future; +use futures_util::{Future, stream::FuturesUnordered}; use ruma::{ api::{ client::error::ErrorKind, - federation::event::{get_event, get_room_state_ids}, + federation::{event::{get_event, get_room_state_ids}, membership::create_join_event, discovery::get_remote_server_keys_batch::{v2::QueryCriteria, self}}, }, - events::{room::create::RoomCreateEventContent, StateEventType}, + events::{room::{create::RoomCreateEventContent, server_acl::RoomServerAclEventContent}, StateEventType}, int, serde::Base64, signatures::CanonicalJsonValue, state_res::{self, RoomVersion, StateMap}, - uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, + uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, ServerSigningKeyId, }; +use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use tracing::{error, info, trace, warn}; -use crate::{service::*, services, Error, PduEvent}; +use crate::{service::*, services, Result, Error, PduEvent}; pub struct Service; @@ -62,10 +63,11 @@ impl Service { is_timeline_event: bool, pub_key_map: &'a RwLock>>, ) -> Result>> { - services().rooms.exists(room_id)?.ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Room is unknown to this server", - ))?; + if !services().rooms.metadata.exists(room_id)? { + return Error::BadRequest( + ErrorKind::NotFound, + "Room is unknown to this server", + )}; services() .rooms @@ -76,17 +78,18 @@ impl Service { ))?; // 1. Skip the PDU if we already have it as a timeline event - if let Some(pdu_id) = services().rooms.get_pdu_id(event_id)? { + if let Some(pdu_id) = services().rooms.timeline.get_pdu_id(event_id)? { return Ok(Some(pdu_id.to_vec())); } let create_event = services() .rooms + .state_accessor .room_state_get(room_id, &StateEventType::RoomCreate, "")? .ok_or_else(|| Error::bad_database("Failed to find create event in db."))?; let first_pdu_in_room = services() - .rooms + .rooms.timeline .first_pdu_in_room(room_id)? 
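            // Note (editorial): this is the oldest PDU the server has for the room; it is consulted
            // further down when deciding how far back to follow prev_events.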
.ok_or_else(|| Error::bad_database("Failed to find first pdu in db."))?; @@ -111,7 +114,7 @@ impl Service { room_id, pub_key_map, incoming_pdu.prev_events.clone(), - ); + ).await; let mut errors = 0; for prev_id in dbg!(sorted_prev_events) { @@ -243,7 +246,7 @@ impl Service { room_id: &'a RoomId, value: BTreeMap, pub_key_map: &'a RwLock>>, - ) -> AsyncRecursiveType<'a, Result<(Arc, BTreeMap), String>> + ) -> AsyncRecursiveType<'a, Result<(Arc, BTreeMap)>> { Box::pin(async move { // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json @@ -367,11 +370,7 @@ impl Service { &incoming_pdu, None::, // TODO: third party invite |k, s| auth_events.get(&(k.to_string().into(), s.to_owned())), - ) - .map_err(|e| { - error!(e); - Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed") - })? { + )? { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Auth check failed", @@ -400,16 +399,15 @@ impl Service { origin: &ServerName, room_id: &RoomId, pub_key_map: &RwLock>>, - ) -> Result>, String> { + ) -> Result>> { // Skip the PDU if we already have it as a timeline event - if let Ok(Some(pduid)) = services().rooms.get_pdu_id(&incoming_pdu.event_id) { + if let Ok(Some(pduid)) = services().rooms.timeline.get_pdu_id(&incoming_pdu.event_id) { return Ok(Some(pduid)); } if services() .rooms - .is_event_soft_failed(&incoming_pdu.event_id) - .map_err(|_| "Failed to ask db for soft fail".to_owned())? + .pdu_metadata.is_event_soft_failed(&incoming_pdu.event_id)? { return Err("Event has been soft failed".into()); } @@ -438,11 +436,11 @@ impl Service { let prev_event = &*incoming_pdu.prev_events[0]; let prev_event_sstatehash = services() .rooms - .pdu_shortstatehash(prev_event) - .map_err(|_| "Failed talking to db".to_owned())?; + .state_accessor + .pdu_shortstatehash(prev_event)?; let state = if let Some(shortstatehash) = prev_event_sstatehash { - Some(services().rooms.state_full_ids(shortstatehash).await) + Some(services().rooms.state_accessor.state_full_ids(shortstatehash).await) } else { None }; @@ -451,18 +449,19 @@ impl Service { info!("Using cached state"); let prev_pdu = services() .rooms + .timeline .get_pdu(prev_event) .ok() .flatten() .ok_or_else(|| { - "Could not find prev event, but we know the state.".to_owned() + Error::bad_database("Could not find prev event, but we know the state.") })?; if let Some(state_key) = &prev_pdu.state_key { let shortstatekey = services() .rooms - .get_or_create_shortstatekey(&prev_pdu.kind.to_string().into(), state_key) - .map_err(|_| "Failed to create shortstatekey.".to_owned())?; + .short + .get_or_create_shortstatekey(&prev_pdu.kind.to_string().into(), state_key)?; state.insert(shortstatekey, Arc::from(prev_event)); // Now it's the state after the pdu @@ -501,18 +500,18 @@ impl Service { for (sstatehash, prev_event) in extremity_sstatehashes { let mut leaf_state: BTreeMap<_, _> = services() .rooms + .state_accessor .state_full_ids(sstatehash) - .await - .map_err(|_| "Failed to ask db for room state.".to_owned())?; + .await?; if let Some(state_key) = &prev_event.state_key { let shortstatekey = services() .rooms + .short .get_or_create_shortstatekey( &prev_event.kind.to_string().into(), state_key, - ) - .map_err(|_| "Failed to create shortstatekey.".to_owned())?; + )?; leaf_state.insert(shortstatekey, Arc::from(&*prev_event.event_id)); // Now it's the state after the pdu } @@ -536,8 +535,7 @@ impl Service { .rooms .auth_chain .get_auth_chain(room_id, starting_events, services()) - 
.await - .map_err(|_| "Failed to load auth chain.".to_owned())? + .await? .collect(), ); @@ -563,16 +561,14 @@ impl Service { .map(|((event_type, state_key), event_id)| { let shortstatekey = services() .rooms + .short .get_or_create_shortstatekey( &event_type.to_string().into(), &state_key, - ) - .map_err(|_| { - "Failed to get_or_create_shortstatekey".to_owned() - })?; + )?; Ok((shortstatekey, event_id)) }) - .collect::>()?, + .collect::>()?, ), Err(e) => { warn!("State resolution on prev events failed, either an event could not be found or deserialization: {}", e); @@ -617,20 +613,19 @@ impl Service { let state_key = pdu .state_key .clone() - .ok_or_else(|| "Found non-state pdu in state events.".to_owned())?; + .ok_or_else(|| Error::bad_database("Found non-state pdu in state events."))?; let shortstatekey = services() .rooms - .get_or_create_shortstatekey(&pdu.kind.to_string().into(), &state_key) - .map_err(|_| "Failed to create shortstatekey.".to_owned())?; + .short + .get_or_create_shortstatekey(&pdu.kind.to_string().into(), &state_key)?; match state.entry(shortstatekey) { btree_map::Entry::Vacant(v) => { v.insert(Arc::from(&*pdu.event_id)); } btree_map::Entry::Occupied(_) => return Err( - "State event's type and state_key combination exists multiple times." - .to_owned(), + Error::bad_database("State event's type and state_key combination exists multiple times."), ), } } @@ -638,21 +633,21 @@ impl Service { // The original create event must still be in the state let create_shortstatekey = services() .rooms - .get_shortstatekey(&StateEventType::RoomCreate, "") - .map_err(|_| "Failed to talk to db.")? + .short + .get_shortstatekey(&StateEventType::RoomCreate, "")? .expect("Room exists"); if state.get(&create_shortstatekey).map(|id| id.as_ref()) != Some(&create_event.event_id) { - return Err("Incoming event refers to wrong create event.".to_owned()); + return Err(Error::bad_database("Incoming event refers to wrong create event.")); } state_at_incoming_event = Some(state); } Err(e) => { warn!("Fetching state for event failed: {}", e); - return Err("Fetching state for event failed".into()); + return Err(e); } }; } @@ -669,17 +664,18 @@ impl Service { |k, s| { services() .rooms + .short .get_shortstatekey(&k.to_string().into(), s) .ok() .flatten() .and_then(|shortstatekey| state_at_incoming_event.get(&shortstatekey)) - .and_then(|event_id| services().rooms.get_pdu(event_id).ok().flatten()) + .and_then(|event_id| services().rooms.timeline.get_pdu(event_id).ok().flatten()) }, ) .map_err(|_e| "Auth check failed.".to_owned())?; if !check_result { - return Err("Event has failed auth check with state at the event.".into()); + return Err(Error::bad_database("Event has failed auth check with state at the event.")); } info!("Auth check succeeded"); @@ -701,8 +697,8 @@ impl Service { info!("Calculating extremities"); let mut extremities = services() .rooms - .get_pdu_leaves(room_id) - .map_err(|_| "Failed to load room leaves".to_owned())?; + .state + .get_forward_extremities(room_id)?; // Remove any forward extremities that are referenced by this incoming event's prev_events for prev_event in &incoming_pdu.prev_events { @@ -721,10 +717,9 @@ impl Service { .map(|(shortstatekey, id)| { services() .rooms - .compress_state_event(*shortstatekey, id) - .map_err(|_| "Failed to compress_state_event".to_owned()) + .compress_state_event(*shortstatekey, id)? }) - .collect::>()?; + .collect::>()?; // 13. 
Check if the event passes auth based on the "current state" of the room, if not "soft fail" it info!("Starting soft fail auth check"); @@ -737,16 +732,14 @@ impl Service { &incoming_pdu.sender, incoming_pdu.state_key.as_deref(), &incoming_pdu.content, - ) - .map_err(|_| "Failed to get_auth_events.".to_owned())?; + )? let soft_fail = !state_res::event_auth::auth_check( &room_version, &incoming_pdu, None::, |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|_e| "Auth check failed.".to_owned())?; + )?; if soft_fail { self.append_incoming_pdu( @@ -756,18 +749,13 @@ impl Service { state_ids_compressed, soft_fail, &state_lock, - ) - .map_err(|e| { - warn!("Failed to add pdu to db: {}", e); - "Failed to add pdu to db.".to_owned() - })?; + )?; // Soft fail, we keep the event as an outlier but don't add it to the timeline warn!("Event was soft failed: {:?}", incoming_pdu); services() .rooms - .mark_event_soft_failed(&incoming_pdu.event_id) - .map_err(|_| "Failed to set soft failed flag".to_owned())?; + .mark_event_soft_failed(&incoming_pdu.event_id)?; return Err("Event has been soft failed".into()); } @@ -775,15 +763,15 @@ impl Service { info!("Loading current room state ids"); let current_sstatehash = services() .rooms - .current_shortstatehash(room_id) - .map_err(|_| "Failed to load current state hash.".to_owned())? + .state + .get_room_shortstatehash(room_id)? .expect("every room has state"); let current_state_ids = services() .rooms + .state_accessor .state_full_ids(current_sstatehash) - .await - .map_err(|_| "Failed to load room state.")?; + .await?; info!("Preparing for stateres to derive new room state"); let mut extremity_sstatehashes = HashMap::new(); @@ -792,14 +780,14 @@ impl Service { for id in dbg!(&extremities) { match services() .rooms - .get_pdu(id) - .map_err(|_| "Failed to ask db for pdu.".to_owned())? + .timeline + .get_pdu(id)? { Some(leaf_pdu) => { extremity_sstatehashes.insert( services() - .pdu_shortstatehash(&leaf_pdu.event_id) - .map_err(|_| "Failed to ask db for pdu state hash.".to_owned())? + .rooms.state_accessor + .pdu_shortstatehash(&leaf_pdu.event_id)? .ok_or_else(|| { error!( "Found extremity pdu with no statehash in db: {:?}", @@ -832,8 +820,8 @@ impl Service { if let Some(state_key) = &incoming_pdu.state_key { let shortstatekey = services() .rooms - .get_or_create_shortstatekey(&incoming_pdu.kind.to_string().into(), state_key) - .map_err(|_| "Failed to create shortstatekey.".to_owned())?; + .short + .get_or_create_shortstatekey(&incoming_pdu.kind.to_string().into(), state_key)? state_after.insert(shortstatekey, Arc::from(&*incoming_pdu.event_id)); } @@ -852,10 +840,9 @@ impl Service { .map(|(k, id)| { services() .rooms - .compress_state_event(*k, id) - .map_err(|_| "Failed to compress_state_event.".to_owned()) + .compress_state_event(*k, id)? }) - .collect::>()? + .collect::>()? } else { info!("Loading auth chains"); // We do need to force an update to this room's state @@ -871,8 +858,7 @@ impl Service { room_id, state.iter().map(|(_, id)| id.clone()).collect(), ) - .await - .map_err(|_| "Failed to load auth chain.".to_owned())? + .await? .collect(), ); } @@ -886,11 +872,10 @@ impl Service { .filter_map(|(k, id)| { services() .rooms - .get_statekey_from_short(k) + .get_statekey_from_short(k)? 
// FIXME: Undo .to_string().into() when StateMap // is updated to use StateEventType .map(|(ty, st_key)| ((ty.to_string().into(), st_key), id)) - .map_err(|e| warn!("Failed to get_statekey_from_short: {}", e)) .ok() }) .collect::>() @@ -927,14 +912,13 @@ impl Service { .map(|((event_type, state_key), event_id)| { let shortstatekey = services() .rooms - .get_or_create_shortstatekey(&event_type.to_string().into(), &state_key) - .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; + .short + .get_or_create_shortstatekey(&event_type.to_string().into(), &state_key)?; services() .rooms .compress_state_event(shortstatekey, &event_id) - .map_err(|_| "Failed to compress state event".to_owned()) }) - .collect::>()? + .collect::>()? }; // Set the new room state to the resolved state @@ -942,8 +926,7 @@ impl Service { info!("Forcing new room state"); services() .rooms - .force_state(room_id, new_room_state) - .map_err(|_| "Failed to set new room state.".to_owned())?; + .force_state(room_id, new_room_state)?; } } @@ -962,11 +945,7 @@ impl Service { state_ids_compressed, soft_fail, &state_lock, - ) - .map_err(|e| { - warn!("Failed to add pdu to db: {}", e); - "Failed to add pdu to db.".to_owned() - })?; + )?; info!("Appended incoming pdu"); @@ -1227,9 +1206,279 @@ impl Service { .map_or_else(|| uint!(0), |info| info.0.origin_server_ts), ), )) - }) - .map_err(|_| "Error sorting prev events".to_owned())?; + })?; (sorted, eventid_info) } + + #[tracing::instrument(skip_all)] + pub(crate) async fn fetch_required_signing_keys( + &self, + event: &BTreeMap, + pub_key_map: &RwLock>>, + ) -> Result<()> { + let signatures = event + .get("signatures") + .ok_or(Error::BadServerResponse( + "No signatures in server response pdu.", + ))? + .as_object() + .ok_or(Error::BadServerResponse( + "Invalid signatures object in server response pdu.", + ))?; + + // We go through all the signatures we see on the value and fetch the corresponding signing + // keys + for (signature_server, signature) in signatures { + let signature_object = signature.as_object().ok_or(Error::BadServerResponse( + "Invalid signatures content object in server response pdu.", + ))?; + + let signature_ids = signature_object.keys().cloned().collect::>(); + + let fetch_res = fetch_signing_keys( + signature_server.as_str().try_into().map_err(|_| { + Error::BadServerResponse("Invalid servername in signatures of server response pdu.") + })?, + signature_ids, + ) + .await; + + let keys = match fetch_res { + Ok(keys) => keys, + Err(_) => { + warn!("Signature verification failed: Could not fetch signing key.",); + continue; + } + }; + + pub_key_map + .write() + .map_err(|_| Error::bad_database("RwLock is poisoned."))? + .insert(signature_server.clone(), keys); + } + + Ok(()) + } + + // Gets a list of servers for which we don't have the signing key yet. We go over + // the PDUs and either cache the key or add it to the list that needs to be retrieved. 
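    // Keys already present in the local cache are copied straight into `pub_key_map`; origins
    // that are still missing one of the requested key ids are collected in `servers` for a
    // remote fetch.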
+ fn get_server_keys_from_cache( + &self, + pdu: &RawJsonValue, + servers: &mut BTreeMap, BTreeMap, QueryCriteria>>, + room_version: &RoomVersionId, + pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap>>, + ) -> Result<()> { + let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { + error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); + Error::BadServerResponse("Invalid PDU in server response") + })?; + + let event_id = format!( + "${}", + ruma::signatures::reference_hash(&value, room_version) + .expect("ruma can calculate reference hashes") + ); + let event_id = <&EventId>::try_from(event_id.as_str()) + .expect("ruma's reference hashes are valid event ids"); + + if let Some((time, tries)) = services() + .globals + .bad_event_ratelimiter + .read() + .unwrap() + .get(event_id) + { + // Exponential backoff + let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + debug!("Backing off from {}", event_id); + return Err(Error::BadServerResponse("bad event, still backing off")); + } + } + + let signatures = value + .get("signatures") + .ok_or(Error::BadServerResponse( + "No signatures in server response pdu.", + ))? + .as_object() + .ok_or(Error::BadServerResponse( + "Invalid signatures object in server response pdu.", + ))?; + + for (signature_server, signature) in signatures { + let signature_object = signature.as_object().ok_or(Error::BadServerResponse( + "Invalid signatures content object in server response pdu.", + ))?; + + let signature_ids = signature_object.keys().cloned().collect::>(); + + let contains_all_ids = + |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); + + let origin = <&ServerName>::try_from(signature_server.as_str()).map_err(|_| { + Error::BadServerResponse("Invalid servername in signatures of server response pdu.") + })?; + + if servers.contains_key(origin) || pub_key_map.contains_key(origin.as_str()) { + continue; + } + + trace!("Loading signing keys for {}", origin); + + let result: BTreeMap<_, _> = services() + .globals + .signing_keys_for(origin)? 
+ .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect(); + + if !contains_all_ids(&result) { + trace!("Signing key not loaded for {}", origin); + servers.insert(origin.to_owned(), BTreeMap::new()); + } + + pub_key_map.insert(origin.to_string(), result); + } + + Ok(()) + } + + pub(crate) async fn fetch_join_signing_keys( + &self, + event: &create_join_event::v2::Response, + room_version: &RoomVersionId, + pub_key_map: &RwLock>>, + ) -> Result<()> { + let mut servers: BTreeMap, BTreeMap, QueryCriteria>> = + BTreeMap::new(); + + { + let mut pkm = pub_key_map + .write() + .map_err(|_| Error::bad_database("RwLock is poisoned."))?; + + // Try to fetch keys, failure is okay + // Servers we couldn't find in the cache will be added to `servers` + for pdu in &event.room_state.state { + let _ = self.get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm); + } + for pdu in &event.room_state.auth_chain { + let _ = self.get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm); + } + + drop(pkm); + } + + if servers.is_empty() { + // We had all keys locally + return Ok(()); + } + + for server in services().globals.trusted_servers() { + trace!("Asking batch signing keys from trusted server {}", server); + if let Ok(keys) = services() + .sending + .send_federation_request( + server, + get_remote_server_keys_batch::v2::Request { + server_keys: servers.clone(), + }, + ) + .await + { + trace!("Got signing keys: {:?}", keys); + let mut pkm = pub_key_map + .write() + .map_err(|_| Error::bad_database("RwLock is poisoned."))?; + for k in keys.server_keys { + let k = k.deserialize().unwrap(); + + // TODO: Check signature from trusted server? + servers.remove(&k.server_name); + + let result = services() + .globals + .add_signing_key(&k.server_name, k.clone())? + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect::>(); + + pkm.insert(k.server_name.to_string(), result); + } + } + + if servers.is_empty() { + return Ok(()); + } + } + + let mut futures: FuturesUnordered<_> = servers + .into_iter() + .map(|(server, _)| async move { + ( + services().sending + .send_federation_request( + &server, + get_server_keys::v2::Request::new(), + ) + .await, + server, + ) + }) + .collect(); + + while let Some(result) = futures.next().await { + if let (Ok(get_keys_response), origin) = result { + let result: BTreeMap<_, _> = services() + .globals + .add_signing_key(&origin, get_keys_response.server_key.deserialize().unwrap())? + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect(); + + pub_key_map + .write() + .map_err(|_| Error::bad_database("RwLock is poisoned."))? + .insert(origin.to_string(), result); + } + } + + Ok(()) + } + + /// Returns Ok if the acl allows the server + pub fn acl_check(&self, server_name: &ServerName, room_id: &RoomId) -> Result<()> { + let acl_event = match services() + .rooms.state_accessor + .room_state_get(room_id, &StateEventType::RoomServerAcl, "")? 
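            // A room without an m.room.server_acl state event places no restrictions on servers.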
+ { + Some(acl) => acl, + None => return Ok(()), + }; + + let acl_event_content: RoomServerAclEventContent = + match serde_json::from_str(acl_event.content.get()) { + Ok(content) => content, + Err(_) => { + warn!("Invalid ACL event"); + return Ok(()); + } + }; + + if acl_event_content.is_allowed(server_name) { + Ok(()) + } else { + Err(Error::BadRequest( + ErrorKind::Forbidden, + "Server was denied by ACL", + )) + } + } } diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index e6b5ce20..a26ed46b 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -1,5 +1,5 @@ mod data; -use std::collections::HashSet; +use std::{collections::HashSet, sync::Arc}; pub use data::Data; use ruma::{RoomId, events::{room::{member::MembershipState, create::RoomCreateEventContent}, AnyStrippedStateEvent, StateEventType}, UserId, EventId, serde::Raw, RoomVersionId}; @@ -85,7 +85,7 @@ impl Service { event_id: &EventId, room_id: &RoomId, state_ids_compressed: HashSet, - ) -> Result<()> { + ) -> Result { let shorteventid = services().short.get_or_create_shorteventid(event_id)?; let previous_shortstatehash = self.db.get_room_shortstatehash(room_id)?; @@ -132,7 +132,7 @@ impl Service { self.db.set_event_state(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - Ok(()) + Ok(shortstatehash) } /// Generates a new StateHash and associates it with the incoming event. @@ -279,4 +279,8 @@ impl Service { pub fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { self.db.get_room_shortstatehash(room_id) } + + pub fn get_forward_extremities(&self, room_id: &RoomId) -> Result>> { + self.db.get_forward_extremities(room_id) + } } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 09f66ddf..7669b0b3 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1,7 +1,7 @@ mod data; use std::borrow::Cow; use std::sync::Arc; -use std::{sync::MutexGuard, iter, collections::HashSet}; +use std::{iter, collections::HashSet}; use std::fmt::Debug; pub use data::Data; @@ -13,6 +13,7 @@ use ruma::state_res::RoomVersion; use ruma::{EventId, signatures::CanonicalJsonObject, push::{Action, Tweak}, events::{push_rules::PushRulesEvent, GlobalAccountDataEventType, RoomEventType, room::{member::MembershipState, create::RoomCreateEventContent}, StateEventType}, UserId, RoomAliasId, RoomId, uint, state_res, api::client::error::ErrorKind, serde::to_canonical_value, ServerName}; use serde::Deserialize; use serde_json::value::to_raw_value; +use tokio::sync::MutexGuard; use tracing::{warn, error}; use crate::{services, Result, service::pdu::{PduBuilder, EventHash}, Error, PduEvent, utils}; @@ -460,7 +461,7 @@ impl Service { sender: &UserId, room_id: &RoomId, _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex - ) -> (PduEvent, CanonicalJsonObject) { + ) -> Result<(PduEvent, CanonicalJsonObject)> { let PduBuilder { event_type, content, @@ -471,7 +472,8 @@ impl Service { let prev_events: Vec<_> = services() .rooms - .get_pdu_leaves(room_id)? + .state + .get_forward_extremities(room_id)? .into_iter() .take(20) .collect(); @@ -622,6 +624,8 @@ impl Service { // Generate short event id let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id)?; + + Ok((pdu, pdu_json)) } /// Creates a new persisted data unit and adds it to a room. 
This function takes a @@ -634,7 +638,7 @@ impl Service { room_id: &RoomId, state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result> { - let (pdu, pdu_json) = self.create_hash_and_sign_event(pdu_builder, sender, room_id, &state_lock); + let (pdu, pdu_json) = self.create_hash_and_sign_event(pdu_builder, sender, room_id, &state_lock)?; // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. From face766e0f32481fd97a435f1ed8579d8cfc634c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 5 Oct 2022 12:45:54 +0200 Subject: [PATCH 387/445] messing with trait objects --- src/api/client_server/membership.rs | 6 +- src/api/client_server/room.rs | 4 +- src/api/client_server/sync.rs | 2 +- src/api/ruma_wrapper/axum.rs | 2 +- src/api/server_server.rs | 175 +---------- src/database/key_value/globals.rs | 8 +- src/database/key_value/media.rs | 4 +- src/database/key_value/pusher.rs | 2 +- src/database/key_value/rooms/alias.rs | 6 +- src/database/key_value/rooms/auth_chain.rs | 6 +- src/database/key_value/rooms/edus/presence.rs | 2 + .../key_value/rooms/edus/read_receipt.rs | 4 +- src/database/key_value/rooms/lazy_load.rs | 29 +- src/database/key_value/rooms/metadata.rs | 4 +- src/database/key_value/rooms/search.rs | 10 +- .../key_value/rooms/state_accessor.rs | 26 +- .../key_value/rooms/state_compressor.rs | 4 +- src/database/key_value/rooms/timeline.rs | 20 +- src/database/key_value/rooms/user.rs | 6 +- src/database/key_value/users.rs | 19 +- src/database/mod.rs | 296 ++++++++++-------- src/lib.rs | 14 +- src/main.rs | 17 +- src/service/account_data/mod.rs | 6 +- src/service/appservice/mod.rs | 6 +- src/service/globals/mod.rs | 8 +- src/service/key_backups/mod.rs | 6 +- src/service/media/mod.rs | 6 +- src/service/mod.rs | 44 ++- src/service/pusher/mod.rs | 6 +- src/service/rooms/alias/data.rs | 2 +- src/service/rooms/alias/mod.rs | 6 +- src/service/rooms/auth_chain/data.rs | 2 +- src/service/rooms/auth_chain/mod.rs | 6 +- src/service/rooms/directory/mod.rs | 6 +- src/service/rooms/edus/mod.rs | 8 +- src/service/rooms/edus/presence/mod.rs | 6 +- src/service/rooms/edus/read_receipt/mod.rs | 6 +- src/service/rooms/edus/typing/mod.rs | 6 +- src/service/rooms/event_handler/mod.rs | 187 ++++++++++- src/service/rooms/lazy_loading/data.rs | 2 +- src/service/rooms/lazy_loading/mod.rs | 23 +- src/service/rooms/metadata/mod.rs | 6 +- src/service/rooms/mod.rs | 34 +- src/service/rooms/outlier/mod.rs | 6 +- src/service/rooms/pdu_metadata/mod.rs | 6 +- src/service/rooms/search/data.rs | 2 +- src/service/rooms/search/mod.rs | 11 +- src/service/rooms/short/mod.rs | 6 +- src/service/rooms/state/data.rs | 3 +- src/service/rooms/state/mod.rs | 19 +- src/service/rooms/state_accessor/mod.rs | 6 +- src/service/rooms/state_cache/mod.rs | 6 +- src/service/rooms/state_compressor/data.rs | 8 +- src/service/rooms/state_compressor/mod.rs | 6 +- src/service/rooms/timeline/mod.rs | 6 +- src/service/rooms/user/mod.rs | 6 +- src/service/transaction_ids/mod.rs | 6 +- src/service/uiaa/mod.rs | 6 +- src/service/users/data.rs | 10 +- src/service/users/mod.rs | 6 +- 61 files changed, 623 insertions(+), 544 deletions(-) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index 98931f25..720c1e64 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -481,7 +481,7 @@ async fn 
join_room_by_id_helper( let (make_join_response, remote_server) = make_join_response_and_server?; let room_version = match make_join_response.room_version { - Some(room_version) if services().rooms.metadata.is_supported_version(&room_version) => room_version, + Some(room_version) if services().globals.supported_room_versions().contains(&room_version) => room_version, _ => return Err(Error::BadServerResponse("Room version is not supported")), }; @@ -568,7 +568,7 @@ async fn join_room_by_id_helper( let mut state = HashMap::new(); let pub_key_map = RwLock::new(BTreeMap::new()); - server_server::fetch_join_signing_keys( + services().rooms.event_handler.fetch_join_signing_keys( &send_join_response, &room_version, &pub_key_map, @@ -1048,7 +1048,7 @@ async fn remote_leave_room( let (make_leave_response, remote_server) = make_leave_response_and_server?; let room_version_id = match make_leave_response.room_version { - Some(version) if services().rooms.is_supported_version(&version) => version, + Some(version) if services().globals.supported_room_versions().contains(&version) => version, _ => return Err(Error::BadServerResponse("Room version is not supported")), }; diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs index a7fa9520..939fbaa2 100644 --- a/src/api/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -99,7 +99,7 @@ pub async fn create_room_route( let room_version = match body.room_version.clone() { Some(room_version) => { - if services().rooms.is_supported_version(&services(), &room_version) { + if services().globals.supported_room_versions().contains(&room_version) { room_version } else { return Err(Error::BadRequest( @@ -470,7 +470,7 @@ pub async fn upgrade_room_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !services().rooms.is_supported_version(&body.new_version) { + if !services().globals.supported_room_versions().contains(&body.new_version) { return Err(Error::BadRequest( ErrorKind::UnsupportedRoomVersion, "This server does not support that room version.", diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index e38ea600..3489a9a9 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -175,7 +175,7 @@ async fn sync_helper( services().rooms.edus.presence.ping_presence(&sender_user)?; // Setup watchers, so if there's no response, we can wait for them - let watcher = services().watch(&sender_user, &sender_device); + let watcher = services().globals.db.watch(&sender_user, &sender_device); let next_batch = services().globals.current_count()?; let next_batch_string = next_batch.to_string(); diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index babf2a74..d926b89b 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -197,7 +197,7 @@ where request_map.insert("content".to_owned(), json_body.clone()); }; - let keys_result = server_server::fetch_signing_keys( + let keys_result = services().rooms.event_handler.fetch_signing_keys( &x_matrix.origin, vec![x_matrix.key.to_owned()], ) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 9aa2beb9..45d749d0 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -664,7 +664,7 @@ pub async fn send_transaction_message_route( Some(id) => id, None => { // Event is invalid - resolved_map.insert(event_id, Err("Event needs a valid RoomId.".to_owned())); + resolved_map.insert(event_id, Err(Error::bad_database("Event needs a valid 
RoomId."))); continue; } }; @@ -707,7 +707,7 @@ pub async fn send_transaction_message_route( for pdu in &resolved_map { if let Err(e) = pdu.1 { - if e != "Room is unknown to this server." { + if matches!(e, Error::BadRequest(ErrorKind::NotFound, _)) { warn!("Incoming PDU failed {:?}", pdu); } } @@ -854,170 +854,7 @@ pub async fn send_transaction_message_route( } } - Ok(send_transaction_message::v1::Response { pdus: resolved_map }) -} - -/// Search the DB for the signing keys of the given server, if we don't have them -/// fetch them from the server and save to our DB. -#[tracing::instrument(skip_all)] -pub(crate) async fn fetch_signing_keys( - origin: &ServerName, - signature_ids: Vec, -) -> Result> { - let contains_all_ids = - |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); - - let permit = services() - .globals - .servername_ratelimiter - .read() - .unwrap() - .get(origin) - .map(|s| Arc::clone(s).acquire_owned()); - - let permit = match permit { - Some(p) => p, - None => { - let mut write = services().globals.servername_ratelimiter.write().unwrap(); - let s = Arc::clone( - write - .entry(origin.to_owned()) - .or_insert_with(|| Arc::new(Semaphore::new(1))), - ); - - s.acquire_owned() - } - } - .await; - - let back_off = |id| match services() - .globals - .bad_signature_ratelimiter - .write() - .unwrap() - .entry(id) - { - hash_map::Entry::Vacant(e) => { - e.insert((Instant::now(), 1)); - } - hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), - }; - - if let Some((time, tries)) = services() - .globals - .bad_signature_ratelimiter - .read() - .unwrap() - .get(&signature_ids) - { - // Exponential backoff - let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); - if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { - min_elapsed_duration = Duration::from_secs(60 * 60 * 24); - } - - if time.elapsed() < min_elapsed_duration { - debug!("Backing off from {:?}", signature_ids); - return Err(Error::BadServerResponse("bad signature, still backing off")); - } - } - - trace!("Loading signing keys for {}", origin); - - let mut result: BTreeMap<_, _> = services() - .globals - .signing_keys_for(origin)? 
- .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect(); - - if contains_all_ids(&result) { - return Ok(result); - } - - debug!("Fetching signing keys for {} over federation", origin); - - if let Some(server_key) = services() - .sending - .send_federation_request(origin, get_server_keys::v2::Request::new()) - .await - .ok() - .and_then(|resp| resp.server_key.deserialize().ok()) - { - services().globals.add_signing_key(origin, server_key.clone())?; - - result.extend( - server_key - .verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - result.extend( - server_key - .old_verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - - if contains_all_ids(&result) { - return Ok(result); - } - } - - for server in services().globals.trusted_servers() { - debug!("Asking {} for {}'s signing key", server, origin); - if let Some(server_keys) = services() - .sending - .send_federation_request( - server, - get_remote_server_keys::v2::Request::new( - origin, - MilliSecondsSinceUnixEpoch::from_system_time( - SystemTime::now() - .checked_add(Duration::from_secs(3600)) - .expect("SystemTime to large"), - ) - .expect("time is valid"), - ), - ) - .await - .ok() - .map(|resp| { - resp.server_keys - .into_iter() - .filter_map(|e| e.deserialize().ok()) - .collect::>() - }) - { - trace!("Got signing keys: {:?}", server_keys); - for k in server_keys { - services().globals.add_signing_key(origin, k.clone())?; - result.extend( - k.verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - result.extend( - k.old_verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - } - - if contains_all_ids(&result) { - return Ok(result); - } - } - } - - drop(permit); - - back_off(signature_ids); - - warn!("Failed to find public key for server: {}", origin); - Err(Error::BadServerResponse( - "Failed to find public key for server", - )) + Ok(send_transaction_message::v1::Response { pdus: resolved_map.into_iter().map(|(e, r)| (e, r.map_err(|e| e.to_string()))).collect() }) } #[tracing::instrument(skip(starting_events))] @@ -1050,7 +887,7 @@ pub(crate) async fn get_auth_chain<'a>( } let chunk_key: Vec = chunk.iter().map(|(short, _)| short).copied().collect(); - if let Some(cached) = services().rooms.auth_chain.get_auth_chain_from_cache(&chunk_key)? { + if let Some(cached) = services().rooms.auth_chain.get_cached_eventid_authchain(&chunk_key)? { hits += 1; full_auth_chain.extend(cached.iter().copied()); continue; @@ -1062,7 +899,7 @@ pub(crate) async fn get_auth_chain<'a>( let mut misses2 = 0; let mut i = 0; for (sevent_id, event_id) in chunk { - if let Some(cached) = services().rooms.auth_chain.get_auth_chain_from_cache(&[sevent_id])? { + if let Some(cached) = services().rooms.auth_chain.get_cached_eventid_authchain(&[sevent_id])? 
{ hits2 += 1; chunk_cache.extend(cached.iter().copied()); } else { @@ -1689,7 +1526,7 @@ pub async fn create_invite_route( services().rooms.event_handler.acl_check(&sender_servername, &body.room_id)?; - if !services().rooms.is_supported_version(&body.room_version) { + if !services().globals.supported_room_versions().contains(&body.room_version) { return Err(Error::BadRequest( ErrorKind::IncompatibleRoomVersion { room_version: body.room_version.clone(), diff --git a/src/database/key_value/globals.rs b/src/database/key_value/globals.rs index 81e6ee1f..e6652290 100644 --- a/src/database/key_value/globals.rs +++ b/src/database/key_value/globals.rs @@ -4,10 +4,10 @@ use crate::{Result, service, database::KeyValueDatabase, Error, utils}; impl service::globals::Data for KeyValueDatabase { fn load_keypair(&self) -> Result { - let keypair_bytes = self.globals.get(b"keypair")?.map_or_else( + let keypair_bytes = self.global.get(b"keypair")?.map_or_else( || { let keypair = utils::generate_keypair(); - self.globals.insert(b"keypair", &keypair)?; + self.global.insert(b"keypair", &keypair)?; Ok::<_, Error>(keypair) }, |s| Ok(s.to_vec()), @@ -33,8 +33,10 @@ impl service::globals::Data for KeyValueDatabase { Ed25519KeyPair::from_der(key, version) .map_err(|_| Error::bad_database("Private or public keys are invalid.")) }); + + keypair } fn remove_keypair(&self) -> Result<()> { - self.globals.remove(b"keypair")? + self.global.remove(b"keypair") } } diff --git a/src/database/key_value/media.rs b/src/database/key_value/media.rs index 90a5c590..a84cbd53 100644 --- a/src/database/key_value/media.rs +++ b/src/database/key_value/media.rs @@ -1,3 +1,5 @@ +use ruma::api::client::error::ErrorKind; + use crate::{database::KeyValueDatabase, service, Error, utils, Result}; impl service::media::Data for KeyValueDatabase { @@ -33,7 +35,7 @@ impl service::media::Data for KeyValueDatabase { prefix.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail prefix.push(0xff); - let (key, _) = self.mediaid_file.scan_prefix(prefix).next().ok_or(Error::NotFound)?; + let (key, _) = self.mediaid_file.scan_prefix(prefix).next().ok_or(Error::BadRequest(ErrorKind::NotFound, "Media not found"))?; let mut parts = key.rsplit(|&b| b == 0xff); diff --git a/src/database/key_value/pusher.rs b/src/database/key_value/pusher.rs index 35c84638..b05e47be 100644 --- a/src/database/key_value/pusher.rs +++ b/src/database/key_value/pusher.rs @@ -55,6 +55,6 @@ impl service::pusher::Data for KeyValueDatabase { let mut prefix = sender.as_bytes().to_vec(); prefix.push(0xff); - self.senderkey_pusher.scan_prefix(prefix).map(|(k, _)| k) + Box::new(self.senderkey_pusher.scan_prefix(prefix).map(|(k, _)| k)) } } diff --git a/src/database/key_value/rooms/alias.rs b/src/database/key_value/rooms/alias.rs index c762defa..0aa8dd48 100644 --- a/src/database/key_value/rooms/alias.rs +++ b/src/database/key_value/rooms/alias.rs @@ -56,15 +56,15 @@ impl service::rooms::alias::Data for KeyValueDatabase { fn local_aliases_for_room( &self, room_id: &RoomId, - ) -> Result>> { + ) -> Box>>> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { + Box::new(self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { utils::string_from_bytes(&bytes) .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? 
.try_into() .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) + })) } } diff --git a/src/database/key_value/rooms/auth_chain.rs b/src/database/key_value/rooms/auth_chain.rs index 585d5626..888d472d 100644 --- a/src/database/key_value/rooms/auth_chain.rs +++ b/src/database/key_value/rooms/auth_chain.rs @@ -3,8 +3,8 @@ use std::{collections::HashSet, mem::size_of}; use crate::{service, database::KeyValueDatabase, Result, utils}; impl service::rooms::auth_chain::Data for KeyValueDatabase { - fn get_cached_eventid_authchain(&self, shorteventid: u64) -> Result> { - self.shorteventid_authchain + fn get_cached_eventid_authchain(&self, shorteventid: u64) -> Result>> { + Ok(self.shorteventid_authchain .get(&shorteventid.to_be_bytes())? .map(|chain| { chain @@ -13,7 +13,7 @@ impl service::rooms::auth_chain::Data for KeyValueDatabase { utils::u64_from_bytes(chunk).expect("byte length is correct") }) .collect() - }) + })) } fn cache_eventid_authchain(&self, shorteventid: u64, auth_chain: &HashSet) -> Result<()> { diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs index fbbbff55..1477c28b 100644 --- a/src/database/key_value/rooms/edus/presence.rs +++ b/src/database/key_value/rooms/edus/presence.rs @@ -145,4 +145,6 @@ fn parse_presence_event(bytes: &[u8]) -> Result { .last_active_ago .map(|timestamp| current_timestamp - timestamp); } + + Ok(presence) } diff --git a/src/database/key_value/rooms/edus/read_receipt.rs b/src/database/key_value/rooms/edus/read_receipt.rs index 42d250f7..a12e2653 100644 --- a/src/database/key_value/rooms/edus/read_receipt.rs +++ b/src/database/key_value/rooms/edus/read_receipt.rs @@ -64,7 +64,7 @@ impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { let mut first_possible_edu = prefix.clone(); first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - self.readreceiptid_readreceipt + Box::new(self.readreceiptid_readreceipt .iter_from(&first_possible_edu, false) .take_while(move |(k, _)| k.starts_with(&prefix2)) .map(move |(k, v)| { @@ -91,7 +91,7 @@ impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { serde_json::value::to_raw_value(&json).expect("json is valid raw value"), ), )) - }) + })) } fn private_read_set( diff --git a/src/database/key_value/rooms/lazy_load.rs b/src/database/key_value/rooms/lazy_load.rs index aaf14dd3..133e1d04 100644 --- a/src/database/key_value/rooms/lazy_load.rs +++ b/src/database/key_value/rooms/lazy_load.rs @@ -25,26 +25,19 @@ impl service::rooms::lazy_loading::Data for KeyValueDatabase { user_id: &UserId, device_id: &DeviceId, room_id: &RoomId, - since: u64, + confirmed_user_ids: &mut dyn Iterator, ) -> Result<()> { - if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( - user_id.to_owned(), - device_id.to_owned(), - room_id.to_owned(), - since, - )) { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(device_id.as_bytes()); - prefix.push(0xff); - prefix.extend_from_slice(room_id.as_bytes()); - prefix.push(0xff); + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xff); + prefix.extend_from_slice(device_id.as_bytes()); + prefix.push(0xff); + prefix.extend_from_slice(room_id.as_bytes()); + prefix.push(0xff); - for ll_id in user_ids { - let mut key = prefix.clone(); - key.extend_from_slice(ll_id.as_bytes()); - self.lazyloadedids.insert(&key, &[])?; - } + for ll_id in confirmed_user_ids { + let mut key = 
prefix.clone(); + key.extend_from_slice(ll_id.as_bytes()); + self.lazyloadedids.insert(&key, &[])?; } Ok(()) diff --git a/src/database/key_value/rooms/metadata.rs b/src/database/key_value/rooms/metadata.rs index 0509cbb8..db2bc69b 100644 --- a/src/database/key_value/rooms/metadata.rs +++ b/src/database/key_value/rooms/metadata.rs @@ -1,10 +1,10 @@ use ruma::RoomId; -use crate::{service, database::KeyValueDatabase, Result}; +use crate::{service, database::KeyValueDatabase, Result, services}; impl service::rooms::metadata::Data for KeyValueDatabase { fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { + let prefix = match services().rooms.short.get_shortroomid(room_id)? { Some(b) => b.to_be_bytes().to_vec(), None => return Ok(false), }; diff --git a/src/database/key_value/rooms/search.rs b/src/database/key_value/rooms/search.rs index 15937f6d..dfbdbc64 100644 --- a/src/database/key_value/rooms/search.rs +++ b/src/database/key_value/rooms/search.rs @@ -2,10 +2,10 @@ use std::mem::size_of; use ruma::RoomId; -use crate::{service, database::KeyValueDatabase, utils, Result}; +use crate::{service, database::KeyValueDatabase, utils, Result, services}; impl service::rooms::search::Data for KeyValueDatabase { - fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: u64, message_body: String) -> Result<()> { + fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: String) -> Result<()> { let mut batch = message_body .split_terminator(|c: char| !c.is_alphanumeric()) .filter(|s| !s.is_empty()) @@ -27,7 +27,7 @@ impl service::rooms::search::Data for KeyValueDatabase { room_id: &RoomId, search_string: &str, ) -> Result>>, Vec)>> { - let prefix = self + let prefix = services().rooms.short .get_shortroomid(room_id)? .expect("room exists") .to_be_bytes() @@ -60,11 +60,11 @@ impl service::rooms::search::Data for KeyValueDatabase { }) .map(|iter| { ( - iter.map(move |id| { + Box::new(iter.map(move |id| { let mut pduid = prefix_clone.clone(); pduid.extend_from_slice(&id); pduid - }), + })), words, ) })) diff --git a/src/database/key_value/rooms/state_accessor.rs b/src/database/key_value/rooms/state_accessor.rs index 037b98fc..4d5bd4a1 100644 --- a/src/database/key_value/rooms/state_accessor.rs +++ b/src/database/key_value/rooms/state_accessor.rs @@ -1,13 +1,13 @@ use std::{collections::{BTreeMap, HashMap}, sync::Arc}; -use crate::{database::KeyValueDatabase, service, PduEvent, Error, utils, Result}; +use crate::{database::KeyValueDatabase, service, PduEvent, Error, utils, Result, services}; use async_trait::async_trait; use ruma::{EventId, events::StateEventType, RoomId}; #[async_trait] impl service::rooms::state_accessor::Data for KeyValueDatabase { async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self + let full_state = services().rooms.state_compressor .load_shortstatehash_info(shortstatehash)? 
.pop() .expect("there is always one layer") @@ -15,7 +15,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { let mut result = BTreeMap::new(); let mut i = 0; for compressed in full_state.into_iter() { - let parsed = self.parse_compressed_state_event(compressed)?; + let parsed = services().rooms.state_compressor.parse_compressed_state_event(compressed)?; result.insert(parsed.0, parsed.1); i += 1; @@ -30,7 +30,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { &self, shortstatehash: u64, ) -> Result>> { - let full_state = self + let full_state = services().rooms.state_compressor .load_shortstatehash_info(shortstatehash)? .pop() .expect("there is always one layer") @@ -39,8 +39,8 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { let mut result = HashMap::new(); let mut i = 0; for compressed in full_state { - let (_, eventid) = self.parse_compressed_state_event(compressed)?; - if let Some(pdu) = self.get_pdu(&eventid)? { + let (_, eventid) = services().rooms.state_compressor.parse_compressed_state_event(compressed)?; + if let Some(pdu) = services().rooms.timeline.get_pdu(&eventid)? { result.insert( ( pdu.kind.to_string().into(), @@ -69,11 +69,11 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { event_type: &StateEventType, state_key: &str, ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { + let shortstatekey = match services().rooms.short.get_shortstatekey(event_type, state_key)? { Some(s) => s, None => return Ok(None), }; - let full_state = self + let full_state = services().rooms.state_compressor .load_shortstatehash_info(shortstatehash)? .pop() .expect("there is always one layer") @@ -82,7 +82,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { .into_iter() .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) .and_then(|compressed| { - self.parse_compressed_state_event(compressed) + services().rooms.state_compressor.parse_compressed_state_event(compressed) .ok() .map(|(_, id)| id) })) @@ -96,7 +96,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { state_key: &str, ) -> Result>> { self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) + .map_or(Ok(None), |event_id| services().rooms.timeline.get_pdu(&event_id)) } /// Returns the state hash for this pdu. @@ -122,7 +122,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { &self, room_id: &RoomId, ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { + if let Some(current_shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)? { self.state_full(current_shortstatehash).await } else { Ok(HashMap::new()) @@ -136,7 +136,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { event_type: &StateEventType, state_key: &str, ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { + if let Some(current_shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)? { self.state_get_id(current_shortstatehash, event_type, state_key) } else { Ok(None) @@ -150,7 +150,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { event_type: &StateEventType, state_key: &str, ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { + if let Some(current_shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)? 
{ self.state_get(current_shortstatehash, event_type, state_key) } else { Ok(None) diff --git a/src/database/key_value/rooms/state_compressor.rs b/src/database/key_value/rooms/state_compressor.rs index 23a7122b..aee1890c 100644 --- a/src/database/key_value/rooms/state_compressor.rs +++ b/src/database/key_value/rooms/state_compressor.rs @@ -39,8 +39,8 @@ impl service::rooms::state_compressor::Data for KeyValueDatabase { } fn save_statediff(&self, shortstatehash: u64, diff: StateDiff) -> Result<()> { - let mut value = diff.parent.to_be_bytes().to_vec(); - for new in &diff.new { + let mut value = diff.parent.unwrap_or(0).to_be_bytes().to_vec(); + for new in &diff.added { value.extend_from_slice(&new[..]); } diff --git a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs index c42509e0..a3b6c17d 100644 --- a/src/database/key_value/rooms/timeline.rs +++ b/src/database/key_value/rooms/timeline.rs @@ -3,7 +3,7 @@ use std::{collections::hash_map, mem::size_of, sync::Arc}; use ruma::{UserId, RoomId, api::client::error::ErrorKind, EventId, signatures::CanonicalJsonObject}; use tracing::error; -use crate::{service, database::KeyValueDatabase, utils, Error, PduEvent, Result}; +use crate::{service, database::KeyValueDatabase, utils, Error, PduEvent, Result, services}; impl service::rooms::timeline::Data for KeyValueDatabase { fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { @@ -191,7 +191,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { room_id: &RoomId, since: u64, ) -> Result, PduEvent)>>>> { - let prefix = self + let prefix = services().rooms.short .get_shortroomid(room_id)? .expect("room exists") .to_be_bytes() @@ -203,7 +203,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { let user_id = user_id.to_owned(); - Ok(self + Ok(Box::new(self .pduid_pdu .iter_from(&first_pdu_id, false) .take_while(move |(k, _)| k.starts_with(&prefix)) @@ -214,7 +214,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { pdu.remove_transaction_id()?; } Ok((pdu_id, pdu)) - })) + }))) } /// Returns an iterator over all events and their tokens in a room that happened before the @@ -226,7 +226,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { until: u64, ) -> Result, PduEvent)>>>> { // Create the first part of the full pdu id - let prefix = self + let prefix = services().rooms.short .get_shortroomid(room_id)? .expect("room exists") .to_be_bytes() @@ -239,7 +239,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { let user_id = user_id.to_owned(); - Ok(self + Ok(Box::new(self .pduid_pdu .iter_from(current, true) .take_while(move |(k, _)| k.starts_with(&prefix)) @@ -250,7 +250,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { pdu.remove_transaction_id()?; } Ok((pdu_id, pdu)) - })) + }))) } fn pdus_after<'a>( @@ -260,7 +260,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { from: u64, ) -> Result, PduEvent)>>>> { // Create the first part of the full pdu id - let prefix = self + let prefix = services().rooms.short .get_shortroomid(room_id)? 
.expect("room exists") .to_be_bytes() @@ -273,7 +273,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { let user_id = user_id.to_owned(); - Ok(self + Ok(Box::new(self .pduid_pdu .iter_from(current, false) .take_while(move |(k, _)| k.starts_with(&prefix)) @@ -284,6 +284,6 @@ impl service::rooms::timeline::Data for KeyValueDatabase { pdu.remove_transaction_id()?; } Ok((pdu_id, pdu)) - })) + }))) } } diff --git a/src/database/key_value/rooms/user.rs b/src/database/key_value/rooms/user.rs index d49bc1d7..66681e3c 100644 --- a/src/database/key_value/rooms/user.rs +++ b/src/database/key_value/rooms/user.rs @@ -1,6 +1,6 @@ use ruma::{UserId, RoomId}; -use crate::{service, database::KeyValueDatabase, utils, Error, Result}; +use crate::{service, database::KeyValueDatabase, utils, Error, Result, services}; impl service::rooms::user::Data for KeyValueDatabase { fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { @@ -50,7 +50,7 @@ impl service::rooms::user::Data for KeyValueDatabase { token: u64, shortstatehash: u64, ) -> Result<()> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); + let shortroomid = services().rooms.short.get_shortroomid(room_id)?.expect("room exists"); let mut key = shortroomid.to_be_bytes().to_vec(); key.extend_from_slice(&token.to_be_bytes()); @@ -60,7 +60,7 @@ impl service::rooms::user::Data for KeyValueDatabase { } fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); + let shortroomid = services().rooms.short.get_shortroomid(room_id)?.expect("room exists"); let mut key = shortroomid.to_be_bytes().to_vec(); key.extend_from_slice(&token.to_be_bytes()); diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs index 82e3bac6..338d8800 100644 --- a/src/database/key_value/users.rs +++ b/src/database/key_value/users.rs @@ -57,12 +57,12 @@ impl service::users::Data for KeyValueDatabase { /// Returns an iterator over all users on this homeserver. fn iter(&self) -> Box>>> { - self.userid_password.iter().map(|(bytes, _)| { + Box::new(self.userid_password.iter().map(|(bytes, _)| { UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("User ID in userid_password is invalid unicode.") })?) .map_err(|_| Error::bad_database("User ID in userid_password is invalid.")) - }) + })) } /// Returns a list of local users as list of usernames. @@ -274,7 +274,7 @@ impl service::users::Data for KeyValueDatabase { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); // All devices have metadata - self.userdeviceid_metadata + Box::new(self.userdeviceid_metadata .scan_prefix(prefix) .map(|(bytes, _)| { Ok(utils::string_from_bytes( @@ -285,7 +285,7 @@ impl service::users::Data for KeyValueDatabase { ) .map_err(|_| Error::bad_database("Device ID in userdeviceid_metadata is invalid."))? .into()) - }) + })) } /// Replaces the access token of one device. @@ -617,7 +617,7 @@ impl service::users::Data for KeyValueDatabase { let to = to.unwrap_or(u64::MAX); - self.keychangeid_userid + Box::new(self.keychangeid_userid .iter_from(&start, false) .take_while(move |(k, _)| { k.starts_with(&prefix) @@ -638,7 +638,7 @@ impl service::users::Data for KeyValueDatabase { Error::bad_database("User ID in devicekeychangeid_userid is invalid unicode.") })?) 
.map_err(|_| Error::bad_database("User ID in devicekeychangeid_userid is invalid.")) - }) + })) } fn mark_device_key_update( @@ -646,9 +646,10 @@ impl service::users::Data for KeyValueDatabase { user_id: &UserId, ) -> Result<()> { let count = services().globals.next_count()?.to_be_bytes(); - for room_id in services().rooms.rooms_joined(user_id).filter_map(|r| r.ok()) { + for room_id in services().rooms.state_cache.rooms_joined(user_id).filter_map(|r| r.ok()) { // Don't send key updates to unencrypted rooms if services().rooms + .state_accessor .room_state_get(&room_id, &StateEventType::RoomEncryption, "")? .is_none() { @@ -882,12 +883,12 @@ impl service::users::Data for KeyValueDatabase { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); - self.userdeviceid_metadata + Box::new(self.userdeviceid_metadata .scan_prefix(key) .map(|(_, bytes)| { serde_json::from_slice::(&bytes) .map_err(|_| Error::bad_database("Device in userdeviceid_metadata is invalid.")) - }) + })) } /// Creates a new sync filter. Returns the filter id. diff --git a/src/database/mod.rs b/src/database/mod.rs index 22bfef06..aa5c5839 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -1,7 +1,7 @@ pub mod abstraction; pub mod key_value; -use crate::{utils, Config, Error, Result, service::{users, globals, uiaa, rooms, account_data, media, key_backups, transaction_ids, sending, appservice, pusher}}; +use crate::{utils, Config, Error, Result, service::{users, globals, uiaa, rooms::{self, state_compressor::CompressedStateEvent}, account_data, media, key_backups, transaction_ids, sending, appservice, pusher}, services, PduEvent, Services, SERVICES}; use abstraction::KeyValueDatabaseEngine; use directories::ProjectDirs; use futures_util::{stream::FuturesUnordered, StreamExt}; @@ -9,7 +9,7 @@ use lru_cache::LruCache; use ruma::{ events::{ push_rules::PushRulesEventContent, room::message::RoomMessageEventContent, - GlobalAccountDataEvent, GlobalAccountDataEventType, + GlobalAccountDataEvent, GlobalAccountDataEventType, StateEventType, }, push::Ruleset, DeviceId, EventId, RoomId, UserId, signatures::CanonicalJsonValue, @@ -151,6 +151,30 @@ pub struct KeyValueDatabase { //pub pusher: pusher::PushData, pub(super) senderkey_pusher: Arc, + + pub(super) cached_registrations: Arc>>, + pub(super) pdu_cache: Mutex, Arc>>, + pub(super) shorteventid_cache: Mutex>>, + pub(super) auth_chain_cache: Mutex, Arc>>>, + pub(super) eventidshort_cache: Mutex, u64>>, + pub(super) statekeyshort_cache: Mutex>, + pub(super) shortstatekey_cache: Mutex>, + pub(super) our_real_users_cache: RwLock, Arc>>>>, + pub(super) appservice_in_room_cache: RwLock, HashMap>>, + pub(super) lazy_load_waiting: + Mutex, Box, Box, u64), HashSet>>>, + pub(super) stateinfo_cache: Mutex< + LruCache< + u64, + Vec<( + u64, // sstatehash + HashSet, // full state + HashSet, // added + HashSet, // removed + )>, + >, + >, + pub(super) lasttimelinecount_cache: Mutex, u64>>, } impl KeyValueDatabase { @@ -214,7 +238,7 @@ impl KeyValueDatabase { } /// Load an existing database or create a new one. 
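The hunks that follow rework startup: load_or_create() no longer hands a database handle back to main(); it builds the database, constructs the Services registry from it, and publishes that registry through the SERVICES static that services() reads. A minimal sketch of that shape, using stand-in types rather than Conduit's real KeyValueDatabase and Services structs, looks roughly like this:

    use std::sync::{Arc, RwLock};

    struct Db;                       // stand-in for KeyValueDatabase
    struct Services { db: Arc<Db> }  // stand-in for the real Services struct

    static SERVICES: RwLock<Option<Arc<Services>>> = RwLock::new(None);

    // Later callers never see the database directly; they go through services().
    fn services() -> Arc<Services> {
        Arc::clone(
            SERVICES
                .read()
                .unwrap()
                .as_ref()
                .expect("SERVICES is initialized once at startup"),
        )
    }

    // Built exactly once at startup, before anything calls services().
    fn load_or_create() {
        let db = Arc::new(Db);
        *SERVICES.write().unwrap() = Some(Arc::new(Services { db }));
    }

    fn main() {
        load_or_create();
        let _db = &services().db;
    }

The trade-off over threading a database handle through every signature is that any service or request handler can reach shared state, which is why the migrations below switch from db.rooms/db.users calls to services().rooms and services().users.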
- pub async fn load_or_create(config: &Config) -> Result>> { + pub async fn load_or_create(config: &Config) -> Result<()> { Self::check_db_setup(config)?; if !Path::new(&config.database_path).exists() { @@ -253,7 +277,7 @@ impl KeyValueDatabase { let (admin_sender, admin_receiver) = mpsc::unbounded_channel(); let (sending_sender, sending_receiver) = mpsc::unbounded_channel(); - let db = Self { + let db = Arc::new(Self { _db: builder.clone(), userid_password: builder.open_tree("userid_password")?, userid_displayname: builder.open_tree("userid_displayname")?, @@ -345,18 +369,53 @@ impl KeyValueDatabase { senderkey_pusher: builder.open_tree("senderkey_pusher")?, global: builder.open_tree("global")?, server_signingkeys: builder.open_tree("server_signingkeys")?, - }; - // TODO: do this after constructing the db + cached_registrations: Arc::new(RwLock::new(HashMap::new())), + pdu_cache: Mutex::new(LruCache::new( + config + .pdu_cache_capacity + .try_into() + .expect("pdu cache capacity fits into usize"), + )), + auth_chain_cache: Mutex::new(LruCache::new( + (100_000.0 * config.conduit_cache_capacity_modifier) as usize, + )), + shorteventid_cache: Mutex::new(LruCache::new( + (100_000.0 * config.conduit_cache_capacity_modifier) as usize, + )), + eventidshort_cache: Mutex::new(LruCache::new( + (100_000.0 * config.conduit_cache_capacity_modifier) as usize, + )), + shortstatekey_cache: Mutex::new(LruCache::new( + (100_000.0 * config.conduit_cache_capacity_modifier) as usize, + )), + statekeyshort_cache: Mutex::new(LruCache::new( + (100_000.0 * config.conduit_cache_capacity_modifier) as usize, + )), + our_real_users_cache: RwLock::new(HashMap::new()), + appservice_in_room_cache: RwLock::new(HashMap::new()), + lazy_load_waiting: Mutex::new(HashMap::new()), + stateinfo_cache: Mutex::new(LruCache::new( + (100.0 * config.conduit_cache_capacity_modifier) as usize, + )), + lasttimelinecount_cache: Mutex::new(HashMap::new()), + + }); + + let services_raw = Services::build(Arc::clone(&db)); + + // This is the first and only time we initialize the SERVICE static + *SERVICES.write().unwrap() = Some(services_raw); + // Matrix resource ownership is based on the server name; changing it // requires recreating the database from scratch. - if guard.users.count()? > 0 { + if services().users.count()? > 0 { let conduit_user = - UserId::parse_with_server_name("conduit", guard.globals.server_name()) + UserId::parse_with_server_name("conduit", services().globals.server_name()) .expect("@conduit:server_name is valid"); - if !guard.users.exists(&conduit_user)? { + if !services().users.exists(&conduit_user)? { error!( "The {} server user does not exist, and the database is not new.", conduit_user @@ -370,11 +429,10 @@ impl KeyValueDatabase { // If the database has any data, perform data migrations before starting let latest_database_version = 11; - if guard.users.count()? > 0 { - let db = &*guard; + if services().users.count()? > 0 { // MIGRATIONS - if db.globals.database_version()? < 1 { - for (roomserverid, _) in db.rooms.roomserverids.iter() { + if services().globals.database_version()? 
< 1 { + for (roomserverid, _) in db.roomserverids.iter() { let mut parts = roomserverid.split(|&b| b == 0xff); let room_id = parts.next().expect("split always returns one element"); let servername = match parts.next() { @@ -388,17 +446,17 @@ impl KeyValueDatabase { serverroomid.push(0xff); serverroomid.extend_from_slice(room_id); - db.rooms.serverroomids.insert(&serverroomid, &[])?; + db.serverroomids.insert(&serverroomid, &[])?; } - db.globals.bump_database_version(1)?; + services().globals.bump_database_version(1)?; warn!("Migration: 0 -> 1 finished"); } - if db.globals.database_version()? < 2 { + if services().globals.database_version()? < 2 { // We accidentally inserted hashed versions of "" into the db instead of just "" - for (userid, password) in db.users.userid_password.iter() { + for (userid, password) in db.userid_password.iter() { let password = utils::string_from_bytes(&password); let empty_hashed_password = password.map_or(false, |password| { @@ -406,59 +464,59 @@ impl KeyValueDatabase { }); if empty_hashed_password { - db.users.userid_password.insert(&userid, b"")?; + db.userid_password.insert(&userid, b"")?; } } - db.globals.bump_database_version(2)?; + services().globals.bump_database_version(2)?; warn!("Migration: 1 -> 2 finished"); } - if db.globals.database_version()? < 3 { + if services().globals.database_version()? < 3 { // Move media to filesystem - for (key, content) in db.media.mediaid_file.iter() { + for (key, content) in db.mediaid_file.iter() { if content.is_empty() { continue; } - let path = db.globals.get_media_file(&key); + let path = services().globals.get_media_file(&key); let mut file = fs::File::create(path)?; file.write_all(&content)?; - db.media.mediaid_file.insert(&key, &[])?; + db.mediaid_file.insert(&key, &[])?; } - db.globals.bump_database_version(3)?; + services().globals.bump_database_version(3)?; warn!("Migration: 2 -> 3 finished"); } - if db.globals.database_version()? < 4 { - // Add federated users to db as deactivated - for our_user in db.users.iter() { + if services().globals.database_version()? < 4 { + // Add federated users to services() as deactivated + for our_user in services().users.iter() { let our_user = our_user?; - if db.users.is_deactivated(&our_user)? { + if services().users.is_deactivated(&our_user)? { continue; } - for room in db.rooms.rooms_joined(&our_user) { - for user in db.rooms.room_members(&room?) { + for room in services().rooms.state_cache.rooms_joined(&our_user) { + for user in services().rooms.state_cache.room_members(&room?) { let user = user?; - if user.server_name() != db.globals.server_name() { + if user.server_name() != services().globals.server_name() { println!("Migration: Creating user {}", user); - db.users.create(&user, None)?; + services().users.create(&user, None)?; } } } } - db.globals.bump_database_version(4)?; + services().globals.bump_database_version(4)?; warn!("Migration: 3 -> 4 finished"); } - if db.globals.database_version()? < 5 { + if services().globals.database_version()? 
< 5 { // Upgrade user data store - for (roomuserdataid, _) in db.account_data.roomuserdataid_accountdata.iter() { + for (roomuserdataid, _) in db.roomuserdataid_accountdata.iter() { let mut parts = roomuserdataid.split(|&b| b == 0xff); let room_id = parts.next().unwrap(); let user_id = parts.next().unwrap(); @@ -470,30 +528,29 @@ impl KeyValueDatabase { key.push(0xff); key.extend_from_slice(event_type); - db.account_data - .roomusertype_roomuserdataid + db.roomusertype_roomuserdataid .insert(&key, &roomuserdataid)?; } - db.globals.bump_database_version(5)?; + services().globals.bump_database_version(5)?; warn!("Migration: 4 -> 5 finished"); } - if db.globals.database_version()? < 6 { + if services().globals.database_version()? < 6 { // Set room member count - for (roomid, _) in db.rooms.roomid_shortstatehash.iter() { + for (roomid, _) in db.roomid_shortstatehash.iter() { let string = utils::string_from_bytes(&roomid).unwrap(); let room_id = <&RoomId>::try_from(string.as_str()).unwrap(); - db.rooms.update_joined_count(room_id, &db)?; + services().rooms.state_cache.update_joined_count(room_id)?; } - db.globals.bump_database_version(6)?; + services().globals.bump_database_version(6)?; warn!("Migration: 5 -> 6 finished"); } - if db.globals.database_version()? < 7 { + if services().globals.database_version()? < 7 { // Upgrade state store let mut last_roomstates: HashMap, u64> = HashMap::new(); let mut current_sstatehash: Option = None; @@ -513,7 +570,7 @@ impl KeyValueDatabase { let states_parents = last_roomsstatehash.map_or_else( || Ok(Vec::new()), |&last_roomsstatehash| { - db.rooms.state_accessor.load_shortstatehash_info(dbg!(last_roomsstatehash)) + services().rooms.state_compressor.load_shortstatehash_info(dbg!(last_roomsstatehash)) }, )?; @@ -535,7 +592,7 @@ impl KeyValueDatabase { (current_state, HashSet::new()) }; - db.rooms.save_state_from_diff( + services().rooms.state_compressor.save_state_from_diff( dbg!(current_sstatehash), statediffnew, statediffremoved, @@ -544,7 +601,7 @@ impl KeyValueDatabase { )?; /* - let mut tmp = db.rooms.load_shortstatehash_info(¤t_sstatehash, &db)?; + let mut tmp = services().rooms.load_shortstatehash_info(¤t_sstatehash)?; let state = tmp.pop().unwrap(); println!( "{}\t{}{:?}: {:?} + {:?} - {:?}", @@ -587,14 +644,13 @@ impl KeyValueDatabase { current_sstatehash = Some(sstatehash); let event_id = db - .rooms .shorteventid_eventid .get(&seventid) .unwrap() .unwrap(); let string = utils::string_from_bytes(&event_id).unwrap(); let event_id = <&EventId>::try_from(string.as_str()).unwrap(); - let pdu = db.rooms.get_pdu(event_id).unwrap().unwrap(); + let pdu = services().rooms.timeline.get_pdu(event_id).unwrap().unwrap(); if Some(&pdu.room_id) != current_room.as_ref() { current_room = Some(pdu.room_id.clone()); @@ -615,20 +671,20 @@ impl KeyValueDatabase { )?; } - db.globals.bump_database_version(7)?; + services().globals.bump_database_version(7)?; warn!("Migration: 6 -> 7 finished"); } - if db.globals.database_version()? < 8 { + if services().globals.database_version()? 
< 8 { // Generate short room ids for all rooms - for (room_id, _) in db.rooms.roomid_shortstatehash.iter() { - let shortroomid = db.globals.next_count()?.to_be_bytes(); - db.rooms.roomid_shortroomid.insert(&room_id, &shortroomid)?; + for (room_id, _) in db.roomid_shortstatehash.iter() { + let shortroomid = services().globals.next_count()?.to_be_bytes(); + db.roomid_shortroomid.insert(&room_id, &shortroomid)?; info!("Migration: 8"); } // Update pduids db layout - let mut batch = db.rooms.pduid_pdu.iter().filter_map(|(key, v)| { + let mut batch = db.pduid_pdu.iter().filter_map(|(key, v)| { if !key.starts_with(b"!") { return None; } @@ -637,7 +693,6 @@ impl KeyValueDatabase { let count = parts.next().unwrap(); let short_room_id = db - .rooms .roomid_shortroomid .get(room_id) .unwrap() @@ -649,9 +704,9 @@ impl KeyValueDatabase { Some((new_key, v)) }); - db.rooms.pduid_pdu.insert_batch(&mut batch)?; + db.pduid_pdu.insert_batch(&mut batch)?; - let mut batch2 = db.rooms.eventid_pduid.iter().filter_map(|(k, value)| { + let mut batch2 = db.eventid_pduid.iter().filter_map(|(k, value)| { if !value.starts_with(b"!") { return None; } @@ -660,7 +715,6 @@ impl KeyValueDatabase { let count = parts.next().unwrap(); let short_room_id = db - .rooms .roomid_shortroomid .get(room_id) .unwrap() @@ -672,17 +726,16 @@ impl KeyValueDatabase { Some((k, new_value)) }); - db.rooms.eventid_pduid.insert_batch(&mut batch2)?; + db.eventid_pduid.insert_batch(&mut batch2)?; - db.globals.bump_database_version(8)?; + services().globals.bump_database_version(8)?; warn!("Migration: 7 -> 8 finished"); } - if db.globals.database_version()? < 9 { + if services().globals.database_version()? < 9 { // Update tokenids db layout let mut iter = db - .rooms .tokenids .iter() .filter_map(|(key, _)| { @@ -696,7 +749,6 @@ impl KeyValueDatabase { let pdu_id_count = parts.next().unwrap(); let short_room_id = db - .rooms .roomid_shortroomid .get(room_id) .unwrap() @@ -712,8 +764,7 @@ impl KeyValueDatabase { .peekable(); while iter.peek().is_some() { - db.rooms - .tokenids + db.tokenids .insert_batch(&mut iter.by_ref().take(1000))?; println!("smaller batch done"); } @@ -721,7 +772,6 @@ impl KeyValueDatabase { info!("Deleting starts"); let batch2: Vec<_> = db - .rooms .tokenids .iter() .filter_map(|(key, _)| { @@ -736,38 +786,37 @@ impl KeyValueDatabase { for key in batch2 { println!("del"); - db.rooms.tokenids.remove(&key)?; + db.tokenids.remove(&key)?; } - db.globals.bump_database_version(9)?; + services().globals.bump_database_version(9)?; warn!("Migration: 8 -> 9 finished"); } - if db.globals.database_version()? < 10 { + if services().globals.database_version()? < 10 { // Add other direction for shortstatekeys - for (statekey, shortstatekey) in db.rooms.statekey_shortstatekey.iter() { - db.rooms - .shortstatekey_statekey + for (statekey, shortstatekey) in db.statekey_shortstatekey.iter() { + db.shortstatekey_statekey .insert(&shortstatekey, &statekey)?; } // Force E2EE device list updates so we can send them over federation - for user_id in db.users.iter().filter_map(|r| r.ok()) { - db.users - .mark_device_key_update(&user_id, &db.rooms, &db.globals)?; + for user_id in services().users.iter().filter_map(|r| r.ok()) { + services().users + .mark_device_key_update(&user_id)?; } - db.globals.bump_database_version(10)?; + services().globals.bump_database_version(10)?; warn!("Migration: 9 -> 10 finished"); } - if db.globals.database_version()? < 11 { + if services().globals.database_version()? 
< 11 { db._db .open_tree("userdevicesessionid_uiaarequest")? .clear()?; - db.globals.bump_database_version(11)?; + services().globals.bump_database_version(11)?; warn!("Migration: 10 -> 11 finished"); } @@ -779,12 +828,12 @@ impl KeyValueDatabase { config.database_backend, latest_database_version ); } else { - guard + services() .globals .bump_database_version(latest_database_version)?; // Create the admin room and server user on first run - create_admin_room().await?; + services().admin.create_admin_room().await?; warn!( "Created new {} database with version {}", @@ -793,16 +842,16 @@ impl KeyValueDatabase { } // This data is probably outdated - guard.rooms.edus.presenceid_presence.clear()?; + db.presenceid_presence.clear()?; - guard.admin.start_handler(Arc::clone(&db), admin_receiver); + services().admin.start_handler(admin_receiver); // Set emergency access for the conduit user - match set_emergency_access(&guard) { + match set_emergency_access() { Ok(pwd_set) => { if pwd_set { warn!("The Conduit account emergency password is set! Please unset it as soon as you finish admin account recovery!"); - guard.admin.send_message(RoomMessageEventContent::text_plain("The Conduit account emergency password is set! Please unset it as soon as you finish admin account recovery!")); + services().admin.send_message(RoomMessageEventContent::text_plain("The Conduit account emergency password is set! Please unset it as soon as you finish admin account recovery!")); } } Err(e) => { @@ -813,21 +862,19 @@ impl KeyValueDatabase { } }; - guard + services() .sending - .start_handler(Arc::clone(&db), sending_receiver); + .start_handler(sending_receiver); - drop(guard); + Self::start_cleanup_task(config).await; - Self::start_cleanup_task(Arc::clone(&db), config).await; - - Ok(db) + Ok(()) } #[cfg(feature = "conduit_bin")] - pub async fn on_shutdown(db: Arc>) { + pub async fn on_shutdown() { info!(target: "shutdown-sync", "Received shutdown notification, notifying sync helpers..."); - db.read().await.globals.rotate.fire(); + services().globals.rotate.fire(); } pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) { @@ -844,33 +891,30 @@ impl KeyValueDatabase { // Return when *any* user changed his key // TODO: only send for user they share a room with futures.push( - self.users - .todeviceid_events + self.todeviceid_events .watch_prefix(&userdeviceid_prefix), ); - futures.push(self.rooms.userroomid_joined.watch_prefix(&userid_prefix)); + futures.push(self.userroomid_joined.watch_prefix(&userid_prefix)); futures.push( - self.rooms - .userroomid_invitestate + self.userroomid_invitestate .watch_prefix(&userid_prefix), ); - futures.push(self.rooms.userroomid_leftstate.watch_prefix(&userid_prefix)); + futures.push(self.userroomid_leftstate.watch_prefix(&userid_prefix)); futures.push( - self.rooms - .userroomid_notificationcount + self.userroomid_notificationcount .watch_prefix(&userid_prefix), ); futures.push( - self.rooms - .userroomid_highlightcount + self.userroomid_highlightcount .watch_prefix(&userid_prefix), ); // Events for rooms we are in - for room_id in self.rooms.rooms_joined(user_id).filter_map(|r| r.ok()) { - let short_roomid = self + for room_id in services().rooms.state_cache.rooms_joined(user_id).filter_map(|r| r.ok()) { + let short_roomid = services() .rooms + .short .get_shortroomid(&room_id) .ok() .flatten() @@ -883,33 +927,28 @@ impl KeyValueDatabase { roomid_prefix.push(0xff); // PDUs - futures.push(self.rooms.pduid_pdu.watch_prefix(&short_roomid)); + 
futures.push(self.pduid_pdu.watch_prefix(&short_roomid)); // EDUs futures.push( - self.rooms - .edus - .roomid_lasttypingupdate + self.roomid_lasttypingupdate .watch_prefix(&roomid_bytes), ); futures.push( - self.rooms - .edus - .readreceiptid_readreceipt + self.readreceiptid_readreceipt .watch_prefix(&roomid_prefix), ); // Key changes - futures.push(self.users.keychangeid_userid.watch_prefix(&roomid_prefix)); + futures.push(self.keychangeid_userid.watch_prefix(&roomid_prefix)); // Room account data let mut roomuser_prefix = roomid_prefix.clone(); roomuser_prefix.extend_from_slice(&userid_prefix); futures.push( - self.account_data - .roomusertype_roomuserdataid + self.roomusertype_roomuserdataid .watch_prefix(&roomuser_prefix), ); } @@ -918,22 +957,20 @@ impl KeyValueDatabase { globaluserdata_prefix.extend_from_slice(&userid_prefix); futures.push( - self.account_data - .roomusertype_roomuserdataid + self.roomusertype_roomuserdataid .watch_prefix(&globaluserdata_prefix), ); // More key changes (used when user is not joined to any rooms) - futures.push(self.users.keychangeid_userid.watch_prefix(&userid_prefix)); + futures.push(self.keychangeid_userid.watch_prefix(&userid_prefix)); // One time keys futures.push( - self.users - .userid_lastonetimekeyupdate + self.userid_lastonetimekeyupdate .watch_prefix(&userid_bytes), ); - futures.push(Box::pin(self.globals.rotate.watch())); + futures.push(Box::pin(services().globals.rotate.watch())); // Wait until one of them finds something futures.next().await; @@ -950,8 +987,8 @@ impl KeyValueDatabase { res } - #[tracing::instrument(skip(db, config))] - pub async fn start_cleanup_task(db: Arc>, config: &Config) { + #[tracing::instrument(skip(config))] + pub async fn start_cleanup_task(config: &Config) { use tokio::time::interval; #[cfg(unix)] @@ -984,7 +1021,7 @@ impl KeyValueDatabase { } let start = Instant::now(); - if let Err(e) = db.read().await._db.cleanup() { + if let Err(e) = services().globals.db._db.cleanup() { error!("cleanup: Errored: {}", e); } else { info!("cleanup: Finished in {:?}", start.elapsed()); @@ -995,26 +1032,25 @@ impl KeyValueDatabase { } /// Sets the emergency password and push rules for the @conduit account in case emergency password is set -fn set_emergency_access(db: &KeyValueDatabase) -> Result { - let conduit_user = UserId::parse_with_server_name("conduit", db.globals.server_name()) +fn set_emergency_access() -> Result { + let conduit_user = UserId::parse_with_server_name("conduit", services().globals.server_name()) .expect("@conduit:server_name is a valid UserId"); - db.users - .set_password(&conduit_user, db.globals.emergency_password().as_deref())?; + services().users + .set_password(&conduit_user, services().globals.emergency_password().as_deref())?; - let (ruleset, res) = match db.globals.emergency_password() { + let (ruleset, res) = match services().globals.emergency_password() { Some(_) => (Ruleset::server_default(&conduit_user), Ok(true)), None => (Ruleset::new(), Ok(false)), }; - db.account_data.update( + services().account_data.update( None, &conduit_user, GlobalAccountDataEventType::PushRules.to_string().into(), &GlobalAccountDataEvent { content: PushRulesEventContent { global: ruleset }, }, - &db.globals, )?; res diff --git a/src/lib.rs b/src/lib.rs index 72399003..75cf6c7e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -13,22 +13,16 @@ mod service; pub mod api; mod utils; -use std::{cell::Cell, sync::RwLock}; +use std::{cell::Cell, sync::{RwLock, Arc}}; pub use config::Config; pub use utils::error::{Error, 
Result}; pub use service::{Services, pdu::PduEvent}; pub use api::ruma_wrapper::{Ruma, RumaResponse}; -use crate::database::KeyValueDatabase; +pub static SERVICES: RwLock>> = RwLock::new(None); -pub static SERVICES: RwLock> = RwLock::new(None); - -enum ServicesEnum { - Rocksdb(Services) -} - -pub fn services<'a>() -> &'a Services { - &SERVICES.read().unwrap() +pub fn services<'a>() -> Arc { + Arc::clone(&SERVICES.read().unwrap()) } diff --git a/src/main.rs b/src/main.rs index 543b953e..d5b2731e 100644 --- a/src/main.rs +++ b/src/main.rs @@ -69,19 +69,14 @@ async fn main() { config.warn_deprecated(); - let db = match KeyValueDatabase::load_or_create(&config).await { - Ok(db) => db, - Err(e) => { - eprintln!( - "The database couldn't be loaded or created. The following error occured: {}", - e - ); - std::process::exit(1); - } + if let Err(e) = KeyValueDatabase::load_or_create(&config).await { + eprintln!( + "The database couldn't be loaded or created. The following error occured: {}", + e + ); + std::process::exit(1); }; - SERVICES.set(db).expect("this is the first and only time we initialize the SERVICE static"); - let start = async { run_server().await.unwrap(); }; diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index c56c69d2..35ca1495 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -17,11 +17,11 @@ use tracing::error; use crate::{service::*, services, utils, Error, Result}; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { /// Places one event in the account data of the user and removes the previous entry. #[tracing::instrument(skip(self, room_id, user_id, event_type, data))] pub fn update( diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index 63fa3afe..1a5ce50c 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -3,11 +3,11 @@ pub use data::Data; use crate::Result; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { /// Registers an appservice and returns the ID to the caller pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result { self.db.register_appservice(yaml) diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 6cfeab81..48d7b064 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -36,8 +36,8 @@ type SyncHandle = ( Receiver>>, // rx ); -pub struct Service { - pub db: D, +pub struct Service { + pub db: Box, pub actual_destination_cache: Arc>, // actual_destination, host pub tls_name_override: Arc>, @@ -92,9 +92,9 @@ impl Default for RotationHandler { } -impl Service { +impl Service { pub fn load( - db: D, + db: Box, config: Config, ) -> Result { let keypair = db.load_keypair(); diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs index ce867fb5..4bd9efd3 100644 --- a/src/service/key_backups/mod.rs +++ b/src/service/key_backups/mod.rs @@ -12,11 +12,11 @@ use ruma::{ }; use std::{collections::BTreeMap, sync::Arc}; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { pub fn create_backup( &self, user_id: &UserId, diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index 5037809c..d61292bb 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -15,11 +15,11 @@ pub struct FileMeta { pub file: Vec, } -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { /// Uploads a file. 
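The media service below shows the pattern this patch applies to every service: the generic Service<D: Data> becomes a plain Service holding a Box<dyn Data>, so a single concrete Services struct can own all of them. A rough illustration with made-up trait and method names, not Conduit's actual Data traits:

    trait Data: Send + Sync {
        fn get(&self, key: &[u8]) -> Option<Vec<u8>>;
    }

    struct InMemory; // one possible backend

    impl Data for InMemory {
        fn get(&self, _key: &[u8]) -> Option<Vec<u8>> {
            None
        }
    }

    struct Service {
        db: Box<dyn Data>, // was `db: D` with a `D: Data` type parameter
    }

    impl Service {
        fn lookup(&self, key: &[u8]) -> Option<Vec<u8>> {
            self.db.get(key) // dynamic dispatch instead of monomorphization
        }
    }

    fn main() {
        let svc = Service { db: Box::new(InMemory) };
        assert!(svc.lookup(b"example").is_none());
    }

The cost is a vtable call per data access and boxed return types for iterators, which is why the key_value implementations above now wrap their iterator returns in Box::new(); the gain is that service types no longer need a type parameter to name the backend.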
pub async fn create( &self, diff --git a/src/service/mod.rs b/src/service/mod.rs index 4364c72e..47d4651d 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + pub mod account_data; pub mod admin; pub mod appservice; @@ -12,18 +14,36 @@ pub mod transaction_ids; pub mod uiaa; pub mod users; -pub struct Services -{ - pub appservice: appservice::Service, - pub pusher: pusher::Service, - pub rooms: rooms::Service, - pub transaction_ids: transaction_ids::Service, - pub uiaa: uiaa::Service, - pub users: users::Service, - pub account_data: account_data::Service, +pub struct Services { + pub appservice: appservice::Service, + pub pusher: pusher::Service, + pub rooms: rooms::Service, + pub transaction_ids: transaction_ids::Service, + pub uiaa: uiaa::Service, + pub users: users::Service, + pub account_data: account_data::Service, pub admin: admin::Service, - pub globals: globals::Service, - pub key_backups: key_backups::Service, - pub media: media::Service, + pub globals: globals::Service, + pub key_backups: key_backups::Service, + pub media: media::Service, pub sending: sending::Service, } + +impl Services { + pub fn build(db: Arc) { + Self { + appservice: appservice::Service { db: Arc::clone(&db) }, + pusher: appservice::Service { db: Arc::clone(&db) }, + rooms: appservice::Service { db: Arc::clone(&db) }, + transaction_ids: appservice::Service { db: Arc::clone(&db) }, + uiaa: appservice::Service { db: Arc::clone(&db) }, + users: appservice::Service { db: Arc::clone(&db) }, + account_data: appservice::Service { db: Arc::clone(&db) }, + admin: appservice::Service { db: Arc::clone(&db) }, + globals: appservice::Service { db: Arc::clone(&db) }, + key_backups: appservice::Service { db: Arc::clone(&db) }, + media: appservice::Service { db: Arc::clone(&db) }, + sending: appservice::Service { db: Arc::clone(&db) }, + } + } +} diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 64c7f1fa..af30ca47 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -23,11 +23,11 @@ use ruma::{ use std::{fmt::Debug, mem}; use tracing::{error, info, warn}; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { pub fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> { self.db.set_pusher(sender, pusher) } diff --git a/src/service/rooms/alias/data.rs b/src/service/rooms/alias/data.rs index c5d45e36..81022096 100644 --- a/src/service/rooms/alias/data.rs +++ b/src/service/rooms/alias/data.rs @@ -25,5 +25,5 @@ pub trait Data { fn local_aliases_for_room( &self, room_id: &RoomId, - ) -> Result>>; + ) -> Box>>>; } diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index abe299d4..ef5888fc 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -4,11 +4,11 @@ pub use data::Data; use ruma::{RoomAliasId, RoomId}; use crate::Result; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { #[tracing::instrument(skip(self))] pub fn set_alias( &self, diff --git a/src/service/rooms/auth_chain/data.rs b/src/service/rooms/auth_chain/data.rs index 5177d6d6..e4e8550b 100644 --- a/src/service/rooms/auth_chain/data.rs +++ b/src/service/rooms/auth_chain/data.rs @@ -2,6 +2,6 @@ use std::collections::HashSet; use crate::Result; pub trait Data { - fn get_cached_eventid_authchain(&self, shorteventid: u64) -> Result>; + fn get_cached_eventid_authchain(&self, shorteventid: u64) -> Result>>; fn 
cache_eventid_authchain(&self, shorteventid: u64, auth_chain: &HashSet) -> Result<()>; } diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index 9ea4763e..26a3f3f0 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -5,11 +5,11 @@ pub use data::Data; use crate::Result; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { #[tracing::instrument(skip(self))] pub fn get_cached_eventid_authchain<'a>( &'a self, diff --git a/src/service/rooms/directory/mod.rs b/src/service/rooms/directory/mod.rs index 68535057..fb289941 100644 --- a/src/service/rooms/directory/mod.rs +++ b/src/service/rooms/directory/mod.rs @@ -4,11 +4,11 @@ use ruma::RoomId; use crate::Result; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { #[tracing::instrument(skip(self))] pub fn set_public(&self, room_id: &RoomId) -> Result<()> { self.db.set_public(room_id) diff --git a/src/service/rooms/edus/mod.rs b/src/service/rooms/edus/mod.rs index dbe1b6e8..8552363e 100644 --- a/src/service/rooms/edus/mod.rs +++ b/src/service/rooms/edus/mod.rs @@ -4,8 +4,8 @@ pub mod typing; pub trait Data: presence::Data + read_receipt::Data + typing::Data {} -pub struct Service { - pub presence: presence::Service, - pub read_receipt: read_receipt::Service, - pub typing: typing::Service, +pub struct Service { + pub presence: presence::Service, + pub read_receipt: read_receipt::Service, + pub typing: typing::Service, } diff --git a/src/service/rooms/edus/presence/mod.rs b/src/service/rooms/edus/presence/mod.rs index 646cf549..73b7b5a5 100644 --- a/src/service/rooms/edus/presence/mod.rs +++ b/src/service/rooms/edus/presence/mod.rs @@ -6,11 +6,11 @@ use ruma::{RoomId, UserId, events::presence::PresenceEvent}; use crate::Result; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { /// Adds a presence event which will be saved until a new event replaces it. /// /// Note: This method takes a RoomId because presence updates are always bound to rooms to diff --git a/src/service/rooms/edus/read_receipt/mod.rs b/src/service/rooms/edus/read_receipt/mod.rs index 3f0b1476..2a4c0b7f 100644 --- a/src/service/rooms/edus/read_receipt/mod.rs +++ b/src/service/rooms/edus/read_receipt/mod.rs @@ -4,11 +4,11 @@ pub use data::Data; use ruma::{RoomId, UserId, events::receipt::ReceiptEvent, serde::Raw}; use crate::Result; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { /// Replaces the previous read receipt. pub fn readreceipt_update( &self, diff --git a/src/service/rooms/edus/typing/mod.rs b/src/service/rooms/edus/typing/mod.rs index 00cfdecb..16a135f8 100644 --- a/src/service/rooms/edus/typing/mod.rs +++ b/src/service/rooms/edus/typing/mod.rs @@ -4,11 +4,11 @@ use ruma::{UserId, RoomId, events::SyncEphemeralRoomEvent}; use crate::Result; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is /// called. pub fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()> { diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 8a8725b8..e2291126 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -1,14 +1,16 @@ /// An async function that can recursively call itself. 
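Most hunks in this patch repeat one mechanical refactor: `Service<D: Data>` becomes a plain struct owning a boxed trait object, and every method keeps its signature and simply forwards to the backend. A toy before/after sketch of that change:

    // Before: the storage backend was a compile-time type parameter.
    // pub struct Service<D: Data> { db: D }

    // After: the backend is chosen at startup and hidden behind a trait object.
    trait Data: Send + Sync {
        fn set_public(&self, room_id: &str) -> Result<(), String>;
    }

    pub struct Service {
        db: Box<dyn Data>,
    }

    impl Service {
        // The public API is unchanged; the call is just dynamically dispatched.
        pub fn set_public(&self, room_id: &str) -> Result<(), String> {
            self.db.set_public(room_id)
        }
    }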
type AsyncRecursiveType<'a, T> = Pin + 'a + Send>>; +use ruma::{RoomVersionId, signatures::CanonicalJsonObject, api::federation::discovery::{get_server_keys, get_remote_server_keys}}; +use tokio::sync::Semaphore; use std::{ collections::{btree_map, hash_map, BTreeMap, HashMap, HashSet}, pin::Pin, - sync::{Arc, RwLock}, - time::{Duration, Instant}, + sync::{Arc, RwLock, RwLockWriteGuard}, + time::{Duration, Instant, SystemTime}, }; -use futures_util::{Future, stream::FuturesUnordered}; +use futures_util::{Future, stream::FuturesUnordered, StreamExt}; use ruma::{ api::{ client::error::ErrorKind, @@ -22,7 +24,7 @@ use ruma::{ uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, ServerSigningKeyId, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; -use tracing::{error, info, trace, warn}; +use tracing::{error, info, trace, warn, debug}; use crate::{service::*, services, Result, Error, PduEvent}; @@ -53,7 +55,7 @@ impl Service { /// it /// 14. Use state resolution to find new room state // We use some AsyncRecursiveType hacks here so we can call this async funtion recursively - #[tracing::instrument(skip(value, is_timeline_event, pub_key_map))] + #[tracing::instrument(skip(self, value, is_timeline_event, pub_key_map))] pub(crate) async fn handle_incoming_pdu<'a>( &self, origin: &'a ServerName, @@ -64,10 +66,11 @@ impl Service { pub_key_map: &'a RwLock>>, ) -> Result>> { if !services().rooms.metadata.exists(room_id)? { - return Error::BadRequest( + return Err(Error::BadRequest( ErrorKind::NotFound, "Room is unknown to this server", - )}; + )); + } services() .rooms @@ -732,7 +735,7 @@ impl Service { &incoming_pdu.sender, incoming_pdu.state_key.as_deref(), &incoming_pdu.content, - )? + )?; let soft_fail = !state_res::event_auth::auth_check( &room_version, @@ -821,7 +824,7 @@ impl Service { let shortstatekey = services() .rooms .short - .get_or_create_shortstatekey(&incoming_pdu.kind.to_string().into(), state_key)? + .get_or_create_shortstatekey(&incoming_pdu.kind.to_string().into(), state_key)?; state_after.insert(shortstatekey, Arc::from(&*incoming_pdu.event_id)); } @@ -1236,7 +1239,7 @@ impl Service { let signature_ids = signature_object.keys().cloned().collect::>(); - let fetch_res = fetch_signing_keys( + let fetch_res = self.fetch_signing_keys( signature_server.as_str().try_into().map_err(|_| { Error::BadServerResponse("Invalid servername in signatures of server response pdu.") })?, @@ -1481,4 +1484,168 @@ impl Service { )) } } + + /// Search the DB for the signing keys of the given server, if we don't have them + /// fetch them from the server and save to our DB. 
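`fetch_signing_keys` (its body follows below) serializes concurrent key fetches so that only one request per origin server is in flight at a time, using a lazily created single-permit semaphore per server name. A reduced sketch of that rate-limiting pattern; the type and method names here are illustrative, while the real map lives in `globals.servername_ratelimiter`:

    use std::collections::HashMap;
    use std::sync::{Arc, RwLock};
    use tokio::sync::{OwnedSemaphorePermit, Semaphore};

    struct Limiters {
        // One single-permit semaphore per origin, created on first use.
        per_origin: RwLock<HashMap<String, Arc<Semaphore>>>,
    }

    impl Limiters {
        async fn acquire_for(&self, origin: &str) -> OwnedSemaphorePermit {
            // Fast path: the semaphore already exists. The read guard is dropped
            // at the end of this statement, so nothing is held across the await.
            let existing = self.per_origin.read().unwrap().get(origin).map(Arc::clone);

            let sem = match existing {
                Some(s) => s,
                None => {
                    let mut write = self.per_origin.write().unwrap();
                    Arc::clone(
                        write
                            .entry(origin.to_owned())
                            .or_insert_with(|| Arc::new(Semaphore::new(1))),
                    )
                }
            };

            // Holding the permit means we are the only key fetch for this origin.
            sem.acquire_owned().await.expect("semaphore is never closed")
        }
    }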
+ #[tracing::instrument(skip_all)] + pub async fn fetch_signing_keys( + &self, + origin: &ServerName, + signature_ids: Vec, + ) -> Result> { + let contains_all_ids = + |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); + + let permit = services() + .globals + .servername_ratelimiter + .read() + .unwrap() + .get(origin) + .map(|s| Arc::clone(s).acquire_owned()); + + let permit = match permit { + Some(p) => p, + None => { + let mut write = services().globals.servername_ratelimiter.write().unwrap(); + let s = Arc::clone( + write + .entry(origin.to_owned()) + .or_insert_with(|| Arc::new(Semaphore::new(1))), + ); + + s.acquire_owned() + } + } + .await; + + let back_off = |id| match services() + .globals + .bad_signature_ratelimiter + .write() + .unwrap() + .entry(id) + { + hash_map::Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + } + hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), + }; + + if let Some((time, tries)) = services() + .globals + .bad_signature_ratelimiter + .read() + .unwrap() + .get(&signature_ids) + { + // Exponential backoff + let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + debug!("Backing off from {:?}", signature_ids); + return Err(Error::BadServerResponse("bad signature, still backing off")); + } + } + + trace!("Loading signing keys for {}", origin); + + let mut result: BTreeMap<_, _> = services() + .globals + .signing_keys_for(origin)? + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect(); + + if contains_all_ids(&result) { + return Ok(result); + } + + debug!("Fetching signing keys for {} over federation", origin); + + if let Some(server_key) = services() + .sending + .send_federation_request(origin, get_server_keys::v2::Request::new()) + .await + .ok() + .and_then(|resp| resp.server_key.deserialize().ok()) + { + services().globals.add_signing_key(origin, server_key.clone())?; + + result.extend( + server_key + .verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + result.extend( + server_key + .old_verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + + if contains_all_ids(&result) { + return Ok(result); + } + } + + for server in services().globals.trusted_servers() { + debug!("Asking {} for {}'s signing key", server, origin); + if let Some(server_keys) = services() + .sending + .send_federation_request( + server, + get_remote_server_keys::v2::Request::new( + origin, + MilliSecondsSinceUnixEpoch::from_system_time( + SystemTime::now() + .checked_add(Duration::from_secs(3600)) + .expect("SystemTime to large"), + ) + .expect("time is valid"), + ), + ) + .await + .ok() + .map(|resp| { + resp.server_keys + .into_iter() + .filter_map(|e| e.deserialize().ok()) + .collect::>() + }) + { + trace!("Got signing keys: {:?}", server_keys); + for k in server_keys { + services().globals.add_signing_key(origin, k.clone())?; + result.extend( + k.verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + result.extend( + k.old_verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + } + + if contains_all_ids(&result) { + return Ok(result); + } + } + } + + drop(permit); + + back_off(signature_ids); + + warn!("Failed to find public key for server: {}", origin); + Err(Error::BadServerResponse( + "Failed to find public key for server", + )) 
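The back-off check in the body above multiplies a 30-second base by the square of the failure count and caps the result at one day. A small self-contained sketch of that computation (function and parameter names are illustrative):

    use std::time::{Duration, Instant};

    // True if we should still refuse to retry a target that has failed `tries`
    // times, with the most recent failure at `last_failure`.
    fn still_backing_off(last_failure: Instant, tries: u32, now: Instant) -> bool {
        // 30s * tries^2: 30s, 2min, 4.5min, 8min, ... capped at 24h.
        let mut min_elapsed = Duration::from_secs(30) * tries * tries;
        if min_elapsed > Duration::from_secs(60 * 60 * 24) {
            min_elapsed = Duration::from_secs(60 * 60 * 24);
        }
        now.duration_since(last_failure) < min_elapsed
    }

    fn main() {
        let failed_at = Instant::now();
        // After three failures the wait is 30s * 9 = 270s.
        assert!(still_backing_off(failed_at, 3, failed_at + Duration::from_secs(200)));
        assert!(!still_backing_off(failed_at, 3, failed_at + Duration::from_secs(300)));
    }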
+ } } diff --git a/src/service/rooms/lazy_loading/data.rs b/src/service/rooms/lazy_loading/data.rs index 5fefd3f8..f1019c13 100644 --- a/src/service/rooms/lazy_loading/data.rs +++ b/src/service/rooms/lazy_loading/data.rs @@ -15,7 +15,7 @@ pub trait Data { user_id: &UserId, device_id: &DeviceId, room_id: &RoomId, - since: u64, + confirmed_user_ids: &mut dyn Iterator, ) -> Result<()>; fn lazy_load_reset( diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index 283d45af..90dad21c 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -1,16 +1,18 @@ mod data; -use std::collections::HashSet; +use std::{collections::{HashSet, HashMap}, sync::Mutex}; pub use data::Data; use ruma::{DeviceId, UserId, RoomId}; use crate::Result; -pub struct Service { - db: D, +pub struct Service { + db: Box, + + lazy_load_waiting: Mutex, Box, Box, u64), HashSet>>>, } -impl Service { +impl Service { #[tracing::instrument(skip(self))] pub fn lazy_load_was_sent_before( &self, @@ -50,7 +52,18 @@ impl Service { room_id: &RoomId, since: u64, ) -> Result<()> { - self.db.lazy_load_confirm_delivery(user_id, device_id, room_id, since) + if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( + user_id.to_owned(), + device_id.to_owned(), + room_id.to_owned(), + since, + )) { + self.db.lazy_load_confirm_delivery(user_id, device_id, room_id, &mut user_ids.iter().map(|&u| &*u))?; + } else { + // Ignore + } + + Ok(()) } #[tracing::instrument(skip(self))] diff --git a/src/service/rooms/metadata/mod.rs b/src/service/rooms/metadata/mod.rs index 1bdb78d6..3c21dd19 100644 --- a/src/service/rooms/metadata/mod.rs +++ b/src/service/rooms/metadata/mod.rs @@ -4,11 +4,11 @@ use ruma::RoomId; use crate::Result; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { /// Checks if a room exists. 
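The lazy-loading change above buffers the member IDs included in a sync response in an in-memory map and only persists them once the client comes back with the matching `since` token, so a response the client never received is not wrongly remembered as delivered. A reduced sketch of that two-phase handshake, with plain strings standing in for the Ruma ID types:

    use std::collections::{HashMap, HashSet};
    use std::sync::Mutex;

    struct LazyLoading {
        // Keyed by (user, device, room, since); the value is the set of members
        // whose state was bundled into the response built for that token.
        waiting: Mutex<HashMap<(String, String, String, u64), HashSet<String>>>,
    }

    impl LazyLoading {
        // Called while a sync response is being assembled.
        fn mark_sent(&self, user: &str, device: &str, room: &str, since: u64, sent: HashSet<String>) {
            self.waiting.lock().unwrap().insert(
                (user.to_owned(), device.to_owned(), room.to_owned(), since),
                sent,
            );
        }

        // Called when the client asks for the next batch, which proves it saw
        // the previous one; only the returned set should be persisted.
        fn confirm_delivery(&self, user: &str, device: &str, room: &str, since: u64) -> HashSet<String> {
            self.waiting
                .lock()
                .unwrap()
                .remove(&(user.to_owned(), device.to_owned(), room.to_owned(), since))
                .unwrap_or_default() // nothing pending for this token: ignore
        }
    }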
#[tracing::instrument(skip(self))] pub fn exists(&self, room_id: &RoomId) -> Result { diff --git a/src/service/rooms/mod.rs b/src/service/rooms/mod.rs index 4da42236..f1b0badf 100644 --- a/src/service/rooms/mod.rs +++ b/src/service/rooms/mod.rs @@ -18,22 +18,22 @@ pub mod user; pub trait Data: alias::Data + auth_chain::Data + directory::Data + edus::Data + lazy_loading::Data + metadata::Data + outlier::Data + pdu_metadata::Data + search::Data + short::Data + state::Data + state_accessor::Data + state_cache::Data + state_compressor::Data + timeline::Data + user::Data {} -pub struct Service { - pub alias: alias::Service, - pub auth_chain: auth_chain::Service, - pub directory: directory::Service, - pub edus: edus::Service, +pub struct Service { + pub alias: alias::Service, + pub auth_chain: auth_chain::Service, + pub directory: directory::Service, + pub edus: edus::Service, pub event_handler: event_handler::Service, - pub lazy_loading: lazy_loading::Service, - pub metadata: metadata::Service, - pub outlier: outlier::Service, - pub pdu_metadata: pdu_metadata::Service, - pub search: search::Service, - pub short: short::Service, - pub state: state::Service, - pub state_accessor: state_accessor::Service, - pub state_cache: state_cache::Service, - pub state_compressor: state_compressor::Service, - pub timeline: timeline::Service, - pub user: user::Service, + pub lazy_loading: lazy_loading::Service, + pub metadata: metadata::Service, + pub outlier: outlier::Service, + pub pdu_metadata: pdu_metadata::Service, + pub search: search::Service, + pub short: short::Service, + pub state: state::Service, + pub state_accessor: state_accessor::Service, + pub state_cache: state_cache::Service, + pub state_compressor: state_compressor::Service, + pub timeline: timeline::Service, + pub user: user::Service, } diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs index a495db8f..5493ce48 100644 --- a/src/service/rooms/outlier/mod.rs +++ b/src/service/rooms/outlier/mod.rs @@ -4,11 +4,11 @@ use ruma::{EventId, signatures::CanonicalJsonObject}; use crate::{Result, PduEvent}; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { /// Returns the pdu from the outlier tree. 
pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { self.db.get_outlier_pdu_json(event_id) diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs index c57c1a28..a81d05c1 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -6,11 +6,11 @@ use ruma::{RoomId, EventId}; use crate::Result; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { #[tracing::instrument(skip(self, room_id, event_ids))] pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { self.db.mark_as_referenced(room_id, event_ids) diff --git a/src/service/rooms/search/data.rs b/src/service/rooms/search/data.rs index c0fd2a37..b62904c1 100644 --- a/src/service/rooms/search/data.rs +++ b/src/service/rooms/search/data.rs @@ -2,7 +2,7 @@ use ruma::RoomId; use crate::Result; pub trait Data { - fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: u64, message_body: String) -> Result<()>; + fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: String) -> Result<()>; fn search_pdus<'a>( &'a self, diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index b7023f32..dc571910 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -4,11 +4,16 @@ pub use data::Data; use crate::Result; use ruma::RoomId; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { + #[tracing::instrument(skip(self))] + pub fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: String) -> Result<()> { + self.db.index_pdu(shortroomid, pdu_id, message_body) + } + #[tracing::instrument(skip(self))] pub fn search_pdus<'a>( &'a self, diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index 1eb891e6..a024dc67 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -6,11 +6,11 @@ use ruma::{EventId, events::StateEventType, RoomId}; use crate::{Result, Error, utils, services}; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { pub fn get_or_create_shorteventid( &self, event_id: &EventId, diff --git a/src/service/rooms/state/data.rs b/src/service/rooms/state/data.rs index fd0de282..7008d86f 100644 --- a/src/service/rooms/state/data.rs +++ b/src/service/rooms/state/data.rs @@ -1,6 +1,5 @@ use std::sync::Arc; use std::{sync::MutexGuard, collections::HashSet}; -use std::fmt::Debug; use crate::Result; use ruma::{EventId, RoomId}; @@ -22,7 +21,7 @@ pub trait Data { /// Replace the forward extremities of the room. fn set_forward_extremities<'a>(&self, room_id: &RoomId, - event_ids: impl IntoIterator + Debug, + event_ids: &dyn Iterator, _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()>; } diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index a26ed46b..979060d9 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -10,11 +10,11 @@ use crate::{Result, services, PduEvent, Error, utils::calculate_hash}; use super::state_compressor::CompressedStateEvent; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { /// Set the room to the given statehash and update caches. 
pub fn force_state( &self, @@ -23,6 +23,15 @@ impl Service { statediffnew: HashSet, statediffremoved: HashSet, ) -> Result<()> { + let mutex_state = Arc::clone( + services().globals + .roomid_mutex_state + .write() + .unwrap() + .entry(body.room_id.to_owned()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; for event_id in statediffnew.into_iter().filter_map(|new| { services().rooms.state_compressor.parse_compressed_state_event(new) @@ -70,7 +79,9 @@ impl Service { services().room.state_cache.update_joined_count(room_id)?; - self.db.set_room_state(room_id, shortstatehash); + self.db.set_room_state(room_id, shortstatehash, &state_lock); + + drop(state_lock); Ok(()) } diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 5d6886d9..1911e52f 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -6,11 +6,11 @@ use ruma::{events::StateEventType, RoomId, EventId}; use crate::{Result, PduEvent}; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. #[tracing::instrument(skip(self))] diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index c3b4eb91..18d1123e 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -7,11 +7,11 @@ use ruma::{RoomId, UserId, events::{room::{member::MembershipState, create::Room use crate::{Result, services, utils, Error}; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { /// Update current membership data. #[tracing::instrument(skip(self, last_state))] pub fn update_membership( diff --git a/src/service/rooms/state_compressor/data.rs b/src/service/rooms/state_compressor/data.rs index 17689364..cd872422 100644 --- a/src/service/rooms/state_compressor/data.rs +++ b/src/service/rooms/state_compressor/data.rs @@ -1,10 +1,12 @@ +use std::collections::HashSet; + use super::CompressedStateEvent; use crate::Result; pub struct StateDiff { - parent: Option, - added: Vec, - removed: Vec, + pub parent: Option, + pub added: HashSet, + pub removed: HashSet, } pub trait Data { diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index 619e4cf5..ab9f4275 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -8,13 +8,13 @@ use crate::{Result, utils, services}; use self::data::StateDiff; -pub struct Service { - db: D, +pub struct Service { + db: Box, } pub type CompressedStateEvent = [u8; 2 * size_of::()]; -impl Service { +impl Service { /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. #[tracing::instrument(skip(self))] pub fn load_shortstatehash_info( diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 7669b0b3..e8f42053 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -20,11 +20,11 @@ use crate::{services, Result, service::pdu::{PduBuilder, EventHash}, Error, PduE use super::state_compressor::CompressedStateEvent; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { /* /// Checks if a room exists. 
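The `force_state` hunk above acquires the per-room state mutex, but as shown it reads `body.room_id`, a request-handler variable that does not exist inside the service, and awaits in a function whose signature is not visibly async, so it also looks like a work-in-progress snapshot. The pattern it is reaching for is the usual per-room lock map; a sketch under assumed names:

    use std::collections::HashMap;
    use std::sync::{Arc, RwLock};
    use tokio::sync::{Mutex, OwnedMutexGuard};

    struct RoomLocks {
        // One async mutex per room id, created on demand.
        by_room: RwLock<HashMap<String, Arc<Mutex<()>>>>,
    }

    impl RoomLocks {
        async fn lock_room(&self, room_id: &str) -> OwnedMutexGuard<()> {
            // Clone the Arc out of the map so the std lock is not held across await.
            let mutex = Arc::clone(
                self.by_room
                    .write()
                    .unwrap()
                    .entry(room_id.to_owned())
                    .or_default(),
            );
            // The guard is held for the whole state update and dropped (or
            // explicitly drop()ed) once the new state hash has been stored.
            mutex.lock_owned().await
        }
    }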
#[tracing::instrument(skip(self))] diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs index 729887c3..7c7dfae6 100644 --- a/src/service/rooms/user/mod.rs +++ b/src/service/rooms/user/mod.rs @@ -4,11 +4,11 @@ use ruma::{RoomId, UserId}; use crate::Result; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { self.db.reset_notification_counts(user_id, room_id) } diff --git a/src/service/transaction_ids/mod.rs b/src/service/transaction_ids/mod.rs index ea923722..a9c516cf 100644 --- a/src/service/transaction_ids/mod.rs +++ b/src/service/transaction_ids/mod.rs @@ -4,11 +4,11 @@ pub use data::Data; use ruma::{UserId, DeviceId, TransactionId}; use crate::Result; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { pub fn add_txnid( &self, user_id: &UserId, diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index ffdbf356..01c0d2f6 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -6,11 +6,11 @@ use tracing::error; use crate::{Result, utils, Error, services, api::client_server::SESSION_ID_LENGTH}; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { /// Creates a new Uiaa session. Make sure the session token is unique. pub fn create( &self, diff --git a/src/service/users/data.rs b/src/service/users/data.rs index 3f87589c..7eb0cebd 100644 --- a/src/service/users/data.rs +++ b/src/service/users/data.rs @@ -2,7 +2,7 @@ use std::collections::BTreeMap; use crate::Result; use ruma::{UserId, DeviceId, DeviceKeyAlgorithm, DeviceKeyId, serde::Raw, encryption::{OneTimeKey, DeviceKeys, CrossSigningKey}, UInt, events::AnyToDeviceEvent, api::client::{device::Device, filter::IncomingFilterDefinition}, MxcUri}; -pub trait Data { +pub trait Data: Send + Sync { /// Check if a user has an account on this homeserver. fn exists(&self, user_id: &UserId) -> Result; @@ -138,16 +138,16 @@ pub trait Data { device_id: &DeviceId, ) -> Result>>; - fn get_master_key bool>( + fn get_master_key( &self, user_id: &UserId, - allowed_signatures: F, + allowed_signatures: &dyn Fn(&UserId) -> bool, ) -> Result>>; - fn get_self_signing_key bool>( + fn get_self_signing_key( &self, user_id: &UserId, - allowed_signatures: F, + allowed_signatures: &dyn Fn(&UserId) -> bool, ) -> Result>>; fn get_user_signing_key(&self, user_id: &UserId) -> Result>>; diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index dfe6c7fb..8adc9366 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -6,11 +6,11 @@ use ruma::{UserId, MxcUri, DeviceId, DeviceKeyId, serde::Raw, encryption::{OneTi use crate::{Result, Error, services}; -pub struct Service { - db: D, +pub struct Service { + db: Box, } -impl Service { +impl Service { /// Check if a user has an account on this homeserver. 
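The `users::Data` changes above replace the generic `F: Fn(&UserId) -> bool` parameters with `&dyn Fn(&UserId) -> bool`. Generic methods would make the trait unusable as a `Box<dyn Data>` trait object, so the predicate has to be passed by reference instead. A toy illustration of the same idea:

    trait Data: Send + Sync {
        // A generic `fn get_master_key<F: Fn(&str) -> bool>(...)` here would
        // break object safety; a &dyn Fn keeps the trait usable as `dyn Data`.
        fn get_master_key(&self, user: &str, allowed: &dyn Fn(&str) -> bool) -> Option<String>;
    }

    struct Db;

    impl Data for Db {
        fn get_master_key(&self, user: &str, allowed: &dyn Fn(&str) -> bool) -> Option<String> {
            // Hand the key out only if the caller's predicate accepts the user.
            if allowed(user) {
                Some(format!("master-key-for-{user}"))
            } else {
                None
            }
        }
    }

    fn main() {
        let db: Box<dyn Data> = Box::new(Db);
        // Call sites now pass `&|u| ...`, as in the server_server.rs hunk below.
        assert!(db
            .get_master_key("@alice:example.org", &|u| u.ends_with(":example.org"))
            .is_some());
    }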
pub fn exists(&self, user_id: &UserId) -> Result { self.db.exists(user_id) From cff52d7ebb5066f3d8e513488b84a431c0093e65 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 5 Oct 2022 15:33:57 +0200 Subject: [PATCH 388/445] messing around with arcs --- src/api/client_server/account.rs | 4 +- src/api/client_server/media.rs | 12 +- src/api/client_server/membership.rs | 5 +- src/api/client_server/push.rs | 67 ++++-- src/api/client_server/read_marker.rs | 2 +- src/api/client_server/sync.rs | 2 +- src/api/client_server/tag.rs | 61 ++--- src/api/server_server.rs | 4 +- src/database/key_value/account_data.rs | 17 +- src/database/key_value/appservice.rs | 2 + src/database/key_value/globals.rs | 211 +++++++++++++++++- src/database/key_value/key_backups.rs | 4 +- src/database/key_value/media.rs | 6 +- src/database/key_value/pusher.rs | 4 +- src/database/key_value/rooms/alias.rs | 4 +- src/database/key_value/rooms/auth_chain.rs | 74 ++++-- src/database/key_value/rooms/directory.rs | 4 +- src/database/key_value/rooms/edus/mod.rs | 4 +- src/database/key_value/rooms/edus/presence.rs | 4 +- .../key_value/rooms/edus/read_receipt.rs | 4 +- src/database/key_value/rooms/edus/typing.rs | 4 +- src/database/key_value/rooms/lazy_load.rs | 4 +- src/database/key_value/rooms/metadata.rs | 4 +- src/database/key_value/rooms/mod.rs | 4 +- src/database/key_value/rooms/outlier.rs | 4 +- src/database/key_value/rooms/pdu_metadata.rs | 2 +- src/database/key_value/rooms/search.rs | 4 +- src/database/key_value/rooms/short.rs | 4 +- src/database/key_value/rooms/state.rs | 7 +- .../key_value/rooms/state_accessor.rs | 2 +- src/database/key_value/rooms/state_cache.rs | 4 +- .../key_value/rooms/state_compressor.rs | 4 +- src/database/key_value/rooms/timeline.rs | 2 +- src/database/key_value/rooms/user.rs | 4 +- src/database/key_value/transaction_ids.rs | 4 +- src/database/key_value/uiaa.rs | 4 +- src/database/key_value/users.rs | 12 +- src/database/mod.rs | 109 +-------- src/lib.rs | 6 +- src/service/account_data/data.rs | 11 +- src/service/account_data/mod.rs | 8 +- src/service/admin/mod.rs | 27 ++- src/service/appservice/data.rs | 2 +- src/service/appservice/mod.rs | 4 +- src/service/globals/data.rs | 26 ++- src/service/globals/mod.rs | 81 ++----- src/service/key_backups/data.rs | 2 +- src/service/media/data.rs | 4 +- src/service/media/mod.rs | 14 +- src/service/mod.rs | 26 +-- src/service/pdu.rs | 2 +- src/service/pusher/data.rs | 2 +- src/service/pusher/mod.rs | 13 +- src/service/rooms/alias/data.rs | 2 +- src/service/rooms/auth_chain/data.rs | 8 +- src/service/rooms/auth_chain/mod.rs | 34 +-- src/service/rooms/directory/data.rs | 2 +- src/service/rooms/edus/presence/data.rs | 2 +- src/service/rooms/edus/read_receipt/data.rs | 2 +- src/service/rooms/edus/typing/data.rs | 2 +- src/service/rooms/event_handler/mod.rs | 26 ++- src/service/rooms/lazy_loading/data.rs | 2 +- src/service/rooms/metadata/data.rs | 2 +- src/service/rooms/outlier/data.rs | 2 +- src/service/rooms/pdu_metadata/data.rs | 2 +- src/service/rooms/search/data.rs | 2 +- src/service/rooms/short/data.rs | 2 +- src/service/rooms/state/data.rs | 7 +- src/service/rooms/state/mod.rs | 8 +- src/service/rooms/state_accessor/data.rs | 2 +- src/service/rooms/state_cache/data.rs | 2 +- src/service/rooms/state_compressor/data.rs | 2 +- src/service/rooms/timeline/data.rs | 2 +- src/service/rooms/user/data.rs | 2 +- src/service/transaction_ids/data.rs | 2 +- src/service/uiaa/data.rs | 2 +- src/service/users/mod.rs | 8 +- 77 files changed, 598 
insertions(+), 434 deletions(-) diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index 6af597e1..6d37ce99 100644 --- a/src/api/client_server/account.rs +++ b/src/api/client_server/account.rs @@ -184,11 +184,11 @@ pub async fn register_route( None, &user_id, GlobalAccountDataEventType::PushRules.to_string().into(), - &ruma::events::push_rules::PushRulesEvent { + &serde_json::to_value(ruma::events::push_rules::PushRulesEvent { content: ruma::events::push_rules::PushRulesEventContent { global: push::Ruleset::server_default(&user_id), }, - }, + }).expect("to json always works"), )?; // Inhibit login does not work for guests diff --git a/src/api/client_server/media.rs b/src/api/client_server/media.rs index 316e284b..80cbb613 100644 --- a/src/api/client_server/media.rs +++ b/src/api/client_server/media.rs @@ -40,12 +40,12 @@ pub async fn create_content_route( services().media .create( mxc.clone(), - &body + body .filename .as_ref() .map(|filename| "inline; filename=".to_owned() + filename) .as_deref(), - &body.content_type.as_deref(), + body.content_type.as_deref(), &body.file, ) .await?; @@ -76,8 +76,8 @@ pub async fn get_remote_content( services().media .create( mxc.to_string(), - &content_response.content_disposition.as_deref(), - &content_response.content_type.as_deref(), + content_response.content_disposition.as_deref(), + content_response.content_type.as_deref(), &content_response.file, ) .await?; @@ -195,8 +195,8 @@ pub async fn get_content_thumbnail_route( services().media .upload_thumbnail( mxc, - &None, - &get_thumbnail_response.content_type.as_deref(), + None, + get_thumbnail_response.content_type.as_deref(), body.width.try_into().expect("all UInts are valid u32s"), body.height.try_into().expect("all UInts are valid u32s"), &get_thumbnail_response.file, diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index 720c1e64..58ed0401 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -860,9 +860,8 @@ pub(crate) async fn invite_helper<'a>( "Could not accept incoming PDU as timeline event.", ))?; - let servers = services() - .rooms - .state_cache + // Bind to variable because of lifetimes + let servers = services().rooms.state_cache .room_servers(room_id) .filter_map(|r| r.ok()) .filter(|server| &**server != services().globals.server_name()); diff --git a/src/api/client_server/push.rs b/src/api/client_server/push.rs index 112fa002..12ec25dd 100644 --- a/src/api/client_server/push.rs +++ b/src/api/client_server/push.rs @@ -20,7 +20,7 @@ pub async fn get_pushrules_all_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event: PushRulesEvent = services() + let event = services() .account_data .get( None, @@ -32,8 +32,12 @@ pub async fn get_pushrules_all_route( "PushRules event not found.", ))?; + let account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))? 
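From here on the account-data service stops handling typed events: `update` takes a `serde_json::Value` built with `serde_json::to_value`, and `get` hands back raw JSON that each route deserializes itself. A reduced sketch of that round-trip with a stand-in event type (the real code returns a boxed `RawValue` and calls `.get()` on it; a plain string is used here to stay dependency-light):

    use serde::{Deserialize, Serialize};

    #[derive(Serialize, Deserialize, Debug)]
    struct PushRulesContent {
        global: Vec<String>, // stand-in for the real ruleset type
    }

    #[derive(Serialize, Deserialize, Debug)]
    struct PushRulesEvent {
        content: PushRulesContent,
    }

    fn main() -> Result<(), serde_json::Error> {
        let event = PushRulesEvent {
            content: PushRulesContent { global: vec!["some-rule".to_owned()] },
        };

        // What a route now does before calling `account_data.update(...)`:
        let stored: serde_json::Value = serde_json::to_value(&event)?;

        // What `account_data.get(...)` conceptually returns: raw JSON text.
        let raw: String = serde_json::to_string(&stored)?;

        // ...and what the route does with it afterwards:
        let typed: PushRulesEvent = serde_json::from_str(&raw)?;
        println!("{} rules", typed.content.global.len());
        Ok(())
    }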
+ .content; + Ok(get_pushrules_all::v3::Response { - global: event.content.global, + global: account_data.global, }) } @@ -45,7 +49,7 @@ pub async fn get_pushrule_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event: PushRulesEvent = services() + let event = services() .account_data .get( None, @@ -57,7 +61,11 @@ pub async fn get_pushrule_route( "PushRules event not found.", ))?; - let global = event.content.global; + let account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))? + .content; + + let global = account_data.global; let rule = match body.kind { RuleKind::Override => global .override_ @@ -108,7 +116,7 @@ pub async fn set_pushrule_route( )); } - let mut event: PushRulesEvent = services() + let event = services() .account_data .get( None, @@ -120,7 +128,10 @@ pub async fn set_pushrule_route( "PushRules event not found.", ))?; - let global = &mut event.content.global; + let mut account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))?; + + let global = &mut account_data.content.global; match body.kind { RuleKind::Override => { global.override_.replace( @@ -187,7 +198,7 @@ pub async fn set_pushrule_route( None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into(), - &event, + &serde_json::to_value(account_data).expect("to json value always works"), )?; Ok(set_pushrule::v3::Response {}) @@ -208,7 +219,7 @@ pub async fn get_pushrule_actions_route( )); } - let mut event: PushRulesEvent = services() + let event = services() .account_data .get( None, @@ -220,7 +231,11 @@ pub async fn get_pushrule_actions_route( "PushRules event not found.", ))?; - let global = &mut event.content.global; + let account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))? 
+ .content; + + let global = account_data.global; let actions = match body.kind { RuleKind::Override => global .override_ @@ -265,7 +280,7 @@ pub async fn set_pushrule_actions_route( )); } - let mut event: PushRulesEvent = services() + let event = services() .account_data .get( None, @@ -277,7 +292,10 @@ pub async fn set_pushrule_actions_route( "PushRules event not found.", ))?; - let global = &mut event.content.global; + let mut account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))?; + + let global = &mut account_data.content.global; match body.kind { RuleKind::Override => { if let Some(mut rule) = global.override_.get(body.rule_id.as_str()).cloned() { @@ -316,7 +334,7 @@ pub async fn set_pushrule_actions_route( None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into(), - &event, + &serde_json::to_value(account_data).expect("to json value always works"), )?; Ok(set_pushrule_actions::v3::Response {}) @@ -337,7 +355,7 @@ pub async fn get_pushrule_enabled_route( )); } - let mut event: PushRulesEvent = services() + let event = services() .account_data .get( None, @@ -349,7 +367,10 @@ pub async fn get_pushrule_enabled_route( "PushRules event not found.", ))?; - let global = &mut event.content.global; + let account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))?; + + let global = account_data.content.global; let enabled = match body.kind { RuleKind::Override => global .override_ @@ -397,7 +418,7 @@ pub async fn set_pushrule_enabled_route( )); } - let mut event: PushRulesEvent = services() + let event = services() .account_data .get( None, @@ -409,7 +430,10 @@ pub async fn set_pushrule_enabled_route( "PushRules event not found.", ))?; - let global = &mut event.content.global; + let mut account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))?; + + let global = &mut account_data.content.global; match body.kind { RuleKind::Override => { if let Some(mut rule) = global.override_.get(body.rule_id.as_str()).cloned() { @@ -453,7 +477,7 @@ pub async fn set_pushrule_enabled_route( None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into(), - &event, + &serde_json::to_value(account_data).expect("to json value always works"), )?; Ok(set_pushrule_enabled::v3::Response {}) @@ -474,7 +498,7 @@ pub async fn delete_pushrule_route( )); } - let mut event: PushRulesEvent = services() + let event = services() .account_data .get( None, @@ -486,7 +510,10 @@ pub async fn delete_pushrule_route( "PushRules event not found.", ))?; - let global = &mut event.content.global; + let mut account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))?; + + let global = &mut account_data.content.global; match body.kind { RuleKind::Override => { if let Some(rule) = global.override_.get(body.rule_id.as_str()).cloned() { @@ -520,7 +547,7 @@ pub async fn delete_pushrule_route( None, sender_user, GlobalAccountDataEventType::PushRules.to_string().into(), - &event, + &serde_json::to_value(account_data).expect("to json value always works"), )?; Ok(delete_pushrule::v3::Response {}) diff --git a/src/api/client_server/read_marker.rs b/src/api/client_server/read_marker.rs index eda57d57..c6d77c12 100644 --- a/src/api/client_server/read_marker.rs +++ b/src/api/client_server/read_marker.rs @@ -27,7 +27,7 @@ pub async fn set_read_marker_route( 
Some(&body.room_id), sender_user, RoomAccountDataEventType::FullyRead, - &fully_read_event, + &serde_json::to_value(fully_read_event).expect("to json value always works"), )?; if let Some(event) = &body.read_receipt { diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 3489a9a9..9eb63831 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -175,7 +175,7 @@ async fn sync_helper( services().rooms.edus.presence.ping_presence(&sender_user)?; // Setup watchers, so if there's no response, we can wait for them - let watcher = services().globals.db.watch(&sender_user, &sender_device); + let watcher = services().globals.watch(&sender_user, &sender_device); let next_batch = services().globals.current_count()?; let next_batch_string = next_batch.to_string(); diff --git a/src/api/client_server/tag.rs b/src/api/client_server/tag.rs index bbea2d58..abf2b873 100644 --- a/src/api/client_server/tag.rs +++ b/src/api/client_server/tag.rs @@ -1,4 +1,4 @@ -use crate::{Result, Ruma, services}; +use crate::{Result, Ruma, services, Error}; use ruma::{ api::client::tag::{create_tag, delete_tag, get_tags}, events::{ @@ -18,18 +18,22 @@ pub async fn update_tag_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let mut tags_event = services() + let event = services() .account_data .get( Some(&body.room_id), sender_user, RoomAccountDataEventType::Tag, - )? - .unwrap_or_else(|| TagEvent { + )?; + + let mut tags_event = event.map(|e| serde_json::from_str(e.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))) + .unwrap_or_else(|| Ok(TagEvent { content: TagEventContent { tags: BTreeMap::new(), }, - }); + }))?; + tags_event .content .tags @@ -39,7 +43,7 @@ pub async fn update_tag_route( Some(&body.room_id), sender_user, RoomAccountDataEventType::Tag, - &tags_event, + &serde_json::to_value(tags_event).expect("to json value always works"), )?; Ok(create_tag::v3::Response {}) @@ -55,25 +59,29 @@ pub async fn delete_tag_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let mut tags_event = services() + let mut event = services() .account_data .get( Some(&body.room_id), sender_user, RoomAccountDataEventType::Tag, - )? - .unwrap_or_else(|| TagEvent { + )?; + + let mut tags_event = event.map(|e| serde_json::from_str(e.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))) + .unwrap_or_else(|| Ok(TagEvent { content: TagEventContent { tags: BTreeMap::new(), }, - }); + }))?; + tags_event.content.tags.remove(&body.tag.clone().into()); services().account_data.update( Some(&body.room_id), sender_user, RoomAccountDataEventType::Tag, - &tags_event, + &serde_json::to_value(tags_event).expect("to json value always works"), )?; Ok(delete_tag::v3::Response {}) @@ -89,20 +97,23 @@ pub async fn get_tags_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let mut event = services() + .account_data + .get( + Some(&body.room_id), + sender_user, + RoomAccountDataEventType::Tag, + )?; + + let mut tags_event = event.map(|e| serde_json::from_str(e.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))) + .unwrap_or_else(|| Ok(TagEvent { + content: TagEventContent { + tags: BTreeMap::new(), + }, + }))?; + Ok(get_tags::v3::Response { - tags: services() - .account_data - .get( - Some(&body.room_id), - sender_user, - RoomAccountDataEventType::Tag, - )? 
- .unwrap_or_else(|| TagEvent { - content: TagEventContent { - tags: BTreeMap::new(), - }, - }) - .content - .tags, + tags: tags_event.content.tags, }) } diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 45d749d0..647f4574 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -1655,10 +1655,10 @@ pub async fn get_devices_route( .collect(), master_key: services() .users - .get_master_key(&body.user_id, |u| u.server_name() == sender_servername)?, + .get_master_key(&body.user_id, &|u| u.server_name() == sender_servername)?, self_signing_key: services() .users - .get_self_signing_key(&body.user_id, |u| u.server_name() == sender_servername)?, + .get_self_signing_key(&body.user_id, &|u| u.server_name() == sender_servername)?, }) } diff --git a/src/database/key_value/account_data.rs b/src/database/key_value/account_data.rs index 49c9170f..f0325d2b 100644 --- a/src/database/key_value/account_data.rs +++ b/src/database/key_value/account_data.rs @@ -1,19 +1,19 @@ -use std::collections::HashMap; +use std::{collections::HashMap, sync::Arc}; use ruma::{UserId, DeviceId, signatures::CanonicalJsonValue, api::client::{uiaa::UiaaInfo, error::ErrorKind}, events::{RoomAccountDataEventType, AnyEphemeralRoomEvent}, serde::Raw, RoomId}; use serde::{Serialize, de::DeserializeOwned}; use crate::{Result, database::KeyValueDatabase, service, Error, utils, services}; -impl service::account_data::Data for KeyValueDatabase { +impl service::account_data::Data for Arc { /// Places one event in the account data of the user and removes the previous entry. #[tracing::instrument(skip(self, room_id, user_id, event_type, data))] - fn update( + fn update( &self, room_id: Option<&RoomId>, user_id: &UserId, event_type: RoomAccountDataEventType, - data: &T, + data: &serde_json::Value, ) -> Result<()> { let mut prefix = room_id .map(|r| r.to_string()) @@ -32,8 +32,7 @@ impl service::account_data::Data for KeyValueDatabase { let mut key = prefix; key.extend_from_slice(event_type.to_string().as_bytes()); - let json = serde_json::to_value(data).expect("all types here can be serialized"); // TODO: maybe add error handling - if json.get("type").is_none() || json.get("content").is_none() { + if data.get("type").is_none() || data.get("content").is_none() { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Account data doesn't have all required fields.", @@ -42,7 +41,7 @@ impl service::account_data::Data for KeyValueDatabase { self.roomuserdataid_accountdata.insert( &roomuserdataid, - &serde_json::to_vec(&json).expect("to_vec always works on json values"), + &serde_json::to_vec(&data).expect("to_vec always works on json values"), )?; let prev = self.roomusertype_roomuserdataid.get(&key)?; @@ -60,12 +59,12 @@ impl service::account_data::Data for KeyValueDatabase { /// Searches the account data for a specific kind. 
#[tracing::instrument(skip(self, room_id, user_id, kind))] - fn get( + fn get( &self, room_id: Option<&RoomId>, user_id: &UserId, kind: RoomAccountDataEventType, - ) -> Result> { + ) -> Result>> { let mut key = room_id .map(|r| r.to_string()) .unwrap_or_default() diff --git a/src/database/key_value/appservice.rs b/src/database/key_value/appservice.rs index f427ba71..ee6ae206 100644 --- a/src/database/key_value/appservice.rs +++ b/src/database/key_value/appservice.rs @@ -1,3 +1,5 @@ +use std::sync::Arc; + use crate::{database::KeyValueDatabase, service, utils, Error, Result}; impl service::appservice::Data for KeyValueDatabase { diff --git a/src/database/key_value/globals.rs b/src/database/key_value/globals.rs index e6652290..87119207 100644 --- a/src/database/key_value/globals.rs +++ b/src/database/key_value/globals.rs @@ -1,8 +1,136 @@ -use ruma::signatures::Ed25519KeyPair; +use std::{collections::BTreeMap, sync::Arc}; -use crate::{Result, service, database::KeyValueDatabase, Error, utils}; +use async_trait::async_trait; +use futures_util::{stream::FuturesUnordered, StreamExt}; +use ruma::{signatures::Ed25519KeyPair, UserId, DeviceId, ServerName, api::federation::discovery::{ServerSigningKeys, VerifyKey}, ServerSigningKeyId, MilliSecondsSinceUnixEpoch}; + +use crate::{Result, service, database::KeyValueDatabase, Error, utils, services}; + +pub const COUNTER: &[u8] = b"c"; + +#[async_trait] +impl service::globals::Data for Arc { + fn next_count(&self) -> Result { + utils::u64_from_bytes(&self.global.increment(COUNTER)?) + .map_err(|_| Error::bad_database("Count has invalid bytes.")) + } + + fn current_count(&self) -> Result { + self.global.get(COUNTER)?.map_or(Ok(0_u64), |bytes| { + utils::u64_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Count has invalid bytes.")) + }) + } + + async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { + let userid_bytes = user_id.as_bytes().to_vec(); + let mut userid_prefix = userid_bytes.clone(); + userid_prefix.push(0xff); + + let mut userdeviceid_prefix = userid_prefix.clone(); + userdeviceid_prefix.extend_from_slice(device_id.as_bytes()); + userdeviceid_prefix.push(0xff); + + let mut futures = FuturesUnordered::new(); + + // Return when *any* user changed his key + // TODO: only send for user they share a room with + futures.push( + self.todeviceid_events + .watch_prefix(&userdeviceid_prefix), + ); + + futures.push(self.userroomid_joined.watch_prefix(&userid_prefix)); + futures.push( + self.userroomid_invitestate + .watch_prefix(&userid_prefix), + ); + futures.push(self.userroomid_leftstate.watch_prefix(&userid_prefix)); + futures.push( + self.userroomid_notificationcount + .watch_prefix(&userid_prefix), + ); + futures.push( + self.userroomid_highlightcount + .watch_prefix(&userid_prefix), + ); + + // Events for rooms we are in + for room_id in services().rooms.state_cache.rooms_joined(user_id).filter_map(|r| r.ok()) { + let short_roomid = services() + .rooms + .short + .get_shortroomid(&room_id) + .ok() + .flatten() + .expect("room exists") + .to_be_bytes() + .to_vec(); + + let roomid_bytes = room_id.as_bytes().to_vec(); + let mut roomid_prefix = roomid_bytes.clone(); + roomid_prefix.push(0xff); + + // PDUs + futures.push(self.pduid_pdu.watch_prefix(&short_roomid)); + + // EDUs + futures.push( + self.roomid_lasttypingupdate + .watch_prefix(&roomid_bytes), + ); + + futures.push( + self.readreceiptid_readreceipt + .watch_prefix(&roomid_prefix), + ); + + // Key changes + 
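The key-value layer above assembles its tree keys by joining the components with `0xff` separator bytes (room id, user id, counter, event type). Since `0xff` never occurs in valid UTF-8, it cannot collide with the identifier strings. A small sketch of that key layout; the helper name is illustrative:

    // Builds a `roomuserdataid`-style key: room \xff user \xff count \xff type.
    fn room_user_data_key(room_id: &str, user_id: &str, count: u64, event_type: &str) -> Vec<u8> {
        let mut key = room_id.as_bytes().to_vec();
        key.push(0xff);
        key.extend_from_slice(user_id.as_bytes());
        key.push(0xff);
        // Big-endian so lexicographic key order matches numeric order.
        key.extend_from_slice(&count.to_be_bytes());
        key.push(0xff);
        key.extend_from_slice(event_type.as_bytes());
        key
    }

    fn main() {
        let key = room_user_data_key("!room:example.org", "@alice:example.org", 42, "m.tag");
        assert!(key.contains(&0xff));
        println!("key is {} bytes long", key.len());
    }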
futures.push(self.keychangeid_userid.watch_prefix(&roomid_prefix)); + + // Room account data + let mut roomuser_prefix = roomid_prefix.clone(); + roomuser_prefix.extend_from_slice(&userid_prefix); + + futures.push( + self.roomusertype_roomuserdataid + .watch_prefix(&roomuser_prefix), + ); + } + + let mut globaluserdata_prefix = vec![0xff]; + globaluserdata_prefix.extend_from_slice(&userid_prefix); + + futures.push( + self.roomusertype_roomuserdataid + .watch_prefix(&globaluserdata_prefix), + ); + + // More key changes (used when user is not joined to any rooms) + futures.push(self.keychangeid_userid.watch_prefix(&userid_prefix)); + + // One time keys + futures.push( + self.userid_lastonetimekeyupdate + .watch_prefix(&userid_bytes), + ); + + futures.push(Box::pin(services().globals.rotate.watch())); + + // Wait until one of them finds something + futures.next().await; + + Ok(()) + } + + fn cleanup(&self) -> Result<()> { + self._db.cleanup() + } + + fn memory_usage(&self) -> Result { + self._db.memory_usage() + } -impl service::globals::Data for KeyValueDatabase { fn load_keypair(&self) -> Result { let keypair_bytes = self.global.get(b"keypair")?.map_or_else( || { @@ -39,4 +167,81 @@ impl service::globals::Data for KeyValueDatabase { fn remove_keypair(&self) -> Result<()> { self.global.remove(b"keypair") } + + fn add_signing_key( + &self, + origin: &ServerName, + new_keys: ServerSigningKeys, + ) -> Result, VerifyKey>> { + // Not atomic, but this is not critical + let signingkeys = self.server_signingkeys.get(origin.as_bytes())?; + + let mut keys = signingkeys + .and_then(|keys| serde_json::from_slice(&keys).ok()) + .unwrap_or_else(|| { + // Just insert "now", it doesn't matter + ServerSigningKeys::new(origin.to_owned(), MilliSecondsSinceUnixEpoch::now()) + }); + + let ServerSigningKeys { + verify_keys, + old_verify_keys, + .. + } = new_keys; + + keys.verify_keys.extend(verify_keys.into_iter()); + keys.old_verify_keys.extend(old_verify_keys.into_iter()); + + self.server_signingkeys.insert( + origin.as_bytes(), + &serde_json::to_vec(&keys).expect("serversigningkeys can be serialized"), + )?; + + let mut tree = keys.verify_keys; + tree.extend( + keys.old_verify_keys + .into_iter() + .map(|old| (old.0, VerifyKey::new(old.1.key))), + ); + + Ok(tree) + } + + /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server. + fn signing_keys_for( + &self, + origin: &ServerName, + ) -> Result, VerifyKey>> { + let signingkeys = self + .server_signingkeys + .get(origin.as_bytes())? 
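`watch` above pushes one future per key prefix it cares about into a `FuturesUnordered` and returns as soon as any of them resolves, which is what lets a long-polling /sync request wake up on the first relevant change. A minimal sketch of that first-of-many pattern, with oneshot channels standing in for the real `watch_prefix` futures:

    use futures_util::{stream::FuturesUnordered, StreamExt};
    use tokio::sync::oneshot;

    #[tokio::main]
    async fn main() {
        let mut watchers = FuturesUnordered::new();

        // Pretend each receiver is a `tree.watch_prefix(...)` future that
        // completes when something under that prefix changes.
        let (tx_a, rx_a) = oneshot::channel::<()>();
        let (_tx_b, rx_b) = oneshot::channel::<()>();
        watchers.push(rx_a);
        watchers.push(rx_b);

        // Simulate an update on the first prefix.
        tx_a.send(()).unwrap();

        // Wait until *one* watcher fires; the rest are simply dropped.
        watchers.next().await;
        println!("something changed, the sync response can be built now");
    }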
+ .and_then(|bytes| serde_json::from_slice(&bytes).ok()) + .map(|keys: ServerSigningKeys| { + let mut tree = keys.verify_keys; + tree.extend( + keys.old_verify_keys + .into_iter() + .map(|old| (old.0, VerifyKey::new(old.1.key))), + ); + tree + }) + .unwrap_or_else(BTreeMap::new); + + Ok(signingkeys) + } + + fn database_version(&self) -> Result { + self.global.get(b"version")?.map_or(Ok(0), |version| { + utils::u64_from_bytes(&version) + .map_err(|_| Error::bad_database("Database version id is invalid.")) + }) + } + + fn bump_database_version(&self, new_version: u64) -> Result<()> { + self.global + .insert(b"version", &new_version.to_be_bytes())?; + Ok(()) + } + + } diff --git a/src/database/key_value/key_backups.rs b/src/database/key_value/key_backups.rs index 8171451c..c59ed36b 100644 --- a/src/database/key_value/key_backups.rs +++ b/src/database/key_value/key_backups.rs @@ -1,10 +1,10 @@ -use std::collections::BTreeMap; +use std::{collections::BTreeMap, sync::Arc}; use ruma::{UserId, serde::Raw, api::client::{backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, error::ErrorKind}, RoomId}; use crate::{Result, service, database::KeyValueDatabase, services, Error, utils}; -impl service::key_backups::Data for KeyValueDatabase { +impl service::key_backups::Data for Arc { fn create_backup( &self, user_id: &UserId, diff --git a/src/database/key_value/media.rs b/src/database/key_value/media.rs index a84cbd53..1726755a 100644 --- a/src/database/key_value/media.rs +++ b/src/database/key_value/media.rs @@ -1,9 +1,11 @@ +use std::sync::Arc; + use ruma::api::client::error::ErrorKind; use crate::{database::KeyValueDatabase, service, Error, utils, Result}; -impl service::media::Data for KeyValueDatabase { - fn create_file_metadata(&self, mxc: String, width: u32, height: u32, content_disposition: &Option<&str>, content_type: &Option<&str>) -> Result> { +impl service::media::Data for Arc { + fn create_file_metadata(&self, mxc: String, width: u32, height: u32, content_disposition: Option<&str>, content_type: Option<&str>) -> Result> { let mut key = mxc.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(&width.to_be_bytes()); diff --git a/src/database/key_value/pusher.rs b/src/database/key_value/pusher.rs index b05e47be..85d1d864 100644 --- a/src/database/key_value/pusher.rs +++ b/src/database/key_value/pusher.rs @@ -1,8 +1,10 @@ +use std::sync::Arc; + use ruma::{UserId, api::client::push::{set_pusher, get_pushers}}; use crate::{service, database::KeyValueDatabase, Error, Result}; -impl service::pusher::Data for KeyValueDatabase { +impl service::pusher::Data for Arc { fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> { let mut key = sender.as_bytes().to_vec(); key.push(0xff); diff --git a/src/database/key_value/rooms/alias.rs b/src/database/key_value/rooms/alias.rs index 0aa8dd48..437902df 100644 --- a/src/database/key_value/rooms/alias.rs +++ b/src/database/key_value/rooms/alias.rs @@ -1,8 +1,10 @@ +use std::sync::Arc; + use ruma::{RoomId, RoomAliasId, api::client::error::ErrorKind}; use crate::{service, database::KeyValueDatabase, utils, Error, services, Result}; -impl service::rooms::alias::Data for KeyValueDatabase { +impl service::rooms::alias::Data for Arc { fn set_alias( &self, alias: &RoomAliasId, diff --git a/src/database/key_value/rooms/auth_chain.rs b/src/database/key_value/rooms/auth_chain.rs index 888d472d..2dffb04b 100644 --- a/src/database/key_value/rooms/auth_chain.rs +++ b/src/database/key_value/rooms/auth_chain.rs @@ -1,28 +1,60 @@ -use 
std::{collections::HashSet, mem::size_of}; +use std::{collections::HashSet, mem::size_of, sync::Arc}; use crate::{service, database::KeyValueDatabase, Result, utils}; -impl service::rooms::auth_chain::Data for KeyValueDatabase { - fn get_cached_eventid_authchain(&self, shorteventid: u64) -> Result>> { - Ok(self.shorteventid_authchain - .get(&shorteventid.to_be_bytes())? - .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - })) +impl service::rooms::auth_chain::Data for Arc { + fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result>>> { + // Check RAM cache + if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { + return Ok(Some(Arc::clone(result))); + } + + // We only save auth chains for single events in the db + if key.len() == 1 { + // Check DB cache + let chain = self.shorteventid_authchain + .get(&key[0].to_be_bytes())? + .map(|chain| { + chain + .chunks_exact(size_of::()) + .map(|chunk| { + utils::u64_from_bytes(chunk).expect("byte length is correct") + }) + .collect() + }); + + if let Some(chain) = chain { + let chain = Arc::new(chain); + + // Cache in RAM + self.auth_chain_cache + .lock() + .unwrap() + .insert(vec![key[0]], Arc::clone(&chain)); + + return Ok(Some(chain)); + } + } + + Ok(None) + } - fn cache_eventid_authchain(&self, shorteventid: u64, auth_chain: &HashSet) -> Result<()> { - self.shorteventid_authchain.insert( - &shorteventid.to_be_bytes(), - &auth_chain - .iter() - .flat_map(|s| s.to_be_bytes().to_vec()) - .collect::>(), - ) + fn cache_auth_chain(&self, key: Vec, auth_chain: Arc>) -> Result<()> { + // Only persist single events in db + if key.len() == 1 { + self.shorteventid_authchain.insert( + &key[0].to_be_bytes(), + &auth_chain + .iter() + .flat_map(|s| s.to_be_bytes().to_vec()) + .collect::>(), + )?; + } + + // Cache in RAM + self.auth_chain_cache.lock().unwrap().insert(key, auth_chain); + + Ok(()) } } diff --git a/src/database/key_value/rooms/directory.rs b/src/database/key_value/rooms/directory.rs index 727004e7..864e75e9 100644 --- a/src/database/key_value/rooms/directory.rs +++ b/src/database/key_value/rooms/directory.rs @@ -1,8 +1,10 @@ +use std::sync::Arc; + use ruma::RoomId; use crate::{service, database::KeyValueDatabase, utils, Error, Result}; -impl service::rooms::directory::Data for KeyValueDatabase { +impl service::rooms::directory::Data for Arc { fn set_public(&self, room_id: &RoomId) -> Result<()> { self.publicroomids.insert(room_id.as_bytes(), &[]) } diff --git a/src/database/key_value/rooms/edus/mod.rs b/src/database/key_value/rooms/edus/mod.rs index b5007f89..03e4219e 100644 --- a/src/database/key_value/rooms/edus/mod.rs +++ b/src/database/key_value/rooms/edus/mod.rs @@ -2,6 +2,8 @@ mod presence; mod typing; mod read_receipt; +use std::sync::Arc; + use crate::{service, database::KeyValueDatabase}; -impl service::rooms::edus::Data for KeyValueDatabase {} +impl service::rooms::edus::Data for Arc {} diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs index 1477c28b..5aeb1477 100644 --- a/src/database/key_value/rooms/edus/presence.rs +++ b/src/database/key_value/rooms/edus/presence.rs @@ -1,10 +1,10 @@ -use std::collections::HashMap; +use std::{collections::HashMap, sync::Arc}; use ruma::{UserId, RoomId, events::presence::PresenceEvent, presence::PresenceState, UInt}; use crate::{service, database::KeyValueDatabase, utils, Error, services, Result}; -impl 
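The auth-chain storage above is a two-tier cache: a RAM cache keyed by an arbitrary set of short event IDs, and a persistent tree that only ever stores chains for single events. A reduced sketch of that lookup and insert logic, using plain `HashMap`s where the real code uses an `LruCache` and a database tree:

    use std::collections::{HashMap, HashSet};
    use std::sync::{Arc, Mutex};

    struct AuthChainStore {
        // RAM: the key may cover several events at once.
        ram: Mutex<HashMap<Vec<u64>, Arc<HashSet<u64>>>>,
        // "Disk": only single-event chains are persisted.
        disk: Mutex<HashMap<u64, HashSet<u64>>>,
    }

    impl AuthChainStore {
        fn get(&self, key: &[u64]) -> Option<Arc<HashSet<u64>>> {
            // 1. RAM first.
            if let Some(hit) = self.ram.lock().unwrap().get(key) {
                return Some(Arc::clone(hit));
            }
            // 2. Fall back to the persistent tier, single-event keys only,
            //    and promote the hit back into RAM.
            if let [single] = key {
                if let Some(chain) = self.disk.lock().unwrap().get(single) {
                    let chain = Arc::new(chain.clone());
                    self.ram.lock().unwrap().insert(vec![*single], Arc::clone(&chain));
                    return Some(chain);
                }
            }
            None
        }

        fn cache(&self, key: Vec<u64>, chain: Arc<HashSet<u64>>) {
            if let [single] = key.as_slice() {
                self.disk.lock().unwrap().insert(*single, (*chain).clone());
            }
            self.ram.lock().unwrap().insert(key, chain);
        }
    }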
service::rooms::edus::presence::Data for KeyValueDatabase { +impl service::rooms::edus::presence::Data for Arc { fn update_presence( &self, user_id: &UserId, diff --git a/src/database/key_value/rooms/edus/read_receipt.rs b/src/database/key_value/rooms/edus/read_receipt.rs index a12e2653..7fcb8ac8 100644 --- a/src/database/key_value/rooms/edus/read_receipt.rs +++ b/src/database/key_value/rooms/edus/read_receipt.rs @@ -1,10 +1,10 @@ -use std::mem; +use std::{mem, sync::Arc}; use ruma::{UserId, RoomId, events::receipt::ReceiptEvent, serde::Raw, signatures::CanonicalJsonObject}; use crate::{database::KeyValueDatabase, service, utils, Error, services, Result}; -impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { +impl service::rooms::edus::read_receipt::Data for Arc { fn readreceipt_update( &self, user_id: &UserId, diff --git a/src/database/key_value/rooms/edus/typing.rs b/src/database/key_value/rooms/edus/typing.rs index b7d35968..7f3526d9 100644 --- a/src/database/key_value/rooms/edus/typing.rs +++ b/src/database/key_value/rooms/edus/typing.rs @@ -1,10 +1,10 @@ -use std::collections::HashSet; +use std::{collections::HashSet, sync::Arc}; use ruma::{UserId, RoomId}; use crate::{database::KeyValueDatabase, service, utils, Error, services, Result}; -impl service::rooms::edus::typing::Data for KeyValueDatabase { +impl service::rooms::edus::typing::Data for Arc { fn typing_add( &self, user_id: &UserId, diff --git a/src/database/key_value/rooms/lazy_load.rs b/src/database/key_value/rooms/lazy_load.rs index 133e1d04..b16657aa 100644 --- a/src/database/key_value/rooms/lazy_load.rs +++ b/src/database/key_value/rooms/lazy_load.rs @@ -1,8 +1,10 @@ +use std::sync::Arc; + use ruma::{UserId, DeviceId, RoomId}; use crate::{service, database::KeyValueDatabase, Result}; -impl service::rooms::lazy_loading::Data for KeyValueDatabase { +impl service::rooms::lazy_loading::Data for Arc { fn lazy_load_was_sent_before( &self, user_id: &UserId, diff --git a/src/database/key_value/rooms/metadata.rs b/src/database/key_value/rooms/metadata.rs index db2bc69b..560beb90 100644 --- a/src/database/key_value/rooms/metadata.rs +++ b/src/database/key_value/rooms/metadata.rs @@ -1,8 +1,10 @@ +use std::sync::Arc; + use ruma::RoomId; use crate::{service, database::KeyValueDatabase, Result, services}; -impl service::rooms::metadata::Data for KeyValueDatabase { +impl service::rooms::metadata::Data for Arc { fn exists(&self, room_id: &RoomId) -> Result { let prefix = match services().rooms.short.get_shortroomid(room_id)? 
{ Some(b) => b.to_be_bytes().to_vec(), diff --git a/src/database/key_value/rooms/mod.rs b/src/database/key_value/rooms/mod.rs index 406943ed..97c29e5b 100644 --- a/src/database/key_value/rooms/mod.rs +++ b/src/database/key_value/rooms/mod.rs @@ -15,6 +15,8 @@ mod state_compressor; mod timeline; mod user; +use std::sync::Arc; + use crate::{database::KeyValueDatabase, service}; -impl service::rooms::Data for KeyValueDatabase {} +impl service::rooms::Data for Arc {} diff --git a/src/database/key_value/rooms/outlier.rs b/src/database/key_value/rooms/outlier.rs index aa975449..b1ae816a 100644 --- a/src/database/key_value/rooms/outlier.rs +++ b/src/database/key_value/rooms/outlier.rs @@ -1,8 +1,10 @@ +use std::sync::Arc; + use ruma::{EventId, signatures::CanonicalJsonObject}; use crate::{service, database::KeyValueDatabase, PduEvent, Error, Result}; -impl service::rooms::outlier::Data for KeyValueDatabase { +impl service::rooms::outlier::Data for Arc { fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { self.eventid_outlierpdu .get(event_id.as_bytes())? diff --git a/src/database/key_value/rooms/pdu_metadata.rs b/src/database/key_value/rooms/pdu_metadata.rs index f3ac414f..f5e8f766 100644 --- a/src/database/key_value/rooms/pdu_metadata.rs +++ b/src/database/key_value/rooms/pdu_metadata.rs @@ -4,7 +4,7 @@ use ruma::{RoomId, EventId}; use crate::{service, database::KeyValueDatabase, Result}; -impl service::rooms::pdu_metadata::Data for KeyValueDatabase { +impl service::rooms::pdu_metadata::Data for Arc { fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { for prev in event_ids { let mut key = room_id.as_bytes().to_vec(); diff --git a/src/database/key_value/rooms/search.rs b/src/database/key_value/rooms/search.rs index dfbdbc64..7b8d2783 100644 --- a/src/database/key_value/rooms/search.rs +++ b/src/database/key_value/rooms/search.rs @@ -1,10 +1,10 @@ -use std::mem::size_of; +use std::{mem::size_of, sync::Arc}; use ruma::RoomId; use crate::{service, database::KeyValueDatabase, utils, Result, services}; -impl service::rooms::search::Data for KeyValueDatabase { +impl service::rooms::search::Data for Arc { fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: String) -> Result<()> { let mut batch = message_body .split_terminator(|c: char| !c.is_alphanumeric()) diff --git a/src/database/key_value/rooms/short.rs b/src/database/key_value/rooms/short.rs index 91296385..9a302b56 100644 --- a/src/database/key_value/rooms/short.rs +++ b/src/database/key_value/rooms/short.rs @@ -1,4 +1,6 @@ +use std::sync::Arc; + use crate::{database::KeyValueDatabase, service}; -impl service::rooms::short::Data for KeyValueDatabase { +impl service::rooms::short::Data for Arc { } diff --git a/src/database/key_value/rooms/state.rs b/src/database/key_value/rooms/state.rs index 405939dd..527c2403 100644 --- a/src/database/key_value/rooms/state.rs +++ b/src/database/key_value/rooms/state.rs @@ -1,11 +1,12 @@ use ruma::{RoomId, EventId}; +use tokio::sync::MutexGuard; use std::sync::Arc; -use std::{sync::MutexGuard, collections::HashSet}; +use std::collections::HashSet; use std::fmt::Debug; use crate::{service, database::KeyValueDatabase, utils, Error, Result}; -impl service::rooms::state::Data for KeyValueDatabase { +impl service::rooms::state::Data for Arc { fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { self.roomid_shortstatehash .get(room_id.as_bytes())? 
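// A minimal illustrative sketch (separate from the patch) of why the state code above now
// imports tokio::sync::MutexGuard instead of std::sync::MutexGuard: the room-state lock is
// acquired with `.lock().await` and may be held across await points, so the guard that
// callers pass into `set_forward_extremities` has to be tokio's async-aware guard. The
// function body and the `()` lock payload below are simplified stand-ins.
use std::sync::Arc;
use tokio::sync::{Mutex, MutexGuard};

async fn set_forward_extremities(
    room_id: &str,
    _mutex_lock: &MutexGuard<'_, ()>, // proof that the caller holds the room-state lock
) {
    println!("replacing forward extremities of {room_id} while the state lock is held");
}

#[tokio::main]
async fn main() {
    let roomid_mutex_state = Arc::new(Mutex::new(()));
    let state_lock = roomid_mutex_state.lock().await; // fine to hold across .await
    set_forward_extremities("!room:example.org", &state_lock).await;
    drop(state_lock);
}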
@@ -48,7 +49,7 @@ impl service::rooms::state::Data for KeyValueDatabase { fn set_forward_extremities<'a>( &self, room_id: &RoomId, - event_ids: impl IntoIterator + Debug, + event_ids: &mut dyn Iterator, _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); diff --git a/src/database/key_value/rooms/state_accessor.rs b/src/database/key_value/rooms/state_accessor.rs index 4d5bd4a1..9af45db3 100644 --- a/src/database/key_value/rooms/state_accessor.rs +++ b/src/database/key_value/rooms/state_accessor.rs @@ -5,7 +5,7 @@ use async_trait::async_trait; use ruma::{EventId, events::StateEventType, RoomId}; #[async_trait] -impl service::rooms::state_accessor::Data for KeyValueDatabase { +impl service::rooms::state_accessor::Data for Arc { async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { let full_state = services().rooms.state_compressor .load_shortstatehash_info(shortstatehash)? diff --git a/src/database/key_value/rooms/state_cache.rs b/src/database/key_value/rooms/state_cache.rs index 5f054858..bdb8cf81 100644 --- a/src/database/key_value/rooms/state_cache.rs +++ b/src/database/key_value/rooms/state_cache.rs @@ -1,8 +1,10 @@ +use std::sync::Arc; + use ruma::{UserId, RoomId, events::{AnyStrippedStateEvent, AnySyncStateEvent}, serde::Raw}; use crate::{service, database::KeyValueDatabase, services, Result}; -impl service::rooms::state_cache::Data for KeyValueDatabase { +impl service::rooms::state_cache::Data for Arc { fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); diff --git a/src/database/key_value/rooms/state_compressor.rs b/src/database/key_value/rooms/state_compressor.rs index aee1890c..e1c0280b 100644 --- a/src/database/key_value/rooms/state_compressor.rs +++ b/src/database/key_value/rooms/state_compressor.rs @@ -1,8 +1,8 @@ -use std::{collections::HashSet, mem::size_of}; +use std::{collections::HashSet, mem::size_of, sync::Arc}; use crate::{service::{self, rooms::state_compressor::data::StateDiff}, database::KeyValueDatabase, Error, utils, Result}; -impl service::rooms::state_compressor::Data for KeyValueDatabase { +impl service::rooms::state_compressor::Data for Arc { fn get_statediff(&self, shortstatehash: u64) -> Result { let value = self .shortstatehash_statediff diff --git a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs index a3b6c17d..2d334b96 100644 --- a/src/database/key_value/rooms/timeline.rs +++ b/src/database/key_value/rooms/timeline.rs @@ -5,7 +5,7 @@ use tracing::error; use crate::{service, database::KeyValueDatabase, utils, Error, PduEvent, Result, services}; -impl service::rooms::timeline::Data for KeyValueDatabase { +impl service::rooms::timeline::Data for Arc { fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { match self .lasttimelinecount_cache diff --git a/src/database/key_value/rooms/user.rs b/src/database/key_value/rooms/user.rs index 66681e3c..4d20b00a 100644 --- a/src/database/key_value/rooms/user.rs +++ b/src/database/key_value/rooms/user.rs @@ -1,8 +1,10 @@ +use std::sync::Arc; + use ruma::{UserId, RoomId}; use crate::{service, database::KeyValueDatabase, utils, Error, Result, services}; -impl service::rooms::user::Data for KeyValueDatabase { +impl service::rooms::user::Data for Arc { fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { 
let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); diff --git a/src/database/key_value/transaction_ids.rs b/src/database/key_value/transaction_ids.rs index a63b3c5d..7fa69081 100644 --- a/src/database/key_value/transaction_ids.rs +++ b/src/database/key_value/transaction_ids.rs @@ -1,8 +1,10 @@ +use std::sync::Arc; + use ruma::{UserId, DeviceId, TransactionId}; use crate::{service, database::KeyValueDatabase, Result}; -impl service::transaction_ids::Data for KeyValueDatabase { +impl service::transaction_ids::Data for Arc { fn add_txnid( &self, user_id: &UserId, diff --git a/src/database/key_value/uiaa.rs b/src/database/key_value/uiaa.rs index cf242dec..8752e55a 100644 --- a/src/database/key_value/uiaa.rs +++ b/src/database/key_value/uiaa.rs @@ -1,8 +1,10 @@ +use std::sync::Arc; + use ruma::{UserId, DeviceId, signatures::CanonicalJsonValue, api::client::{uiaa::UiaaInfo, error::ErrorKind}}; use crate::{database::KeyValueDatabase, service, Error, Result}; -impl service::uiaa::Data for KeyValueDatabase { +impl service::uiaa::Data for Arc { fn set_uiaa_request( &self, user_id: &UserId, diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs index 338d8800..1ac85b36 100644 --- a/src/database/key_value/users.rs +++ b/src/database/key_value/users.rs @@ -1,11 +1,11 @@ -use std::{mem::size_of, collections::BTreeMap}; +use std::{mem::size_of, collections::BTreeMap, sync::Arc}; use ruma::{api::client::{filter::IncomingFilterDefinition, error::ErrorKind, device::Device}, UserId, RoomAliasId, MxcUri, DeviceId, MilliSecondsSinceUnixEpoch, DeviceKeyId, encryption::{OneTimeKey, CrossSigningKey, DeviceKeys}, serde::Raw, events::{AnyToDeviceEvent, StateEventType}, DeviceKeyAlgorithm, UInt}; use tracing::warn; use crate::{service::{self, users::clean_signatures}, database::KeyValueDatabase, Error, utils, services, Result}; -impl service::users::Data for KeyValueDatabase { +impl service::users::Data for Arc { /// Check if a user has an account on this homeserver. fn exists(&self, user_id: &UserId) -> Result { Ok(self.userid_password.get(user_id.as_bytes())?.is_some()) @@ -687,10 +687,10 @@ impl service::users::Data for KeyValueDatabase { }) } - fn get_master_key bool>( + fn get_master_key( &self, user_id: &UserId, - allowed_signatures: F, + allowed_signatures: &dyn Fn(&UserId) -> bool, ) -> Result>> { self.userid_masterkeyid .get(user_id.as_bytes())? @@ -708,10 +708,10 @@ impl service::users::Data for KeyValueDatabase { }) } - fn get_self_signing_key bool>( + fn get_self_signing_key( &self, user_id: &UserId, - allowed_signatures: F, + allowed_signatures: &dyn Fn(&UserId) -> bool, ) -> Result>> { self.userid_selfsigningkeyid .get(user_id.as_bytes())? 
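// A minimal illustrative sketch (separate from the patch) of the signature change above:
// `get_master_key` and `get_self_signing_key` drop their `F: Fn(&UserId) -> bool` generic
// in favour of `&dyn Fn(&UserId) -> bool`. A trait with generic methods cannot be used as
// a trait object, and these Data traits are now held behind `dyn Data`, so the callback
// itself becomes a trait object. `&str` stands in for `&UserId` here.
trait Data: Send + Sync {
    // Object-safe: the callback is passed as a trait object.
    fn get_master_key(&self, user_id: &str, allowed_signatures: &dyn Fn(&str) -> bool) -> Option<String>;
    // A generic method such as
    //     fn get_master_key<F: Fn(&str) -> bool>(...) -> Option<String>;
    // would make `Box<dyn Data>` impossible to form.
}

struct Db;

impl Data for Db {
    fn get_master_key(&self, user_id: &str, allowed_signatures: &dyn Fn(&str) -> bool) -> Option<String> {
        allowed_signatures(user_id).then(|| format!("master key of {user_id}"))
    }
}

fn main() {
    let db: Box<dyn Data> = Box::new(Db);
    let ours = "@alice:example.org";
    // Closures coerce to `&dyn Fn(..)` at the call site.
    assert!(db.get_master_key(ours, &|u| u == ours).is_some());
    assert!(db.get_master_key("@eve:example.org", &|u| u == ours).is_none());
}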
diff --git a/src/database/mod.rs b/src/database/mod.rs index aa5c5839..35922f0b 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -402,10 +402,10 @@ impl KeyValueDatabase { }); - let services_raw = Services::build(Arc::clone(&db)); + let services_raw = Box::new(Services::build(Arc::clone(&db))); // This is the first and only time we initialize the SERVICE static - *SERVICES.write().unwrap() = Some(services_raw); + *SERVICES.write().unwrap() = Some(Box::leak(services_raw)); // Matrix resource ownership is based on the server name; changing it @@ -877,105 +877,6 @@ impl KeyValueDatabase { services().globals.rotate.fire(); } - pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) { - let userid_bytes = user_id.as_bytes().to_vec(); - let mut userid_prefix = userid_bytes.clone(); - userid_prefix.push(0xff); - - let mut userdeviceid_prefix = userid_prefix.clone(); - userdeviceid_prefix.extend_from_slice(device_id.as_bytes()); - userdeviceid_prefix.push(0xff); - - let mut futures = FuturesUnordered::new(); - - // Return when *any* user changed his key - // TODO: only send for user they share a room with - futures.push( - self.todeviceid_events - .watch_prefix(&userdeviceid_prefix), - ); - - futures.push(self.userroomid_joined.watch_prefix(&userid_prefix)); - futures.push( - self.userroomid_invitestate - .watch_prefix(&userid_prefix), - ); - futures.push(self.userroomid_leftstate.watch_prefix(&userid_prefix)); - futures.push( - self.userroomid_notificationcount - .watch_prefix(&userid_prefix), - ); - futures.push( - self.userroomid_highlightcount - .watch_prefix(&userid_prefix), - ); - - // Events for rooms we are in - for room_id in services().rooms.state_cache.rooms_joined(user_id).filter_map(|r| r.ok()) { - let short_roomid = services() - .rooms - .short - .get_shortroomid(&room_id) - .ok() - .flatten() - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let roomid_bytes = room_id.as_bytes().to_vec(); - let mut roomid_prefix = roomid_bytes.clone(); - roomid_prefix.push(0xff); - - // PDUs - futures.push(self.pduid_pdu.watch_prefix(&short_roomid)); - - // EDUs - futures.push( - self.roomid_lasttypingupdate - .watch_prefix(&roomid_bytes), - ); - - futures.push( - self.readreceiptid_readreceipt - .watch_prefix(&roomid_prefix), - ); - - // Key changes - futures.push(self.keychangeid_userid.watch_prefix(&roomid_prefix)); - - // Room account data - let mut roomuser_prefix = roomid_prefix.clone(); - roomuser_prefix.extend_from_slice(&userid_prefix); - - futures.push( - self.roomusertype_roomuserdataid - .watch_prefix(&roomuser_prefix), - ); - } - - let mut globaluserdata_prefix = vec![0xff]; - globaluserdata_prefix.extend_from_slice(&userid_prefix); - - futures.push( - self.roomusertype_roomuserdataid - .watch_prefix(&globaluserdata_prefix), - ); - - // More key changes (used when user is not joined to any rooms) - futures.push(self.keychangeid_userid.watch_prefix(&userid_prefix)); - - // One time keys - futures.push( - self.userid_lastonetimekeyupdate - .watch_prefix(&userid_bytes), - ); - - futures.push(Box::pin(services().globals.rotate.watch())); - - // Wait until one of them finds something - futures.next().await; - } - #[tracing::instrument(skip(self))] pub fn flush(&self) -> Result<()> { let start = std::time::Instant::now(); @@ -1021,7 +922,7 @@ impl KeyValueDatabase { } let start = Instant::now(); - if let Err(e) = services().globals.db._db.cleanup() { + if let Err(e) = services().globals.cleanup() { error!("cleanup: Errored: {}", e); } else { info!("cleanup: 
Finished in {:?}", start.elapsed()); @@ -1048,9 +949,9 @@ fn set_emergency_access() -> Result { None, &conduit_user, GlobalAccountDataEventType::PushRules.to_string().into(), - &GlobalAccountDataEvent { + &serde_json::to_value(&GlobalAccountDataEvent { content: PushRulesEventContent { global: ruleset }, - }, + }).expect("to json value always works"), )?; res diff --git a/src/lib.rs b/src/lib.rs index 75cf6c7e..c103d529 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -20,9 +20,9 @@ pub use utils::error::{Error, Result}; pub use service::{Services, pdu::PduEvent}; pub use api::ruma_wrapper::{Ruma, RumaResponse}; -pub static SERVICES: RwLock>> = RwLock::new(None); +pub static SERVICES: RwLock> = RwLock::new(None); -pub fn services<'a>() -> Arc { - Arc::clone(&SERVICES.read().unwrap()) +pub fn services<'a>() -> &'static Services { + &SERVICES.read().unwrap().expect("SERVICES should be initialized when this is called") } diff --git a/src/service/account_data/data.rs b/src/service/account_data/data.rs index 0f8e0bf5..65780a69 100644 --- a/src/service/account_data/data.rs +++ b/src/service/account_data/data.rs @@ -1,26 +1,25 @@ use std::collections::HashMap; use ruma::{UserId, RoomId, events::{RoomAccountDataEventType, AnyEphemeralRoomEvent}, serde::Raw}; -use serde::{Serialize, de::DeserializeOwned}; use crate::Result; -pub trait Data { +pub trait Data: Send + Sync { /// Places one event in the account data of the user and removes the previous entry. - fn update( + fn update( &self, room_id: Option<&RoomId>, user_id: &UserId, event_type: RoomAccountDataEventType, - data: &T, + data: &serde_json::Value, ) -> Result<()>; /// Searches the account data for a specific kind. - fn get( + fn get( &self, room_id: Option<&RoomId>, user_id: &UserId, kind: RoomAccountDataEventType, - ) -> Result>; + ) -> Result>>; /// Returns all changes to the account data that happened after `since`. fn changes_since( diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index 35ca1495..9785478b 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -24,24 +24,24 @@ pub struct Service { impl Service { /// Places one event in the account data of the user and removes the previous entry. #[tracing::instrument(skip(self, room_id, user_id, event_type, data))] - pub fn update( + pub fn update( &self, room_id: Option<&RoomId>, user_id: &UserId, event_type: RoomAccountDataEventType, - data: &T, + data: &serde_json::Value, ) -> Result<()> { self.db.update(room_id, user_id, event_type, data) } /// Searches the account data for a specific kind. 
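// A minimal illustrative sketch (separate from the patch) of the SERVICES pattern above:
// the built `Services` value is boxed and leaked once at startup, so `services()` can hand
// out a plain `&'static Services` instead of cloning an `Arc` on every call. The `Services`
// struct below is a hypothetical stand-in for the real one.
use std::sync::RwLock;

struct Services {
    server_name: String,
}

static SERVICES: RwLock<Option<&'static Services>> = RwLock::new(None);

fn init(services_raw: Box<Services>) {
    // Leak exactly once; the allocation then lives for the remainder of the program.
    *SERVICES.write().unwrap() = Some(Box::leak(services_raw));
}

fn services() -> &'static Services {
    SERVICES
        .read()
        .unwrap()
        .expect("SERVICES should be initialized when this is called")
}

fn main() {
    init(Box::new(Services { server_name: "conduit.rs".to_owned() }));
    assert_eq!(services().server_name, "conduit.rs");
}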
#[tracing::instrument(skip(self, room_id, user_id, event_type))] - pub fn get( + pub fn get( &self, room_id: Option<&RoomId>, user_id: &UserId, event_type: RoomAccountDataEventType, - ) -> Result> { + ) -> Result>> { self.db.get(room_id, user_id, event_type) } diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 48f828fc..32a709c1 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -28,7 +28,7 @@ use ruma::{ use serde_json::value::to_raw_value; use tokio::sync::{mpsc, MutexGuard, RwLock, RwLockReadGuard}; -use crate::{Result, services, Error, api::{server_server, client_server::AUTO_GEN_PASSWORD_LENGTH}, PduEvent, utils::{HtmlEscape, self}}; +use crate::{Result, services, Error, api::{server_server, client_server::{AUTO_GEN_PASSWORD_LENGTH, leave_all_rooms}}, PduEvent, utils::{HtmlEscape, self}}; use super::pdu::PduBuilder; @@ -179,7 +179,8 @@ impl Service { let conduit_room = services() .rooms - .id_from_alias( + .alias + .resolve_local_alias( format!("#admins:{}", services().globals.server_name()) .as_str() .try_into() @@ -221,7 +222,7 @@ impl Service { .roomid_mutex_state .write() .unwrap() - .entry(conduit_room.clone()) + .entry(conduit_room.to_owned()) .or_default(), ); @@ -599,11 +600,11 @@ impl Service { ruma::events::GlobalAccountDataEventType::PushRules .to_string() .into(), - &ruma::events::push_rules::PushRulesEvent { + &serde_json::to_value(ruma::events::push_rules::PushRulesEvent { content: ruma::events::push_rules::PushRulesEventContent { global: ruma::push::Ruleset::server_default(&user_id), }, - }, + }).expect("to json value always works"), )?; // we dont add a device since we're not the user, just the creator @@ -614,12 +615,14 @@ impl Service { )) } AdminCommand::DisableRoom { room_id } => { - services().rooms.disabledroomids.insert(room_id.as_bytes(), &[])?; - RoomMessageEventContent::text_plain("Room disabled.") + todo!(); + //services().rooms.disabledroomids.insert(room_id.as_bytes(), &[])?; + //RoomMessageEventContent::text_plain("Room disabled.") } AdminCommand::EnableRoom { room_id } => { - services().rooms.disabledroomids.remove(room_id.as_bytes())?; - RoomMessageEventContent::text_plain("Room enabled.") + todo!(); + //services().rooms.disabledroomids.remove(room_id.as_bytes())?; + //RoomMessageEventContent::text_plain("Room enabled.") } AdminCommand::DeactivateUser { leave_rooms, @@ -635,7 +638,7 @@ impl Service { services().users.deactivate_account(&user_id)?; if leave_rooms { - services().rooms.leave_all_rooms(&user_id).await?; + leave_all_rooms(&user_id).await?; } RoomMessageEventContent::text_plain(format!( @@ -694,7 +697,7 @@ impl Service { if leave_rooms { for &user_id in &user_ids { - let _ = services().rooms.leave_all_rooms(user_id).await; + let _ = leave_all_rooms(user_id).await; } } @@ -804,7 +807,7 @@ impl Service { pub(crate) async fn create_admin_room(&self) -> Result<()> { let room_id = RoomId::new(services().globals.server_name()); - services().rooms.get_or_create_shortroomid(&room_id)?; + services().rooms.short.get_or_create_shortroomid(&room_id)?; let mutex_state = Arc::clone( services().globals diff --git a/src/service/appservice/data.rs b/src/service/appservice/data.rs index a70bf9c1..744f0f94 100644 --- a/src/service/appservice/data.rs +++ b/src/service/appservice/data.rs @@ -1,6 +1,6 @@ use crate::Result; -pub trait Data { +pub trait Data: Send + Sync { /// Registers an appservice and returns the ID to the caller fn register_appservice(&self, yaml: serde_yaml::Value) -> Result; diff --git 
a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index 1a5ce50c..ad5ab4aa 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -1,10 +1,12 @@ mod data; +use std::sync::Arc; + pub use data::Data; use crate::Result; pub struct Service { - db: Box, + db: Arc, } impl Service { diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index f36ab61b..0f74b2a7 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -1,8 +1,30 @@ -use ruma::signatures::Ed25519KeyPair; +use std::collections::BTreeMap; + +use async_trait::async_trait; +use ruma::{signatures::Ed25519KeyPair, DeviceId, UserId, ServerName, api::federation::discovery::{ServerSigningKeys, VerifyKey}, ServerSigningKeyId}; use crate::Result; -pub trait Data { +#[async_trait] +pub trait Data: Send + Sync { + fn next_count(&self) -> Result; + fn current_count(&self) -> Result; + async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()>; + fn cleanup(&self) -> Result<()>; + fn memory_usage(&self) -> Result; fn load_keypair(&self) -> Result; fn remove_keypair(&self) -> Result<()>; + fn add_signing_key( + &self, + origin: &ServerName, + new_keys: ServerSigningKeys, + ) -> Result, VerifyKey>>; + + /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server. + fn signing_keys_for( + &self, + origin: &ServerName, + ) -> Result, VerifyKey>>; + fn database_version(&self) -> Result; + fn bump_database_version(&self, new_version: u64) -> Result<()>; } diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 48d7b064..8fd69dfe 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -26,8 +26,6 @@ use tokio::sync::{broadcast, watch::Receiver, Mutex as TokioMutex, Semaphore}; use tracing::error; use trust_dns_resolver::TokioAsyncResolver; -pub const COUNTER: &[u8] = b"c"; - type WellKnownMap = HashMap, (FedDest, String)>; type TlsNameMap = HashMap, u16)>; type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries @@ -198,16 +196,24 @@ impl Service { #[tracing::instrument(skip(self))] pub fn next_count(&self) -> Result { - utils::u64_from_bytes(&self.globals.increment(COUNTER)?) - .map_err(|_| Error::bad_database("Count has invalid bytes.")) + self.db.next_count() } #[tracing::instrument(skip(self))] pub fn current_count(&self) -> Result { - self.globals.get(COUNTER)?.map_or(Ok(0_u64), |bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Count has invalid bytes.")) - }) + self.db.current_count() + } + + pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { + self.db.watch(user_id, device_id).await + } + + pub fn cleanup(&self) -> Result<()> { + self.db.cleanup() + } + + pub fn memory_usage(&self) -> Result { + self.db.memory_usage() } pub fn server_name(&self) -> &ServerName { @@ -296,38 +302,7 @@ impl Service { origin: &ServerName, new_keys: ServerSigningKeys, ) -> Result, VerifyKey>> { - // Not atomic, but this is not critical - let signingkeys = self.server_signingkeys.get(origin.as_bytes())?; - - let mut keys = signingkeys - .and_then(|keys| serde_json::from_slice(&keys).ok()) - .unwrap_or_else(|| { - // Just insert "now", it doesn't matter - ServerSigningKeys::new(origin.to_owned(), MilliSecondsSinceUnixEpoch::now()) - }); - - let ServerSigningKeys { - verify_keys, - old_verify_keys, - .. 
- } = new_keys; - - keys.verify_keys.extend(verify_keys.into_iter()); - keys.old_verify_keys.extend(old_verify_keys.into_iter()); - - self.server_signingkeys.insert( - origin.as_bytes(), - &serde_json::to_vec(&keys).expect("serversigningkeys can be serialized"), - )?; - - let mut tree = keys.verify_keys; - tree.extend( - keys.old_verify_keys - .into_iter() - .map(|old| (old.0, VerifyKey::new(old.1.key))), - ); - - Ok(tree) + self.db.add_signing_key(origin, new_keys) } /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server. @@ -335,35 +310,15 @@ impl Service { &self, origin: &ServerName, ) -> Result, VerifyKey>> { - let signingkeys = self - .server_signingkeys - .get(origin.as_bytes())? - .and_then(|bytes| serde_json::from_slice(&bytes).ok()) - .map(|keys: ServerSigningKeys| { - let mut tree = keys.verify_keys; - tree.extend( - keys.old_verify_keys - .into_iter() - .map(|old| (old.0, VerifyKey::new(old.1.key))), - ); - tree - }) - .unwrap_or_else(BTreeMap::new); - - Ok(signingkeys) + self.db.signing_keys_for(origin) } pub fn database_version(&self) -> Result { - self.globals.get(b"version")?.map_or(Ok(0), |version| { - utils::u64_from_bytes(&version) - .map_err(|_| Error::bad_database("Database version id is invalid.")) - }) + self.db.database_version() } pub fn bump_database_version(&self, new_version: u64) -> Result<()> { - self.globals - .insert(b"version", &new_version.to_be_bytes())?; - Ok(()) + self.db.bump_database_version(new_version) } pub fn get_media_folder(&self) -> PathBuf { diff --git a/src/service/key_backups/data.rs b/src/service/key_backups/data.rs index 6f6359eb..226b1e16 100644 --- a/src/service/key_backups/data.rs +++ b/src/service/key_backups/data.rs @@ -3,7 +3,7 @@ use std::collections::BTreeMap; use ruma::{api::client::backup::{BackupAlgorithm, RoomKeyBackup, KeyBackupData}, serde::Raw, UserId, RoomId}; use crate::Result; -pub trait Data { +pub trait Data: Send + Sync { fn create_backup( &self, user_id: &UserId, diff --git a/src/service/media/data.rs b/src/service/media/data.rs index 94975de7..2e24049a 100644 --- a/src/service/media/data.rs +++ b/src/service/media/data.rs @@ -1,7 +1,7 @@ use crate::Result; -pub trait Data { - fn create_file_metadata(&self, mxc: String, width: u32, height: u32, content_disposition: &Option<&str>, content_type: &Option<&str>) -> Result>; +pub trait Data: Send + Sync { + fn create_file_metadata(&self, mxc: String, width: u32, height: u32, content_disposition: Option<&str>, content_type: Option<&str>) -> Result>; /// Returns content_disposition, content_type and the metadata key. 
fn search_file_metadata(&self, mxc: String, width: u32, height: u32) -> Result<(Option, Option, Vec)>; diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index d61292bb..f86251fa 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -24,8 +24,8 @@ impl Service { pub async fn create( &self, mxc: String, - content_disposition: &Option<&str>, - content_type: &Option<&str>, + content_disposition: Option<&str>, + content_type: Option<&str>, file: &[u8], ) -> Result<()> { // Width, Height = 0 if it's not a thumbnail @@ -42,8 +42,8 @@ impl Service { pub async fn upload_thumbnail( &self, mxc: String, - content_disposition: &Option<&str>, - content_type: &Option<&str>, + content_disposition: Option<&str>, + content_type: Option<&str>, width: u32, height: u32, file: &[u8], @@ -108,7 +108,7 @@ impl Service { .thumbnail_properties(width, height) .unwrap_or((0, 0, false)); // 0, 0 because that's the original file - if let Ok((content_disposition, content_type, key)) = self.db.search_file_metadata(mxc, width, height) { + if let Ok((content_disposition, content_type, key)) = self.db.search_file_metadata(mxc.clone(), width, height) { // Using saved thumbnail let path = services().globals.get_media_file(&key); let mut file = Vec::new(); @@ -119,7 +119,7 @@ impl Service { content_type, file: file.to_vec(), })) - } else if let Ok((content_disposition, content_type, key)) = self.db.search_file_metadata(mxc, 0, 0) { + } else if let Ok((content_disposition, content_type, key)) = self.db.search_file_metadata(mxc.clone(), 0, 0) { // Generate a thumbnail let path = services().globals.get_media_file(&key); let mut file = Vec::new(); @@ -180,7 +180,7 @@ impl Service { thumbnail.write_to(&mut thumbnail_bytes, image::ImageOutputFormat::Png)?; // Save thumbnail in database so we don't have to generate it again next time - let thumbnail_key = self.db.create_file_metadata(mxc, width, height, content_disposition, content_type)?; + let thumbnail_key = self.db.create_file_metadata(mxc, width, height, content_disposition.as_deref(), content_type.as_deref())?; let path = services().globals.get_media_file(&thumbnail_key); let mut f = File::create(path).await?; diff --git a/src/service/mod.rs b/src/service/mod.rs index 47d4651d..a1a728c5 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -30,20 +30,20 @@ pub struct Services { } impl Services { - pub fn build(db: Arc) { + pub fn build(db: Arc) -> Self { Self { - appservice: appservice::Service { db: Arc::clone(&db) }, - pusher: appservice::Service { db: Arc::clone(&db) }, - rooms: appservice::Service { db: Arc::clone(&db) }, - transaction_ids: appservice::Service { db: Arc::clone(&db) }, - uiaa: appservice::Service { db: Arc::clone(&db) }, - users: appservice::Service { db: Arc::clone(&db) }, - account_data: appservice::Service { db: Arc::clone(&db) }, - admin: appservice::Service { db: Arc::clone(&db) }, - globals: appservice::Service { db: Arc::clone(&db) }, - key_backups: appservice::Service { db: Arc::clone(&db) }, - media: appservice::Service { db: Arc::clone(&db) }, - sending: appservice::Service { db: Arc::clone(&db) }, + appservice: appservice::Service { db: db.clone() }, + pusher: pusher::Service { db: db.clone() }, + rooms: rooms::Service { db: Arc::clone(&db) }, + transaction_ids: transaction_ids::Service { db: Arc::clone(&db) }, + uiaa: uiaa::Service { db: Arc::clone(&db) }, + users: users::Service { db: Arc::clone(&db) }, + account_data: account_data::Service { db: Arc::clone(&db) }, + admin: admin::Service { db: 
Arc::clone(&db) }, + globals: globals::Service { db: Arc::clone(&db) }, + key_backups: key_backups::Service { db: Arc::clone(&db) }, + media: media::Service { db: Arc::clone(&db) }, + sending: sending::Service { db: Arc::clone(&db) }, } } } diff --git a/src/service/pdu.rs b/src/service/pdu.rs index 2ed79f2c..3be3300c 100644 --- a/src/service/pdu.rs +++ b/src/service/pdu.rs @@ -343,7 +343,7 @@ pub(crate) fn gen_event_id_canonical_json( .and_then(|id| RoomId::parse(id.as_str()?).ok()) .ok_or_else(|| Error::bad_database("PDU in db has invalid room_id."))?; - let room_version_id = services().rooms.get_room_version(&room_id); + let room_version_id = services().rooms.state.get_room_version(&room_id); let event_id = format!( "${}", diff --git a/src/service/pusher/data.rs b/src/service/pusher/data.rs index 3951da79..305a5383 100644 --- a/src/service/pusher/data.rs +++ b/src/service/pusher/data.rs @@ -1,7 +1,7 @@ use ruma::{UserId, api::client::push::{set_pusher, get_pushers}}; use crate::Result; -pub trait Data { +pub trait Data: Send + Sync { fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()>; fn get_pusher(&self, senderkey: &[u8]) -> Result>; diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index af30ca47..e65c57ab 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -3,6 +3,7 @@ pub use data::Data; use crate::{services, Error, PduEvent, Result}; use bytes::BytesMut; +use ruma::api::IncomingResponse; use ruma::{ api::{ client::push::{get_pushers, set_pusher, PusherKind}, @@ -20,11 +21,12 @@ use ruma::{ serde::Raw, uint, RoomId, UInt, UserId, }; +use std::sync::Arc; use std::{fmt::Debug, mem}; use tracing::{error, info, warn}; pub struct Service { - db: Box, + db: Arc, } impl Service { @@ -47,8 +49,9 @@ impl Service { self.db.get_pusher_senderkeys(sender) } - #[tracing::instrument(skip(destination, request))] + #[tracing::instrument(skip(self, destination, request))] pub async fn send_request( + &self, destination: &str, request: T, ) -> Result @@ -124,7 +127,7 @@ impl Service { } } - #[tracing::instrument(skip(user, unread, pusher, ruleset, pdu))] + #[tracing::instrument(skip(self, user, unread, pusher, ruleset, pdu))] pub async fn send_push_notice( &self, user: &UserId, @@ -181,7 +184,7 @@ impl Service { Ok(()) } - #[tracing::instrument(skip(user, ruleset, pdu))] + #[tracing::instrument(skip(self, user, ruleset, pdu))] pub fn get_actions<'a>( &self, user: &UserId, @@ -204,7 +207,7 @@ impl Service { Ok(ruleset.get_actions(pdu, &ctx)) } - #[tracing::instrument(skip(unread, pusher, tweaks, event))] + #[tracing::instrument(skip(self, unread, pusher, tweaks, event))] async fn send_notice( &self, unread: UInt, diff --git a/src/service/rooms/alias/data.rs b/src/service/rooms/alias/data.rs index 81022096..26bffae2 100644 --- a/src/service/rooms/alias/data.rs +++ b/src/service/rooms/alias/data.rs @@ -1,7 +1,7 @@ use ruma::{RoomId, RoomAliasId}; use crate::Result; -pub trait Data { +pub trait Data: Send + Sync { /// Creates or updates the alias to the given room id. 
fn set_alias( &self, diff --git a/src/service/rooms/auth_chain/data.rs b/src/service/rooms/auth_chain/data.rs index e4e8550b..13fac2dc 100644 --- a/src/service/rooms/auth_chain/data.rs +++ b/src/service/rooms/auth_chain/data.rs @@ -1,7 +1,7 @@ -use std::collections::HashSet; +use std::{collections::HashSet, sync::Arc}; use crate::Result; -pub trait Data { - fn get_cached_eventid_authchain(&self, shorteventid: u64) -> Result>>; - fn cache_eventid_authchain(&self, shorteventid: u64, auth_chain: &HashSet) -> Result<()>; +pub trait Data: Send + Sync { + fn get_cached_eventid_authchain(&self, shorteventid: &[u64]) -> Result>>>; + fn cache_auth_chain(&self, shorteventid: Vec, auth_chain: Arc>) -> Result<()>; } diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index 26a3f3f0..5fe0e3e8 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -15,41 +15,11 @@ impl Service { &'a self, key: &[u64], ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key.to_be_bytes()) { - return Ok(Some(Arc::clone(result))); - } - - // We only save auth chains for single events in the db - if key.len() == 1 { - // Check DB cache - if let Some(chain) = self.db.get_cached_eventid_authchain(key[0]) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } - } - - Ok(None) + self.db.get_cached_eventid_authchain(key) } #[tracing::instrument(skip(self))] pub fn cache_auth_chain(&self, key: Vec, auth_chain: Arc>) -> Result<()> { - // Only persist single events in db - if key.len() == 1 { - self.db.cache_auth_chain(key[0], auth_chain)?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, auth_chain); - - Ok(()) + self.db.cache_auth_chain(key, auth_chain) } } diff --git a/src/service/rooms/directory/data.rs b/src/service/rooms/directory/data.rs index 13767217..b4e020d7 100644 --- a/src/service/rooms/directory/data.rs +++ b/src/service/rooms/directory/data.rs @@ -1,7 +1,7 @@ use ruma::RoomId; use crate::Result; -pub trait Data { +pub trait Data: Send + Sync { /// Adds the room to the public room directory fn set_public(&self, room_id: &RoomId) -> Result<()>; diff --git a/src/service/rooms/edus/presence/data.rs b/src/service/rooms/edus/presence/data.rs index ca0e2410..f7592555 100644 --- a/src/service/rooms/edus/presence/data.rs +++ b/src/service/rooms/edus/presence/data.rs @@ -3,7 +3,7 @@ use std::collections::HashMap; use ruma::{UserId, RoomId, events::presence::PresenceEvent}; use crate::Result; -pub trait Data { +pub trait Data: Send + Sync { /// Adds a presence event which will be saved until a new event replaces it. /// /// Note: This method takes a RoomId because presence updates are always bound to rooms to diff --git a/src/service/rooms/edus/read_receipt/data.rs b/src/service/rooms/edus/read_receipt/data.rs index e8ed9656..5ebd89d6 100644 --- a/src/service/rooms/edus/read_receipt/data.rs +++ b/src/service/rooms/edus/read_receipt/data.rs @@ -1,7 +1,7 @@ use ruma::{RoomId, events::receipt::ReceiptEvent, UserId, serde::Raw}; use crate::Result; -pub trait Data { +pub trait Data: Send + Sync { /// Replaces the previous read receipt. 
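// A minimal illustrative sketch (separate from the patch) of what the repeated
// `pub trait Data: Send + Sync` bounds buy: each service can hold its storage backend as a
// shareable trait object and stay backend-agnostic. The `set_public`/`is_public` pair and
// the in-memory backend below are simplified stand-ins for the real directory trait.
use std::collections::HashSet;
use std::sync::{Arc, Mutex};

pub trait Data: Send + Sync {
    fn set_public(&self, room_id: &str);
    fn is_public(&self, room_id: &str) -> bool;
}

pub struct Service {
    db: Arc<dyn Data>, // Send + Sync makes the backend shareable across threads and tasks
}

impl Service {
    pub fn set_public(&self, room_id: &str) {
        self.db.set_public(room_id)
    }
    pub fn is_public(&self, room_id: &str) -> bool {
        self.db.is_public(room_id)
    }
}

#[derive(Default)]
struct MemDb(Mutex<HashSet<String>>);

impl Data for MemDb {
    fn set_public(&self, room_id: &str) {
        self.0.lock().unwrap().insert(room_id.to_owned());
    }
    fn is_public(&self, room_id: &str) -> bool {
        self.0.lock().unwrap().contains(room_id)
    }
}

fn main() {
    let service = Service { db: Arc::new(MemDb::default()) };
    service.set_public("!room:example.org");
    assert!(service.is_public("!room:example.org"));
}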
fn readreceipt_update( &self, diff --git a/src/service/rooms/edus/typing/data.rs b/src/service/rooms/edus/typing/data.rs index ec0be466..426d4e06 100644 --- a/src/service/rooms/edus/typing/data.rs +++ b/src/service/rooms/edus/typing/data.rs @@ -2,7 +2,7 @@ use std::collections::HashSet; use crate::Result; use ruma::{UserId, RoomId}; -pub trait Data { +pub trait Data: Send + Sync { /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is /// called. fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()>; diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index e2291126..ac3cca6a 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -117,7 +117,7 @@ impl Service { room_id, pub_key_map, incoming_pdu.prev_events.clone(), - ).await; + ).await?; let mut errors = 0; for prev_id in dbg!(sorted_prev_events) { @@ -240,7 +240,7 @@ impl Service { r } - #[tracing::instrument(skip(create_event, value, pub_key_map))] + #[tracing::instrument(skip(self, create_event, value, pub_key_map))] fn handle_outlier_pdu<'a>( &self, origin: &'a ServerName, @@ -272,7 +272,7 @@ impl Service { RoomVersion::new(room_version_id).expect("room version is supported"); let mut val = match ruma::signatures::verify_event( - &*pub_key_map.read().map_err(|_| "RwLock is poisoned.")?, + &*pub_key_map.read().expect("RwLock is poisoned."), &value, room_version_id, ) { @@ -301,7 +301,7 @@ impl Service { let incoming_pdu = serde_json::from_value::( serde_json::to_value(&val).expect("CanonicalJsonObj is a valid JsonValue"), ) - .map_err(|_| "Event is not a valid PDU.".to_owned())?; + .map_err(|_| Error::bad_database("Event is not a valid PDU."))?; // 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" @@ -329,7 +329,7 @@ impl Service { // Build map of auth events let mut auth_events = HashMap::new(); for id in &incoming_pdu.auth_events { - let auth_event = match services().rooms.get_pdu(id)? { + let auth_event = match services().rooms.timeline.get_pdu(id)? { Some(e) => e, None => { warn!("Could not find auth event {}", id); @@ -373,7 +373,8 @@ impl Service { &incoming_pdu, None::, // TODO: third party invite |k, s| auth_events.get(&(k.to_string().into(), s.to_owned())), - )? { + ).map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed"))? + { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Auth check failed", @@ -385,6 +386,7 @@ impl Service { // 7. Persist the event as an outlier. services() .rooms + .outlier .add_pdu_outlier(&incoming_pdu.event_id, &val)?; info!("Added pdu as outlier."); @@ -393,7 +395,7 @@ impl Service { }) } - #[tracing::instrument(skip(incoming_pdu, val, create_event, pub_key_map))] + #[tracing::instrument(skip(self, incoming_pdu, val, create_event, pub_key_map))] pub async fn upgrade_outlier_to_timeline_pdu( &self, incoming_pdu: Arc, @@ -412,7 +414,7 @@ impl Service { .rooms .pdu_metadata.is_event_soft_failed(&incoming_pdu.event_id)? 
{ - return Err("Event has been soft failed".into()); + return Err(Error::BadRequest(ErrorKind::InvalidParam, "Event has been soft failed")); } info!("Upgrading {} to timeline pdu", incoming_pdu.event_id); @@ -1130,7 +1132,8 @@ impl Service { room_id: &RoomId, pub_key_map: &RwLock>>, initial_set: Vec>, - ) -> Vec<(Arc, HashMap, (Arc, BTreeMap)>)> { + ) -> Result<(Vec>, HashMap, +(Arc, BTreeMap)>)> { let mut graph: HashMap, _> = HashMap::new(); let mut eventid_info = HashMap::new(); let mut todo_outlier_stack: Vec> = initial_set; @@ -1164,6 +1167,7 @@ impl Service { if let Some(json) = json_opt.or_else(|| { services() .rooms + .outlier .get_outlier_pdu_json(&prev_event_id) .ok() .flatten() @@ -1209,9 +1213,9 @@ impl Service { .map_or_else(|| uint!(0), |info| info.0.origin_server_ts), ), )) - })?; + }).map_err(|_| Error::bad_database("Error sorting prev events"))?; - (sorted, eventid_info) + Ok((sorted, eventid_info)) } #[tracing::instrument(skip_all)] diff --git a/src/service/rooms/lazy_loading/data.rs b/src/service/rooms/lazy_loading/data.rs index f1019c13..524071c3 100644 --- a/src/service/rooms/lazy_loading/data.rs +++ b/src/service/rooms/lazy_loading/data.rs @@ -1,7 +1,7 @@ use ruma::{RoomId, DeviceId, UserId}; use crate::Result; -pub trait Data { +pub trait Data: Send + Sync { fn lazy_load_was_sent_before( &self, user_id: &UserId, diff --git a/src/service/rooms/metadata/data.rs b/src/service/rooms/metadata/data.rs index 9b1ce079..9444db41 100644 --- a/src/service/rooms/metadata/data.rs +++ b/src/service/rooms/metadata/data.rs @@ -1,6 +1,6 @@ use ruma::RoomId; use crate::Result; -pub trait Data { +pub trait Data: Send + Sync { fn exists(&self, room_id: &RoomId) -> Result; } diff --git a/src/service/rooms/outlier/data.rs b/src/service/rooms/outlier/data.rs index 17d0f7b4..edc7c4fd 100644 --- a/src/service/rooms/outlier/data.rs +++ b/src/service/rooms/outlier/data.rs @@ -2,7 +2,7 @@ use ruma::{signatures::CanonicalJsonObject, EventId}; use crate::{PduEvent, Result}; -pub trait Data { +pub trait Data: Send + Sync { fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result>; fn get_outlier_pdu(&self, event_id: &EventId) -> Result>; fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()>; diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs index fb839023..9bc49cfb 100644 --- a/src/service/rooms/pdu_metadata/data.rs +++ b/src/service/rooms/pdu_metadata/data.rs @@ -3,7 +3,7 @@ use std::sync::Arc; use ruma::{EventId, RoomId}; use crate::Result; -pub trait Data { +pub trait Data: Send + Sync { fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()>; fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result; fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()>; diff --git a/src/service/rooms/search/data.rs b/src/service/rooms/search/data.rs index b62904c1..0c14ffe6 100644 --- a/src/service/rooms/search/data.rs +++ b/src/service/rooms/search/data.rs @@ -1,7 +1,7 @@ use ruma::RoomId; use crate::Result; -pub trait Data { +pub trait Data: Send + Sync { fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: String) -> Result<()>; fn search_pdus<'a>( diff --git a/src/service/rooms/short/data.rs b/src/service/rooms/short/data.rs index 3b1c3117..bc2b28f0 100644 --- a/src/service/rooms/short/data.rs +++ b/src/service/rooms/short/data.rs @@ -1,2 +1,2 @@ -pub trait Data { +pub trait Data: Send + Sync { } diff --git a/src/service/rooms/state/data.rs 
b/src/service/rooms/state/data.rs index 7008d86f..20c177a2 100644 --- a/src/service/rooms/state/data.rs +++ b/src/service/rooms/state/data.rs @@ -1,9 +1,10 @@ use std::sync::Arc; -use std::{sync::MutexGuard, collections::HashSet}; +use std::collections::HashSet; use crate::Result; use ruma::{EventId, RoomId}; +use tokio::sync::MutexGuard; -pub trait Data { +pub trait Data: Send + Sync { /// Returns the last state hash key added to the db for the given room. fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result>; @@ -21,7 +22,7 @@ pub trait Data { /// Replace the forward extremities of the room. fn set_forward_extremities<'a>(&self, room_id: &RoomId, - event_ids: &dyn Iterator, + event_ids: &mut dyn Iterator, _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()>; } diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 979060d9..53859785 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -16,7 +16,7 @@ pub struct Service { impl Service { /// Set the room to the given statehash and update caches. - pub fn force_state( + pub async fn force_state( &self, room_id: &RoomId, shortstatehash: u64, @@ -28,7 +28,7 @@ impl Service { .roomid_mutex_state .write() .unwrap() - .entry(body.room_id.to_owned()) + .entry(room_id.to_owned()) .or_default(), ); let state_lock = mutex_state.lock().await; @@ -74,10 +74,10 @@ impl Service { Err(_) => continue, }; - services().room.state_cache.update_membership(room_id, &user_id, membership, &pdu.sender, None, false)?; + services().rooms.state_cache.update_membership(room_id, &user_id, membership, &pdu.sender, None, false)?; } - services().room.state_cache.update_joined_count(room_id)?; + services().rooms.state_cache.update_joined_count(room_id)?; self.db.set_room_state(room_id, shortstatehash, &state_lock); diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs index 48031e49..14f96bc8 100644 --- a/src/service/rooms/state_accessor/data.rs +++ b/src/service/rooms/state_accessor/data.rs @@ -6,7 +6,7 @@ use ruma::{EventId, events::StateEventType, RoomId}; use crate::{Result, PduEvent}; #[async_trait] -pub trait Data { +pub trait Data: Send + Sync { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. 
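// A minimal illustrative sketch (separate from the patch) of the `#[async_trait]` usage
// above: the attribute desugars `async fn` trait methods into methods returning boxed
// futures, which keeps the trait object-safe so it can live behind `dyn Data`. The types
// below are simplified stand-ins for the real shortstatehash and event-id types.
use async_trait::async_trait;
use std::collections::HashMap;
use std::sync::Arc;

#[async_trait]
pub trait Data: Send + Sync {
    async fn state_full_ids(&self, shortstatehash: u64) -> HashMap<u64, Arc<str>>;
}

struct Db;

#[async_trait]
impl Data for Db {
    async fn state_full_ids(&self, shortstatehash: u64) -> HashMap<u64, Arc<str>> {
        // A real implementation would load and decompress the state snapshot from disk.
        HashMap::from([(shortstatehash, Arc::from("$event:example.org"))])
    }
}

#[tokio::main]
async fn main() {
    let db: Arc<dyn Data> = Arc::new(Db);
    assert_eq!(db.state_full_ids(1).await.len(), 1);
}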
async fn state_full_ids(&self, shortstatehash: u64) -> Result>>; diff --git a/src/service/rooms/state_cache/data.rs b/src/service/rooms/state_cache/data.rs index b45b2ea0..b9db7217 100644 --- a/src/service/rooms/state_cache/data.rs +++ b/src/service/rooms/state_cache/data.rs @@ -1,7 +1,7 @@ use ruma::{UserId, RoomId, serde::Raw, events::AnyStrippedStateEvent}; use crate::Result; -pub trait Data { +pub trait Data: Send + Sync { fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; fn mark_as_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; fn mark_as_invited(&self, user_id: &UserId, room_id: &RoomId, last_state: Option>>) -> Result<()>; diff --git a/src/service/rooms/state_compressor/data.rs b/src/service/rooms/state_compressor/data.rs index cd872422..ce164c6d 100644 --- a/src/service/rooms/state_compressor/data.rs +++ b/src/service/rooms/state_compressor/data.rs @@ -9,7 +9,7 @@ pub struct StateDiff { pub removed: HashSet, } -pub trait Data { +pub trait Data: Send + Sync { fn get_statediff(&self, shortstatehash: u64) -> Result; fn save_statediff(&self, shortstatehash: u64, diff: StateDiff) -> Result<()>; } diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index 85bedc69..d073e865 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -4,7 +4,7 @@ use ruma::{signatures::CanonicalJsonObject, EventId, UserId, RoomId}; use crate::{Result, PduEvent}; -pub trait Data { +pub trait Data: Send + Sync { fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result; /// Returns the `count` of this pdu's id. diff --git a/src/service/rooms/user/data.rs b/src/service/rooms/user/data.rs index a5657bc1..6b7ebc72 100644 --- a/src/service/rooms/user/data.rs +++ b/src/service/rooms/user/data.rs @@ -1,7 +1,7 @@ use ruma::{UserId, RoomId}; use crate::Result; -pub trait Data { +pub trait Data: Send + Sync { fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result; diff --git a/src/service/transaction_ids/data.rs b/src/service/transaction_ids/data.rs index 6e71dd46..c5ff05c0 100644 --- a/src/service/transaction_ids/data.rs +++ b/src/service/transaction_ids/data.rs @@ -1,7 +1,7 @@ use ruma::{DeviceId, UserId, TransactionId}; use crate::Result; -pub trait Data { +pub trait Data: Send + Sync { fn add_txnid( &self, user_id: &UserId, diff --git a/src/service/uiaa/data.rs b/src/service/uiaa/data.rs index d7fa79d2..091f0641 100644 --- a/src/service/uiaa/data.rs +++ b/src/service/uiaa/data.rs @@ -1,7 +1,7 @@ use ruma::{api::client::uiaa::UiaaInfo, DeviceId, UserId, signatures::CanonicalJsonValue}; use crate::Result; -pub trait Data { +pub trait Data: Send + Sync { fn set_uiaa_request( &self, user_id: &UserId, diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 8adc9366..b13ae1f2 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -223,18 +223,18 @@ impl Service { self.db.get_device_keys(user_id, device_id) } - pub fn get_master_key bool>( + pub fn get_master_key( &self, user_id: &UserId, - allowed_signatures: F, + allowed_signatures: &dyn Fn(&UserId) -> bool, ) -> Result>> { self.db.get_master_key(user_id, allowed_signatures) } - pub fn get_self_signing_key bool>( + pub fn get_self_signing_key( &self, user_id: &UserId, - allowed_signatures: F, + allowed_signatures: &dyn Fn(&UserId) -> bool, ) -> Result>> { self.db.get_self_signing_key(user_id, 
allowed_signatures) } From 44fe6d1554eaa0a15314686974ab01f48c836588 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 5 Oct 2022 18:36:12 +0200 Subject: [PATCH 389/445] 127 errors left --- src/api/client_server/membership.rs | 2 +- src/api/server_server.rs | 133 +---------- src/database/key_value/account_data.rs | 4 +- src/database/key_value/appservice.rs | 2 - src/database/key_value/globals.rs | 4 +- src/database/key_value/key_backups.rs | 4 +- src/database/key_value/media.rs | 4 +- src/database/key_value/pusher.rs | 4 +- src/database/key_value/rooms/alias.rs | 4 +- src/database/key_value/rooms/auth_chain.rs | 2 +- src/database/key_value/rooms/directory.rs | 4 +- src/database/key_value/rooms/edus/mod.rs | 4 +- src/database/key_value/rooms/edus/presence.rs | 4 +- .../key_value/rooms/edus/read_receipt.rs | 4 +- src/database/key_value/rooms/edus/typing.rs | 4 +- src/database/key_value/rooms/lazy_load.rs | 4 +- src/database/key_value/rooms/metadata.rs | 18 +- src/database/key_value/rooms/mod.rs | 4 +- src/database/key_value/rooms/outlier.rs | 4 +- src/database/key_value/rooms/pdu_metadata.rs | 2 +- src/database/key_value/rooms/search.rs | 4 +- src/database/key_value/rooms/short.rs | 225 +++++++++++++++++- src/database/key_value/rooms/state.rs | 2 +- .../key_value/rooms/state_accessor.rs | 2 +- src/database/key_value/rooms/state_cache.rs | 4 +- .../key_value/rooms/state_compressor.rs | 4 +- src/database/key_value/rooms/timeline.rs | 22 +- src/database/key_value/rooms/user.rs | 8 +- src/database/key_value/transaction_ids.rs | 4 +- src/database/key_value/uiaa.rs | 4 +- src/database/key_value/users.rs | 6 +- src/database/mod.rs | 24 +- src/service/account_data/mod.rs | 2 +- src/service/admin/mod.rs | 12 +- src/service/globals/mod.rs | 4 +- src/service/key_backups/mod.rs | 2 +- src/service/media/mod.rs | 2 +- src/service/mod.rs | 86 +++++-- src/service/rooms/alias/mod.rs | 4 +- src/service/rooms/auth_chain/mod.rs | 135 ++++++++++- src/service/rooms/directory/mod.rs | 4 +- src/service/rooms/edus/presence/mod.rs | 4 +- src/service/rooms/edus/read_receipt/mod.rs | 4 +- src/service/rooms/edus/typing/mod.rs | 4 +- src/service/rooms/event_handler/mod.rs | 84 ++++--- src/service/rooms/lazy_loading/mod.rs | 4 +- src/service/rooms/metadata/data.rs | 2 + src/service/rooms/metadata/mod.rs | 12 +- src/service/rooms/outlier/mod.rs | 4 +- src/service/rooms/pdu_metadata/mod.rs | 2 +- src/service/rooms/search/mod.rs | 4 +- src/service/rooms/short/data.rs | 38 +++ src/service/rooms/short/mod.rs | 190 +-------------- src/service/rooms/state/mod.rs | 99 +++++--- src/service/rooms/state_accessor/mod.rs | 4 +- src/service/rooms/state_cache/mod.rs | 68 ++++-- src/service/rooms/state_compressor/mod.rs | 6 +- src/service/rooms/timeline/data.rs | 1 + src/service/rooms/timeline/mod.rs | 26 +- src/service/rooms/user/mod.rs | 4 +- src/service/sending/mod.rs | 8 - src/service/transaction_ids/mod.rs | 4 +- src/service/uiaa/mod.rs | 4 +- src/service/users/mod.rs | 4 +- src/utils/mod.rs | 12 +- 65 files changed, 810 insertions(+), 557 deletions(-) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index 58ed0401..f07f2adb 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -654,7 +654,7 @@ async fn join_room_by_id_helper( // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist - services().rooms.state.set_room_state(room_id, shortstatehash)?; 
+ services().rooms.state.set_room_state(room_id, shortstatehash, &state_lock)?; let statehashid = services().rooms.state.append_to_state(&parsed_pdu)?; } else { diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 647f4574..11f7ec34 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -857,131 +857,6 @@ pub async fn send_transaction_message_route( Ok(send_transaction_message::v1::Response { pdus: resolved_map.into_iter().map(|(e, r)| (e, r.map_err(|e| e.to_string()))).collect() }) } -#[tracing::instrument(skip(starting_events))] -pub(crate) async fn get_auth_chain<'a>( - room_id: &RoomId, - starting_events: Vec>, -) -> Result> + 'a> { - const NUM_BUCKETS: usize = 50; - - let mut buckets = vec![BTreeSet::new(); NUM_BUCKETS]; - - let mut i = 0; - for id in starting_events { - let short = services().rooms.short.get_or_create_shorteventid(&id)?; - let bucket_id = (short % NUM_BUCKETS as u64) as usize; - buckets[bucket_id].insert((short, id.clone())); - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - } - - let mut full_auth_chain = HashSet::new(); - - let mut hits = 0; - let mut misses = 0; - for chunk in buckets { - if chunk.is_empty() { - continue; - } - - let chunk_key: Vec = chunk.iter().map(|(short, _)| short).copied().collect(); - if let Some(cached) = services().rooms.auth_chain.get_cached_eventid_authchain(&chunk_key)? { - hits += 1; - full_auth_chain.extend(cached.iter().copied()); - continue; - } - misses += 1; - - let mut chunk_cache = HashSet::new(); - let mut hits2 = 0; - let mut misses2 = 0; - let mut i = 0; - for (sevent_id, event_id) in chunk { - if let Some(cached) = services().rooms.auth_chain.get_cached_eventid_authchain(&[sevent_id])? { - hits2 += 1; - chunk_cache.extend(cached.iter().copied()); - } else { - misses2 += 1; - let auth_chain = Arc::new(get_auth_chain_inner(room_id, &event_id)?); - services().rooms - .auth_chain - .cache_auth_chain(vec![sevent_id], Arc::clone(&auth_chain))?; - println!( - "cache missed event {} with auth chain len {}", - event_id, - auth_chain.len() - ); - chunk_cache.extend(auth_chain.iter()); - - i += 1; - if i % 100 == 0 { - tokio::task::yield_now().await; - } - }; - } - println!( - "chunk missed with len {}, event hits2: {}, misses2: {}", - chunk_cache.len(), - hits2, - misses2 - ); - let chunk_cache = Arc::new(chunk_cache); - services().rooms - .auth_chain.cache_auth_chain(chunk_key, Arc::clone(&chunk_cache))?; - full_auth_chain.extend(chunk_cache.iter()); - } - - println!( - "total: {}, chunk hits: {}, misses: {}", - full_auth_chain.len(), - hits, - misses - ); - - Ok(full_auth_chain - .into_iter() - .filter_map(move |sid| services().rooms.short.get_eventid_from_short(sid).ok())) -} - -#[tracing::instrument(skip(event_id))] -fn get_auth_chain_inner( - room_id: &RoomId, - event_id: &EventId, -) -> Result> { - let mut todo = vec![Arc::from(event_id)]; - let mut found = HashSet::new(); - - while let Some(event_id) = todo.pop() { - match services().rooms.timeline.get_pdu(&event_id) { - Ok(Some(pdu)) => { - if pdu.room_id != room_id { - return Err(Error::BadRequest(ErrorKind::Forbidden, "Evil event in db")); - } - for auth_event in &pdu.auth_events { - let sauthevent = services() - .rooms.short - .get_or_create_shorteventid(auth_event)?; - - if !found.contains(&sauthevent) { - found.insert(sauthevent); - todo.push(auth_event.clone()); - } - } - } - Ok(None) => { - warn!("Could not find pdu mentioned in auth events: {}", event_id); - } - Err(e) => { - warn!("Could not load event in auth 
chain: {} {}", event_id, e); - } - } - } - - Ok(found) -} - /// # `GET /_matrix/federation/v1/event/{eventId}` /// /// Retrieves a single event from the server. @@ -1135,7 +1010,7 @@ pub async fn get_event_authorization_route( let room_id = <&RoomId>::try_from(room_id_str) .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - let auth_chain_ids = get_auth_chain(room_id, vec![Arc::from(&*body.event_id)]).await?; + let auth_chain_ids = services().rooms.auth_chain.get_auth_chain(room_id, vec![Arc::from(&*body.event_id)]).await?; Ok(get_event_authorization::v1::Response { auth_chain: auth_chain_ids @@ -1190,7 +1065,7 @@ pub async fn get_room_state_route( .collect(); let auth_chain_ids = - get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)]).await?; + services().rooms.auth_chain.get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)]).await?; Ok(get_room_state::v1::Response { auth_chain: auth_chain_ids @@ -1246,7 +1121,7 @@ pub async fn get_room_state_ids_route( .collect(); let auth_chain_ids = - get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)]).await?; + services().rooms.auth_chain.get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)]).await?; Ok(get_room_state_ids::v1::Response { auth_chain_ids: auth_chain_ids.map(|id| (*id).to_owned()).collect(), @@ -1449,7 +1324,7 @@ async fn create_join_event( drop(mutex_lock); let state_ids = services().rooms.state_accessor.state_full_ids(shortstatehash).await?; - let auth_chain_ids = get_auth_chain( + let auth_chain_ids = services().rooms.auth_chain.get_auth_chain( room_id, state_ids.iter().map(|(_, id)| id.clone()).collect(), ) diff --git a/src/database/key_value/account_data.rs b/src/database/key_value/account_data.rs index f0325d2b..5674ac07 100644 --- a/src/database/key_value/account_data.rs +++ b/src/database/key_value/account_data.rs @@ -1,11 +1,11 @@ -use std::{collections::HashMap, sync::Arc}; +use std::collections::HashMap; use ruma::{UserId, DeviceId, signatures::CanonicalJsonValue, api::client::{uiaa::UiaaInfo, error::ErrorKind}, events::{RoomAccountDataEventType, AnyEphemeralRoomEvent}, serde::Raw, RoomId}; use serde::{Serialize, de::DeserializeOwned}; use crate::{Result, database::KeyValueDatabase, service, Error, utils, services}; -impl service::account_data::Data for Arc { +impl service::account_data::Data for KeyValueDatabase { /// Places one event in the account data of the user and removes the previous entry. 
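// A minimal illustrative sketch (separate from the patch) of the auth-chain walk that the
// removed `get_auth_chain_inner` above performs: a depth-first traversal over each event's
// auth_events, collecting everything reachable. Plain u64 ids stand in for the real
// shorteventid lookups, and missing events are simply skipped.
use std::collections::{HashMap, HashSet};

fn auth_chain_inner(events: &HashMap<u64, Vec<u64>>, event_id: u64) -> HashSet<u64> {
    let mut todo = vec![event_id];
    let mut found = HashSet::new();

    while let Some(id) = todo.pop() {
        if let Some(auth_events) = events.get(&id) {
            for &auth_event in auth_events {
                if found.insert(auth_event) {
                    todo.push(auth_event);
                }
            }
        }
    }

    found
}

fn main() {
    // 3 was authorized by 2 and 1, 2 by 1, and 1 (the create event) by nothing.
    let events = HashMap::from([(1, vec![]), (2, vec![1]), (3, vec![2, 1])]);
    assert_eq!(auth_chain_inner(&events, 3), HashSet::from([1, 2]));
}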
#[tracing::instrument(skip(self, room_id, user_id, event_type, data))] fn update( diff --git a/src/database/key_value/appservice.rs b/src/database/key_value/appservice.rs index ee6ae206..f427ba71 100644 --- a/src/database/key_value/appservice.rs +++ b/src/database/key_value/appservice.rs @@ -1,5 +1,3 @@ -use std::sync::Arc; - use crate::{database::KeyValueDatabase, service, utils, Error, Result}; impl service::appservice::Data for KeyValueDatabase { diff --git a/src/database/key_value/globals.rs b/src/database/key_value/globals.rs index 87119207..199cbf64 100644 --- a/src/database/key_value/globals.rs +++ b/src/database/key_value/globals.rs @@ -1,4 +1,4 @@ -use std::{collections::BTreeMap, sync::Arc}; +use std::collections::BTreeMap; use async_trait::async_trait; use futures_util::{stream::FuturesUnordered, StreamExt}; @@ -9,7 +9,7 @@ use crate::{Result, service, database::KeyValueDatabase, Error, utils, services} pub const COUNTER: &[u8] = b"c"; #[async_trait] -impl service::globals::Data for Arc { +impl service::globals::Data for KeyValueDatabase { fn next_count(&self) -> Result { utils::u64_from_bytes(&self.global.increment(COUNTER)?) .map_err(|_| Error::bad_database("Count has invalid bytes.")) diff --git a/src/database/key_value/key_backups.rs b/src/database/key_value/key_backups.rs index c59ed36b..8171451c 100644 --- a/src/database/key_value/key_backups.rs +++ b/src/database/key_value/key_backups.rs @@ -1,10 +1,10 @@ -use std::{collections::BTreeMap, sync::Arc}; +use std::collections::BTreeMap; use ruma::{UserId, serde::Raw, api::client::{backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, error::ErrorKind}, RoomId}; use crate::{Result, service, database::KeyValueDatabase, services, Error, utils}; -impl service::key_backups::Data for Arc { +impl service::key_backups::Data for KeyValueDatabase { fn create_backup( &self, user_id: &UserId, diff --git a/src/database/key_value/media.rs b/src/database/key_value/media.rs index 1726755a..f0244872 100644 --- a/src/database/key_value/media.rs +++ b/src/database/key_value/media.rs @@ -1,10 +1,8 @@ -use std::sync::Arc; - use ruma::api::client::error::ErrorKind; use crate::{database::KeyValueDatabase, service, Error, utils, Result}; -impl service::media::Data for Arc { +impl service::media::Data for KeyValueDatabase { fn create_file_metadata(&self, mxc: String, width: u32, height: u32, content_disposition: Option<&str>, content_type: Option<&str>) -> Result> { let mut key = mxc.as_bytes().to_vec(); key.push(0xff); diff --git a/src/database/key_value/pusher.rs b/src/database/key_value/pusher.rs index 85d1d864..b05e47be 100644 --- a/src/database/key_value/pusher.rs +++ b/src/database/key_value/pusher.rs @@ -1,10 +1,8 @@ -use std::sync::Arc; - use ruma::{UserId, api::client::push::{set_pusher, get_pushers}}; use crate::{service, database::KeyValueDatabase, Error, Result}; -impl service::pusher::Data for Arc { +impl service::pusher::Data for KeyValueDatabase { fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> { let mut key = sender.as_bytes().to_vec(); key.push(0xff); diff --git a/src/database/key_value/rooms/alias.rs b/src/database/key_value/rooms/alias.rs index 437902df..0aa8dd48 100644 --- a/src/database/key_value/rooms/alias.rs +++ b/src/database/key_value/rooms/alias.rs @@ -1,10 +1,8 @@ -use std::sync::Arc; - use ruma::{RoomId, RoomAliasId, api::client::error::ErrorKind}; use crate::{service, database::KeyValueDatabase, utils, Error, services, Result}; -impl service::rooms::alias::Data for Arc { +impl 
service::rooms::alias::Data for KeyValueDatabase { fn set_alias( &self, alias: &RoomAliasId, diff --git a/src/database/key_value/rooms/auth_chain.rs b/src/database/key_value/rooms/auth_chain.rs index 2dffb04b..49d39560 100644 --- a/src/database/key_value/rooms/auth_chain.rs +++ b/src/database/key_value/rooms/auth_chain.rs @@ -2,7 +2,7 @@ use std::{collections::HashSet, mem::size_of, sync::Arc}; use crate::{service, database::KeyValueDatabase, Result, utils}; -impl service::rooms::auth_chain::Data for Arc { +impl service::rooms::auth_chain::Data for KeyValueDatabase { fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result>>> { // Check RAM cache if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { diff --git a/src/database/key_value/rooms/directory.rs b/src/database/key_value/rooms/directory.rs index 864e75e9..727004e7 100644 --- a/src/database/key_value/rooms/directory.rs +++ b/src/database/key_value/rooms/directory.rs @@ -1,10 +1,8 @@ -use std::sync::Arc; - use ruma::RoomId; use crate::{service, database::KeyValueDatabase, utils, Error, Result}; -impl service::rooms::directory::Data for Arc { +impl service::rooms::directory::Data for KeyValueDatabase { fn set_public(&self, room_id: &RoomId) -> Result<()> { self.publicroomids.insert(room_id.as_bytes(), &[]) } diff --git a/src/database/key_value/rooms/edus/mod.rs b/src/database/key_value/rooms/edus/mod.rs index 03e4219e..b5007f89 100644 --- a/src/database/key_value/rooms/edus/mod.rs +++ b/src/database/key_value/rooms/edus/mod.rs @@ -2,8 +2,6 @@ mod presence; mod typing; mod read_receipt; -use std::sync::Arc; - use crate::{service, database::KeyValueDatabase}; -impl service::rooms::edus::Data for Arc {} +impl service::rooms::edus::Data for KeyValueDatabase {} diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs index 5aeb1477..1477c28b 100644 --- a/src/database/key_value/rooms/edus/presence.rs +++ b/src/database/key_value/rooms/edus/presence.rs @@ -1,10 +1,10 @@ -use std::{collections::HashMap, sync::Arc}; +use std::collections::HashMap; use ruma::{UserId, RoomId, events::presence::PresenceEvent, presence::PresenceState, UInt}; use crate::{service, database::KeyValueDatabase, utils, Error, services, Result}; -impl service::rooms::edus::presence::Data for Arc { +impl service::rooms::edus::presence::Data for KeyValueDatabase { fn update_presence( &self, user_id: &UserId, diff --git a/src/database/key_value/rooms/edus/read_receipt.rs b/src/database/key_value/rooms/edus/read_receipt.rs index 7fcb8ac8..a12e2653 100644 --- a/src/database/key_value/rooms/edus/read_receipt.rs +++ b/src/database/key_value/rooms/edus/read_receipt.rs @@ -1,10 +1,10 @@ -use std::{mem, sync::Arc}; +use std::mem; use ruma::{UserId, RoomId, events::receipt::ReceiptEvent, serde::Raw, signatures::CanonicalJsonObject}; use crate::{database::KeyValueDatabase, service, utils, Error, services, Result}; -impl service::rooms::edus::read_receipt::Data for Arc { +impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { fn readreceipt_update( &self, user_id: &UserId, diff --git a/src/database/key_value/rooms/edus/typing.rs b/src/database/key_value/rooms/edus/typing.rs index 7f3526d9..b7d35968 100644 --- a/src/database/key_value/rooms/edus/typing.rs +++ b/src/database/key_value/rooms/edus/typing.rs @@ -1,10 +1,10 @@ -use std::{collections::HashSet, sync::Arc}; +use std::collections::HashSet; use ruma::{UserId, RoomId}; use crate::{database::KeyValueDatabase, service, utils, Error, services, 
Result}; -impl service::rooms::edus::typing::Data for Arc { +impl service::rooms::edus::typing::Data for KeyValueDatabase { fn typing_add( &self, user_id: &UserId, diff --git a/src/database/key_value/rooms/lazy_load.rs b/src/database/key_value/rooms/lazy_load.rs index b16657aa..133e1d04 100644 --- a/src/database/key_value/rooms/lazy_load.rs +++ b/src/database/key_value/rooms/lazy_load.rs @@ -1,10 +1,8 @@ -use std::sync::Arc; - use ruma::{UserId, DeviceId, RoomId}; use crate::{service, database::KeyValueDatabase, Result}; -impl service::rooms::lazy_loading::Data for Arc { +impl service::rooms::lazy_loading::Data for KeyValueDatabase { fn lazy_load_was_sent_before( &self, user_id: &UserId, diff --git a/src/database/key_value/rooms/metadata.rs b/src/database/key_value/rooms/metadata.rs index 560beb90..72f62514 100644 --- a/src/database/key_value/rooms/metadata.rs +++ b/src/database/key_value/rooms/metadata.rs @@ -1,10 +1,8 @@ -use std::sync::Arc; - use ruma::RoomId; use crate::{service, database::KeyValueDatabase, Result, services}; -impl service::rooms::metadata::Data for Arc { +impl service::rooms::metadata::Data for KeyValueDatabase { fn exists(&self, room_id: &RoomId) -> Result { let prefix = match services().rooms.short.get_shortroomid(room_id)? { Some(b) => b.to_be_bytes().to_vec(), @@ -19,4 +17,18 @@ impl service::rooms::metadata::Data for Arc { .filter(|(k, _)| k.starts_with(&prefix)) .is_some()) } + + fn is_disabled(&self, room_id: &RoomId) -> Result { + Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) + } + + fn disable_room(&self, room_id: &RoomId, disabled: bool) -> Result<()> { + if disabled { + self.disabledroomids.insert(room_id.as_bytes(), &[])?; + } else { + self.disabledroomids.remove(room_id.as_bytes())?; + } + + Ok(()) + } } diff --git a/src/database/key_value/rooms/mod.rs b/src/database/key_value/rooms/mod.rs index 97c29e5b..406943ed 100644 --- a/src/database/key_value/rooms/mod.rs +++ b/src/database/key_value/rooms/mod.rs @@ -15,8 +15,6 @@ mod state_compressor; mod timeline; mod user; -use std::sync::Arc; - use crate::{database::KeyValueDatabase, service}; -impl service::rooms::Data for Arc {} +impl service::rooms::Data for KeyValueDatabase {} diff --git a/src/database/key_value/rooms/outlier.rs b/src/database/key_value/rooms/outlier.rs index b1ae816a..aa975449 100644 --- a/src/database/key_value/rooms/outlier.rs +++ b/src/database/key_value/rooms/outlier.rs @@ -1,10 +1,8 @@ -use std::sync::Arc; - use ruma::{EventId, signatures::CanonicalJsonObject}; use crate::{service, database::KeyValueDatabase, PduEvent, Error, Result}; -impl service::rooms::outlier::Data for Arc { +impl service::rooms::outlier::Data for KeyValueDatabase { fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { self.eventid_outlierpdu .get(event_id.as_bytes())? 
diff --git a/src/database/key_value/rooms/pdu_metadata.rs b/src/database/key_value/rooms/pdu_metadata.rs index f5e8f766..f3ac414f 100644 --- a/src/database/key_value/rooms/pdu_metadata.rs +++ b/src/database/key_value/rooms/pdu_metadata.rs @@ -4,7 +4,7 @@ use ruma::{RoomId, EventId}; use crate::{service, database::KeyValueDatabase, Result}; -impl service::rooms::pdu_metadata::Data for Arc { +impl service::rooms::pdu_metadata::Data for KeyValueDatabase { fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { for prev in event_ids { let mut key = room_id.as_bytes().to_vec(); diff --git a/src/database/key_value/rooms/search.rs b/src/database/key_value/rooms/search.rs index 7b8d2783..dfbdbc64 100644 --- a/src/database/key_value/rooms/search.rs +++ b/src/database/key_value/rooms/search.rs @@ -1,10 +1,10 @@ -use std::{mem::size_of, sync::Arc}; +use std::mem::size_of; use ruma::RoomId; use crate::{service, database::KeyValueDatabase, utils, Result, services}; -impl service::rooms::search::Data for Arc { +impl service::rooms::search::Data for KeyValueDatabase { fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: String) -> Result<()> { let mut batch = message_body .split_terminator(|c: char| !c.is_alphanumeric()) diff --git a/src/database/key_value/rooms/short.rs b/src/database/key_value/rooms/short.rs index 9a302b56..ecd12dad 100644 --- a/src/database/key_value/rooms/short.rs +++ b/src/database/key_value/rooms/short.rs @@ -1,6 +1,227 @@ use std::sync::Arc; -use crate::{database::KeyValueDatabase, service}; +use ruma::{EventId, events::StateEventType, RoomId}; -impl service::rooms::short::Data for Arc { +use crate::{Result, database::KeyValueDatabase, service, utils, Error, services}; + +impl service::rooms::short::Data for KeyValueDatabase { + fn get_or_create_shorteventid( + &self, + event_id: &EventId, + ) -> Result { + if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { + return Ok(*short); + } + + let short = match self.eventid_shorteventid.get(event_id.as_bytes())? { + Some(shorteventid) => utils::u64_from_bytes(&shorteventid) + .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, + None => { + let shorteventid = services().globals.next_count()?; + self.eventid_shorteventid + .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; + self.shorteventid_eventid + .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; + shorteventid + } + }; + + self.eventidshort_cache + .lock() + .unwrap() + .insert(event_id.to_owned(), short); + + Ok(short) + } + + fn get_shortstatekey( + &self, + event_type: &StateEventType, + state_key: &str, + ) -> Result> { + if let Some(short) = self + .statekeyshort_cache + .lock() + .unwrap() + .get_mut(&(event_type.clone(), state_key.to_owned())) + { + return Ok(Some(*short)); + } + + let mut statekey = event_type.to_string().as_bytes().to_vec(); + statekey.push(0xff); + statekey.extend_from_slice(state_key.as_bytes()); + + let short = self + .statekey_shortstatekey + .get(&statekey)? 
+ .map(|shortstatekey| { + utils::u64_from_bytes(&shortstatekey) + .map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) + }) + .transpose()?; + + if let Some(s) = short { + self.statekeyshort_cache + .lock() + .unwrap() + .insert((event_type.clone(), state_key.to_owned()), s); + } + + Ok(short) + } + + fn get_or_create_shortstatekey( + &self, + event_type: &StateEventType, + state_key: &str, + ) -> Result { + if let Some(short) = self + .statekeyshort_cache + .lock() + .unwrap() + .get_mut(&(event_type.clone(), state_key.to_owned())) + { + return Ok(*short); + } + + let mut statekey = event_type.to_string().as_bytes().to_vec(); + statekey.push(0xff); + statekey.extend_from_slice(state_key.as_bytes()); + + let short = match self.statekey_shortstatekey.get(&statekey)? { + Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) + .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, + None => { + let shortstatekey = services().globals.next_count()?; + self.statekey_shortstatekey + .insert(&statekey, &shortstatekey.to_be_bytes())?; + self.shortstatekey_statekey + .insert(&shortstatekey.to_be_bytes(), &statekey)?; + shortstatekey + } + }; + + self.statekeyshort_cache + .lock() + .unwrap() + .insert((event_type.clone(), state_key.to_owned()), short); + + Ok(short) + } + + fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { + if let Some(id) = self + .shorteventid_cache + .lock() + .unwrap() + .get_mut(&shorteventid) + { + return Ok(Arc::clone(id)); + } + + let bytes = self + .shorteventid_eventid + .get(&shorteventid.to_be_bytes())? + .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; + + let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") + })?) + .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; + + self.shorteventid_cache + .lock() + .unwrap() + .insert(shorteventid, Arc::clone(&event_id)); + + Ok(event_id) + } + + fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { + if let Some(id) = self + .shortstatekey_cache + .lock() + .unwrap() + .get_mut(&shortstatekey) + { + return Ok(id.clone()); + } + + let bytes = self + .shortstatekey_statekey + .get(&shortstatekey.to_be_bytes())? + .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; + + let mut parts = bytes.splitn(2, |&b| b == 0xff); + let eventtype_bytes = parts.next().expect("split always returns one entry"); + let statekey_bytes = parts + .next() + .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; + + let event_type = + StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { + Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") + })?) + .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; + + let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { + Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") + })?; + + let result = (event_type, state_key); + + self.shortstatekey_cache + .lock() + .unwrap() + .insert(shortstatekey, result.clone()); + + Ok(result) + } + + /// Returns (shortstatehash, already_existed) + fn get_or_create_shortstatehash( + &self, + state_hash: &[u8], + ) -> Result<(u64, bool)> { + Ok(match self.statehash_shortstatehash.get(state_hash)? 
{ + Some(shortstatehash) => ( + utils::u64_from_bytes(&shortstatehash) + .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, + true, + ), + None => { + let shortstatehash = services().globals.next_count()?; + self.statehash_shortstatehash + .insert(state_hash, &shortstatehash.to_be_bytes())?; + (shortstatehash, false) + } + }) + } + + fn get_shortroomid(&self, room_id: &RoomId) -> Result> { + self.roomid_shortroomid + .get(room_id.as_bytes())? + .map(|bytes| { + utils::u64_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) + }) + .transpose() + } + + fn get_or_create_shortroomid( + &self, + room_id: &RoomId, + ) -> Result { + Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? { + Some(short) => utils::u64_from_bytes(&short) + .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, + None => { + let short = services().globals.next_count()?; + self.roomid_shortroomid + .insert(room_id.as_bytes(), &short.to_be_bytes())?; + short + } + }) + } } diff --git a/src/database/key_value/rooms/state.rs b/src/database/key_value/rooms/state.rs index 527c2403..b2822b32 100644 --- a/src/database/key_value/rooms/state.rs +++ b/src/database/key_value/rooms/state.rs @@ -6,7 +6,7 @@ use std::fmt::Debug; use crate::{service, database::KeyValueDatabase, utils, Error, Result}; -impl service::rooms::state::Data for Arc { +impl service::rooms::state::Data for KeyValueDatabase { fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { self.roomid_shortstatehash .get(room_id.as_bytes())? diff --git a/src/database/key_value/rooms/state_accessor.rs b/src/database/key_value/rooms/state_accessor.rs index 9af45db3..4d5bd4a1 100644 --- a/src/database/key_value/rooms/state_accessor.rs +++ b/src/database/key_value/rooms/state_accessor.rs @@ -5,7 +5,7 @@ use async_trait::async_trait; use ruma::{EventId, events::StateEventType, RoomId}; #[async_trait] -impl service::rooms::state_accessor::Data for Arc { +impl service::rooms::state_accessor::Data for KeyValueDatabase { async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { let full_state = services().rooms.state_compressor .load_shortstatehash_info(shortstatehash)? 
diff --git a/src/database/key_value/rooms/state_cache.rs b/src/database/key_value/rooms/state_cache.rs
index bdb8cf81..5f054858 100644
--- a/src/database/key_value/rooms/state_cache.rs
+++ b/src/database/key_value/rooms/state_cache.rs
@@ -1,10 +1,8 @@
-use std::sync::Arc;
-
 use ruma::{UserId, RoomId, events::{AnyStrippedStateEvent, AnySyncStateEvent}, serde::Raw};
 
 use crate::{service, database::KeyValueDatabase, services, Result};
 
-impl service::rooms::state_cache::Data for Arc<KeyValueDatabase> {
+impl service::rooms::state_cache::Data for KeyValueDatabase {
     fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> {
         let mut userroom_id = user_id.as_bytes().to_vec();
         userroom_id.push(0xff);
diff --git a/src/database/key_value/rooms/state_compressor.rs b/src/database/key_value/rooms/state_compressor.rs
index e1c0280b..aee1890c 100644
--- a/src/database/key_value/rooms/state_compressor.rs
+++ b/src/database/key_value/rooms/state_compressor.rs
@@ -1,8 +1,8 @@
-use std::{collections::HashSet, mem::size_of, sync::Arc};
+use std::{collections::HashSet, mem::size_of};
 
 use crate::{service::{self, rooms::state_compressor::data::StateDiff}, database::KeyValueDatabase, Error, utils, Result};
 
-impl service::rooms::state_compressor::Data for Arc<KeyValueDatabase> {
+impl service::rooms::state_compressor::Data for KeyValueDatabase {
     fn get_statediff(&self, shortstatehash: u64) -> Result<StateDiff> {
         let value = self
             .shortstatehash_statediff
diff --git a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs
index 2d334b96..0b7286b2 100644
--- a/src/database/key_value/rooms/timeline.rs
+++ b/src/database/key_value/rooms/timeline.rs
@@ -5,7 +5,27 @@ use tracing::error;
 
 use crate::{service, database::KeyValueDatabase, utils, Error, PduEvent, Result, services};
 
-impl service::rooms::timeline::Data for Arc<KeyValueDatabase> {
+impl service::rooms::timeline::Data for KeyValueDatabase {
+    fn first_pdu_in_room(&self, room_id: &RoomId) -> Result<Option<Arc<PduEvent>>> {
+        let prefix = services().rooms.short
+            .get_shortroomid(room_id)?
+            .expect("room exists")
+            .to_be_bytes()
+            .to_vec();
+
+        // Look for PDUs in that room.
+        self.pduid_pdu
+            .iter_from(&prefix, false)
+            .filter(|(k, _)| k.starts_with(&prefix))
+            .map(|(_, pdu)| {
+                serde_json::from_slice(&pdu)
+                    .map_err(|_| Error::bad_database("Invalid first PDU in db."))
+                    .map(Arc::new)
+            })
+            .next()
+            .transpose()
+    }
+
     fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result<u64> {
         match self
             .lasttimelinecount_cache
diff --git a/src/database/key_value/rooms/user.rs b/src/database/key_value/rooms/user.rs
index 4d20b00a..3759bda7 100644
--- a/src/database/key_value/rooms/user.rs
+++ b/src/database/key_value/rooms/user.rs
@@ -1,10 +1,8 @@
-use std::sync::Arc;
-
 use ruma::{UserId, RoomId};
 
 use crate::{service, database::KeyValueDatabase, utils, Error, Result, services};
 
-impl service::rooms::user::Data for Arc<KeyValueDatabase> {
+impl service::rooms::user::Data for KeyValueDatabase {
     fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> {
         let mut userroom_id = user_id.as_bytes().to_vec();
         userroom_id.push(0xff);
@@ -104,13 +102,13 @@ impl service::rooms::user::Data for Arc<KeyValueDatabase> {
         });
 
         // We use the default compare function because keys are sorted correctly (not reversed)
-        Ok(utils::common_elements(iterators, Ord::cmp)
+        Ok(Box::new(Box::new(utils::common_elements(iterators, Ord::cmp)
             .expect("users is not empty")
             .map(|bytes| {
                 RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| {
                     Error::bad_database("Invalid RoomId bytes in userroomid_joined")
                 })?)
.map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - })) + })))) } } diff --git a/src/database/key_value/transaction_ids.rs b/src/database/key_value/transaction_ids.rs index 7fa69081..a63b3c5d 100644 --- a/src/database/key_value/transaction_ids.rs +++ b/src/database/key_value/transaction_ids.rs @@ -1,10 +1,8 @@ -use std::sync::Arc; - use ruma::{UserId, DeviceId, TransactionId}; use crate::{service, database::KeyValueDatabase, Result}; -impl service::transaction_ids::Data for Arc { +impl service::transaction_ids::Data for KeyValueDatabase { fn add_txnid( &self, user_id: &UserId, diff --git a/src/database/key_value/uiaa.rs b/src/database/key_value/uiaa.rs index 8752e55a..cf242dec 100644 --- a/src/database/key_value/uiaa.rs +++ b/src/database/key_value/uiaa.rs @@ -1,10 +1,8 @@ -use std::sync::Arc; - use ruma::{UserId, DeviceId, signatures::CanonicalJsonValue, api::client::{uiaa::UiaaInfo, error::ErrorKind}}; use crate::{database::KeyValueDatabase, service, Error, Result}; -impl service::uiaa::Data for Arc { +impl service::uiaa::Data for KeyValueDatabase { fn set_uiaa_request( &self, user_id: &UserId, diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs index 1ac85b36..55a518d4 100644 --- a/src/database/key_value/users.rs +++ b/src/database/key_value/users.rs @@ -1,11 +1,11 @@ -use std::{mem::size_of, collections::BTreeMap, sync::Arc}; +use std::{mem::size_of, collections::BTreeMap}; use ruma::{api::client::{filter::IncomingFilterDefinition, error::ErrorKind, device::Device}, UserId, RoomAliasId, MxcUri, DeviceId, MilliSecondsSinceUnixEpoch, DeviceKeyId, encryption::{OneTimeKey, CrossSigningKey, DeviceKeys}, serde::Raw, events::{AnyToDeviceEvent, StateEventType}, DeviceKeyAlgorithm, UInt}; use tracing::warn; use crate::{service::{self, users::clean_signatures}, database::KeyValueDatabase, Error, utils, services, Result}; -impl service::users::Data for Arc { +impl service::users::Data for KeyValueDatabase { /// Check if a user has an account on this homeserver. fn exists(&self, user_id: &UserId) -> Result { Ok(self.userid_password.get(user_id.as_bytes())?.is_some()) @@ -113,7 +113,7 @@ impl service::users::Data for Arc { /// Hash and set the user's password to the Argon2 hash fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { if let Some(password) = password { - if let Ok(hash) = utils::calculate_hash(password) { + if let Ok(hash) = utils::calculate_password_hash(password) { self.userid_password .insert(user_id.as_bytes(), hash.as_bytes())?; Ok(()) diff --git a/src/database/mod.rs b/src/database/mod.rs index 35922f0b..68684677 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -238,8 +238,8 @@ impl KeyValueDatabase { } /// Load an existing database or create a new one. - pub async fn load_or_create(config: &Config) -> Result<()> { - Self::check_db_setup(config)?; + pub async fn load_or_create(config: Config) -> Result<()> { + Self::check_db_setup(&config)?; if !Path::new(&config.database_path).exists() { std::fs::create_dir_all(&config.database_path) @@ -251,19 +251,19 @@ impl KeyValueDatabase { #[cfg(not(feature = "sqlite"))] return Err(Error::BadConfig("Database backend not found.")); #[cfg(feature = "sqlite")] - Arc::new(Arc::::open(config)?) + Arc::new(Arc::::open(&config)?) } "rocksdb" => { #[cfg(not(feature = "rocksdb"))] return Err(Error::BadConfig("Database backend not found.")); #[cfg(feature = "rocksdb")] - Arc::new(Arc::::open(config)?) + Arc::new(Arc::::open(&config)?) 
} "persy" => { #[cfg(not(feature = "persy"))] return Err(Error::BadConfig("Database backend not found.")); #[cfg(feature = "persy")] - Arc::new(Arc::::open(config)?) + Arc::new(Arc::::open(&config)?) } _ => { return Err(Error::BadConfig("Database backend not found.")); @@ -402,7 +402,7 @@ impl KeyValueDatabase { }); - let services_raw = Box::new(Services::build(Arc::clone(&db))); + let services_raw = Box::new(Services::build(Arc::clone(&db), config)?); // This is the first and only time we initialize the SERVICE static *SERVICES.write().unwrap() = Some(Box::leak(services_raw)); @@ -825,7 +825,7 @@ impl KeyValueDatabase { info!( "Loaded {} database with version {}", - config.database_backend, latest_database_version + services().globals.config.database_backend, latest_database_version ); } else { services() @@ -837,7 +837,7 @@ impl KeyValueDatabase { warn!( "Created new {} database with version {}", - config.database_backend, latest_database_version + services().globals.config.database_backend, latest_database_version ); } @@ -866,7 +866,7 @@ impl KeyValueDatabase { .sending .start_handler(sending_receiver); - Self::start_cleanup_task(config).await; + Self::start_cleanup_task().await; Ok(()) } @@ -888,8 +888,8 @@ impl KeyValueDatabase { res } - #[tracing::instrument(skip(config))] - pub async fn start_cleanup_task(config: &Config) { + #[tracing::instrument] + pub async fn start_cleanup_task() { use tokio::time::interval; #[cfg(unix)] @@ -898,7 +898,7 @@ impl KeyValueDatabase { use std::time::{Duration, Instant}; - let timer_interval = Duration::from_secs(config.cleanup_second_interval as u64); + let timer_interval = Duration::from_secs(services().globals.config.cleanup_second_interval as u64); tokio::spawn(async move { let mut i = interval(timer_interval); diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index 9785478b..1289f7a3 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -18,7 +18,7 @@ use tracing::error; use crate::{service::*, services, utils, Error, Result}; pub struct Service { - db: Box, + db: Arc, } impl Service { diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 32a709c1..0b14314f 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -426,7 +426,7 @@ impl Service { Error::bad_database("Invalid room id field in event in database") })?; let start = Instant::now(); - let count = server_server::get_auth_chain(room_id, vec![event_id]) + let count = services().rooms.auth_chain.get_auth_chain(room_id, vec![event_id]) .await? 
.count(); let elapsed = start.elapsed(); @@ -615,14 +615,12 @@ impl Service { )) } AdminCommand::DisableRoom { room_id } => { - todo!(); - //services().rooms.disabledroomids.insert(room_id.as_bytes(), &[])?; - //RoomMessageEventContent::text_plain("Room disabled.") + services().rooms.metadata.disable_room(&room_id, true); + RoomMessageEventContent::text_plain("Room disabled.") } AdminCommand::EnableRoom { room_id } => { - todo!(); - //services().rooms.disabledroomids.remove(room_id.as_bytes())?; - //RoomMessageEventContent::text_plain("Room enabled.") + services().rooms.metadata.disable_room(&room_id, false); + RoomMessageEventContent::text_plain("Room enabled.") } AdminCommand::DeactivateUser { leave_rooms, diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 8fd69dfe..de8d1aa7 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -35,7 +35,7 @@ type SyncHandle = ( ); pub struct Service { - pub db: Box, + pub db: Arc, pub actual_destination_cache: Arc>, // actual_destination, host pub tls_name_override: Arc>, @@ -92,7 +92,7 @@ impl Default for RotationHandler { impl Service { pub fn load( - db: Box, + db: Arc, config: Config, ) -> Result { let keypair = db.load_keypair(); diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs index 4bd9efd3..a3bed714 100644 --- a/src/service/key_backups/mod.rs +++ b/src/service/key_backups/mod.rs @@ -13,7 +13,7 @@ use ruma::{ use std::{collections::BTreeMap, sync::Arc}; pub struct Service { - db: Box, + db: Arc, } impl Service { diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index f86251fa..d3dd2bdc 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -16,7 +16,7 @@ pub struct FileMeta { } pub struct Service { - db: Box, + db: Arc, } impl Service { diff --git a/src/service/mod.rs b/src/service/mod.rs index a1a728c5..a772c1db 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -1,4 +1,9 @@ -use std::sync::Arc; +use std::{ + collections::{BTreeMap, HashMap}, + sync::{Arc, Mutex}, +}; + +use crate::{Result, Config}; pub mod account_data; pub mod admin; @@ -30,20 +35,73 @@ pub struct Services { } impl Services { - pub fn build(db: Arc) -> Self { - Self { + pub fn build< + D: appservice::Data + + pusher::Data + + rooms::Data + + transaction_ids::Data + + uiaa::Data + + users::Data + + account_data::Data + + globals::Data + + key_backups::Data + + media::Data, + >( + db: Arc, config: Config + ) -> Result { + Ok(Self { appservice: appservice::Service { db: db.clone() }, pusher: pusher::Service { db: db.clone() }, - rooms: rooms::Service { db: Arc::clone(&db) }, - transaction_ids: transaction_ids::Service { db: Arc::clone(&db) }, - uiaa: uiaa::Service { db: Arc::clone(&db) }, - users: users::Service { db: Arc::clone(&db) }, - account_data: account_data::Service { db: Arc::clone(&db) }, - admin: admin::Service { db: Arc::clone(&db) }, - globals: globals::Service { db: Arc::clone(&db) }, - key_backups: key_backups::Service { db: Arc::clone(&db) }, - media: media::Service { db: Arc::clone(&db) }, - sending: sending::Service { db: Arc::clone(&db) }, - } + rooms: rooms::Service { + alias: rooms::alias::Service { db: db.clone() }, + auth_chain: rooms::auth_chain::Service { db: db.clone() }, + directory: rooms::directory::Service { db: db.clone() }, + edus: rooms::edus::Service { + presence: rooms::edus::presence::Service { db: db.clone() }, + read_receipt: rooms::edus::read_receipt::Service { db: db.clone() }, + typing: rooms::edus::typing::Service { db: 
db.clone() }, + }, + event_handler: rooms::event_handler::Service, + lazy_loading: rooms::lazy_loading::Service { + db: db.clone(), + lazy_load_waiting: Mutex::new(HashMap::new()), + }, + metadata: rooms::metadata::Service { db: db.clone() }, + outlier: rooms::outlier::Service { db: db.clone() }, + pdu_metadata: rooms::pdu_metadata::Service { db: db.clone() }, + search: rooms::search::Service { db: db.clone() }, + short: rooms::short::Service { db: db.clone() }, + state: rooms::state::Service { db: db.clone() }, + state_accessor: rooms::state_accessor::Service { db: db.clone() }, + state_cache: rooms::state_cache::Service { db: db.clone() }, + state_compressor: rooms::state_compressor::Service { db: db.clone() }, + timeline: rooms::timeline::Service { db: db.clone() }, + user: rooms::user::Service { db: db.clone() }, + }, + transaction_ids: transaction_ids::Service { + db: db.clone() + }, + uiaa: uiaa::Service { + db: db.clone() + }, + users: users::Service { + db: db.clone() + }, + account_data: account_data::Service { + db: db.clone() + }, + admin: admin::Service { sender: todo!() }, + globals: globals::Service::load(db.clone(), config)?, + key_backups: key_backups::Service { + db: db.clone() + }, + media: media::Service { + db: db.clone() + }, + sending: sending::Service { + maximum_requests: todo!(), + sender: todo!(), + }, + }) } } diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index ef5888fc..65fb3677 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -1,11 +1,13 @@ mod data; +use std::sync::Arc; + pub use data::Data; use ruma::{RoomAliasId, RoomId}; use crate::Result; pub struct Service { - db: Box, + db: Arc, } impl Service { diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index 5fe0e3e8..e35094bb 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -1,12 +1,14 @@ mod data; -use std::{sync::Arc, collections::HashSet}; +use std::{sync::Arc, collections::{HashSet, BTreeSet}}; pub use data::Data; +use ruma::{RoomId, EventId, api::client::error::ErrorKind}; +use tracing::log::warn; -use crate::Result; +use crate::{Result, services, Error}; pub struct Service { - db: Box, + db: Arc, } impl Service { @@ -22,4 +24,131 @@ impl Service { pub fn cache_auth_chain(&self, key: Vec, auth_chain: Arc>) -> Result<()> { self.db.cache_auth_chain(key, auth_chain) } + + #[tracing::instrument(skip(self, starting_events))] + pub async fn get_auth_chain<'a>( + &self, + room_id: &RoomId, + starting_events: Vec>, + ) -> Result> + 'a> { + const NUM_BUCKETS: usize = 50; + + let mut buckets = vec![BTreeSet::new(); NUM_BUCKETS]; + + let mut i = 0; + for id in starting_events { + let short = services().rooms.short.get_or_create_shorteventid(&id)?; + let bucket_id = (short % NUM_BUCKETS as u64) as usize; + buckets[bucket_id].insert((short, id.clone())); + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + } + + let mut full_auth_chain = HashSet::new(); + + let mut hits = 0; + let mut misses = 0; + for chunk in buckets { + if chunk.is_empty() { + continue; + } + + let chunk_key: Vec = chunk.iter().map(|(short, _)| short).copied().collect(); + if let Some(cached) = services().rooms.auth_chain.get_cached_eventid_authchain(&chunk_key)? 
{ + hits += 1; + full_auth_chain.extend(cached.iter().copied()); + continue; + } + misses += 1; + + let mut chunk_cache = HashSet::new(); + let mut hits2 = 0; + let mut misses2 = 0; + let mut i = 0; + for (sevent_id, event_id) in chunk { + if let Some(cached) = services().rooms.auth_chain.get_cached_eventid_authchain(&[sevent_id])? { + hits2 += 1; + chunk_cache.extend(cached.iter().copied()); + } else { + misses2 += 1; + let auth_chain = Arc::new(self.get_auth_chain_inner(room_id, &event_id)?); + services().rooms + .auth_chain + .cache_auth_chain(vec![sevent_id], Arc::clone(&auth_chain))?; + println!( + "cache missed event {} with auth chain len {}", + event_id, + auth_chain.len() + ); + chunk_cache.extend(auth_chain.iter()); + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + }; + } + println!( + "chunk missed with len {}, event hits2: {}, misses2: {}", + chunk_cache.len(), + hits2, + misses2 + ); + let chunk_cache = Arc::new(chunk_cache); + services().rooms + .auth_chain.cache_auth_chain(chunk_key, Arc::clone(&chunk_cache))?; + full_auth_chain.extend(chunk_cache.iter()); + } + + println!( + "total: {}, chunk hits: {}, misses: {}", + full_auth_chain.len(), + hits, + misses + ); + + Ok(full_auth_chain + .into_iter() + .filter_map(move |sid| services().rooms.short.get_eventid_from_short(sid).ok())) + } + + #[tracing::instrument(skip(self, event_id))] + fn get_auth_chain_inner( + &self, + room_id: &RoomId, + event_id: &EventId, + ) -> Result> { + let mut todo = vec![Arc::from(event_id)]; + let mut found = HashSet::new(); + + while let Some(event_id) = todo.pop() { + match services().rooms.timeline.get_pdu(&event_id) { + Ok(Some(pdu)) => { + if pdu.room_id != room_id { + return Err(Error::BadRequest(ErrorKind::Forbidden, "Evil event in db")); + } + for auth_event in &pdu.auth_events { + let sauthevent = services() + .rooms.short + .get_or_create_shorteventid(auth_event)?; + + if !found.contains(&sauthevent) { + found.insert(sauthevent); + todo.push(auth_event.clone()); + } + } + } + Ok(None) => { + warn!("Could not find pdu mentioned in auth events: {}", event_id); + } + Err(e) => { + warn!("Could not load event in auth chain: {} {}", event_id, e); + } + } + } + + Ok(found) + } } diff --git a/src/service/rooms/directory/mod.rs b/src/service/rooms/directory/mod.rs index fb289941..e85afef6 100644 --- a/src/service/rooms/directory/mod.rs +++ b/src/service/rooms/directory/mod.rs @@ -1,11 +1,13 @@ mod data; +use std::sync::Arc; + pub use data::Data; use ruma::RoomId; use crate::Result; pub struct Service { - db: Box, + db: Arc, } impl Service { diff --git a/src/service/rooms/edus/presence/mod.rs b/src/service/rooms/edus/presence/mod.rs index 73b7b5a5..d6578977 100644 --- a/src/service/rooms/edus/presence/mod.rs +++ b/src/service/rooms/edus/presence/mod.rs @@ -1,5 +1,5 @@ mod data; -use std::collections::HashMap; +use std::{collections::HashMap, sync::Arc}; pub use data::Data; use ruma::{RoomId, UserId, events::presence::PresenceEvent}; @@ -7,7 +7,7 @@ use ruma::{RoomId, UserId, events::presence::PresenceEvent}; use crate::Result; pub struct Service { - db: Box, + db: Arc, } impl Service { diff --git a/src/service/rooms/edus/read_receipt/mod.rs b/src/service/rooms/edus/read_receipt/mod.rs index 2a4c0b7f..17708772 100644 --- a/src/service/rooms/edus/read_receipt/mod.rs +++ b/src/service/rooms/edus/read_receipt/mod.rs @@ -1,11 +1,13 @@ mod data; +use std::sync::Arc; + pub use data::Data; use ruma::{RoomId, UserId, events::receipt::ReceiptEvent, serde::Raw}; use crate::Result; pub 
struct Service { - db: Box, + db: Arc, } impl Service { diff --git a/src/service/rooms/edus/typing/mod.rs b/src/service/rooms/edus/typing/mod.rs index 16a135f8..37520560 100644 --- a/src/service/rooms/edus/typing/mod.rs +++ b/src/service/rooms/edus/typing/mod.rs @@ -1,11 +1,13 @@ mod data; +use std::sync::Arc; + pub use data::Data; use ruma::{UserId, RoomId, events::SyncEphemeralRoomEvent}; use crate::Result; pub struct Service { - db: Box, + db: Arc, } impl Service { diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index ac3cca6a..79f93b50 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -72,13 +72,15 @@ impl Service { )); } - services() + if services() .rooms - .is_disabled(room_id)? - .ok_or(Error::BadRequest( + .metadata + .is_disabled(room_id)? { + return Err(Error::BadRequest( ErrorKind::Forbidden, "Federation of this room is currently disabled on this server.", - ))?; + )); + } // 1. Skip the PDU if we already have it as a timeline event if let Some(pdu_id) = services().rooms.timeline.get_pdu_id(event_id)? { @@ -111,7 +113,7 @@ impl Service { } // 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline events - let (sorted_prev_events, eventid_info) = self.fetch_unknown_prev_events( + let (sorted_prev_events, mut eventid_info) = self.fetch_unknown_prev_events( origin, &create_event, room_id, @@ -122,14 +124,15 @@ impl Service { let mut errors = 0; for prev_id in dbg!(sorted_prev_events) { // Check for disabled again because it might have changed - services() + if services() .rooms - .is_disabled(room_id)? - .ok_or(Error::BadRequest( + .metadata + .is_disabled(room_id)? { + return Err(Error::BadRequest( ErrorKind::Forbidden, - "Federation of - this room is currently disabled on this server.", - ))?; + "Federation of this room is currently disabled on this server.", + )); + } if let Some((time, tries)) = services() .globals @@ -279,14 +282,14 @@ impl Service { Err(e) => { // Drop warn!("Dropping bad event {}: {}", event_id, e); - return Err("Signature verification failed".to_owned()); + return Err(Error::BadRequest(ErrorKind::InvalidParam, "Signature verification failed")); } Ok(ruma::signatures::Verified::Signatures) => { // Redact warn!("Calculated hash does not match: {}", event_id); match ruma::signatures::redact(&value, room_version_id) { Ok(obj) => obj, - Err(_) => return Err("Redaction failed".to_owned()), + Err(_) => return Err(Error::BadRequest(ErrorKind::InvalidParam, "Redaction failed")), } } Ok(ruma::signatures::Verified::All) => value, @@ -480,7 +483,7 @@ impl Service { let mut okay = true; for prev_eventid in &incoming_pdu.prev_events { - let prev_event = if let Ok(Some(pdu)) = services().rooms.get_pdu(prev_eventid) { + let prev_event = if let Ok(Some(pdu)) = services().rooms.timeline.get_pdu(prev_eventid) { pdu } else { okay = false; @@ -488,7 +491,7 @@ impl Service { }; let sstatehash = - if let Ok(Some(s)) = services().rooms.pdu_shortstatehash(prev_eventid) { + if let Ok(Some(s)) = services().rooms.state_accessor.pdu_shortstatehash(prev_eventid) { s } else { okay = false; @@ -525,7 +528,7 @@ impl Service { let mut starting_events = Vec::with_capacity(leaf_state.len()); for (k, id) in leaf_state { - if let Ok((ty, st_key)) = services().rooms.get_statekey_from_short(k) { + if let Ok((ty, st_key)) = services().rooms.short.get_statekey_from_short(k) { // FIXME: Undo .to_string().into() when StateMap // is updated to use StateEventType 
state.insert((ty.to_string().into(), st_key), id.clone()); @@ -539,7 +542,7 @@ impl Service { services() .rooms .auth_chain - .get_auth_chain(room_id, starting_events, services()) + .get_auth_chain(room_id, starting_events) .await? .collect(), ); @@ -551,7 +554,7 @@ impl Service { let result = state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| { - let res = services().rooms.get_pdu(id); + let res = services().rooms.timeline.get_pdu(id); if let Err(e) = &res { error!("LOOK AT ME Failed to fetch event: {}", e); } @@ -677,7 +680,7 @@ impl Service { .and_then(|event_id| services().rooms.timeline.get_pdu(event_id).ok().flatten()) }, ) - .map_err(|_e| "Auth check failed.".to_owned())?; + .map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed."))?; if !check_result { return Err(Error::bad_database("Event has failed auth check with state at the event.")); @@ -714,7 +717,7 @@ impl Service { // Only keep those extremities were not referenced yet extremities - .retain(|id| !matches!(services().rooms.is_event_referenced(room_id, id), Ok(true))); + .retain(|id| !matches!(services().rooms.pdu_metadata.is_event_referenced(room_id, id), Ok(true))); info!("Compressing state at event"); let state_ids_compressed = state_at_incoming_event @@ -722,7 +725,8 @@ impl Service { .map(|(shortstatekey, id)| { services() .rooms - .compress_state_event(*shortstatekey, id)? + .state_compressor + .compress_state_event(*shortstatekey, id) }) .collect::>()?; @@ -731,6 +735,7 @@ impl Service { let auth_events = services() .rooms + .state .get_auth_events( room_id, &incoming_pdu.kind, @@ -744,10 +749,10 @@ impl Service { &incoming_pdu, None::, |k, s| auth_events.get(&(k.clone(), s.to_owned())), - )?; + ).map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed."))?; if soft_fail { - self.append_incoming_pdu( + services().rooms.timeline.append_incoming_pdu( &incoming_pdu, val, extremities.iter().map(std::ops::Deref::deref), @@ -760,8 +765,9 @@ impl Service { warn!("Event was soft failed: {:?}", incoming_pdu); services() .rooms + .pdu_metadata .mark_event_soft_failed(&incoming_pdu.event_id)?; - return Err("Event has been soft failed".into()); + return Err(Error::BadRequest(ErrorKind::InvalidParam, "Event has been soft failed")); } if incoming_pdu.state_key.is_some() { @@ -798,14 +804,14 @@ impl Service { "Found extremity pdu with no statehash in db: {:?}", leaf_pdu ); - "Found pdu with no statehash in db.".to_owned() + Error::bad_database("Found pdu with no statehash in db.") })?, leaf_pdu, ); } _ => { error!("Missing state snapshot for {:?}", id); - return Err("Missing state snapshot.".to_owned()); + return Err(Error::BadDatabase("Missing state snapshot.")); } } } @@ -835,7 +841,7 @@ impl Service { let mut update_state = false; // 14. Use state resolution to find new room state let new_room_state = if fork_states.is_empty() { - return Err("State is empty.".to_owned()); + panic!("State is empty"); } else if fork_states.iter().skip(1).all(|f| &fork_states[0] == f) { info!("State resolution trivial"); // There was only one state, so it has to be the room's current state (because that is @@ -845,7 +851,8 @@ impl Service { .map(|(k, id)| { services() .rooms - .compress_state_event(*k, id)? + .state_compressor + .compress_state_event(*k, id) }) .collect::>()? } else { @@ -877,9 +884,8 @@ impl Service { .filter_map(|(k, id)| { services() .rooms - .get_statekey_from_short(k)? 
- // FIXME: Undo .to_string().into() when StateMap - // is updated to use StateEventType + .short + .get_statekey_from_short(k) .map(|(ty, st_key)| ((ty.to_string().into(), st_key), id)) .ok() }) @@ -895,7 +901,7 @@ impl Service { &fork_states, auth_chain_sets, |id| { - let res = services().rooms.get_pdu(id); + let res = services().rooms.timeline.get_pdu(id); if let Err(e) = &res { error!("LOOK AT ME Failed to fetch event: {}", e); } @@ -904,7 +910,7 @@ impl Service { ) { Ok(new_state) => new_state, Err(_) => { - return Err("State resolution failed, either an event could not be found or deserialization".into()); + return Err(Error::bad_database("State resolution failed, either an event could not be found or deserialization")); } }; @@ -921,6 +927,7 @@ impl Service { .get_or_create_shortstatekey(&event_type.to_string().into(), &state_key)?; services() .rooms + .state_compressor .compress_state_event(shortstatekey, &event_id) }) .collect::>()? @@ -929,9 +936,11 @@ impl Service { // Set the new room state to the resolved state if update_state { info!("Forcing new room state"); + let (sstatehash, _, _) = services().rooms.state_compressor.save_state(room_id, new_room_state)?; services() .rooms - .force_state(room_id, new_room_state)?; + .state + .set_room_state(room_id, sstatehash, &state_lock)?; } } @@ -942,7 +951,7 @@ impl Service { // We use the `state_at_event` instead of `state_after` so we accurately // represent the state for this event. - let pdu_id = self + let pdu_id = services().rooms.timeline .append_incoming_pdu( &incoming_pdu, val, @@ -1017,7 +1026,7 @@ impl Service { // a. Look in the main timeline (pduid_pdu tree) // b. Look at outlier pdu tree // (get_pdu_json checks both) - if let Ok(Some(local_pdu)) = services().rooms.get_pdu(id) { + if let Ok(Some(local_pdu)) = services().rooms.timeline.get_pdu(id) { trace!("Found {} in db", id); pdus.push((local_pdu, None)); continue; @@ -1040,7 +1049,7 @@ impl Service { tokio::task::yield_now().await; } - if let Ok(Some(_)) = services().rooms.get_pdu(&next_id) { + if let Ok(Some(_)) = services().rooms.timeline.get_pdu(&next_id) { trace!("Found {} in db", id); continue; } @@ -1140,6 +1149,7 @@ impl Service { let first_pdu_in_room = services() .rooms + .timeline .first_pdu_in_room(room_id)? 
.ok_or_else(|| Error::bad_database("Failed to find first pdu in db."))?; diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index 90dad21c..760fffee 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -1,5 +1,5 @@ mod data; -use std::{collections::{HashSet, HashMap}, sync::Mutex}; +use std::{collections::{HashSet, HashMap}, sync::{Mutex, Arc}}; pub use data::Data; use ruma::{DeviceId, UserId, RoomId}; @@ -7,7 +7,7 @@ use ruma::{DeviceId, UserId, RoomId}; use crate::Result; pub struct Service { - db: Box, + db: Arc, lazy_load_waiting: Mutex, Box, Box, u64), HashSet>>>, } diff --git a/src/service/rooms/metadata/data.rs b/src/service/rooms/metadata/data.rs index 9444db41..bc31ee88 100644 --- a/src/service/rooms/metadata/data.rs +++ b/src/service/rooms/metadata/data.rs @@ -3,4 +3,6 @@ use crate::Result; pub trait Data: Send + Sync { fn exists(&self, room_id: &RoomId) -> Result; + fn is_disabled(&self, room_id: &RoomId) -> Result; + fn disable_room(&self, room_id: &RoomId, disabled: bool) -> Result<()>; } diff --git a/src/service/rooms/metadata/mod.rs b/src/service/rooms/metadata/mod.rs index 3c21dd19..b6cccd15 100644 --- a/src/service/rooms/metadata/mod.rs +++ b/src/service/rooms/metadata/mod.rs @@ -1,11 +1,13 @@ mod data; +use std::sync::Arc; + pub use data::Data; use ruma::RoomId; use crate::Result; pub struct Service { - db: Box, + db: Arc, } impl Service { @@ -14,4 +16,12 @@ impl Service { pub fn exists(&self, room_id: &RoomId) -> Result { self.db.exists(room_id) } + + pub fn is_disabled(&self, room_id: &RoomId) -> Result { + self.db.is_disabled(room_id) + } + + pub fn disable_room(&self, room_id: &RoomId, disabled: bool) -> Result<()> { + self.db.disable_room(room_id, disabled) + } } diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs index 5493ce48..d36adc4c 100644 --- a/src/service/rooms/outlier/mod.rs +++ b/src/service/rooms/outlier/mod.rs @@ -1,11 +1,13 @@ mod data; +use std::sync::Arc; + pub use data::Data; use ruma::{EventId, signatures::CanonicalJsonObject}; use crate::{Result, PduEvent}; pub struct Service { - db: Box, + db: Arc, } impl Service { diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs index a81d05c1..4724f857 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -7,7 +7,7 @@ use ruma::{RoomId, EventId}; use crate::Result; pub struct Service { - db: Box, + db: Arc, } impl Service { diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index dc571910..ec1ad537 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -1,11 +1,13 @@ mod data; +use std::sync::Arc; + pub use data::Data; use crate::Result; use ruma::RoomId; pub struct Service { - db: Box, + db: Arc, } impl Service { diff --git a/src/service/rooms/short/data.rs b/src/service/rooms/short/data.rs index bc2b28f0..07a27121 100644 --- a/src/service/rooms/short/data.rs +++ b/src/service/rooms/short/data.rs @@ -1,2 +1,40 @@ +use std::sync::Arc; + +use ruma::{EventId, events::StateEventType, RoomId}; +use crate::Result; + pub trait Data: Send + Sync { + fn get_or_create_shorteventid( + &self, + event_id: &EventId, + ) -> Result; + + fn get_shortstatekey( + &self, + event_type: &StateEventType, + state_key: &str, + ) -> Result>; + + fn get_or_create_shortstatekey( + &self, + event_type: &StateEventType, + state_key: &str, + ) -> Result; + + fn 
get_eventid_from_short(&self, shorteventid: u64) -> Result>; + + fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)>; + + /// Returns (shortstatehash, already_existed) + fn get_or_create_shortstatehash( + &self, + state_hash: &[u8], + ) -> Result<(u64, bool)>; + + fn get_shortroomid(&self, room_id: &RoomId) -> Result>; + + fn get_or_create_shortroomid( + &self, + room_id: &RoomId, + ) -> Result; } diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index a024dc67..08ce5c5a 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -7,7 +7,7 @@ use ruma::{EventId, events::StateEventType, RoomId}; use crate::{Result, Error, utils, services}; pub struct Service { - db: Box, + db: Arc, } impl Service { @@ -15,29 +15,7 @@ impl Service { &self, event_id: &EventId, ) -> Result { - if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { - return Ok(*short); - } - - let short = match self.eventid_shorteventid.get(event_id.as_bytes())? { - Some(shorteventid) => utils::u64_from_bytes(&shorteventid) - .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, - None => { - let shorteventid = services().globals.next_count()?; - self.eventid_shorteventid - .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; - shorteventid - } - }; - - self.eventidshort_cache - .lock() - .unwrap() - .insert(event_id.to_owned(), short); - - Ok(short) + self.db.get_or_create_shorteventid(event_id) } pub fn get_shortstatekey( @@ -45,36 +23,7 @@ impl Service { event_type: &StateEventType, state_key: &str, ) -> Result> { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(Some(*short)); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = self - .statekey_shortstatekey - .get(&statekey)? - .map(|shortstatekey| { - utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) - }) - .transpose()?; - - if let Some(s) = short { - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), s); - } - - Ok(short) + self.db.get_shortstatekey(event_type, state_key) } pub fn get_or_create_shortstatekey( @@ -82,152 +31,33 @@ impl Service { event_type: &StateEventType, state_key: &str, ) -> Result { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(*short); - } - - let mut statekey = event_type.to_string().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = match self.statekey_shortstatekey.get(&statekey)? 
{ - Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, - None => { - let shortstatekey = services().globals.next_count()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes())?; - self.shortstatekey_statekey - .insert(&shortstatekey.to_be_bytes(), &statekey)?; - shortstatekey - } - }; - - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), short); - - Ok(short) + self.db.get_or_create_shortstatekey(event_type, state_key) } pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - if let Some(id) = self - .shorteventid_cache - .lock() - .unwrap() - .get_mut(&shorteventid) - { - return Ok(Arc::clone(id)); - } - - let bytes = self - .shorteventid_eventid - .get(&shorteventid.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - - let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; - - self.shorteventid_cache - .lock() - .unwrap() - .insert(shorteventid, Arc::clone(&event_id)); - - Ok(event_id) + self.db.get_eventid_from_short(shorteventid) } pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { - if let Some(id) = self - .shortstatekey_cache - .lock() - .unwrap() - .get_mut(&shortstatekey) - { - return Ok(id.clone()); - } - - let bytes = self - .shortstatekey_statekey - .get(&shortstatekey.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; - - let mut parts = bytes.splitn(2, |&b| b == 0xff); - let eventtype_bytes = parts.next().expect("split always returns one entry"); - let statekey_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; - - let event_type = - StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { - Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; - - let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { - Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") - })?; - - let result = (event_type, state_key); - - self.shortstatekey_cache - .lock() - .unwrap() - .insert(shortstatekey, result.clone()); - - Ok(result) + self.db.get_statekey_from_short(shortstatekey) } /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash( + pub fn get_or_create_shortstatehash( &self, state_hash: &[u8], ) -> Result<(u64, bool)> { - Ok(match self.statehash_shortstatehash.get(state_hash)? { - Some(shortstatehash) => ( - utils::u64_from_bytes(&shortstatehash) - .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, - true, - ), - None => { - let shortstatehash = services().globals.next_count()?; - self.statehash_shortstatehash - .insert(state_hash, &shortstatehash.to_be_bytes())?; - (shortstatehash, false) - } - }) + self.db.get_or_create_shortstatehash(state_hash) } pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { - self.roomid_shortroomid - .get(room_id.as_bytes())? 
- .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) - }) - .transpose() + self.db.get_shortroomid(room_id) } pub fn get_or_create_shortroomid( &self, room_id: &RoomId, ) -> Result { - Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? { - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, - None => { - let short = services().globals.next_count()?; - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes())?; - short - } - }) + self.db.get_or_create_shortroomid(room_id) } } diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 53859785..79807c55 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -1,9 +1,10 @@ mod data; -use std::{collections::HashSet, sync::Arc}; +use std::{collections::{HashSet, HashMap}, sync::Arc}; pub use data::Data; -use ruma::{RoomId, events::{room::{member::MembershipState, create::RoomCreateEventContent}, AnyStrippedStateEvent, StateEventType}, UserId, EventId, serde::Raw, RoomVersionId}; +use ruma::{RoomId, events::{room::{member::MembershipState, create::RoomCreateEventContent}, AnyStrippedStateEvent, StateEventType, RoomEventType}, UserId, EventId, serde::Raw, RoomVersionId, state_res::{StateMap, self}}; use serde::Deserialize; +use tokio::sync::MutexGuard; use tracing::warn; use crate::{Result, services, PduEvent, Error, utils::calculate_hash}; @@ -11,7 +12,7 @@ use crate::{Result, services, PduEvent, Error, utils::calculate_hash}; use super::state_compressor::CompressedStateEvent; pub struct Service { - db: Box, + db: Arc, } impl Service { @@ -97,7 +98,7 @@ impl Service { room_id: &RoomId, state_ids_compressed: HashSet, ) -> Result { - let shorteventid = services().short.get_or_create_shorteventid(event_id)?; + let shorteventid = services().rooms.short.get_or_create_shorteventid(event_id)?; let previous_shortstatehash = self.db.get_room_shortstatehash(room_id)?; @@ -109,11 +110,11 @@ impl Service { ); let (shortstatehash, already_existed) = - services().short.get_or_create_shortstatehash(&state_hash)?; + services().rooms.short.get_or_create_shortstatehash(&state_hash)?; if !already_existed { let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| services().room.state_compressor.load_shortstatehash_info(p))?; + .map_or_else(|| Ok(Vec::new()), |p| services().rooms.state_compressor.load_shortstatehash_info(p))?; let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() { @@ -132,7 +133,7 @@ impl Service { } else { (state_ids_compressed, HashSet::new()) }; - services().room.state_compressor.save_state_from_diff( + services().rooms.state_compressor.save_state_from_diff( shortstatehash, statediffnew, statediffremoved, @@ -141,7 +142,7 @@ impl Service { )?; } - self.db.set_event_state(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; + self.db.set_event_state(shorteventid, shortstatehash)?; Ok(shortstatehash) } @@ -155,25 +156,24 @@ impl Service { &self, new_pdu: &PduEvent, ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id)?; + let shorteventid = services().rooms.short.get_or_create_shorteventid(&new_pdu.event_id)?; let previous_shortstatehash = self.get_room_shortstatehash(&new_pdu.room_id)?; if let Some(p) = previous_shortstatehash { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; + 
self.db.set_event_state(shorteventid, p)?; } if let Some(state_key) = &new_pdu.state_key { let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; + .map_or_else(|| Ok(Vec::new()), |p| services().rooms.state_compressor.load_shortstatehash_info(p))?; - let shortstatekey = self.get_or_create_shortstatekey( + let shortstatekey = services().rooms.short.get_or_create_shortstatekey( &new_pdu.kind.to_string().into(), state_key, )?; - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id)?; + let new = services().rooms.state_compressor.compress_state_event(shortstatekey, &new_pdu.event_id)?; let replaces = states_parents .last() @@ -199,7 +199,7 @@ impl Service { statediffremoved.insert(*replaces); } - self.save_state_from_diff( + services().rooms.state_compressor.save_state_from_diff( shortstatehash, statediffnew, statediffremoved, @@ -221,16 +221,16 @@ impl Service { let mut state = Vec::new(); // Add recommended events if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? + services().rooms.state_accessor.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? { state.push(e.to_stripped_state_event()); } if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? + services().rooms.state_accessor.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? { state.push(e.to_stripped_state_event()); } - if let Some(e) = self.room_state_get( + if let Some(e) = services().rooms.state_accessor.room_state_get( &invite_event.room_id, &StateEventType::RoomCanonicalAlias, "", @@ -238,16 +238,16 @@ impl Service { state.push(e.to_stripped_state_event()); } if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? + services().rooms.state_accessor.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? { state.push(e.to_stripped_state_event()); } if let Some(e) = - self.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? + services().rooms.state_accessor.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? { state.push(e.to_stripped_state_event()); } - if let Some(e) = self.room_state_get( + if let Some(e) = services().rooms.state_accessor.room_state_get( &invite_event.room_id, &StateEventType::RoomMember, invite_event.sender.as_str(), @@ -260,17 +260,16 @@ impl Service { } #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) + pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64, + mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + ) -> Result<()> { + self.db.set_room_state(room_id, shortstatehash, mutex_lock) } /// Returns the room's version. #[tracing::instrument(skip(self))] pub fn get_room_version(&self, room_id: &RoomId) -> Result { - let create_event = self.room_state_get(room_id, &StateEventType::RoomCreate, "")?; + let create_event = services().rooms.state_accessor.room_state_get(room_id, &StateEventType::RoomCreate, "")?; let create_event_content: Option = create_event .as_ref() @@ -294,4 +293,50 @@ impl Service { pub fn get_forward_extremities(&self, room_id: &RoomId) -> Result>> { self.db.get_forward_extremities(room_id) } + + /// This fetches auth events from the current state. 
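+ ///
+ /// The event's auth types are mapped to already-known shortstatekeys, the room's
+ /// current state is loaded through the state compressor, and the matching PDUs are
+ /// returned keyed by `(event type, state key)`. A room without a current state
+ /// hash yields an empty map.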
+ #[tracing::instrument(skip(self))] + pub fn get_auth_events( + &self, + room_id: &RoomId, + kind: &RoomEventType, + sender: &UserId, + state_key: Option<&str>, + content: &serde_json::value::RawValue, + ) -> Result>> { + let shortstatehash = + if let Some(current_shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)? { + current_shortstatehash + } else { + return Ok(HashMap::new()); + }; + + let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) + .expect("content is a valid JSON object"); + + let mut sauthevents = auth_events + .into_iter() + .filter_map(|(event_type, state_key)| { + services().rooms.short.get_shortstatekey(&event_type.to_string().into(), &state_key) + .ok() + .flatten() + .map(|s| (s, (event_type, state_key))) + }) + .collect::>(); + + let full_state = services().rooms.state_compressor + .load_shortstatehash_info(shortstatehash)? + .pop() + .expect("there is always one layer") + .1; + + Ok(full_state + .into_iter() + .filter_map(|compressed| services().rooms.state_compressor.parse_compressed_state_event(compressed).ok()) + .filter_map(|(shortstatekey, event_id)| { + sauthevents.remove(&shortstatekey).map(|k| (k, event_id)) + }) + .filter_map(|(k, event_id)| services().rooms.timeline.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu))) + .collect()) + } } diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 1911e52f..fd299489 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -7,7 +7,7 @@ use ruma::{events::StateEventType, RoomId, EventId}; use crate::{Result, PduEvent}; pub struct Service { - db: Box, + db: Arc, } impl Service { @@ -45,7 +45,7 @@ impl Service { event_type: &StateEventType, state_key: &str, ) -> Result>> { - self.db.pdu_state_get(shortstatehash, event_type, state_key) + self.db.state_get(shortstatehash, event_type, state_key) } /// Returns the state hash for this pdu. diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 18d1123e..ab6a0d6c 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -3,12 +3,23 @@ use std::{collections::HashSet, sync::Arc}; pub use data::Data; use regex::Regex; -use ruma::{RoomId, UserId, events::{room::{member::MembershipState, create::RoomCreateEventContent}, AnyStrippedStateEvent, StateEventType, tag::TagEvent, RoomAccountDataEventType, GlobalAccountDataEventType, direct::DirectEvent, ignored_user_list::IgnoredUserListEvent, AnySyncStateEvent}, serde::Raw, ServerName}; - -use crate::{Result, services, utils, Error}; +use ruma::{ + events::{ + direct::{DirectEvent, DirectEventContent}, + ignored_user_list::IgnoredUserListEvent, + room::{create::RoomCreateEventContent, member::MembershipState}, + tag::{TagEvent, TagEventContent}, + AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, + RoomAccountDataEventType, StateEventType, RoomAccountDataEvent, RoomAccountDataEventContent, + }, + serde::Raw, + RoomId, ServerName, UserId, +}; + +use crate::{services, utils, Error, Result}; pub struct Service { - db: Box, + db: Arc, } impl Service { @@ -45,7 +56,9 @@ impl Service { self.db.mark_as_once_joined(user_id, room_id)?; // Check if the room has a predecessor - if let Some(predecessor) = self + if let Some(predecessor) = services() + .rooms + .state_accessor .room_state_get(room_id, &StateEventType::RoomCreate, "")? 
.and_then(|create| serde_json::from_str(create.content.get()).ok()) .and_then(|content: RoomCreateEventContent| content.predecessor) @@ -76,27 +89,41 @@ impl Service { // .ok(); // Copy old tags to new room - if let Some(tag_event) = services().account_data.get::( - Some(&predecessor.room_id), - user_id, - RoomAccountDataEventType::Tag, - )? { - services().account_data + if let Some(tag_event) = services() + .account_data + .get( + Some(&predecessor.room_id), + user_id, + RoomAccountDataEventType::Tag, + )? + .map(|event| { + serde_json::from_str(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db.")) + }) + { + services() + .account_data .update( Some(room_id), user_id, RoomAccountDataEventType::Tag, - &tag_event, + &tag_event?, ) .ok(); }; // Copy direct chat flag - if let Some(mut direct_event) = services().account_data.get::( + if let Some(mut direct_event) = services().account_data.get( None, user_id, GlobalAccountDataEventType::Direct.to_string().into(), - )? { + )? + .map(|event| { + serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db.")) + }) + { + let direct_event = direct_event?; let mut room_ids_updated = false; for room_ids in direct_event.content.0.values_mut() { @@ -111,7 +138,7 @@ impl Service { None, user_id, GlobalAccountDataEventType::Direct.to_string().into(), - &direct_event, + &serde_json::to_value(&direct_event).expect("to json always works"), )?; } }; @@ -124,13 +151,17 @@ impl Service { // We want to know if the sender is ignored by the receiver let is_ignored = services() .account_data - .get::( + .get( None, // Ignored users are in global account data user_id, // Receiver GlobalAccountDataEventType::IgnoredUserList .to_string() .into(), )? + .map(|event| { + serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db.")) + }).transpose()? .map_or(false, |ignored| { ignored .content @@ -200,10 +231,7 @@ impl Service { } #[tracing::instrument(skip(self, room_id))] - pub fn get_our_real_users( - &self, - room_id: &RoomId, - ) -> Result>>> { + pub fn get_our_real_users(&self, room_id: &RoomId) -> Result>>> { let maybe = self .our_real_users_cache .read() diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index ab9f4275..0c32c4bd 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -9,7 +9,7 @@ use crate::{Result, utils, services}; use self::data::StateDiff; pub struct Service { - db: Box, + db: Arc, } pub type CompressedStateEvent = [u8; 2 * size_of::()]; @@ -67,7 +67,7 @@ impl Service { ) -> Result { let mut v = shortstatekey.to_be_bytes().to_vec(); v.extend_from_slice( - &self + &services().rooms.short .get_or_create_shorteventid(event_id)? 
.to_be_bytes(), ); @@ -218,7 +218,7 @@ impl Service { HashSet, // added HashSet)> // removed { - let previous_shortstatehash = self.db.current_shortstatehash(room_id)?; + let previous_shortstatehash = services().rooms.state.get_room_shortstatehash(room_id)?; let state_hash = utils::calculate_hash( &new_state_ids_compressed diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index d073e865..2220b5f2 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -5,6 +5,7 @@ use ruma::{signatures::CanonicalJsonObject, EventId, UserId, RoomId}; use crate::{Result, PduEvent}; pub trait Data: Send + Sync { + fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>>; fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result; /// Returns the `count` of this pdu's id. diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index e8f42053..78172255 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -21,33 +21,14 @@ use crate::{services, Result, service::pdu::{PduBuilder, EventHash}, Error, PduE use super::state_compressor::CompressedStateEvent; pub struct Service { - db: Box, + db: Arc, } impl Service { - /* - /// Checks if a room exists. #[tracing::instrument(skip(self))] pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. - self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() + self.db.first_pdu_in_room(room_id) } - */ #[tracing::instrument(skip(self))] pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { @@ -681,7 +662,8 @@ impl Service { /// Append the incoming event setting the state snapshot to the state from the /// server that sent the event. 
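///
/// Takes the event, its canonical JSON, the new forward extremities, the compressed
/// state ids from the sending server and a soft-fail flag; callers hold the room's
/// state mutex for the duration of the call.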
#[tracing::instrument(skip_all)] - fn append_incoming_pdu<'a>( + pub fn append_incoming_pdu<'a>( + &self, pdu: &PduEvent, pdu_json: CanonicalJsonObject, new_room_leaves: impl IntoIterator + Clone + Debug, diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs index 7c7dfae6..394a550a 100644 --- a/src/service/rooms/user/mod.rs +++ b/src/service/rooms/user/mod.rs @@ -1,11 +1,13 @@ mod data; +use std::sync::Arc; + pub use data::Data; use ruma::{RoomId, UserId}; use crate::Result; pub struct Service { - db: Box, + db: Arc, } impl Service { diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index 8ab557f6..fde251b7 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -448,14 +448,6 @@ impl Service { Ok(()) } - #[tracing::instrument(skip(keys))] - fn calculate_hash(keys: &[&[u8]]) -> Vec { - // We only hash the pdu's event ids, not the whole pdu - let bytes = keys.join(&0xff); - let hash = digest::digest(&digest::SHA256, &bytes); - hash.as_ref().to_owned() - } - /// Cleanup event data /// Used for instance after we remove an appservice registration /// diff --git a/src/service/transaction_ids/mod.rs b/src/service/transaction_ids/mod.rs index a9c516cf..d7066e24 100644 --- a/src/service/transaction_ids/mod.rs +++ b/src/service/transaction_ids/mod.rs @@ -1,11 +1,13 @@ mod data; +use std::sync::Arc; + pub use data::Data; use ruma::{UserId, DeviceId, TransactionId}; use crate::Result; pub struct Service { - db: Box, + db: Arc, } impl Service { diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index 01c0d2f6..73b2273d 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -1,4 +1,6 @@ mod data; +use std::sync::Arc; + pub use data::Data; use ruma::{api::client::{uiaa::{UiaaInfo, IncomingAuthData, IncomingPassword, AuthType, IncomingUserIdentifier}, error::ErrorKind}, DeviceId, UserId, signatures::CanonicalJsonValue}; @@ -7,7 +9,7 @@ use tracing::error; use crate::{Result, utils, Error, services, api::client_server::SESSION_ID_LENGTH}; pub struct Service { - db: Box, + db: Arc, } impl Service { diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index b13ae1f2..2cf18765 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -1,5 +1,5 @@ mod data; -use std::{collections::BTreeMap, mem}; +use std::{collections::BTreeMap, mem, sync::Arc}; pub use data::Data; use ruma::{UserId, MxcUri, DeviceId, DeviceKeyId, serde::Raw, encryption::{OneTimeKey, CrossSigningKey, DeviceKeys}, DeviceKeyAlgorithm, UInt, events::AnyToDeviceEvent, api::client::{device::Device, filter::IncomingFilterDefinition, error::ErrorKind}, RoomAliasId}; @@ -7,7 +7,7 @@ use ruma::{UserId, MxcUri, DeviceId, DeviceKeyId, serde::Raw, encryption::{OneTi use crate::{Result, Error, services}; pub struct Service { - db: Box, + db: Arc, } impl Service { diff --git a/src/utils/mod.rs b/src/utils/mod.rs index 734da2a8..0ee3ae84 100644 --- a/src/utils/mod.rs +++ b/src/utils/mod.rs @@ -3,6 +3,7 @@ pub mod error; use argon2::{Config, Variant}; use cmp::Ordering; use rand::prelude::*; +use ring::digest; use ruma::serde::{try_from_json_map, CanonicalJsonError, CanonicalJsonObject}; use std::{ cmp, fmt, @@ -59,7 +60,7 @@ pub fn random_string(length: usize) -> String { } /// Calculate a new hash for the given password -pub fn calculate_hash(password: &str) -> Result { +pub fn calculate_password_hash(password: &str) -> Result { let hashing_config = Config { variant: Variant::Argon2id, ..Default::default() @@ -69,6 +70,15 @@ pub fn 
calculate_hash(password: &str) -> Result { argon2::hash_encoded(password.as_bytes(), salt.as_bytes(), &hashing_config) } +#[tracing::instrument(skip(keys))] +pub fn calculate_hash(keys: &[&[u8]]) -> Vec { + // We only hash the pdu's event ids, not the whole pdu + let bytes = keys.join(&0xff); + let hash = digest::digest(&digest::SHA256, &bytes); + hash.as_ref().to_owned() +} + + pub fn common_elements( mut iterators: impl Iterator>>, check_order: impl Fn(&[u8], &[u8]) -> Ordering, From 33a2b2b7729bb40253fd174d99ad773869b5ecfe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 5 Oct 2022 20:33:55 +0200 Subject: [PATCH 390/445] 37 errors left --- src/api/client_server/membership.rs | 3 +- src/database/key_value/rooms/search.rs | 19 +- src/database/key_value/rooms/state.rs | 2 +- src/database/key_value/rooms/state_cache.rs | 488 +++++++++++++++++++- src/database/key_value/rooms/timeline.rs | 43 +- src/service/mod.rs | 6 +- src/service/rooms/event_handler/mod.rs | 6 +- src/service/rooms/state/data.rs | 2 +- src/service/rooms/state/mod.rs | 8 + src/service/rooms/state_accessor/mod.rs | 8 +- src/service/rooms/state_cache/data.rs | 95 +++- src/service/rooms/state_cache/mod.rs | 368 +-------------- src/service/rooms/state_compressor/mod.rs | 26 +- src/service/rooms/timeline/data.rs | 5 + src/service/rooms/timeline/mod.rs | 120 +++-- src/service/sending/mod.rs | 4 +- src/service/transaction_ids/mod.rs | 19 +- src/service/uiaa/mod.rs | 20 +- src/service/users/mod.rs | 4 +- 19 files changed, 764 insertions(+), 482 deletions(-) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index f07f2adb..c930ce49 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -649,7 +649,8 @@ async fn join_room_by_id_helper( services().rooms.timeline.append_pdu( &parsed_pdu, join_event, - iter::once(&*parsed_pdu.event_id), + vec![(*parsed_pdu.event_id).to_owned()], + &state_lock )?; // We set the room state after inserting the pdu, so that we never have a moment in time diff --git a/src/database/key_value/rooms/search.rs b/src/database/key_value/rooms/search.rs index dfbdbc64..41df5441 100644 --- a/src/database/key_value/rooms/search.rs +++ b/src/database/key_value/rooms/search.rs @@ -54,19 +54,20 @@ impl service::rooms::search::Data for KeyValueDatabase { .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) }); - Ok(utils::common_elements(iterators, |a, b| { + let common_elements = match utils::common_elements(iterators, |a, b| { // We compare b with a because we reversed the iterator earlier b.cmp(a) - }) - .map(|iter| { - ( - Box::new(iter.map(move |id| { + }) { + Some(it) => it, + None => return Ok(None), + }; + + let mapped = common_elements.map(move |id| { let mut pduid = prefix_clone.clone(); pduid.extend_from_slice(&id); pduid - })), - words, - ) - })) + }); + + Ok(Some((Box::new(mapped), words))) } } diff --git a/src/database/key_value/rooms/state.rs b/src/database/key_value/rooms/state.rs index b2822b32..90ac0d55 100644 --- a/src/database/key_value/rooms/state.rs +++ b/src/database/key_value/rooms/state.rs @@ -49,7 +49,7 @@ impl service::rooms::state::Data for KeyValueDatabase { fn set_forward_extremities<'a>( &self, room_id: &RoomId, - event_ids: &mut dyn Iterator, + event_ids: Vec>, _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); diff --git a/src/database/key_value/rooms/state_cache.rs 
b/src/database/key_value/rooms/state_cache.rs index 5f054858..4043bc40 100644 --- a/src/database/key_value/rooms/state_cache.rs +++ b/src/database/key_value/rooms/state_cache.rs @@ -1,6 +1,9 @@ -use ruma::{UserId, RoomId, events::{AnyStrippedStateEvent, AnySyncStateEvent}, serde::Raw}; +use std::{collections::HashSet, sync::Arc}; -use crate::{service, database::KeyValueDatabase, services, Result}; +use regex::Regex; +use ruma::{UserId, RoomId, events::{AnyStrippedStateEvent, AnySyncStateEvent}, serde::Raw, ServerName}; + +use crate::{service, database::KeyValueDatabase, services, Result, Error, utils}; impl service::rooms::state_cache::Data for KeyValueDatabase { fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { @@ -75,4 +78,485 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { Ok(()) } + + fn update_joined_count(&self, room_id: &RoomId) -> Result<()> { + let mut joinedcount = 0_u64; + let mut invitedcount = 0_u64; + let mut joined_servers = HashSet::new(); + let mut real_users = HashSet::new(); + + for joined in self.room_members(room_id).filter_map(|r| r.ok()) { + joined_servers.insert(joined.server_name().to_owned()); + if joined.server_name() == services().globals.server_name() + && !services().users.is_deactivated(&joined).unwrap_or(true) + { + real_users.insert(joined); + } + joinedcount += 1; + } + + for invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { + joined_servers.insert(invited.server_name().to_owned()); + invitedcount += 1; + } + + self.roomid_joinedcount + .insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?; + + self.roomid_invitedcount + .insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?; + + self.our_real_users_cache + .write() + .unwrap() + .insert(room_id.to_owned(), Arc::new(real_users)); + + self.appservice_in_room_cache + .write() + .unwrap() + .remove(room_id); + + Ok(()) + } + + #[tracing::instrument(skip(self, room_id))] + fn get_our_real_users(&self, room_id: &RoomId) -> Result>>> { + let maybe = self + .our_real_users_cache + .read() + .unwrap() + .get(room_id) + .cloned(); + if let Some(users) = maybe { + Ok(users) + } else { + self.update_joined_count(room_id)?; + Ok(Arc::clone( + self.our_real_users_cache + .read() + .unwrap() + .get(room_id) + .unwrap(), + )) + } + } + + #[tracing::instrument(skip(self, room_id, appservice))] + fn appservice_in_room( + &self, + room_id: &RoomId, + appservice: &(String, serde_yaml::Value), + ) -> Result { + let maybe = self + .appservice_in_room_cache + .read() + .unwrap() + .get(room_id) + .and_then(|map| map.get(&appservice.0)) + .copied(); + + if let Some(b) = maybe { + Ok(b) + } else if let Some(namespaces) = appservice.1.get("namespaces") { + let users = namespaces + .get("users") + .and_then(|users| users.as_sequence()) + .map_or_else(Vec::new, |users| { + users + .iter() + .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) + .collect::>() + }); + + let bridge_user_id = appservice + .1 + .get("sender_localpart") + .and_then(|string| string.as_str()) + .and_then(|string| { + UserId::parse_with_server_name(string, services().globals.server_name()).ok() + }); + + let in_room = bridge_user_id + .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) + || self.room_members(room_id).any(|userid| { + userid.map_or(false, |userid| { + users.iter().any(|r| r.is_match(userid.as_str())) + }) + }); + + self.appservice_in_room_cache + .write() + .unwrap() + .entry(room_id.to_owned()) + .or_default() + 
.insert(appservice.0.clone(), in_room); + + Ok(in_room) + } else { + Ok(false) + } + } + + /// Makes a user forget a room. + #[tracing::instrument(skip(self))] + fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + + let mut roomuser_id = room_id.as_bytes().to_vec(); + roomuser_id.push(0xff); + roomuser_id.extend_from_slice(user_id.as_bytes()); + + self.userroomid_leftstate.remove(&userroom_id)?; + self.roomuserid_leftcount.remove(&roomuser_id)?; + + Ok(()) + } + + /// Returns an iterator of all servers participating in this room. + #[tracing::instrument(skip(self))] + fn room_servers<'a>( + &'a self, + room_id: &RoomId, + ) -> Box>> + 'a> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + Box::new(self.roomserverids.scan_prefix(prefix).map(|(key, _)| { + ServerName::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("Server name in roomserverids is invalid unicode.") + })?, + ) + .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) + })) + } + + #[tracing::instrument(skip(self))] + fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { + let mut key = server.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(room_id.as_bytes()); + + self.serverroomids.get(&key).map(|o| o.is_some()) + } + + /// Returns an iterator of all rooms a server participates in (as far as we know). + #[tracing::instrument(skip(self))] + fn server_rooms<'a>( + &'a self, + server: &ServerName, + ) -> Box>> + 'a> { + let mut prefix = server.as_bytes().to_vec(); + prefix.push(0xff); + + Box::new(self.serverroomids.scan_prefix(prefix).map(|(key, _)| { + RoomId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, + ) + .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) + })) + } + + /// Returns an iterator over all joined members of a room. + #[tracing::instrument(skip(self))] + fn room_members<'a>( + &'a self, + room_id: &RoomId, + ) -> Box>> + 'a> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + Box::new(self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { + UserId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("User ID in roomuserid_joined is invalid unicode.") + })?, + ) + .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) + })) + } + + #[tracing::instrument(skip(self))] + fn room_joined_count(&self, room_id: &RoomId) -> Result> { + self.roomid_joinedcount + .get(room_id.as_bytes())? + .map(|b| { + utils::u64_from_bytes(&b) + .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) + }) + .transpose() + } + + #[tracing::instrument(skip(self))] + fn room_invited_count(&self, room_id: &RoomId) -> Result> { + self.roomid_invitedcount + .get(room_id.as_bytes())? + .map(|b| { + utils::u64_from_bytes(&b) + .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) + }) + .transpose() + } + + /// Returns an iterator over all User IDs who ever joined a room. 
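+ ///
+ /// Scans `roomuseroncejoinedids` by the `roomid 0xff` prefix and parses the user id
+ /// from the remainder of each key.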
+ #[tracing::instrument(skip(self))] + fn room_useroncejoined<'a>( + &'a self, + room_id: &RoomId, + ) -> Box>> + 'a> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + Box::new(self.roomuseroncejoinedids + .scan_prefix(prefix) + .map(|(key, _)| { + UserId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("User ID in room_useroncejoined is invalid unicode.") + })?, + ) + .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) + })) + } + + /// Returns an iterator over all invited members of a room. + #[tracing::instrument(skip(self))] + fn room_members_invited<'a>( + &'a self, + room_id: &RoomId, + ) -> Box>> + 'a> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + Box::new(self.roomuserid_invitecount + .scan_prefix(prefix) + .map(|(key, _)| { + UserId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("User ID in roomuserid_invited is invalid unicode.") + })?, + ) + .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) + })) + } + + #[tracing::instrument(skip(self))] + fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { + let mut key = room_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(user_id.as_bytes()); + + self.roomuserid_invitecount + .get(&key)? + .map_or(Ok(None), |bytes| { + Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Invalid invitecount in db.") + })?)) + }) + } + + #[tracing::instrument(skip(self))] + fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { + let mut key = room_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(user_id.as_bytes()); + + self.roomuserid_leftcount + .get(&key)? + .map(|bytes| { + utils::u64_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Invalid leftcount in db.")) + }) + .transpose() + } + + /// Returns an iterator over all rooms this user joined. + #[tracing::instrument(skip(self))] + fn rooms_joined<'a>( + &'a self, + user_id: &UserId, + ) -> Box>> + 'a> { + Box::new(self.userroomid_joined + .scan_prefix(user_id.as_bytes().to_vec()) + .map(|(key, _)| { + RoomId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("Room ID in userroomid_joined is invalid unicode.") + })?, + ) + .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) + })) + } + + /// Returns an iterator over all rooms a user was invited to. 
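+ ///
+ /// Scans `userroomid_invitestate` by the `userid 0xff` prefix and yields each room id
+ /// together with the stripped invite state stored as JSON.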
+ #[tracing::instrument(skip(self))] + fn rooms_invited<'a>( + &'a self, + user_id: &UserId, + ) -> Box, Vec>)>> + 'a> { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xff); + + Box::new(self.userroomid_invitestate + .scan_prefix(prefix) + .map(|(key, state)| { + let room_id = RoomId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("Room ID in userroomid_invited is invalid unicode.") + })?, + ) + .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; + + let state = serde_json::from_slice(&state) + .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; + + Ok((room_id, state)) + })) + } + + #[tracing::instrument(skip(self))] + fn invite_state( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result>>> { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(room_id.as_bytes()); + + self.userroomid_invitestate + .get(&key)? + .map(|state| { + let state = serde_json::from_slice(&state) + .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; + + Ok(state) + }) + .transpose() + } + + #[tracing::instrument(skip(self))] + fn left_state( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result>>> { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(room_id.as_bytes()); + + self.userroomid_leftstate + .get(&key)? + .map(|state| { + let state = serde_json::from_slice(&state) + .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; + + Ok(state) + }) + .transpose() + } + + /// Returns an iterator over all rooms a user left. + #[tracing::instrument(skip(self))] + fn rooms_left<'a>( + &'a self, + user_id: &UserId, + ) -> Box, Vec>)>> + 'a> { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xff); + + Box::new(self.userroomid_leftstate + .scan_prefix(prefix) + .map(|(key, state)| { + let room_id = RoomId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("Room ID in userroomid_invited is invalid unicode.") + })?, + ) + .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; + + let state = serde_json::from_slice(&state) + .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; + + Ok((room_id, state)) + })) + } + + #[tracing::instrument(skip(self))] + fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + + Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) + } + + #[tracing::instrument(skip(self))] + fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + + Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) + } + + #[tracing::instrument(skip(self))] + fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + + Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) + } + + #[tracing::instrument(skip(self))] + fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> Result { 
+ let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + + Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) + } } diff --git a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs index 0b7286b2..17231867 100644 --- a/src/database/key_value/rooms/timeline.rs +++ b/src/database/key_value/rooms/timeline.rs @@ -187,13 +187,29 @@ impl service::rooms::timeline::Data for KeyValueDatabase { .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) } + fn append_pdu(&self, pdu_id: &[u8], pdu: &PduEvent, json: &CanonicalJsonObject, count: u64) -> Result<()> { + self.pduid_pdu.insert( + pdu_id, + &serde_json::to_vec(json).expect("CanonicalJsonObject is always a valid"))?; + + self.lasttimelinecount_cache + .lock() + .unwrap() + .insert(pdu.room_id.clone(), count); + + self.eventid_pduid + .insert(pdu.event_id.as_bytes(), &pdu_id)?; + self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; + + Ok(()) + } + /// Removes a pdu and creates a new one with the same id. fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { if self.pduid_pdu.get(pdu_id)?.is_some() { self.pduid_pdu.insert( pdu_id, - &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), - )?; + &serde_json::to_vec(pdu).expect("CanonicalJsonObject is always a valid"))?; Ok(()) } else { Err(Error::BadRequest( @@ -306,4 +322,27 @@ impl service::rooms::timeline::Data for KeyValueDatabase { Ok((pdu_id, pdu)) }))) } + + fn increment_notification_counts(&self, room_id: &RoomId, notifies: Vec>, highlights: Vec>) -> Result<()> { + let notifies_batch = Vec::new(); + let highlights_batch = Vec::new(); + for user in notifies { + let mut userroom_id = user.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + notifies_batch.push(userroom_id); + } + for user in highlights { + let mut userroom_id = user.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + highlights_batch.push(userroom_id); + } + + self.userroomid_notificationcount + .increment_batch(&mut notifies_batch.into_iter())?; + self.userroomid_highlightcount + .increment_batch(&mut highlights_batch.into_iter())?; + Ok(()) + } } diff --git a/src/service/mod.rs b/src/service/mod.rs index a772c1db..daf43293 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -3,6 +3,8 @@ use std::{ sync::{Arc, Mutex}, }; +use lru_cache::LruCache; + use crate::{Result, Config}; pub mod account_data; @@ -74,8 +76,8 @@ impl Services { state: rooms::state::Service { db: db.clone() }, state_accessor: rooms::state_accessor::Service { db: db.clone() }, state_cache: rooms::state_cache::Service { db: db.clone() }, - state_compressor: rooms::state_compressor::Service { db: db.clone() }, - timeline: rooms::timeline::Service { db: db.clone() }, + state_compressor: rooms::state_compressor::Service { db: db.clone(), stateinfo_cache: Mutex::new(LruCache::new((100.0 * config.conduit_cache_capacity_modifier) as usize,)) }, + timeline: rooms::timeline::Service { db: db.clone(), lasttimelinecount_cache: Mutex::new(HashMap::new()) }, user: rooms::user::Service { db: db.clone() }, }, transaction_ids: transaction_ids::Service { diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 79f93b50..d6ec8e95 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -755,7 +755,7 @@ impl Service { 
services().rooms.timeline.append_incoming_pdu( &incoming_pdu, val, - extremities.iter().map(std::ops::Deref::deref), + extremities.iter().map(|e| (**e).to_owned()).collect(), state_ids_compressed, soft_fail, &state_lock, @@ -936,7 +936,7 @@ impl Service { // Set the new room state to the resolved state if update_state { info!("Forcing new room state"); - let (sstatehash, _, _) = services().rooms.state_compressor.save_state(room_id, new_room_state)?; + let sstatehash = services().rooms.state_compressor.save_state(room_id, new_room_state)?; services() .rooms .state @@ -955,7 +955,7 @@ impl Service { .append_incoming_pdu( &incoming_pdu, val, - extremities.iter().map(std::ops::Deref::deref), + extremities.iter().map(|e| (**e).to_owned()).collect(), state_ids_compressed, soft_fail, &state_lock, diff --git a/src/service/rooms/state/data.rs b/src/service/rooms/state/data.rs index 20c177a2..8eca21d1 100644 --- a/src/service/rooms/state/data.rs +++ b/src/service/rooms/state/data.rs @@ -22,7 +22,7 @@ pub trait Data: Send + Sync { /// Replace the forward extremities of the room. fn set_forward_extremities<'a>(&self, room_id: &RoomId, - event_ids: &mut dyn Iterator, + event_ids: Vec>, _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()>; } diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 79807c55..57a0e773 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -294,6 +294,14 @@ impl Service { self.db.get_forward_extremities(room_id) } + pub fn set_forward_extremities<'a>(&self, + room_id: &RoomId, + event_ids: Vec>, + state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + ) -> Result<()> { + self.db.set_forward_extremities(room_id, event_ids, state_lock) + } + /// This fetches auth events from the current state. #[tracing::instrument(skip(self))] pub fn get_auth_events( diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index fd299489..a0f5523b 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -13,17 +13,15 @@ pub struct Service { impl Service { /// Builds a StateMap by iterating over all keys that start /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - self.db.state_full_ids(shortstatehash) + self.db.state_full_ids(shortstatehash).await } - #[tracing::instrument(skip(self))] pub async fn state_full( &self, shortstatehash: u64, ) -> Result>> { - self.db.state_full(shortstatehash) + self.db.state_full(shortstatehash).await } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). @@ -59,7 +57,7 @@ impl Service { &self, room_id: &RoomId, ) -> Result>> { - self.db.room_state_full(room_id) + self.db.room_state_full(room_id).await } /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). 
diff --git a/src/service/rooms/state_cache/data.rs b/src/service/rooms/state_cache/data.rs index b9db7217..950143ff 100644 --- a/src/service/rooms/state_cache/data.rs +++ b/src/service/rooms/state_cache/data.rs @@ -1,4 +1,6 @@ -use ruma::{UserId, RoomId, serde::Raw, events::AnyStrippedStateEvent}; +use std::{collections::HashSet, sync::Arc}; + +use ruma::{UserId, RoomId, serde::Raw, events::{AnyStrippedStateEvent, AnySyncStateEvent}, ServerName}; use crate::Result; pub trait Data: Send + Sync { @@ -6,4 +8,95 @@ pub trait Data: Send + Sync { fn mark_as_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; fn mark_as_invited(&self, user_id: &UserId, room_id: &RoomId, last_state: Option>>) -> Result<()>; fn mark_as_left(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; + + fn update_joined_count(&self, room_id: &RoomId) -> Result<()>; + + fn get_our_real_users(&self, room_id: &RoomId) -> Result>>>; + + fn appservice_in_room( + &self, + room_id: &RoomId, + appservice: &(String, serde_yaml::Value), + ) -> Result; + + /// Makes a user forget a room. + fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()>; + + /// Returns an iterator of all servers participating in this room. + fn room_servers<'a>( + &'a self, + room_id: &RoomId, + ) -> Box>> + 'a>; + + fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result; + + /// Returns an iterator of all rooms a server participates in (as far as we know). + fn server_rooms<'a>( + &'a self, + server: &ServerName, + ) -> Box>> + 'a>; + + /// Returns an iterator over all joined members of a room. + fn room_members<'a>( + &'a self, + room_id: &RoomId, + ) -> Box>> + 'a>; + + fn room_joined_count(&self, room_id: &RoomId) -> Result>; + + fn room_invited_count(&self, room_id: &RoomId) -> Result>; + + /// Returns an iterator over all User IDs who ever joined a room. + fn room_useroncejoined<'a>( + &'a self, + room_id: &RoomId, + ) -> Box>> + 'a>; + + /// Returns an iterator over all invited members of a room. + fn room_members_invited<'a>( + &'a self, + room_id: &RoomId, + ) -> Box>> + 'a>; + + fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result>; + + fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result>; + + /// Returns an iterator over all rooms this user joined. + fn rooms_joined<'a>( + &'a self, + user_id: &UserId, + ) -> Box>> + 'a>; + + /// Returns an iterator over all rooms a user was invited to. + fn rooms_invited<'a>( + &'a self, + user_id: &UserId, + ) -> Box, Vec>)>> + 'a>; + + fn invite_state( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result>>>; + + fn left_state( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result>>>; + + /// Returns an iterator over all rooms a user left. 
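+ ///
+ /// Each item pairs the room id with the left-state events stored for that user and room.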
+ fn rooms_left<'a>( + &'a self, + user_id: &UserId, + ) -> Box, Vec>)>> + 'a>; + + fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result; + + fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result; + + fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result; + + fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> Result; } diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index ab6a0d6c..69bd8328 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -191,65 +191,12 @@ impl Service { #[tracing::instrument(skip(self, room_id))] pub fn update_joined_count(&self, room_id: &RoomId) -> Result<()> { - let mut joinedcount = 0_u64; - let mut invitedcount = 0_u64; - let mut joined_servers = HashSet::new(); - let mut real_users = HashSet::new(); - - for joined in self.room_members(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(joined.server_name().to_owned()); - if joined.server_name() == services().globals.server_name() - && !services().users.is_deactivated(&joined).unwrap_or(true) - { - real_users.insert(joined); - } - joinedcount += 1; - } - - for invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(invited.server_name().to_owned()); - invitedcount += 1; - } - - self.roomid_joinedcount - .insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?; - - self.roomid_invitedcount - .insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?; - - self.our_real_users_cache - .write() - .unwrap() - .insert(room_id.to_owned(), Arc::new(real_users)); - - self.appservice_in_room_cache - .write() - .unwrap() - .remove(room_id); - - Ok(()) + self.db.update_joined_count(room_id) } #[tracing::instrument(skip(self, room_id))] pub fn get_our_real_users(&self, room_id: &RoomId) -> Result>>> { - let maybe = self - .our_real_users_cache - .read() - .unwrap() - .get(room_id) - .cloned(); - if let Some(users) = maybe { - Ok(users) - } else { - self.update_joined_count(room_id)?; - Ok(Arc::clone( - self.our_real_users_cache - .read() - .unwrap() - .get(room_id) - .unwrap(), - )) - } + self.db.get_our_real_users(room_id) } #[tracing::instrument(skip(self, room_id, appservice))] @@ -258,71 +205,13 @@ impl Service { room_id: &RoomId, appservice: &(String, serde_yaml::Value), ) -> Result { - let maybe = self - .appservice_in_room_cache - .read() - .unwrap() - .get(room_id) - .and_then(|map| map.get(&appservice.0)) - .copied(); - - if let Some(b) = maybe { - Ok(b) - } else if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - - let bridge_user_id = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, services().globals.server_name()).ok() - }); - - let in_room = bridge_user_id - .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) - || self.room_members(room_id).any(|userid| { - userid.map_or(false, |userid| { - users.iter().any(|r| r.is_match(userid.as_str())) - }) - }); - - self.appservice_in_room_cache - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default() - .insert(appservice.0.clone(), in_room); - - Ok(in_room) - } else { - Ok(false) - } + self.db.appservice_in_room(room_id, appservice) } /// Makes a user 
forget a room. #[tracing::instrument(skip(self))] pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - - Ok(()) + self.db.forget(room_id, user_id) } /// Returns an iterator of all servers participating in this room. @@ -331,31 +220,12 @@ impl Service { &'a self, room_id: &RoomId, ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomserverids.scan_prefix(prefix).map(|(key, _)| { - ServerName::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Server name in roomserverids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) - }) + self.db.room_servers(room_id) } #[tracing::instrument(skip(self))] pub fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.serverroomids.get(&key).map(|o| o.is_some()) + self.db.server_in_room(server, room_id) } /// Returns an iterator of all rooms a server participates in (as far as we know). @@ -364,20 +234,7 @@ impl Service { &'a self, server: &ServerName, ) -> impl Iterator>> + 'a { - let mut prefix = server.as_bytes().to_vec(); - prefix.push(0xff); - - self.serverroomids.scan_prefix(prefix).map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) - }) + self.db.server_rooms(server) } /// Returns an iterator over all joined members of a room. @@ -386,44 +243,17 @@ impl Service { &'a self, room_id: &RoomId, ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) - }) + self.db.room_members(room_id) } #[tracing::instrument(skip(self))] pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { - self.roomid_joinedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() + self.db.room_joined_count(room_id) } #[tracing::instrument(skip(self))] pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { - self.roomid_invitedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() + self.db.room_invited_count(room_id) } /// Returns an iterator over all User IDs who ever joined a room. 
@@ -432,24 +262,7 @@ impl Service { &'a self, room_id: &RoomId, ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuseroncejoinedids - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in room_useroncejoined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) - }) + self.db.room_useroncejoined(room_id) } /// Returns an iterator over all invited members of a room. @@ -458,54 +271,17 @@ impl Service { &'a self, room_id: &RoomId, ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_invitecount - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) - }) + self.db.room_members_invited(room_id) } #[tracing::instrument(skip(self))] pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_invitecount - .get(&key)? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid invitecount in db.") - })?)) - }) + self.db.get_invite_count(room_id, user_id) } #[tracing::instrument(skip(self))] pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_leftcount - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid leftcount in db.")) - }) - .transpose() + self.db.get_left_count(room_id, user_id) } /// Returns an iterator over all rooms this user joined. @@ -514,21 +290,7 @@ impl Service { &'a self, user_id: &UserId, ) -> impl Iterator>> + 'a { - self.userroomid_joined - .scan_prefix(user_id.as_bytes().to_vec()) - .map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) - }) + self.db.rooms_joined(user_id) } /// Returns an iterator over all rooms a user was invited to. 
@@ -537,29 +299,7 @@ impl Service { &'a self, user_id: &UserId, ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_invitestate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok((room_id, state)) - }) + self.db.rooms_invited(user_id) } #[tracing::instrument(skip(self))] @@ -568,19 +308,7 @@ impl Service { user_id: &UserId, room_id: &RoomId, ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_invitestate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok(state) - }) - .transpose() + self.db.invite_state(user_id, room_id) } #[tracing::instrument(skip(self))] @@ -589,19 +317,7 @@ impl Service { user_id: &UserId, room_id: &RoomId, ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_leftstate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok(state) - }) - .transpose() + self.db.left_state(user_id, room_id) } /// Returns an iterator over all rooms a user left. 
@@ -610,64 +326,26 @@ impl Service { &'a self, user_id: &UserId, ) -> impl Iterator, Vec>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_leftstate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok((room_id, state)) - }) + self.db.rooms_left(user_id) } #[tracing::instrument(skip(self))] pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) + self.db.once_joined(user_id, room_id) } #[tracing::instrument(skip(self))] pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) + self.db.is_joined(user_id, room_id) } #[tracing::instrument(skip(self))] pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) + self.db.is_invited(user_id, room_id) } #[tracing::instrument(skip(self))] pub fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) + self.db.is_left(user_id, room_id) } } diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index 0c32c4bd..5f2cf02d 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -1,7 +1,8 @@ pub mod data; -use std::{mem::size_of, sync::Arc, collections::HashSet}; +use std::{mem::size_of, sync::{Arc, Mutex}, collections::HashSet}; pub use data::Data; +use lru_cache::LruCache; use ruma::{EventId, RoomId}; use crate::{Result, utils, services}; @@ -10,6 +11,19 @@ use self::data::StateDiff; pub struct Service { db: Arc, + + pub stateinfo_cache: Mutex< + LruCache< + u64, + Vec<( + u64, // sstatehash + HashSet, // full state + HashSet, // added + HashSet, // removed + )>, + >, + >, + } pub type CompressedStateEvent = [u8; 2 * size_of::()]; @@ -82,7 +96,7 @@ impl Service { Ok(( utils::u64_from_bytes(&compressed_event[0..size_of::()]) .expect("bytes have right length"), - self.get_eventid_from_short( + services().rooms.short.get_eventid_from_short( utils::u64_from_bytes(&compressed_event[size_of::()..]) .expect("bytes have right length"), )?, @@ -214,9 +228,7 @@ impl Service { &self, room_id: &RoomId, new_state_ids_compressed: HashSet, - ) -> Result<(u64, - HashSet, // added - HashSet)> // removed + ) -> Result { let previous_shortstatehash = services().rooms.state.get_room_shortstatehash(room_id)?; @@ -231,7 +243,7 @@ impl Service { 
services().rooms.short.get_or_create_shortstatehash(&state_hash)?; if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); + return Ok(new_shortstatehash); } let states_parents = previous_shortstatehash @@ -265,6 +277,6 @@ impl Service { )?; }; - Ok((new_shortstatehash, statediffnew, statediffremoved)) + Ok(new_shortstatehash) } } diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index 2220b5f2..20eae7f1 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -44,6 +44,9 @@ pub trait Data: Send + Sync { /// Returns the `count` of this pdu's id. fn pdu_count(&self, pdu_id: &[u8]) -> Result; + /// Adds a new pdu to the timeline + fn append_pdu(&self, pdu_id: &[u8], pdu: &PduEvent, json: &CanonicalJsonObject, count: u64) -> Result<()>; + /// Removes a pdu and creates a new one with the same id. fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()>; @@ -71,4 +74,6 @@ pub trait Data: Send + Sync { room_id: &RoomId, from: u64, ) -> Result, PduEvent)>>>>; + + fn increment_notification_counts(&self, room_id: &RoomId, notifies: Vec>, highlights: Vec>) -> Result<()>; } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 78172255..f25550d5 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1,6 +1,7 @@ mod data; use std::borrow::Cow; -use std::sync::Arc; +use std::collections::HashMap; +use std::sync::{Arc, Mutex}; use std::{iter, collections::HashSet}; use std::fmt::Debug; @@ -22,6 +23,8 @@ use super::state_compressor::CompressedStateEvent; pub struct Service { db: Arc, + + pub(super) lasttimelinecount_cache: Mutex, u64>>, } impl Service { @@ -73,7 +76,7 @@ impl Service { &self, event_id: &EventId, ) -> Result> { - self.db.get_non_outlier_pdu(event_id) + self.db.get_non_outlier_pdu_json(event_id) } /// Returns the pdu's id. @@ -129,9 +132,10 @@ impl Service { &self, pdu: &PduEvent, mut pdu_json: CanonicalJsonObject, - leaves: impl IntoIterator + Debug, + leaves: Vec>, + state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); + let shortroomid = services().rooms.short.get_shortroomid(&pdu.room_id)?.expect("room exists"); // Make unsigned fields correct. This is not properly documented in the spec, but state // events need to have previous content in the unsigned field, so clients can easily @@ -141,8 +145,8 @@ impl Service { .entry("unsigned".to_owned()) .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) { - if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = self + if let Some(shortstatehash) = services().rooms.state_accessor.pdu_shortstatehash(&pdu.event_id).unwrap() { + if let Some(prev_state) = services().rooms.state_accessor .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) .unwrap() { @@ -161,8 +165,8 @@ impl Service { } // We must keep track of all events that have been referenced. 
- self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - self.replace_pdu_leaves(&pdu.room_id, leaves)?; + services().rooms.pdu_metadata.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; + services().rooms.state.set_forward_extremities(&pdu.room_id, leaves, state_lock)?; let mutex_insert = Arc::clone( services().globals @@ -177,37 +181,23 @@ impl Service { let count1 = services().globals.next_count()?; // Mark as read first so the sending client doesn't get a notification even if appending // fails - self.edus + services().rooms.edus.read_receipt .private_read_set(&pdu.room_id, &pdu.sender, count1)?; - self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; + services().rooms.user.reset_notification_counts(&pdu.sender, &pdu.room_id)?; let count2 = services().globals.next_count()?; let mut pdu_id = shortroomid.to_be_bytes().to_vec(); pdu_id.extend_from_slice(&count2.to_be_bytes()); - // There's a brief moment of time here where the count is updated but the pdu does not - // exist. This could theoretically lead to dropped pdus, but it's extremely rare - // - // Update: We fixed this using insert_lock - - self.pduid_pdu.insert( - &pdu_id, - &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - self.lasttimelinecount_cache - .lock() - .unwrap() - .insert(pdu.room_id.clone(), count2); - - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; + // Insert pdu + self.db.append_pdu(&pdu_id, pdu, &pdu_json, count2)?; drop(insert_lock); // See if the event matches any known pushers let power_levels: RoomPowerLevelsEventContent = services() .rooms + .state_accessor .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? .map(|ev| { serde_json::from_str(ev.content.get()) @@ -221,9 +211,9 @@ impl Service { let mut notifies = Vec::new(); let mut highlights = Vec::new(); - for user in self.get_our_real_users(&pdu.room_id)?.iter() { + for user in services().rooms.state_cache.get_our_real_users(&pdu.room_id)?.into_iter() { // Don't notify the user of their own events - if user == &pdu.sender { + if &user == &pdu.sender { continue; } @@ -231,17 +221,19 @@ impl Service { .account_data .get( None, - user, + &user, GlobalAccountDataEventType::PushRules.to_string().into(), )? + .map(|event| serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid push rules event in db."))).transpose()? 
.map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); + .unwrap_or_else(|| Ruleset::server_default(&user)); let mut highlight = false; let mut notify = false; for action in services().pusher.get_actions( - user, + &user, &rules_for_user, &power_levels, &sync_pdu, @@ -258,27 +250,20 @@ impl Service { }; } - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(pdu.room_id.as_bytes()); - if notify { - notifies.push(userroom_id.clone()); + notifies.push(user); } if highlight { - highlights.push(userroom_id); + highlights.push(user); } - for senderkey in services().pusher.get_pusher_senderkeys(user) { + for senderkey in services().pusher.get_pusher_senderkeys(&user) { services().sending.send_push_pdu(&*pdu_id, senderkey)?; } } - self.userroomid_notificationcount - .increment_batch(&mut notifies.into_iter())?; - self.userroomid_highlightcount - .increment_batch(&mut highlights.into_iter())?; + self.db.increment_notification_counts(&pdu.room_id, notifies, highlights); match pdu.kind { RoomEventType::RoomRedaction => { @@ -302,7 +287,7 @@ impl Service { let invite_state = match content.membership { MembershipState::Invite => { - let state = self.calculate_invite_state(pdu)?; + let state = services().rooms.state.calculate_invite_state(pdu)?; Some(state) } _ => None, @@ -310,7 +295,7 @@ impl Service { // Update our membership info, we do this here incase a user is invited // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( + services().rooms.state_cache.update_membership( &pdu.room_id, &target_user_id, content.membership, @@ -322,18 +307,17 @@ impl Service { } RoomEventType::RoomMessage => { #[derive(Deserialize)] - struct ExtractBody<'a> { - #[serde(borrow)] - body: Option>, + struct ExtractBody { + body: Option, } - let content = serde_json::from_str::>(pdu.content.get()) + let content = serde_json::from_str::(pdu.content.get()) .map_err(|_| Error::bad_database("Invalid content in pdu."))?; if let Some(body) = content.body { - services().rooms.search.index_pdu(shortroomid, pdu_id, body)?; + services().rooms.search.index_pdu(shortroomid, &pdu_id, body)?; - let admin_room = self.alias.resolve_local_alias( + let admin_room = services().rooms.alias.resolve_local_alias( <&RoomAliasId>::try_from( format!("#admins:{}", services().globals.server_name()).as_str(), ) @@ -357,7 +341,7 @@ impl Service { } for appservice in services().appservice.all()? { - if self.appservice_in_room(&pdu.room_id, &appservice)? { + if services().rooms.state_cache.appservice_in_room(&pdu.room_id, &appservice)? 
{ services().sending.send_pdu_appservice(&appservice.0, &pdu_id)?; continue; } @@ -418,7 +402,7 @@ impl Service { .map_or(false, |state_key| users.is_match(state_key)) }; let matching_aliases = |aliases: &Regex| { - self.room_aliases(&pdu.room_id) + services().rooms.alias.local_aliases_for_room(&pdu.room_id) .filter_map(|r| r.ok()) .any(|room_alias| aliases.is_match(room_alias.as_str())) }; @@ -461,6 +445,7 @@ impl Service { let create_event = services() .rooms + .state_accessor .room_state_get(room_id, &StateEventType::RoomCreate, "")?; let create_event_content: Option = create_event @@ -483,12 +468,12 @@ impl Service { RoomVersion::new(&room_version_id).expect("room version is supported"); let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; + services().rooms.state.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; // Our depth is the maximum depth of prev_events + 1 let depth = prev_events .iter() - .filter_map(|event_id| Some(services().rooms.get_pdu(event_id).ok()??.depth)) + .filter_map(|event_id| Some(services().rooms.timeline.get_pdu(event_id).ok()??.depth)) .max() .unwrap_or_else(|| uint!(0)) + uint!(1); @@ -497,7 +482,7 @@ impl Service { if let Some(state_key) = &state_key { if let Some(prev_pdu) = - self.room_state_get(room_id, &event_type.to_string().into(), state_key)? + services().rooms.state_accessor.room_state_get(room_id, &event_type.to_string().into(), state_key)? { unsigned.insert( "prev_content".to_owned(), @@ -604,7 +589,7 @@ impl Service { ); // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id)?; + let _shorteventid = services().rooms.short.get_or_create_shorteventid(&pdu.event_id)?; Ok((pdu, pdu_json)) } @@ -623,22 +608,23 @@ impl Service { // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. - let statehashid = self.append_to_state(&pdu)?; + let statehashid = services().rooms.state.append_to_state(&pdu)?; let pdu_id = self.append_pdu( &pdu, pdu_json, // Since this PDU references all pdu_leaves we can update the leaves // of the room - iter::once(&*pdu.event_id), + vec![(*pdu.event_id).to_owned()], + state_lock, )?; // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; + services().rooms.state.set_room_state(room_id, statehashid, state_lock)?; let mut servers: HashSet> = - self.room_servers(room_id).filter_map(|r| r.ok()).collect(); + services().rooms.state_cache.room_servers(room_id).filter_map(|r| r.ok()).collect(); // In case we are kicking or banning a user, we need to inform their server of the change if pdu.kind == RoomEventType::RoomMember { @@ -666,27 +652,27 @@ impl Service { &self, pdu: &PduEvent, pdu_json: CanonicalJsonObject, - new_room_leaves: impl IntoIterator + Clone + Debug, + new_room_leaves: Vec>, state_ids_compressed: HashSet, soft_fail: bool, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result>> { // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. 
- services().rooms.set_event_state( + services().rooms.state.set_event_state( &pdu.event_id, &pdu.room_id, state_ids_compressed, )?; if soft_fail { - services().rooms + services().rooms.pdu_metadata .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - services().rooms.replace_pdu_leaves(&pdu.room_id, new_room_leaves)?; + services().rooms.state.set_forward_extremities(&pdu.room_id, new_room_leaves, state_lock)?; return Ok(None); } - let pdu_id = services().rooms.append_pdu(pdu, pdu_json, new_room_leaves)?; + let pdu_id = services().rooms.timeline.append_pdu(pdu, pdu_json, new_room_leaves, state_lock)?; Ok(Some(pdu_id)) } diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index fde251b7..b3350959 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -6,7 +6,7 @@ use std::{ }; use crate::{ - utils, Error, PduEvent, Result, services, api::{server_server, appservice_server}, + utils::{self, calculate_hash}, Error, PduEvent, Result, services, api::{server_server, appservice_server}, }; use federation::transactions::send_transaction_message; use futures_util::{stream::FuturesUnordered, StreamExt}; @@ -677,7 +677,7 @@ impl Service { edus: &edu_jsons, origin_server_ts: MilliSecondsSinceUnixEpoch::now(), transaction_id: (&*base64::encode_config( - Self::calculate_hash( + calculate_hash( &events .iter() .map(|e| match e { diff --git a/src/service/transaction_ids/mod.rs b/src/service/transaction_ids/mod.rs index d7066e24..8d5fd0af 100644 --- a/src/service/transaction_ids/mod.rs +++ b/src/service/transaction_ids/mod.rs @@ -18,15 +18,7 @@ impl Service { txn_id: &TransactionId, data: &[u8], ) -> Result<()> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.map(|d| d.as_bytes()).unwrap_or_default()); - key.push(0xff); - key.extend_from_slice(txn_id.as_bytes()); - - self.userdevicetxnid_response.insert(&key, data)?; - - Ok(()) + self.db.add_txnid(user_id, device_id, txn_id, data) } pub fn existing_txnid( @@ -35,13 +27,6 @@ impl Service { device_id: Option<&DeviceId>, txn_id: &TransactionId, ) -> Result>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(device_id.map(|d| d.as_bytes()).unwrap_or_default()); - key.push(0xff); - key.extend_from_slice(txn_id.as_bytes()); - - // If there's no entry, this is a new transaction - self.userdevicetxnid_response.get(&key) + self.db.existing_txnid(user_id, device_id, txn_id) } } diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index 73b2273d..5444118f 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -21,13 +21,13 @@ impl Service { uiaainfo: &UiaaInfo, json_body: &CanonicalJsonValue, ) -> Result<()> { - self.set_uiaa_request( + self.db.set_uiaa_request( user_id, device_id, uiaainfo.session.as_ref().expect("session should be set"), // TODO: better session error handling (why is it optional in ruma?) 
json_body, )?; - self.update_uiaa_session( + self.db.update_uiaa_session( user_id, device_id, uiaainfo.session.as_ref().expect("session should be set"), @@ -44,7 +44,7 @@ impl Service { ) -> Result<(bool, UiaaInfo)> { let mut uiaainfo = auth .session() - .map(|session| self.get_uiaa_session(user_id, device_id, session)) + .map(|session| self.db.get_uiaa_session(user_id, device_id, session)) .unwrap_or_else(|| Ok(uiaainfo.clone()))?; if uiaainfo.session.is_none() { @@ -110,7 +110,7 @@ impl Service { } if !completed { - self.update_uiaa_session( + self.db.update_uiaa_session( user_id, device_id, uiaainfo.session.as_ref().expect("session is always set"), @@ -120,7 +120,7 @@ impl Service { } // UIAA was successful! Remove this session and return true - self.update_uiaa_session( + self.db.update_uiaa_session( user_id, device_id, uiaainfo.session.as_ref().expect("session is always set"), @@ -137,14 +137,4 @@ impl Service { ) -> Option { self.db.get_uiaa_request(user_id, device_id, session) } - - fn update_uiaa_session( - &self, - user_id: &UserId, - device_id: &DeviceId, - session: &str, - uiaainfo: Option<&UiaaInfo>, - ) -> Result<()> { - self.db.update_uiaa_session(user_id, device_id, session, uiaainfo) - } } diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 2cf18765..826e0494 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -290,7 +290,7 @@ impl Service { } pub fn get_devicelist_version(&self, user_id: &UserId) -> Result> { - self.db.devicelist_version(user_id) + self.db.get_devicelist_version(user_id) } pub fn all_devices_metadata<'a>( @@ -310,7 +310,7 @@ impl Service { // Set the password to "" to indicate a deactivated account. Hashes will never result in an // empty string, so the user will not be able to log in again. Systems like changing the // password without logging in should check if the account is deactivated. 
- self.userid_password.insert(user_id.as_bytes(), &[])?; + self.db.set_password(user_id, None)?; // TODO: Unhook 3PID Ok(()) From a4637e2ba1093065a6fda3fa2ad2b2b9f30eea63 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 5 Oct 2022 20:34:31 +0200 Subject: [PATCH 391/445] cargo fmt --- src/api/appservice_server.rs | 8 +- src/api/client_server/account.rs | 103 ++--- src/api/client_server/alias.rs | 29 +- src/api/client_server/backup.rs | 101 +++-- src/api/client_server/capabilities.rs | 2 +- src/api/client_server/config.rs | 2 +- src/api/client_server/context.rs | 19 +- src/api/client_server/device.rs | 35 +- src/api/client_server/directory.rs | 5 +- src/api/client_server/filter.rs | 2 +- src/api/client_server/keys.rs | 80 ++-- src/api/client_server/media.rs | 16 +- src/api/client_server/membership.rs | 248 +++++++---- src/api/client_server/message.rs | 40 +- src/api/client_server/presence.rs | 5 +- src/api/client_server/profile.rs | 48 ++- src/api/client_server/push.rs | 2 +- src/api/client_server/read_marker.rs | 25 +- src/api/client_server/redact.rs | 5 +- src/api/client_server/report.rs | 2 +- src/api/client_server/room.rs | 108 +++-- src/api/client_server/search.rs | 11 +- src/api/client_server/session.rs | 34 +- src/api/client_server/state.rs | 48 ++- src/api/client_server/sync.rs | 207 +++++++--- src/api/client_server/tag.rs | 101 ++--- src/api/client_server/to_device.rs | 25 +- src/api/client_server/typing.rs | 14 +- src/api/client_server/user_directory.rs | 38 +- src/api/client_server/voip.rs | 2 +- src/api/mod.rs | 4 +- src/api/ruma_wrapper/axum.rs | 12 +- src/api/server_server.rs | 388 ++++++++++++------ src/database/abstraction/rocksdb.rs | 2 +- src/database/key_value/account_data.rs | 14 +- src/database/key_value/appservice.rs | 11 +- src/database/key_value/globals.rs | 50 +-- src/database/key_value/key_backups.rs | 24 +- src/database/key_value/media.rs | 28 +- src/database/key_value/pusher.rs | 12 +- src/database/key_value/rooms/alias.rs | 20 +- src/database/key_value/rooms/auth_chain.rs | 15 +- src/database/key_value/rooms/directory.rs | 2 +- src/database/key_value/rooms/edus/mod.rs | 4 +- src/database/key_value/rooms/edus/presence.rs | 4 +- .../key_value/rooms/edus/read_receipt.rs | 88 ++-- src/database/key_value/rooms/edus/typing.rs | 35 +- src/database/key_value/rooms/lazy_load.rs | 4 +- src/database/key_value/rooms/metadata.rs | 2 +- src/database/key_value/rooms/outlier.rs | 4 +- src/database/key_value/rooms/pdu_metadata.rs | 4 +- src/database/key_value/rooms/search.rs | 14 +- src/database/key_value/rooms/short.rs | 19 +- src/database/key_value/rooms/state.rs | 15 +- .../key_value/rooms/state_accessor.rs | 58 ++- src/database/key_value/rooms/state_cache.rs | 195 +++++---- .../key_value/rooms/state_compressor.rs | 18 +- src/database/key_value/rooms/timeline.rs | 123 +++--- src/database/key_value/rooms/user.rs | 34 +- src/database/key_value/transaction_ids.rs | 4 +- src/database/key_value/uiaa.rs | 6 +- src/database/key_value/users.rs | 138 ++++--- src/database/mod.rs | 306 +++++++------- src/lib.rs | 17 +- src/service/account_data/data.rs | 6 +- src/service/account_data/mod.rs | 4 +- src/service/admin/mod.rs | 132 +++--- src/service/globals/data.rs | 6 +- src/service/globals/mod.rs | 8 +- src/service/key_backups/data.rs | 25 +- src/service/key_backups/mod.rs | 8 +- src/service/media/data.rs | 16 +- src/service/media/mod.rs | 31 +- src/service/mod.rs | 41 +- src/service/pdu.rs | 2 +- src/service/pusher/data.rs | 10 +- src/service/pusher/mod.rs | 22 +- 
src/service/rooms/alias/data.rs | 18 +- src/service/rooms/alias/mod.rs | 13 +- src/service/rooms/auth_chain/data.rs | 10 +- src/service/rooms/auth_chain/mod.rs | 39 +- src/service/rooms/directory/data.rs | 2 +- src/service/rooms/edus/presence/data.rs | 2 +- src/service/rooms/edus/presence/mod.rs | 2 +- src/service/rooms/edus/read_receipt/data.rs | 18 +- src/service/rooms/edus/read_receipt/mod.rs | 2 +- src/service/rooms/edus/typing/data.rs | 4 +- src/service/rooms/edus/typing/mod.rs | 2 +- src/service/rooms/event_handler/mod.rs | 330 ++++++++------- src/service/rooms/lazy_loading/data.rs | 4 +- src/service/rooms/lazy_loading/mod.rs | 20 +- src/service/rooms/metadata/data.rs | 2 +- src/service/rooms/mod.rs | 20 +- src/service/rooms/outlier/mod.rs | 4 +- src/service/rooms/pdu_metadata/data.rs | 2 +- src/service/rooms/pdu_metadata/mod.rs | 2 +- src/service/rooms/search/data.rs | 2 +- src/service/rooms/search/mod.rs | 7 +- src/service/rooms/short/data.rs | 17 +- src/service/rooms/short/mod.rs | 19 +- src/service/rooms/state/data.rs | 12 +- src/service/rooms/state/mod.rs | 181 +++++--- src/service/rooms/state_accessor/data.rs | 9 +- src/service/rooms/state_accessor/mod.rs | 9 +- src/service/rooms/state_cache/data.rs | 13 +- src/service/rooms/state_cache/mod.rs | 40 +- src/service/rooms/state_compressor/mod.rs | 46 ++- src/service/rooms/timeline/data.rs | 24 +- src/service/rooms/timeline/mod.rs | 195 ++++++--- src/service/rooms/user/data.rs | 2 +- src/service/rooms/user/mod.rs | 3 +- src/service/sending/mod.rs | 55 +-- src/service/transaction_ids/data.rs | 2 +- src/service/transaction_ids/mod.rs | 2 +- src/service/uiaa/data.rs | 2 +- src/service/uiaa/mod.rs | 21 +- src/service/users/data.rs | 28 +- src/service/users/mod.rs | 56 ++- src/utils/mod.rs | 1 - 119 files changed, 2809 insertions(+), 1783 deletions(-) diff --git a/src/api/appservice_server.rs b/src/api/appservice_server.rs index 1f6e2c9d..6dca60be 100644 --- a/src/api/appservice_server.rs +++ b/src/api/appservice_server.rs @@ -1,4 +1,4 @@ -use crate::{utils, Error, Result, services}; +use crate::{services, utils, Error, Result}; use bytes::BytesMut; use ruma::api::{IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken}; use std::{fmt::Debug, mem, time::Duration}; @@ -45,7 +45,11 @@ where *reqwest_request.timeout_mut() = Some(Duration::from_secs(30)); let url = reqwest_request.url().clone(); - let mut response = services().globals.default_client().execute(reqwest_request).await?; + let mut response = services() + .globals + .default_client() + .execute(reqwest_request) + .await?; // reqwest::Response -> http::Response conversion let status = response.status(); diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index 6d37ce99..28d6c07f 100644 --- a/src/api/client_server/account.rs +++ b/src/api/client_server/account.rs @@ -1,9 +1,7 @@ use std::sync::Arc; use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; -use crate::{ - utils, Error, Result, Ruma, services, api::client_server, -}; +use crate::{api::client_server, services, utils, Error, Result, Ruma}; use ruma::{ api::client::{ account::{ @@ -43,16 +41,18 @@ pub async fn get_register_available_route( body: Ruma, ) -> Result { // Validate user id - let user_id = - UserId::parse_with_server_name(body.username.to_lowercase(), services().globals.server_name()) - .ok() - .filter(|user_id| { - !user_id.is_historical() && user_id.server_name() == services().globals.server_name() - }) - .ok_or(Error::BadRequest( - ErrorKind::InvalidUsername, 
- "Username is invalid.", - ))?; + let user_id = UserId::parse_with_server_name( + body.username.to_lowercase(), + services().globals.server_name(), + ) + .ok() + .filter(|user_id| { + !user_id.is_historical() && user_id.server_name() == services().globals.server_name() + }) + .ok_or(Error::BadRequest( + ErrorKind::InvalidUsername, + "Username is invalid.", + ))?; // Check if username is creative enough if services().users.exists(&user_id)? { @@ -95,17 +95,19 @@ pub async fn register_route( let user_id = match (&body.username, is_guest) { (Some(username), false) => { - let proposed_user_id = - UserId::parse_with_server_name(username.to_lowercase(), services().globals.server_name()) - .ok() - .filter(|user_id| { - !user_id.is_historical() - && user_id.server_name() == services().globals.server_name() - }) - .ok_or(Error::BadRequest( - ErrorKind::InvalidUsername, - "Username is invalid.", - ))?; + let proposed_user_id = UserId::parse_with_server_name( + username.to_lowercase(), + services().globals.server_name(), + ) + .ok() + .filter(|user_id| { + !user_id.is_historical() + && user_id.server_name() == services().globals.server_name() + }) + .ok_or(Error::BadRequest( + ErrorKind::InvalidUsername, + "Username is invalid.", + ))?; if services().users.exists(&proposed_user_id)? { return Err(Error::BadRequest( ErrorKind::UserInUse, @@ -176,7 +178,8 @@ pub async fn register_route( // Default to pretty displayname let displayname = format!("{} ⚡️", user_id.localpart()); - services().users + services() + .users .set_displayname(&user_id, Some(displayname.clone()))?; // Initial account data @@ -188,7 +191,8 @@ pub async fn register_route( content: ruma::events::push_rules::PushRulesEventContent { global: push::Ruleset::server_default(&user_id), }, - }).expect("to json always works"), + }) + .expect("to json always works"), )?; // Inhibit login does not work for guests @@ -220,7 +224,8 @@ pub async fn register_route( )?; info!("New user {} registered on this server.", user_id); - services().admin + services() + .admin .send_message(RoomMessageEventContent::notice_plain(format!( "New user {} registered on this server.", user_id @@ -229,7 +234,10 @@ pub async fn register_route( // If this is the first real user, grant them admin privileges // Note: the server user, @conduit:servername, is generated first if services().users.count()? == 2 { - services().admin.make_user_admin(&user_id, displayname).await?; + services() + .admin + .make_user_admin(&user_id, displayname) + .await?; warn!("Granting {} admin privileges as the first user", user_id); } @@ -272,26 +280,26 @@ pub async fn change_password_route( }; if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services().uiaa.try_auth( - sender_user, - sender_device, - auth, - &uiaainfo, - )?; + let (worked, uiaainfo) = + services() + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo)?; if !worked { return Err(Error::Uiaa(uiaainfo)); } // Success! 
} else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services().uiaa + services() + .uiaa .create(sender_user, sender_device, &uiaainfo, &json)?; return Err(Error::Uiaa(uiaainfo)); } else { return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); } - services().users + services() + .users .set_password(sender_user, Some(&body.new_password))?; if body.logout_devices { @@ -307,7 +315,8 @@ pub async fn change_password_route( } info!("User {} changed their password.", sender_user); - services().admin + services() + .admin .send_message(RoomMessageEventContent::notice_plain(format!( "User {} changed their password.", sender_user @@ -321,9 +330,7 @@ pub async fn change_password_route( /// Get user_id of the sender user. /// /// Note: Also works for Application Services -pub async fn whoami_route( - body: Ruma, -) -> Result { +pub async fn whoami_route(body: Ruma) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let device_id = body.sender_device.as_ref().cloned(); @@ -361,19 +368,18 @@ pub async fn deactivate_route( }; if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services().uiaa.try_auth( - sender_user, - sender_device, - auth, - &uiaainfo, - )?; + let (worked, uiaainfo) = + services() + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo)?; if !worked { return Err(Error::Uiaa(uiaainfo)); } // Success! } else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services().uiaa + services() + .uiaa .create(sender_user, sender_device, &uiaainfo, &json)?; return Err(Error::Uiaa(uiaainfo)); } else { @@ -387,7 +393,8 @@ pub async fn deactivate_route( services().users.deactivate_account(sender_user)?; info!("User {} deactivated their account.", sender_user); - services().admin + services() + .admin .send_message(RoomMessageEventContent::notice_plain(format!( "User {} deactivated their account.", sender_user diff --git a/src/api/client_server/alias.rs b/src/api/client_server/alias.rs index 444cc15f..b28606c1 100644 --- a/src/api/client_server/alias.rs +++ b/src/api/client_server/alias.rs @@ -1,4 +1,4 @@ -use crate::{Error, Result, Ruma, services}; +use crate::{services, Error, Result, Ruma}; use regex::Regex; use ruma::{ api::{ @@ -25,11 +25,18 @@ pub async fn create_alias_route( )); } - if services().rooms.alias.resolve_local_alias(&body.room_alias)?.is_some() { + if services() + .rooms + .alias + .resolve_local_alias(&body.room_alias)? + .is_some() + { return Err(Error::Conflict("Alias already exists.")); } - services().rooms.alias + services() + .rooms + .alias .set_alias(&body.room_alias, &body.room_id)?; Ok(create_alias::v3::Response::new()) @@ -69,9 +76,7 @@ pub async fn get_alias_route( get_alias_helper(&body.room_alias).await } -pub(crate) async fn get_alias_helper( - room_alias: &RoomAliasId, -) -> Result { +pub(crate) async fn get_alias_helper(room_alias: &RoomAliasId) -> Result { if room_alias.server_name() != services().globals.server_name() { let response = services() .sending @@ -115,9 +120,15 @@ pub(crate) async fn get_alias_helper( .await .is_ok() { - room_id = Some(services().rooms.alias.resolve_local_alias(room_alias)?.ok_or_else(|| { - Error::bad_config("Appservice lied to us. Room does not exist.") - })?); + room_id = Some( + services() + .rooms + .alias + .resolve_local_alias(room_alias)? + .ok_or_else(|| { + Error::bad_config("Appservice lied to us. 
Room does not exist.") + })?, + ); break; } } diff --git a/src/api/client_server/backup.rs b/src/api/client_server/backup.rs index e4138938..f3d5ddc5 100644 --- a/src/api/client_server/backup.rs +++ b/src/api/client_server/backup.rs @@ -1,4 +1,4 @@ -use crate::{Error, Result, Ruma, services}; +use crate::{services, Error, Result, Ruma}; use ruma::api::client::{ backup::{ add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session, @@ -31,7 +31,8 @@ pub async fn update_backup_version_route( body: Ruma, ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services().key_backups + services() + .key_backups .update_backup(sender_user, &body.version, &body.algorithm)?; Ok(update_backup_version::v3::Response {}) @@ -45,13 +46,13 @@ pub async fn get_latest_backup_info_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let (version, algorithm) = - services().key_backups - .get_latest_backup(sender_user)? - .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Key backup does not exist.", - ))?; + let (version, algorithm) = services() + .key_backups + .get_latest_backup(sender_user)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Key backup does not exist.", + ))?; Ok(get_latest_backup_info::v3::Response { algorithm, @@ -78,8 +79,13 @@ pub async fn get_backup_info_route( Ok(get_backup_info::v3::Response { algorithm, - count: (services().key_backups.count_keys(sender_user, &body.version)? as u32).into(), - etag: services().key_backups.get_etag(sender_user, &body.version)?, + count: (services() + .key_backups + .count_keys(sender_user, &body.version)? as u32) + .into(), + etag: services() + .key_backups + .get_etag(sender_user, &body.version)?, version: body.version.to_owned(), }) } @@ -94,7 +100,9 @@ pub async fn delete_backup_version_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services().key_backups.delete_backup(sender_user, &body.version)?; + services() + .key_backups + .delete_backup(sender_user, &body.version)?; Ok(delete_backup_version::v3::Response {}) } @@ -136,8 +144,13 @@ pub async fn add_backup_keys_route( } Ok(add_backup_keys::v3::Response { - count: (services().key_backups.count_keys(sender_user, &body.version)? as u32).into(), - etag: services().key_backups.get_etag(sender_user, &body.version)?, + count: (services() + .key_backups + .count_keys(sender_user, &body.version)? as u32) + .into(), + etag: services() + .key_backups + .get_etag(sender_user, &body.version)?, }) } @@ -176,8 +189,13 @@ pub async fn add_backup_keys_for_room_route( } Ok(add_backup_keys_for_room::v3::Response { - count: (services().key_backups.count_keys(sender_user, &body.version)? as u32).into(), - etag: services().key_backups.get_etag(sender_user, &body.version)?, + count: (services() + .key_backups + .count_keys(sender_user, &body.version)? as u32) + .into(), + etag: services() + .key_backups + .get_etag(sender_user, &body.version)?, }) } @@ -214,8 +232,13 @@ pub async fn add_backup_keys_for_session_route( )?; Ok(add_backup_keys_for_session::v3::Response { - count: (services().key_backups.count_keys(sender_user, &body.version)? as u32).into(), - etag: services().key_backups.get_etag(sender_user, &body.version)?, + count: (services() + .key_backups + .count_keys(sender_user, &body.version)? 
as u32) + .into(), + etag: services() + .key_backups + .get_etag(sender_user, &body.version)?, }) } @@ -274,11 +297,18 @@ pub async fn delete_backup_keys_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services().key_backups.delete_all_keys(sender_user, &body.version)?; + services() + .key_backups + .delete_all_keys(sender_user, &body.version)?; Ok(delete_backup_keys::v3::Response { - count: (services().key_backups.count_keys(sender_user, &body.version)? as u32).into(), - etag: services().key_backups.get_etag(sender_user, &body.version)?, + count: (services() + .key_backups + .count_keys(sender_user, &body.version)? as u32) + .into(), + etag: services() + .key_backups + .get_etag(sender_user, &body.version)?, }) } @@ -290,12 +320,18 @@ pub async fn delete_backup_keys_for_room_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services().key_backups + services() + .key_backups .delete_room_keys(sender_user, &body.version, &body.room_id)?; Ok(delete_backup_keys_for_room::v3::Response { - count: (services().key_backups.count_keys(sender_user, &body.version)? as u32).into(), - etag: services().key_backups.get_etag(sender_user, &body.version)?, + count: (services() + .key_backups + .count_keys(sender_user, &body.version)? as u32) + .into(), + etag: services() + .key_backups + .get_etag(sender_user, &body.version)?, }) } @@ -307,11 +343,20 @@ pub async fn delete_backup_keys_for_session_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services().key_backups - .delete_room_key(sender_user, &body.version, &body.room_id, &body.session_id)?; + services().key_backups.delete_room_key( + sender_user, + &body.version, + &body.room_id, + &body.session_id, + )?; Ok(delete_backup_keys_for_session::v3::Response { - count: (services().key_backups.count_keys(sender_user, &body.version)? as u32).into(), - etag: services().key_backups.get_etag(sender_user, &body.version)?, + count: (services() + .key_backups + .count_keys(sender_user, &body.version)? as u32) + .into(), + etag: services() + .key_backups + .get_etag(sender_user, &body.version)?, }) } diff --git a/src/api/client_server/capabilities.rs b/src/api/client_server/capabilities.rs index e4283b72..97529cf1 100644 --- a/src/api/client_server/capabilities.rs +++ b/src/api/client_server/capabilities.rs @@ -1,4 +1,4 @@ -use crate::{Result, Ruma, services}; +use crate::{services, Result, Ruma}; use ruma::api::client::discovery::get_capabilities::{ self, Capabilities, RoomVersionStability, RoomVersionsCapability, }; diff --git a/src/api/client_server/config.rs b/src/api/client_server/config.rs index 36f4fcb7..dbd2b2cc 100644 --- a/src/api/client_server/config.rs +++ b/src/api/client_server/config.rs @@ -1,4 +1,4 @@ -use crate::{Error, Result, Ruma, services}; +use crate::{services, Error, Result, Ruma}; use ruma::{ api::client::{ config::{ diff --git a/src/api/client_server/context.rs b/src/api/client_server/context.rs index c407c71e..2e0f2576 100644 --- a/src/api/client_server/context.rs +++ b/src/api/client_server/context.rs @@ -1,4 +1,4 @@ -use crate::{Error, Result, Ruma, services}; +use crate::{services, Error, Result, Ruma}; use ruma::{ api::client::{context::get_context, error::ErrorKind, filter::LazyLoadOptions}, events::StateEventType, @@ -49,7 +49,11 @@ pub async fn get_context_route( let room_id = base_event.room_id.clone(); - if !services().rooms.state_cache.is_joined(sender_user, &room_id)? 
{ + if !services() + .rooms + .state_cache + .is_joined(sender_user, &room_id)? + { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -141,7 +145,11 @@ pub async fn get_context_route( .expect("All rooms have state"), }; - let state_ids = services().rooms.state_accessor.state_full_ids(shortstatehash).await?; + let state_ids = services() + .rooms + .state_accessor + .state_full_ids(shortstatehash) + .await?; let end_token = events_after .last() @@ -156,7 +164,10 @@ pub async fn get_context_route( let mut state = Vec::new(); for (shortstatekey, id) in state_ids { - let (event_type, state_key) = services().rooms.short.get_statekey_from_short(shortstatekey)?; + let (event_type, state_key) = services() + .rooms + .short + .get_statekey_from_short(shortstatekey)?; if event_type != StateEventType::RoomMember { let pdu = match services().rooms.timeline.get_pdu(&id)? { diff --git a/src/api/client_server/device.rs b/src/api/client_server/device.rs index 2f559939..d4c41786 100644 --- a/src/api/client_server/device.rs +++ b/src/api/client_server/device.rs @@ -1,4 +1,4 @@ -use crate::{utils, Error, Result, Ruma, services}; +use crate::{services, utils, Error, Result, Ruma}; use ruma::api::client::{ device::{self, delete_device, delete_devices, get_device, get_devices, update_device}, error::ErrorKind, @@ -55,7 +55,8 @@ pub async fn update_device_route( device.display_name = body.display_name.clone(); - services().users + services() + .users .update_device_metadata(sender_user, &body.device_id, &device)?; Ok(update_device::v3::Response {}) @@ -88,26 +89,27 @@ pub async fn delete_device_route( }; if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services().uiaa.try_auth( - sender_user, - sender_device, - auth, - &uiaainfo, - )?; + let (worked, uiaainfo) = + services() + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo)?; if !worked { return Err(Error::Uiaa(uiaainfo)); } // Success! } else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services().uiaa + services() + .uiaa .create(sender_user, sender_device, &uiaainfo, &json)?; return Err(Error::Uiaa(uiaainfo)); } else { return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); } - services().users.remove_device(sender_user, &body.device_id)?; + services() + .users + .remove_device(sender_user, &body.device_id)?; Ok(delete_device::v3::Response {}) } @@ -141,19 +143,18 @@ pub async fn delete_devices_route( }; if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services().uiaa.try_auth( - sender_user, - sender_device, - auth, - &uiaainfo, - )?; + let (worked, uiaainfo) = + services() + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo)?; if !worked { return Err(Error::Uiaa(uiaainfo)); } // Success! 
} else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services().uiaa + services() + .uiaa .create(sender_user, sender_device, &uiaainfo, &json)?; return Err(Error::Uiaa(uiaainfo)); } else { diff --git a/src/api/client_server/directory.rs b/src/api/client_server/directory.rs index 2a60f672..c1b0eda5 100644 --- a/src/api/client_server/directory.rs +++ b/src/api/client_server/directory.rs @@ -1,4 +1,4 @@ -use crate::{Error, Result, Ruma, services}; +use crate::{services, Error, Result, Ruma}; use ruma::{ api::{ client::{ @@ -123,7 +123,8 @@ pub(crate) async fn get_public_rooms_filtered_helper( filter: &IncomingFilter, _network: &IncomingRoomNetwork, ) -> Result { - if let Some(other_server) = server.filter(|server| *server != services().globals.server_name().as_str()) + if let Some(other_server) = + server.filter(|server| *server != services().globals.server_name().as_str()) { let response = services() .sending diff --git a/src/api/client_server/filter.rs b/src/api/client_server/filter.rs index e0c95066..a0d5a192 100644 --- a/src/api/client_server/filter.rs +++ b/src/api/client_server/filter.rs @@ -1,4 +1,4 @@ -use crate::{Error, Result, Ruma, services}; +use crate::{services, Error, Result, Ruma}; use ruma::api::client::{ error::ErrorKind, filter::{create_filter, get_filter}, diff --git a/src/api/client_server/keys.rs b/src/api/client_server/keys.rs index 4ce5d4c0..be62cc22 100644 --- a/src/api/client_server/keys.rs +++ b/src/api/client_server/keys.rs @@ -1,5 +1,5 @@ use super::SESSION_ID_LENGTH; -use crate::{utils, Error, Result, Ruma, services}; +use crate::{services, utils, Error, Result, Ruma}; use futures_util::{stream::FuturesUnordered, StreamExt}; use ruma::{ api::{ @@ -32,7 +32,8 @@ pub async fn upload_keys_route( let sender_device = body.sender_device.as_ref().expect("user is authenticated"); for (key_key, key_value) in &body.one_time_keys { - services().users + services() + .users .add_one_time_key(sender_user, sender_device, key_key, key_value)?; } @@ -44,16 +45,16 @@ pub async fn upload_keys_route( .get_device_keys(sender_user, sender_device)? .is_none() { - services().users.add_device_keys( - sender_user, - sender_device, - device_keys, - )?; + services() + .users + .add_device_keys(sender_user, sender_device, device_keys)?; } } Ok(upload_keys::v3::Response { - one_time_key_counts: services().users.count_one_time_keys(sender_user, sender_device)?, + one_time_key_counts: services() + .users + .count_one_time_keys(sender_user, sender_device)?, }) } @@ -69,12 +70,8 @@ pub async fn get_keys_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let response = get_keys_helper( - Some(sender_user), - &body.device_keys, - |u| u == sender_user, - ) - .await?; + let response = + get_keys_helper(Some(sender_user), &body.device_keys, |u| u == sender_user).await?; Ok(response) } @@ -113,19 +110,18 @@ pub async fn upload_signing_keys_route( }; if let Some(auth) = &body.auth { - let (worked, uiaainfo) = services().uiaa.try_auth( - sender_user, - sender_device, - auth, - &uiaainfo, - )?; + let (worked, uiaainfo) = + services() + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo)?; if !worked { return Err(Error::Uiaa(uiaainfo)); } // Success! 
} else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - services().uiaa + services() + .uiaa .create(sender_user, sender_device, &uiaainfo, &json)?; return Err(Error::Uiaa(uiaainfo)); } else { @@ -187,12 +183,9 @@ pub async fn upload_signatures_route( ))? .to_owned(), ); - services().users.sign_key( - user_id, - key_id, - signature, - sender_user, - )?; + services() + .users + .sign_key(user_id, key_id, signature, sender_user)?; } } } @@ -215,7 +208,8 @@ pub async fn get_key_changes_route( let mut device_list_updates = HashSet::new(); device_list_updates.extend( - services().users + services() + .users .keys_changed( sender_user.as_str(), body.from @@ -230,9 +224,15 @@ pub async fn get_key_changes_route( .filter_map(|r| r.ok()), ); - for room_id in services().rooms.state_cache.rooms_joined(sender_user).filter_map(|r| r.ok()) { + for room_id in services() + .rooms + .state_cache + .rooms_joined(sender_user) + .filter_map(|r| r.ok()) + { device_list_updates.extend( - services().users + services() + .users .keys_changed( &room_id.to_string(), body.from.parse().map_err(|_| { @@ -296,12 +296,13 @@ pub(crate) async fn get_keys_helper bool>( for device_id in device_ids { let mut container = BTreeMap::new(); if let Some(mut keys) = services().users.get_device_keys(user_id, device_id)? { - let metadata = services().users.get_device_metadata(user_id, device_id)?.ok_or( - Error::BadRequest( + let metadata = services() + .users + .get_device_metadata(user_id, device_id)? + .ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Tried to get keys for nonexistent device.", - ), - )?; + ))?; add_unsigned_device_display_name(&mut keys, metadata) .map_err(|_| Error::bad_database("invalid device keys in database"))?; @@ -311,7 +312,10 @@ pub(crate) async fn get_keys_helper bool>( } } - if let Some(master_key) = services().users.get_master_key(user_id, &allowed_signatures)? { + if let Some(master_key) = services() + .users + .get_master_key(user_id, &allowed_signatures)? + { master_keys.insert(user_id.to_owned(), master_key); } if let Some(self_signing_key) = services() @@ -338,7 +342,8 @@ pub(crate) async fn get_keys_helper bool>( } ( server, - services().sending + services() + .sending .send_federation_request( server, federation::keys::get_keys::v1::Request { @@ -408,7 +413,8 @@ pub(crate) async fn claim_keys_helper( let mut container = BTreeMap::new(); for (device_id, key_algorithm) in map { if let Some(one_time_keys) = - services().users + services() + .users .take_one_time_key(user_id, device_id, key_algorithm)? 
{ let mut c = BTreeMap::new(); diff --git a/src/api/client_server/media.rs b/src/api/client_server/media.rs index 80cbb613..c1f5e1de 100644 --- a/src/api/client_server/media.rs +++ b/src/api/client_server/media.rs @@ -1,6 +1,4 @@ -use crate::{ - utils, Error, Result, Ruma, services, service::media::FileMeta, -}; +use crate::{service::media::FileMeta, services, utils, Error, Result, Ruma}; use ruma::api::client::{ error::ErrorKind, media::{ @@ -37,11 +35,11 @@ pub async fn create_content_route( utils::random_string(MXC_LENGTH) ); - services().media + services() + .media .create( mxc.clone(), - body - .filename + body.filename .as_ref() .map(|filename| "inline; filename=".to_owned() + filename) .as_deref(), @@ -73,7 +71,8 @@ pub async fn get_remote_content( ) .await?; - services().media + services() + .media .create( mxc.to_string(), content_response.content_disposition.as_deref(), @@ -192,7 +191,8 @@ pub async fn get_content_thumbnail_route( ) .await?; - services().media + services() + .media .upload_thumbnail( mxc, None, diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index c930ce49..5de8ce1e 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -30,7 +30,11 @@ use std::{ }; use tracing::{debug, error, warn}; -use crate::{Result, services, PduEvent, service::pdu::{gen_event_id_canonical_json, PduBuilder}, Error, api::{server_server, client_server}, utils, Ruma}; +use crate::{ + api::{client_server, server_server}, + service::pdu::{gen_event_id_canonical_json, PduBuilder}, + services, utils, Error, PduEvent, Result, Ruma, +}; use super::get_alias_helper; @@ -47,8 +51,9 @@ pub async fn join_room_by_id_route( let mut servers = Vec::new(); // There is no body.server_name for /roomId/join servers.extend( - services().rooms - .state_cache + services() + .rooms + .state_cache .invite_state(sender_user, &body.room_id)? .unwrap_or_default() .iter() @@ -88,8 +93,9 @@ pub async fn join_room_by_id_or_alias_route( Ok(room_id) => { let mut servers = body.server_name.clone(); servers.extend( - services().rooms - .state_cache + services() + .rooms + .state_cache .invite_state(sender_user, &room_id)? 
.unwrap_or_default() .iter() @@ -163,8 +169,9 @@ pub async fn kick_user_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut event: RoomMemberEventContent = serde_json::from_str( - services().rooms - .state_accessor + services() + .rooms + .state_accessor .room_state_get( &body.room_id, &StateEventType::RoomMember, @@ -183,7 +190,8 @@ pub async fn kick_user_route( // TODO: reason let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -250,7 +258,8 @@ pub async fn ban_user_route( )?; let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -286,8 +295,9 @@ pub async fn unban_user_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut event: RoomMemberEventContent = serde_json::from_str( - services().rooms - .state_accessor + services() + .rooms + .state_accessor .room_state_get( &body.room_id, &StateEventType::RoomMember, @@ -305,7 +315,8 @@ pub async fn unban_user_route( event.membership = MembershipState::Leave; let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -345,7 +356,10 @@ pub async fn forget_room_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services().rooms.state_cache.forget(&body.room_id, sender_user)?; + services() + .rooms + .state_cache + .forget(&body.room_id, sender_user)?; Ok(forget_room::v3::Response::new()) } @@ -379,7 +393,11 @@ pub async fn get_member_events_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // TODO: check history visibility? - if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? { + if !services() + .rooms + .state_cache + .is_joined(sender_user, &body.room_id)? + { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -410,7 +428,11 @@ pub async fn joined_members_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? { + if !services() + .rooms + .state_cache + .is_joined(sender_user, &body.room_id)? 
+ { return Err(Error::BadRequest( ErrorKind::Forbidden, "You aren't a member of the room.", @@ -418,7 +440,12 @@ pub async fn joined_members_route( } let mut joined = BTreeMap::new(); - for user_id in services().rooms.state_cache.room_members(&body.room_id).filter_map(|r| r.ok()) { + for user_id in services() + .rooms + .state_cache + .room_members(&body.room_id) + .filter_map(|r| r.ok()) + { let display_name = services().users.displayname(&user_id)?; let avatar_url = services().users.avatar_url(&user_id)?; @@ -443,7 +470,8 @@ async fn join_room_by_id_helper( let sender_user = sender_user.expect("user is authenticated"); let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -481,7 +509,14 @@ async fn join_room_by_id_helper( let (make_join_response, remote_server) = make_join_response_and_server?; let room_version = match make_join_response.room_version { - Some(room_version) if services().globals.supported_room_versions().contains(&room_version) => room_version, + Some(room_version) + if services() + .globals + .supported_room_versions() + .contains(&room_version) => + { + room_version + } _ => return Err(Error::BadServerResponse("Room version is not supported")), }; @@ -568,12 +603,11 @@ async fn join_room_by_id_helper( let mut state = HashMap::new(); let pub_key_map = RwLock::new(BTreeMap::new()); - services().rooms.event_handler.fetch_join_signing_keys( - &send_join_response, - &room_version, - &pub_key_map, - ) - .await?; + services() + .rooms + .event_handler + .fetch_join_signing_keys(&send_join_response, &room_version, &pub_key_map) + .await?; for result in send_join_response .room_state @@ -591,12 +625,15 @@ async fn join_room_by_id_helper( Error::BadServerResponse("Invalid PDU in send_join response.") })?; - services().rooms.outlier.add_pdu_outlier(&event_id, &value)?; + services() + .rooms + .outlier + .add_pdu_outlier(&event_id, &value)?; if let Some(state_key) = &pdu.state_key { - let shortstatekey = services().rooms.short.get_or_create_shortstatekey( - &pdu.kind.to_string().into(), - state_key, - )?; + let shortstatekey = services() + .rooms + .short + .get_or_create_shortstatekey(&pdu.kind.to_string().into(), state_key)?; state.insert(shortstatekey, pdu.event_id.clone()); } } @@ -632,7 +669,10 @@ async fn join_room_by_id_helper( Err(_) => continue, }; - services().rooms.outlier.add_pdu_outlier(&event_id, &value)?; + services() + .rooms + .outlier + .add_pdu_outlier(&event_id, &value)?; } let shortstatehash = services().rooms.state.set_event_state( @@ -640,7 +680,12 @@ async fn join_room_by_id_helper( room_id, state .into_iter() - .map(|(k, id)| services().rooms.state_compressor.compress_state_event(k, &id)) + .map(|(k, id)| { + services() + .rooms + .state_compressor + .compress_state_event(k, &id) + }) .collect::>()?, )?; @@ -650,12 +695,15 @@ async fn join_room_by_id_helper( &parsed_pdu, join_event, vec![(*parsed_pdu.event_id).to_owned()], - &state_lock + &state_lock, )?; // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist - services().rooms.state.set_room_state(room_id, shortstatehash, &state_lock)?; + services() + .rooms + .state + .set_room_state(room_id, shortstatehash, &state_lock)?; let statehashid = services().rooms.state.append_to_state(&parsed_pdu)?; } else { @@ -705,7 +753,13 @@ fn validate_and_add_event_id( )) .expect("ruma's reference hashes are valid event ids"); - let back_off = |id| match 
services().globals.bad_event_ratelimiter.write().unwrap().entry(id) { + let back_off = |id| match services() + .globals + .bad_event_ratelimiter + .write() + .unwrap() + .entry(id) + { Entry::Vacant(e) => { e.insert((Instant::now(), 1)); } @@ -760,7 +814,8 @@ pub(crate) async fn invite_helper<'a>( if user_id.server_name() != services().globals.server_name() { let (pdu_json, invite_room_state) = { let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -781,13 +836,18 @@ pub(crate) async fn invite_helper<'a>( }) .expect("member event is valid value"); - let (pdu, pdu_json) = services().rooms.timeline.create_hash_and_sign_event(PduBuilder { - event_type: RoomEventType::RoomMember, - content, - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, sender_user, room_id, &state_lock)?; + let (pdu, pdu_json) = services().rooms.timeline.create_hash_and_sign_event( + PduBuilder { + event_type: RoomEventType::RoomMember, + content, + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + sender_user, + room_id, + &state_lock, + )?; let invite_room_state = services().rooms.state.calculate_invite_state(&pdu)?; @@ -799,8 +859,11 @@ pub(crate) async fn invite_helper<'a>( // Generate event id let expected_event_id = format!( "${}", - ruma::signatures::reference_hash(&pdu_json, &services().rooms.state.get_room_version(&room_id)?) - .expect("ruma can calculate reference hashes") + ruma::signatures::reference_hash( + &pdu_json, + &services().rooms.state.get_room_version(&room_id)? + ) + .expect("ruma can calculate reference hashes") ); let expected_event_id = <&EventId>::try_from(expected_event_id.as_str()) .expect("ruma's reference hashes are valid event ids"); @@ -822,8 +885,7 @@ pub(crate) async fn invite_helper<'a>( let pub_key_map = RwLock::new(BTreeMap::new()); // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = match gen_event_id_canonical_json(&response.event) - { + let (event_id, value) = match gen_event_id_canonical_json(&response.event) { Ok(t) => t, Err(_) => { // Event could not be converted to canonical json @@ -847,22 +909,20 @@ pub(crate) async fn invite_helper<'a>( ) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; - let pdu_id: Vec = services().rooms.event_handler.handle_incoming_pdu( - &origin, - &event_id, - room_id, - value, - true, - &pub_key_map, - ) - .await? - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Could not accept incoming PDU as timeline event.", - ))?; + let pdu_id: Vec = services() + .rooms + .event_handler + .handle_incoming_pdu(&origin, &event_id, room_id, value, true, &pub_key_map) + .await? + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Could not accept incoming PDU as timeline event.", + ))?; // Bind to variable because of lifetimes - let servers = services().rooms.state_cache + let servers = services() + .rooms + .state_cache .room_servers(room_id) .filter_map(|r| r.ok()) .filter(|server| &**server != services().globals.server_name()); @@ -872,7 +932,11 @@ pub(crate) async fn invite_helper<'a>( return Ok(()); } - if !services().rooms.state_cache.is_joined(sender_user, &room_id)? { + if !services() + .rooms + .state_cache + .is_joined(sender_user, &room_id)? 
+ { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -880,7 +944,8 @@ pub(crate) async fn invite_helper<'a>( } let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -923,7 +988,13 @@ pub async fn leave_all_rooms(user_id: &UserId) -> Result<()> { .rooms .state_cache .rooms_joined(user_id) - .chain(services().rooms.state_cache.rooms_invited(user_id).map(|t| t.map(|(r, _)| r))) + .chain( + services() + .rooms + .state_cache + .rooms_invited(user_id) + .map(|t| t.map(|(r, _)| r)), + ) .collect::>(); for room_id in all_rooms { @@ -938,20 +1009,24 @@ pub async fn leave_all_rooms(user_id: &UserId) -> Result<()> { Ok(()) } -pub async fn leave_room( - user_id: &UserId, - room_id: &RoomId, -) -> Result<()> { +pub async fn leave_room(user_id: &UserId, room_id: &RoomId) -> Result<()> { // Ask a remote server if we don't have this room - if !services().rooms.metadata.exists(room_id)? && room_id.server_name() != services().globals.server_name() { + if !services().rooms.metadata.exists(room_id)? + && room_id.server_name() != services().globals.server_name() + { if let Err(e) = remote_leave_room(user_id, room_id).await { warn!("Failed to leave room {} remotely: {}", user_id, e); // Don't tell the client about this error } - let last_state = services().rooms.state_cache + let last_state = services() + .rooms + .state_cache .invite_state(user_id, room_id)? - .map_or_else(|| services().rooms.state_cache.left_state(user_id, room_id), |s| Ok(Some(s)))?; + .map_or_else( + || services().rooms.state_cache.left_state(user_id, room_id), + |s| Ok(Some(s)), + )?; // We always drop the invite, we can't rely on other servers services().rooms.state_cache.update_membership( @@ -964,7 +1039,8 @@ pub async fn leave_room( )?; } else { let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -974,7 +1050,10 @@ pub async fn leave_room( let state_lock = mutex_state.lock().await; let mut event: RoomMemberEventContent = serde_json::from_str( - services().rooms.state_accessor.room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? + services() + .rooms + .state_accessor + .room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? 
.ok_or(Error::BadRequest( ErrorKind::BadState, "Cannot leave a room you are not a member of.", @@ -1003,10 +1082,7 @@ pub async fn leave_room( Ok(()) } -async fn remote_leave_room( - user_id: &UserId, - room_id: &RoomId, -) -> Result<()> { +async fn remote_leave_room(user_id: &UserId, room_id: &RoomId) -> Result<()> { let mut make_leave_response_and_server = Err(Error::BadServerResponse( "No server available to assist in leaving.", )); @@ -1048,14 +1124,21 @@ async fn remote_leave_room( let (make_leave_response, remote_server) = make_leave_response_and_server?; let room_version_id = match make_leave_response.room_version { - Some(version) if services().globals.supported_room_versions().contains(&version) => version, + Some(version) + if services() + .globals + .supported_room_versions() + .contains(&version) => + { + version + } _ => return Err(Error::BadServerResponse("Room version is not supported")), }; - let mut leave_event_stub = - serde_json::from_str::(make_leave_response.event.get()).map_err( - |_| Error::BadServerResponse("Invalid make_leave event json received from server."), - )?; + let mut leave_event_stub = serde_json::from_str::( + make_leave_response.event.get(), + ) + .map_err(|_| Error::BadServerResponse("Invalid make_leave event json received from server."))?; // TODO: Is origin needed? leave_event_stub.insert( @@ -1099,7 +1182,8 @@ async fn remote_leave_room( // It has enough fields to be called a proper event now let leave_event = leave_event_stub; - services().sending + services() + .sending .send_federation_request( &remote_server, federation::membership::create_leave_event::v2::Request { diff --git a/src/api/client_server/message.rs b/src/api/client_server/message.rs index bfdc2fdb..e086e4af 100644 --- a/src/api/client_server/message.rs +++ b/src/api/client_server/message.rs @@ -1,4 +1,4 @@ -use crate::{utils, Error, Result, Ruma, services, service::pdu::PduBuilder}; +use crate::{service::pdu::PduBuilder, services, utils, Error, Result, Ruma}; use ruma::{ api::client::{ error::ErrorKind, @@ -25,7 +25,8 @@ pub async fn send_message_event_route( let sender_device = body.sender_device.as_deref(); let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -46,7 +47,8 @@ pub async fn send_message_event_route( // Check if this is a new transaction id if let Some(response) = - services().transaction_ids + services() + .transaction_ids .existing_txnid(sender_user, sender_device, &body.txn_id)? { // The client might have sent a txnid of the /sendToDevice endpoint @@ -108,7 +110,11 @@ pub async fn get_message_events_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); - if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? { + if !services() + .rooms + .state_cache + .is_joined(sender_user, &body.room_id)? 
+ { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -128,8 +134,12 @@ pub async fn get_message_events_route( let to = body.to.as_ref().map(|t| t.parse()); - services().rooms - .lazy_loading.lazy_load_confirm_delivery(sender_user, sender_device, &body.room_id, from)?; + services().rooms.lazy_loading.lazy_load_confirm_delivery( + sender_user, + sender_device, + &body.room_id, + from, + )?; // Use limit or else 10 let limit = body.limit.try_into().map_or(10_usize, |l: u32| l as usize); @@ -149,8 +159,10 @@ pub async fn get_message_events_route( .take(limit) .filter_map(|r| r.ok()) // Filter out buggy events .filter_map(|(pdu_id, pdu)| { - services().rooms - .timeline.pdu_count(&pdu_id) + services() + .rooms + .timeline + .pdu_count(&pdu_id) .map(|pdu_count| (pdu_count, pdu)) .ok() }) @@ -187,7 +199,8 @@ pub async fn get_message_events_route( .take(limit) .filter_map(|r| r.ok()) // Filter out buggy events .filter_map(|(pdu_id, pdu)| { - services().rooms + services() + .rooms .timeline .pdu_count(&pdu_id) .map(|pdu_count| (pdu_count, pdu)) @@ -222,10 +235,11 @@ pub async fn get_message_events_route( resp.state = Vec::new(); for ll_id in &lazy_loaded { - if let Some(member_event) = - services().rooms.state_accessor - .room_state_get(&body.room_id, &StateEventType::RoomMember, ll_id.as_str())? - { + if let Some(member_event) = services().rooms.state_accessor.room_state_get( + &body.room_id, + &StateEventType::RoomMember, + ll_id.as_str(), + )? { resp.state.push(member_event.to_state_event()); } } diff --git a/src/api/client_server/presence.rs b/src/api/client_server/presence.rs index 6a915e44..dfac3dbd 100644 --- a/src/api/client_server/presence.rs +++ b/src/api/client_server/presence.rs @@ -1,4 +1,4 @@ -use crate::{utils, Result, Ruma, services}; +use crate::{services, utils, Result, Ruma}; use ruma::api::client::presence::{get_presence, set_presence}; use std::time::Duration; @@ -51,7 +51,8 @@ pub async fn get_presence_route( for room_id in services() .rooms - .user.get_shared_rooms(vec![sender_user.clone(), body.user_id.clone()])? + .user + .get_shared_rooms(vec![sender_user.clone(), body.user_id.clone()])? 
{ let room_id = room_id?; diff --git a/src/api/client_server/profile.rs b/src/api/client_server/profile.rs index 3e1d736f..5ace1777 100644 --- a/src/api/client_server/profile.rs +++ b/src/api/client_server/profile.rs @@ -1,4 +1,4 @@ -use crate::{utils, Error, Result, Ruma, services, service::pdu::PduBuilder}; +use crate::{service::pdu::PduBuilder, services, utils, Error, Result, Ruma}; use ruma::{ api::{ client::{ @@ -24,7 +24,8 @@ pub async fn set_displayname_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services().users + services() + .users .set_displayname(sender_user, body.displayname.clone())?; // Send a new membership event and presence update into all joined rooms @@ -40,8 +41,9 @@ pub async fn set_displayname_route( content: to_raw_value(&RoomMemberEventContent { displayname: body.displayname.clone(), ..serde_json::from_str( - services().rooms - .state_accessor + services() + .rooms + .state_accessor .room_state_get( &room_id, &StateEventType::RoomMember, @@ -71,7 +73,8 @@ pub async fn set_displayname_route( for (pdu_builder, room_id) in all_rooms_joined { let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -80,10 +83,12 @@ pub async fn set_displayname_route( ); let state_lock = mutex_state.lock().await; - let _ = services() - .rooms - .timeline - .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock); + let _ = services().rooms.timeline.build_and_append_pdu( + pdu_builder, + sender_user, + &room_id, + &state_lock, + ); // Presence update services().rooms.edus.presence.update_presence( @@ -150,10 +155,13 @@ pub async fn set_avatar_url_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services().users + services() + .users .set_avatar_url(sender_user, body.avatar_url.clone())?; - services().users.set_blurhash(sender_user, body.blurhash.clone())?; + services() + .users + .set_blurhash(sender_user, body.blurhash.clone())?; // Send a new membership event and presence update into all joined rooms let all_joined_rooms: Vec<_> = services() @@ -168,8 +176,9 @@ pub async fn set_avatar_url_route( content: to_raw_value(&RoomMemberEventContent { avatar_url: body.avatar_url.clone(), ..serde_json::from_str( - services().rooms - .state_accessor + services() + .rooms + .state_accessor .room_state_get( &room_id, &StateEventType::RoomMember, @@ -199,7 +208,8 @@ pub async fn set_avatar_url_route( for (pdu_builder, room_id) in all_joined_rooms { let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -208,10 +218,12 @@ pub async fn set_avatar_url_route( ); let state_lock = mutex_state.lock().await; - let _ = services() - .rooms - .timeline - .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock); + let _ = services().rooms.timeline.build_and_append_pdu( + pdu_builder, + sender_user, + &room_id, + &state_lock, + ); // Presence update services().rooms.edus.presence.update_presence( diff --git a/src/api/client_server/push.rs b/src/api/client_server/push.rs index 12ec25dd..2301ddca 100644 --- a/src/api/client_server/push.rs +++ b/src/api/client_server/push.rs @@ -1,4 +1,4 @@ -use crate::{Error, Result, Ruma, services}; +use crate::{services, Error, Result, Ruma}; use ruma::{ api::client::{ error::ErrorKind, diff --git a/src/api/client_server/read_marker.rs b/src/api/client_server/read_marker.rs index c6d77c12..fd0e090e 100644 --- 
a/src/api/client_server/read_marker.rs +++ b/src/api/client_server/read_marker.rs @@ -1,4 +1,4 @@ -use crate::{Error, Result, Ruma, services}; +use crate::{services, Error, Result, Ruma}; use ruma::{ api::client::{error::ErrorKind, read_marker::set_read_marker, receipt::create_receipt}, events::RoomAccountDataEventType, @@ -34,12 +34,18 @@ pub async fn set_read_marker_route( services().rooms.edus.read_receipt.private_read_set( &body.room_id, sender_user, - services().rooms.timeline.get_pdu_count(event)?.ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Event does not exist.", - ))?, + services() + .rooms + .timeline + .get_pdu_count(event)? + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Event does not exist.", + ))?, )?; - services().rooms.user + services() + .rooms + .user .reset_notification_counts(sender_user, &body.room_id)?; let mut user_receipts = BTreeMap::new(); @@ -80,7 +86,8 @@ pub async fn create_receipt_route( services().rooms.edus.read_receipt.private_read_set( &body.room_id, sender_user, - services().rooms + services() + .rooms .timeline .get_pdu_count(&body.event_id)? .ok_or(Error::BadRequest( @@ -88,7 +95,9 @@ pub async fn create_receipt_route( "Event does not exist.", ))?, )?; - services().rooms.user + services() + .rooms + .user .reset_notification_counts(sender_user, &body.room_id)?; let mut user_receipts = BTreeMap::new(); diff --git a/src/api/client_server/redact.rs b/src/api/client_server/redact.rs index 57e442ab..ab586c01 100644 --- a/src/api/client_server/redact.rs +++ b/src/api/client_server/redact.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use crate::{Result, Ruma, services, service::pdu::PduBuilder}; +use crate::{service::pdu::PduBuilder, services, Result, Ruma}; use ruma::{ api::client::redact::redact_event, events::{room::redaction::RoomRedactionEventContent, RoomEventType}, @@ -20,7 +20,8 @@ pub async fn redact_event_route( let body = body.body; let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() diff --git a/src/api/client_server/report.rs b/src/api/client_server/report.rs index efcc4348..e45820e8 100644 --- a/src/api/client_server/report.rs +++ b/src/api/client_server/report.rs @@ -1,4 +1,4 @@ -use crate::{utils::HtmlEscape, Error, Result, Ruma, services}; +use crate::{services, utils::HtmlEscape, Error, Result, Ruma}; use ruma::{ api::client::{error::ErrorKind, room::report_content}, events::room::message, diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs index 939fbaa2..ca191d6a 100644 --- a/src/api/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -1,5 +1,5 @@ use crate::{ - Error, Result, Ruma, service::pdu::PduBuilder, services, api::client_server::invite_helper, + api::client_server::invite_helper, service::pdu::PduBuilder, services, Error, Result, Ruma, }; use ruma::{ api::client::{ @@ -57,7 +57,8 @@ pub async fn create_room_route( services().rooms.short.get_or_create_shortroomid(&room_id)?; let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -81,13 +82,19 @@ pub async fn create_room_route( .as_ref() .map_or(Ok(None), |localpart| { // TODO: Check for invalid characters and maximum length - let alias = - RoomAliasId::parse(format!("#{}:{}", localpart, services().globals.server_name())) - .map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias.") - })?; - - if services().rooms.alias.resolve_local_alias(&alias)?.is_some() { + let alias = 
RoomAliasId::parse(format!( + "#{}:{}", + localpart, + services().globals.server_name() + )) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; + + if services() + .rooms + .alias + .resolve_local_alias(&alias)? + .is_some() + { Err(Error::BadRequest( ErrorKind::RoomInUse, "Room alias already exists.", @@ -99,7 +106,11 @@ pub async fn create_room_route( let room_version = match body.room_version.clone() { Some(room_version) => { - if services().globals.supported_room_versions().contains(&room_version) { + if services() + .globals + .supported_room_versions() + .contains(&room_version) + { room_version } else { return Err(Error::BadRequest( @@ -338,13 +349,18 @@ pub async fn create_room_route( pdu_builder.state_key.get_or_insert_with(|| "".to_owned()); // Silently skip encryption events if they are not allowed - if pdu_builder.event_type == RoomEventType::RoomEncryption && !services().globals.allow_encryption() + if pdu_builder.event_type == RoomEventType::RoomEncryption + && !services().globals.allow_encryption() { continue; } - services().rooms - .timeline.build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock)?; + services().rooms.timeline.build_and_append_pdu( + pdu_builder, + sender_user, + &room_id, + &state_lock, + )?; } // 7. Events implied by name and topic @@ -412,7 +428,11 @@ pub async fn get_room_event_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? { + if !services() + .rooms + .state_cache + .is_joined(sender_user, &body.room_id)? + { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -439,7 +459,11 @@ pub async fn get_room_aliases_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? { + if !services() + .rooms + .state_cache + .is_joined(sender_user, &body.room_id)? 
+ { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", @@ -449,7 +473,8 @@ pub async fn get_room_aliases_route( Ok(aliases::v3::Response { aliases: services() .rooms - .alias.local_aliases_for_room(&body.room_id) + .alias + .local_aliases_for_room(&body.room_id) .filter_map(|a| a.ok()) .collect(), }) @@ -470,7 +495,11 @@ pub async fn upgrade_room_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !services().globals.supported_room_versions().contains(&body.new_version) { + if !services() + .globals + .supported_room_versions() + .contains(&body.new_version) + { return Err(Error::BadRequest( ErrorKind::UnsupportedRoomVersion, "This server does not support that room version.", @@ -479,11 +508,14 @@ pub async fn upgrade_room_route( // Create a replacement room let replacement_room = RoomId::new(services().globals.server_name()); - services().rooms - .short.get_or_create_shortroomid(&replacement_room)?; + services() + .rooms + .short + .get_or_create_shortroomid(&replacement_room)?; let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -514,7 +546,8 @@ pub async fn upgrade_room_route( // Change lock to replacement room drop(state_lock); let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -525,7 +558,8 @@ pub async fn upgrade_room_route( // Get the old room creation event let mut create_event_content = serde_json::from_str::( - services().rooms + services() + .rooms .state_accessor .room_state_get(&body.room_id, &StateEventType::RoomCreate, "")? .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? @@ -627,10 +661,15 @@ pub async fn upgrade_room_route( // Replicate transferable state events to the new room for event_type in transferable_state_events { - let event_content = match services().rooms.state_accessor.room_state_get(&body.room_id, &event_type, "")? { - Some(v) => v.content.clone(), - None => continue, // Skipping missing events. - }; + let event_content = + match services() + .rooms + .state_accessor + .room_state_get(&body.room_id, &event_type, "")? + { + Some(v) => v.content.clone(), + None => continue, // Skipping missing events. + }; services().rooms.timeline.build_and_append_pdu( PduBuilder { @@ -647,14 +686,22 @@ pub async fn upgrade_room_route( } // Moves any local aliases to the new room - for alias in services().rooms.alias.local_aliases_for_room(&body.room_id).filter_map(|r| r.ok()) { - services().rooms - .alias.set_alias(&alias, &replacement_room)?; + for alias in services() + .rooms + .alias + .local_aliases_for_room(&body.room_id) + .filter_map(|r| r.ok()) + { + services() + .rooms + .alias + .set_alias(&alias, &replacement_room)?; } // Get the old room power levels let mut power_levels_event_content: RoomPowerLevelsEventContent = serde_json::from_str( - services().rooms + services() + .rooms .state_accessor .room_state_get(&body.room_id, &StateEventType::RoomPowerLevels, "")? .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? 
@@ -688,4 +735,3 @@ pub async fn upgrade_room_route( // Return the replacement room id Ok(upgrade_room::v3::Response { replacement_room }) } - diff --git a/src/api/client_server/search.rs b/src/api/client_server/search.rs index f648649b..1ba9cdfe 100644 --- a/src/api/client_server/search.rs +++ b/src/api/client_server/search.rs @@ -1,4 +1,4 @@ -use crate::{Error, Result, Ruma, services}; +use crate::{services, Error, Result, Ruma}; use ruma::api::client::{ error::ErrorKind, search::search_events::{ @@ -23,7 +23,8 @@ pub async fn search_events_route( let filter = &search_criteria.filter; let room_ids = filter.rooms.clone().unwrap_or_else(|| { - services().rooms + services() + .rooms .state_cache .rooms_joined(sender_user) .filter_map(|r| r.ok()) @@ -35,7 +36,11 @@ pub async fn search_events_route( let mut searches = Vec::new(); for room_id in room_ids { - if !services().rooms.state_cache.is_joined(sender_user, &room_id)? { + if !services() + .rooms + .state_cache + .is_joined(sender_user, &room_id)? + { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", diff --git a/src/api/client_server/session.rs b/src/api/client_server/session.rs index 7feeb66c..14f1404f 100644 --- a/src/api/client_server/session.rs +++ b/src/api/client_server/session.rs @@ -1,5 +1,5 @@ use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH}; -use crate::{utils, Error, Result, Ruma, services}; +use crate::{services, utils, Error, Result, Ruma}; use ruma::{ api::client::{ error::ErrorKind, @@ -40,9 +40,7 @@ pub async fn get_login_types_route( /// /// Note: You can use [`GET /_matrix/client/r0/login`](fn.get_supported_versions_route.html) to see /// supported login types. -pub async fn login_route( - body: Ruma, -) -> Result { +pub async fn login_route(body: Ruma) -> Result { // Validate login method // TODO: Other login methods let user_id = match &body.login_info { @@ -55,15 +53,18 @@ pub async fn login_route( } else { return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type.")); }; - let user_id = - UserId::parse_with_server_name(username.to_owned(), services().globals.server_name()) - .map_err(|_| { - Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") - })?; - let hash = services().users.password_hash(&user_id)?.ok_or(Error::BadRequest( - ErrorKind::Forbidden, - "Wrong username or password.", - ))?; + let user_id = UserId::parse_with_server_name( + username.to_owned(), + services().globals.server_name(), + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."))?; + let hash = services() + .users + .password_hash(&user_id)? 
+ .ok_or(Error::BadRequest( + ErrorKind::Forbidden, + "Wrong username or password.", + ))?; if hash.is_empty() { return Err(Error::BadRequest( @@ -121,7 +122,8 @@ pub async fn login_route( // Determine if device_id was provided and exists in the db for this user let device_exists = body.device_id.as_ref().map_or(false, |device_id| { - services().users + services() + .users .all_device_ids(&user_id) .any(|x| x.as_ref().map_or(false, |v| v == device_id)) }); @@ -156,9 +158,7 @@ pub async fn login_route( /// - Deletes device metadata (device id, device display name, last seen ip, last seen ts) /// - Forgets to-device events /// - Triggers device list updates -pub async fn logout_route( - body: Ruma, -) -> Result { +pub async fn logout_route(body: Ruma) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); diff --git a/src/api/client_server/state.rs b/src/api/client_server/state.rs index ece74536..36466b8f 100644 --- a/src/api/client_server/state.rs +++ b/src/api/client_server/state.rs @@ -1,8 +1,6 @@ use std::sync::Arc; -use crate::{ - Error, Result, Ruma, RumaResponse, services, service::pdu::PduBuilder, -}; +use crate::{service::pdu::PduBuilder, services, Error, Result, Ruma, RumaResponse}; use ruma::{ api::client::{ error::ErrorKind, @@ -90,10 +88,14 @@ pub async fn get_state_events_route( #[allow(clippy::blocks_in_if_conditions)] // Users not in the room should not be able to access the state unless history_visibility is // WorldReadable - if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? + if !services() + .rooms + .state_cache + .is_joined(sender_user, &body.room_id)? && !matches!( - services().rooms - .state_accessor + services() + .rooms + .state_accessor .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? .map(|event| { serde_json::from_str(event.content.get()) @@ -138,10 +140,15 @@ pub async fn get_state_events_for_key_route( #[allow(clippy::blocks_in_if_conditions)] // Users not in the room should not be able to access the state unless history_visibility is // WorldReadable - if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? + if !services() + .rooms + .state_cache + .is_joined(sender_user, &body.room_id)? && !matches!( - services().rooms - .state_accessor.room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? + services() + .rooms + .state_accessor + .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? .map(|event| { serde_json::from_str(event.content.get()) .map(|e: RoomHistoryVisibilityEventContent| e.history_visibility) @@ -162,7 +169,8 @@ pub async fn get_state_events_for_key_route( let event = services() .rooms - .state_accessor.room_state_get(&body.room_id, &body.event_type, &body.state_key)? + .state_accessor + .room_state_get(&body.room_id, &body.event_type, &body.state_key)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "State event not found.", @@ -187,10 +195,15 @@ pub async fn get_state_events_for_empty_key_route( #[allow(clippy::blocks_in_if_conditions)] // Users not in the room should not be able to access the state unless history_visibility is // WorldReadable - if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? + if !services() + .rooms + .state_cache + .is_joined(sender_user, &body.room_id)? && !matches!( - services().rooms - .state_accessor.room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? 
+ services() + .rooms + .state_accessor + .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? .map(|event| { serde_json::from_str(event.content.get()) .map(|e: RoomHistoryVisibilityEventContent| e.history_visibility) @@ -211,7 +224,8 @@ pub async fn get_state_events_for_empty_key_route( let event = services() .rooms - .state_accessor.room_state_get(&body.room_id, &body.event_type, "")? + .state_accessor + .room_state_get(&body.room_id, &body.event_type, "")? .ok_or(Error::BadRequest( ErrorKind::NotFound, "State event not found.", @@ -248,7 +262,8 @@ async fn send_state_event_for_key_helper( if alias.server_name() != services().globals.server_name() || services() .rooms - .alias.resolve_local_alias(&alias)? + .alias + .resolve_local_alias(&alias)? .filter(|room| room == room_id) // Make sure it's the right room .is_none() { @@ -262,7 +277,8 @@ async fn send_state_event_for_key_helper( } let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 9eb63831..9ce98b7a 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -1,4 +1,4 @@ -use crate::{Error, Result, Ruma, RumaResponse, services}; +use crate::{services, Error, Result, Ruma, RumaResponse}; use ruma::{ api::client::{ filter::{IncomingFilterDefinition, LazyLoadOptions}, @@ -129,12 +129,7 @@ async fn sync_helper_wrapper( ) { let since = body.since.clone(); - let r = sync_helper( - sender_user.clone(), - sender_device.clone(), - body, - ) - .await; + let r = sync_helper(sender_user.clone(), sender_device.clone(), body).await; if let Ok((_, caching_allowed)) = r { if !caching_allowed { @@ -211,12 +206,17 @@ async fn sync_helper( // Look for device list updates of this account device_list_updates.extend( - services().users + services() + .users .keys_changed(&sender_user.to_string(), since, None) .filter_map(|r| r.ok()), ); - let all_joined_rooms = services().rooms.state_cache.rooms_joined(&sender_user).collect::>(); + let all_joined_rooms = services() + .rooms + .state_cache + .rooms_joined(&sender_user) + .collect::>(); for room_id in all_joined_rooms { let room_id = room_id?; @@ -224,7 +224,8 @@ async fn sync_helper( // Get and drop the lock to wait for remaining operations to finish // This will make sure the we have all events until next_batch let mutex_insert = Arc::clone( - services().globals + services() + .globals .roomid_mutex_insert .write() .unwrap() @@ -237,7 +238,12 @@ async fn sync_helper( let timeline_pdus; let limited; - if services().rooms.timeline.last_timeline_count(&sender_user, &room_id)? > since { + if services() + .rooms + .timeline + .last_timeline_count(&sender_user, &room_id)? + > since + { let mut non_timeline_pdus = services() .rooms .timeline @@ -250,7 +256,8 @@ async fn sync_helper( r.ok() }) .take_while(|(pduid, _)| { - services().rooms + services() + .rooms .timeline .pdu_count(pduid) .map_or(false, |count| count > since) @@ -286,24 +293,40 @@ async fn sync_helper( timeline_users.insert(event.sender.as_str().to_owned()); } - services().rooms.lazy_loading - .lazy_load_confirm_delivery(&sender_user, &sender_device, &room_id, since)?; + services().rooms.lazy_loading.lazy_load_confirm_delivery( + &sender_user, + &sender_device, + &room_id, + since, + )?; // Database queries: - let current_shortstatehash = if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? 
{ - s - } else { - error!("Room {} has no state", room_id); - continue; - }; + let current_shortstatehash = + if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? { + s + } else { + error!("Room {} has no state", room_id); + continue; + }; - let since_shortstatehash = services().rooms.user.get_token_shortstatehash(&room_id, since)?; + let since_shortstatehash = services() + .rooms + .user + .get_token_shortstatehash(&room_id, since)?; // Calculates joined_member_count, invited_member_count and heroes let calculate_counts = || { - let joined_member_count = services().rooms.state_cache.room_joined_count(&room_id)?.unwrap_or(0); - let invited_member_count = services().rooms.state_cache.room_invited_count(&room_id)?.unwrap_or(0); + let joined_member_count = services() + .rooms + .state_cache + .room_joined_count(&room_id)? + .unwrap_or(0); + let invited_member_count = services() + .rooms + .state_cache + .room_invited_count(&room_id)? + .unwrap_or(0); // Recalculate heroes (first 5 members) let mut heroes = Vec::new(); @@ -314,7 +337,8 @@ async fn sync_helper( for hero in services() .rooms - .timeline.all_pdus(&sender_user, &room_id)? + .timeline + .all_pdus(&sender_user, &room_id)? .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus .filter(|(_, pdu)| pdu.kind == RoomEventType::RoomMember) .map(|(_, pdu)| { @@ -333,7 +357,10 @@ async fn sync_helper( content.membership, MembershipState::Join | MembershipState::Invite ) && (services().rooms.state_cache.is_joined(&user_id, &room_id)? - || services().rooms.state_cache.is_invited(&user_id, &room_id)?) + || services() + .rooms + .state_cache + .is_invited(&user_id, &room_id)?) { Ok::<_, Error>(Some(state_key.clone())) } else { @@ -374,14 +401,21 @@ async fn sync_helper( let (joined_member_count, invited_member_count, heroes) = calculate_counts()?; - let current_state_ids = services().rooms.state_accessor.state_full_ids(current_shortstatehash).await?; + let current_state_ids = services() + .rooms + .state_accessor + .state_full_ids(current_shortstatehash) + .await?; let mut state_events = Vec::new(); let mut lazy_loaded = HashSet::new(); let mut i = 0; for (shortstatekey, id) in current_state_ids { - let (event_type, state_key) = services().rooms.short.get_statekey_from_short(shortstatekey)?; + let (event_type, state_key) = services() + .rooms + .short + .get_statekey_from_short(shortstatekey)?; if event_type != StateEventType::RoomMember { let pdu = match services().rooms.timeline.get_pdu(&id)? { @@ -423,8 +457,11 @@ async fn sync_helper( } // Reset lazy loading because this is an initial sync - services().rooms.lazy_loading - .lazy_load_reset(&sender_user, &sender_device, &room_id)?; + services().rooms.lazy_loading.lazy_load_reset( + &sender_user, + &sender_device, + &room_id, + )?; // The state_events above should contain all timeline_users, let's mark them as lazy // loaded. 
@@ -471,8 +508,16 @@ async fn sync_helper( let mut lazy_loaded = HashSet::new(); if since_shortstatehash != current_shortstatehash { - let current_state_ids = services().rooms.state_accessor.state_full_ids(current_shortstatehash).await?; - let since_state_ids = services().rooms.state_accessor.state_full_ids(since_shortstatehash).await?; + let current_state_ids = services() + .rooms + .state_accessor + .state_full_ids(current_shortstatehash) + .await?; + let since_state_ids = services() + .rooms + .state_accessor + .state_full_ids(since_shortstatehash) + .await?; for (key, id) in current_state_ids { if body.full_state || since_state_ids.get(&key) != Some(&id) { @@ -537,13 +582,15 @@ async fn sync_helper( let encrypted_room = services() .rooms - .state_accessor.state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")? + .state_accessor + .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")? .is_some(); - let since_encryption = - services().rooms - .state_accessor - .state_get(since_shortstatehash, &StateEventType::RoomEncryption, "")?; + let since_encryption = services().rooms.state_accessor.state_get( + since_shortstatehash, + &StateEventType::RoomEncryption, + "", + )?; // Calculations: let new_encrypted_room = encrypted_room && since_encryption.is_none(); @@ -592,8 +639,9 @@ async fn sync_helper( if joined_since_last_sync && encrypted_room || new_encrypted_room { // If the user is in a new encrypted room, give them all joined users device_list_updates.extend( - services().rooms - .state_cache + services() + .rooms + .state_cache .room_members(&room_id) .flatten() .filter(|user_id| { @@ -602,8 +650,7 @@ async fn sync_helper( }) .filter(|user_id| { // Only send keys if the sender doesn't share an encrypted room with the target already - !share_encrypted_room(&sender_user, user_id, &room_id) - .unwrap_or(false) + !share_encrypted_room(&sender_user, user_id, &room_id).unwrap_or(false) }), ); } @@ -625,15 +672,17 @@ async fn sync_helper( // Look for device list updates in this room device_list_updates.extend( - services().users + services() + .users .keys_changed(&room_id.to_string(), since, None) .filter_map(|r| r.ok()), ); let notification_count = if send_notification_counts { Some( - services().rooms - .user + services() + .rooms + .user .notification_count(&sender_user, &room_id)? .try_into() .expect("notification count can't go that high"), @@ -644,8 +693,9 @@ async fn sync_helper( let highlight_count = if send_notification_counts { Some( - services().rooms - .user + services() + .rooms + .user .highlight_count(&sender_user, &room_id)? 
.try_into() .expect("highlight count can't go that high"), @@ -657,7 +707,9 @@ async fn sync_helper( let prev_batch = timeline_pdus .first() .map_or(Ok::<_, Error>(None), |(pdu_id, _)| { - Ok(Some(services().rooms.timeline.pdu_count(pdu_id)?.to_string())) + Ok(Some( + services().rooms.timeline.pdu_count(pdu_id)?.to_string(), + )) })?; let room_events: Vec<_> = timeline_pdus @@ -685,8 +737,11 @@ async fn sync_helper( } // Save the state after this sync so we can send the correct state diff next sync - services().rooms.user - .associate_token_shortstatehash(&room_id, next_batch, current_shortstatehash)?; + services().rooms.user.associate_token_shortstatehash( + &room_id, + next_batch, + current_shortstatehash, + )?; let joined_room = JoinedRoom { account_data: RoomAccountData { @@ -729,11 +784,11 @@ async fn sync_helper( } // Take presence updates from this room - for (user_id, presence) in - services().rooms - .edus - .presence - .presence_since(&room_id, since)? + for (user_id, presence) in services() + .rooms + .edus + .presence + .presence_since(&room_id, since)? { match presence_updates.entry(user_id) { Entry::Vacant(v) => { @@ -765,14 +820,19 @@ async fn sync_helper( } let mut left_rooms = BTreeMap::new(); - let all_left_rooms: Vec<_> = services().rooms.state_cache.rooms_left(&sender_user).collect(); + let all_left_rooms: Vec<_> = services() + .rooms + .state_cache + .rooms_left(&sender_user) + .collect(); for result in all_left_rooms { let (room_id, left_state_events) = result?; { // Get and drop the lock to wait for remaining operations to finish let mutex_insert = Arc::clone( - services().globals + services() + .globals .roomid_mutex_insert .write() .unwrap() @@ -783,7 +843,10 @@ async fn sync_helper( drop(insert_lock); } - let left_count = services().rooms.state_cache.get_left_count(&room_id, &sender_user)?; + let left_count = services() + .rooms + .state_cache + .get_left_count(&room_id, &sender_user)?; // Left before last sync if Some(since) >= left_count { @@ -807,14 +870,19 @@ async fn sync_helper( } let mut invited_rooms = BTreeMap::new(); - let all_invited_rooms: Vec<_> = services().rooms.state_cache.rooms_invited(&sender_user).collect(); + let all_invited_rooms: Vec<_> = services() + .rooms + .state_cache + .rooms_invited(&sender_user) + .collect(); for result in all_invited_rooms { let (room_id, invite_state_events) = result?; { // Get and drop the lock to wait for remaining operations to finish let mutex_insert = Arc::clone( - services().globals + services() + .globals .roomid_mutex_insert .write() .unwrap() @@ -825,7 +893,10 @@ async fn sync_helper( drop(insert_lock); } - let invite_count = services().rooms.state_cache.get_invite_count(&room_id, &sender_user)?; + let invite_count = services() + .rooms + .state_cache + .get_invite_count(&room_id, &sender_user)?; // Invited before last sync if Some(since) >= invite_count { @@ -850,8 +921,10 @@ async fn sync_helper( .filter_map(|r| r.ok()) .filter_map(|other_room_id| { Some( - services().rooms - .state_accessor.room_state_get(&other_room_id, &StateEventType::RoomEncryption, "") + services() + .rooms + .state_accessor + .room_state_get(&other_room_id, &StateEventType::RoomEncryption, "") .ok()? 
.is_some(), ) @@ -865,7 +938,8 @@ async fn sync_helper( } // Remove all to-device events the device received *last time* - services().users + services() + .users .remove_to_device_events(&sender_user, &sender_device, since)?; let response = sync_events::v3::Response { @@ -898,7 +972,9 @@ async fn sync_helper( changed: device_list_updates.into_iter().collect(), left: device_list_left.into_iter().collect(), }, - device_one_time_keys_count: services().users.count_one_time_keys(&sender_user, &sender_device)?, + device_one_time_keys_count: services() + .users + .count_one_time_keys(&sender_user, &sender_device)?, to_device: ToDevice { events: services() .users @@ -942,8 +1018,9 @@ fn share_encrypted_room( .filter(|room_id| room_id != ignore_room) .filter_map(|other_room_id| { Some( - services().rooms - .state_accessor + services() + .rooms + .state_accessor .room_state_get(&other_room_id, &StateEventType::RoomEncryption, "") .ok()? .is_some(), diff --git a/src/api/client_server/tag.rs b/src/api/client_server/tag.rs index abf2b873..cb46d9c6 100644 --- a/src/api/client_server/tag.rs +++ b/src/api/client_server/tag.rs @@ -1,4 +1,4 @@ -use crate::{Result, Ruma, services, Error}; +use crate::{services, Error, Result, Ruma}; use ruma::{ api::client::tag::{create_tag, delete_tag, get_tags}, events::{ @@ -18,21 +18,24 @@ pub async fn update_tag_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event = services() - .account_data - .get( - Some(&body.room_id), - sender_user, - RoomAccountDataEventType::Tag, - )?; - - let mut tags_event = event.map(|e| serde_json::from_str(e.get()) - .map_err(|_| Error::bad_database("Invalid account data event in db."))) - .unwrap_or_else(|| Ok(TagEvent { - content: TagEventContent { - tags: BTreeMap::new(), - }, - }))?; + let event = services().account_data.get( + Some(&body.room_id), + sender_user, + RoomAccountDataEventType::Tag, + )?; + + let mut tags_event = event + .map(|e| { + serde_json::from_str(e.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db.")) + }) + .unwrap_or_else(|| { + Ok(TagEvent { + content: TagEventContent { + tags: BTreeMap::new(), + }, + }) + })?; tags_event .content @@ -59,21 +62,24 @@ pub async fn delete_tag_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let mut event = services() - .account_data - .get( - Some(&body.room_id), - sender_user, - RoomAccountDataEventType::Tag, - )?; - - let mut tags_event = event.map(|e| serde_json::from_str(e.get()) - .map_err(|_| Error::bad_database("Invalid account data event in db."))) - .unwrap_or_else(|| Ok(TagEvent { - content: TagEventContent { - tags: BTreeMap::new(), - }, - }))?; + let mut event = services().account_data.get( + Some(&body.room_id), + sender_user, + RoomAccountDataEventType::Tag, + )?; + + let mut tags_event = event + .map(|e| { + serde_json::from_str(e.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db.")) + }) + .unwrap_or_else(|| { + Ok(TagEvent { + content: TagEventContent { + tags: BTreeMap::new(), + }, + }) + })?; tags_event.content.tags.remove(&body.tag.clone().into()); @@ -97,21 +103,24 @@ pub async fn get_tags_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let mut event = services() - .account_data - .get( - Some(&body.room_id), - sender_user, - RoomAccountDataEventType::Tag, - )?; - - let mut tags_event = event.map(|e| serde_json::from_str(e.get()) - .map_err(|_| 
Error::bad_database("Invalid account data event in db."))) - .unwrap_or_else(|| Ok(TagEvent { - content: TagEventContent { - tags: BTreeMap::new(), - }, - }))?; + let mut event = services().account_data.get( + Some(&body.room_id), + sender_user, + RoomAccountDataEventType::Tag, + )?; + + let mut tags_event = event + .map(|e| { + serde_json::from_str(e.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db.")) + }) + .unwrap_or_else(|| { + Ok(TagEvent { + content: TagEventContent { + tags: BTreeMap::new(), + }, + }) + })?; Ok(get_tags::v3::Response { tags: tags_event.content.tags, diff --git a/src/api/client_server/to_device.rs b/src/api/client_server/to_device.rs index 3a2f6c09..34db3f98 100644 --- a/src/api/client_server/to_device.rs +++ b/src/api/client_server/to_device.rs @@ -1,7 +1,7 @@ use ruma::events::ToDeviceEventType; use std::collections::BTreeMap; -use crate::{Error, Result, Ruma, services}; +use crate::{services, Error, Result, Ruma}; use ruma::{ api::{ client::{error::ErrorKind, to_device::send_event_to_device}, @@ -54,15 +54,17 @@ pub async fn send_event_to_device_route( } match target_device_id_maybe { - DeviceIdOrAllDevices::DeviceId(target_device_id) => services().users.add_to_device_event( - sender_user, - target_user_id, - &target_device_id, - &body.event_type, - event.deserialize_as().map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid") - })?, - )?, + DeviceIdOrAllDevices::DeviceId(target_device_id) => { + services().users.add_to_device_event( + sender_user, + target_user_id, + &target_device_id, + &body.event_type, + event.deserialize_as().map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid") + })?, + )? + } DeviceIdOrAllDevices::AllDevices => { for target_device_id in services().users.all_device_ids(target_user_id) { @@ -82,7 +84,8 @@ pub async fn send_event_to_device_route( } // Save transaction id with empty data - services().transaction_ids + services() + .transaction_ids .add_txnid(sender_user, sender_device, &body.txn_id, &[])?; Ok(send_event_to_device::v3::Response {}) diff --git a/src/api/client_server/typing.rs b/src/api/client_server/typing.rs index abb669b1..ecc926f4 100644 --- a/src/api/client_server/typing.rs +++ b/src/api/client_server/typing.rs @@ -1,4 +1,4 @@ -use crate::{utils, Error, Result, Ruma, services}; +use crate::{services, utils, Error, Result, Ruma}; use ruma::api::client::{error::ErrorKind, typing::create_typing_event}; /// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}` @@ -11,7 +11,11 @@ pub async fn create_typing_event_route( let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !services().rooms.state_cache.is_joined(sender_user, &body.room_id)? { + if !services() + .rooms + .state_cache + .is_joined(sender_user, &body.room_id)? 
+ { return Err(Error::BadRequest( ErrorKind::Forbidden, "You are not in this room.", @@ -25,8 +29,10 @@ pub async fn create_typing_event_route( duration.as_millis() as u64 + utils::millis_since_unix_epoch(), )?; } else { - services().rooms - .edus.typing + services() + .rooms + .edus + .typing .typing_remove(sender_user, &body.room_id)?; } diff --git a/src/api/client_server/user_directory.rs b/src/api/client_server/user_directory.rs index c94a283e..9d7a8289 100644 --- a/src/api/client_server/user_directory.rs +++ b/src/api/client_server/user_directory.rs @@ -1,4 +1,4 @@ -use crate::{Result, Ruma, services}; +use crate::{services, Result, Ruma}; use ruma::{ api::client::user_directory::search_users, events::{ @@ -48,22 +48,25 @@ pub async fn search_users_route( return None; } - let user_is_in_public_rooms = - services().rooms - .state_cache.rooms_joined(&user_id) - .filter_map(|r| r.ok()) - .any(|room| { - services().rooms - .state_accessor.room_state_get(&room, &StateEventType::RoomJoinRules, "") - .map_or(false, |event| { - event.map_or(false, |event| { - serde_json::from_str(event.content.get()) - .map_or(false, |r: RoomJoinRulesEventContent| { - r.join_rule == JoinRule::Public - }) - }) + let user_is_in_public_rooms = services() + .rooms + .state_cache + .rooms_joined(&user_id) + .filter_map(|r| r.ok()) + .any(|room| { + services() + .rooms + .state_accessor + .room_state_get(&room, &StateEventType::RoomJoinRules, "") + .map_or(false, |event| { + event.map_or(false, |event| { + serde_json::from_str(event.content.get()) + .map_or(false, |r: RoomJoinRulesEventContent| { + r.join_rule == JoinRule::Public + }) }) - }); + }) + }); if user_is_in_public_rooms { return Some(user); @@ -71,7 +74,8 @@ pub async fn search_users_route( let user_is_in_shared_rooms = services() .rooms - .user.get_shared_rooms(vec![sender_user.clone(), user_id.clone()]) + .user + .get_shared_rooms(vec![sender_user.clone(), user_id.clone()]) .ok()? 
.next() .is_some(); diff --git a/src/api/client_server/voip.rs b/src/api/client_server/voip.rs index 9917979c..dc9caaae 100644 --- a/src/api/client_server/voip.rs +++ b/src/api/client_server/voip.rs @@ -1,4 +1,4 @@ -use crate::{Result, Ruma, services}; +use crate::{services, Result, Ruma}; use hmac::{Hmac, Mac, NewMac}; use ruma::{api::client::voip::get_turn_server_info, SecondsSinceUnixEpoch}; use sha1::Sha1; diff --git a/src/api/mod.rs b/src/api/mod.rs index 68589be7..0d2cd664 100644 --- a/src/api/mod.rs +++ b/src/api/mod.rs @@ -1,4 +1,4 @@ -pub mod client_server; -pub mod server_server; pub mod appservice_server; +pub mod client_server; pub mod ruma_wrapper; +pub mod server_server; diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index d926b89b..ee8c9e70 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -24,7 +24,7 @@ use serde::Deserialize; use tracing::{debug, error, warn}; use super::{Ruma, RumaResponse}; -use crate::{Error, Result, api::server_server, services}; +use crate::{api::server_server, services, Error, Result}; #[async_trait] impl FromRequest for Ruma @@ -197,11 +197,11 @@ where request_map.insert("content".to_owned(), json_body.clone()); }; - let keys_result = services().rooms.event_handler.fetch_signing_keys( - &x_matrix.origin, - vec![x_matrix.key.to_owned()], - ) - .await; + let keys_result = services() + .rooms + .event_handler + .fetch_signing_keys(&x_matrix.origin, vec![x_matrix.key.to_owned()]) + .await; let keys = match keys_result { Ok(b) => b, diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 11f7ec34..dba44893 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -1,6 +1,7 @@ use crate::{ api::client_server::{self, claim_keys_helper, get_keys_helper}, - utils, Error, PduEvent, Result, Ruma, services, service::pdu::{gen_event_id_canonical_json, PduBuilder}, + service::pdu::{gen_event_id_canonical_json, PduBuilder}, + services, utils, Error, PduEvent, Result, Ruma, }; use axum::{response::IntoResponse, Json}; use futures_util::{stream::FuturesUnordered, StreamExt}; @@ -138,7 +139,8 @@ where let mut write_destination_to_cache = false; - let cached_result = services().globals + let cached_result = services() + .globals .actual_destination_cache .read() .unwrap() @@ -191,7 +193,10 @@ where .to_string() .into(), ); - request_map.insert("origin".to_owned(), services().globals.server_name().as_str().into()); + request_map.insert( + "origin".to_owned(), + services().globals.server_name().as_str().into(), + ); request_map.insert("destination".to_owned(), destination.as_str().into()); let mut request_json = @@ -238,7 +243,11 @@ where let url = reqwest_request.url().clone(); - let response = services().globals.federation_client().execute(reqwest_request).await; + let response = services() + .globals + .federation_client() + .execute(reqwest_request) + .await; match response { Ok(mut response) => { @@ -278,10 +287,15 @@ where if status == 200 { let response = T::IncomingResponse::try_from_http_response(http_response); if response.is_ok() && write_destination_to_cache { - services().globals.actual_destination_cache.write().unwrap().insert( - Box::::from(destination), - (actual_destination, host), - ); + services() + .globals + .actual_destination_cache + .write() + .unwrap() + .insert( + Box::::from(destination), + (actual_destination, host), + ); } response.map_err(|e| { @@ -329,9 +343,7 @@ fn add_port_to_hostname(destination_str: &str) -> FedDest { /// Returns: actual_destination, 
host header /// Implemented according to the specification at https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names /// Numbers in comments below refer to bullet points in linked section of specification -async fn find_actual_destination( - destination: &'_ ServerName, -) -> (FedDest, FedDest) { +async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDest) { let destination_str = destination.as_str().to_owned(); let mut hostname = destination_str.clone(); let actual_destination = match get_ip_with_port(&destination_str) { @@ -364,18 +376,24 @@ async fn find_actual_destination( // 3.3: SRV lookup successful let force_port = hostname_override.port(); - if let Ok(override_ip) = services().globals + if let Ok(override_ip) = services() + .globals .dns_resolver() .lookup_ip(hostname_override.hostname()) .await { - services().globals.tls_name_override.write().unwrap().insert( - delegated_hostname.clone(), - ( - override_ip.iter().collect(), - force_port.unwrap_or(8448), - ), - ); + services() + .globals + .tls_name_override + .write() + .unwrap() + .insert( + delegated_hostname.clone(), + ( + override_ip.iter().collect(), + force_port.unwrap_or(8448), + ), + ); } else { warn!("Using SRV record, but could not resolve to IP"); } @@ -400,15 +418,24 @@ async fn find_actual_destination( Some(hostname_override) => { let force_port = hostname_override.port(); - if let Ok(override_ip) = services().globals + if let Ok(override_ip) = services() + .globals .dns_resolver() .lookup_ip(hostname_override.hostname()) .await { - services().globals.tls_name_override.write().unwrap().insert( - hostname.clone(), - (override_ip.iter().collect(), force_port.unwrap_or(8448)), - ); + services() + .globals + .tls_name_override + .write() + .unwrap() + .insert( + hostname.clone(), + ( + override_ip.iter().collect(), + force_port.unwrap_or(8448), + ), + ); } else { warn!("Using SRV record, but could not resolve to IP"); } @@ -443,10 +470,9 @@ async fn find_actual_destination( (actual_destination, hostname) } -async fn query_srv_record( - hostname: &'_ str, -) -> Option { - if let Ok(Some(host_port)) = services().globals +async fn query_srv_record(hostname: &'_ str) -> Option { + if let Ok(Some(host_port)) = services() + .globals .dns_resolver() .srv_lookup(format!("_matrix._tcp.{}", hostname)) .await @@ -465,11 +491,10 @@ async fn query_srv_record( } } -async fn request_well_known( - destination: &str, -) -> Option { +async fn request_well_known(destination: &str) -> Option { let body: serde_json::Value = serde_json::from_str( - &services().globals + &services() + .globals .default_client() .get(&format!( "https://{}/.well-known/matrix/server", @@ -664,15 +689,22 @@ pub async fn send_transaction_message_route( Some(id) => id, None => { // Event is invalid - resolved_map.insert(event_id, Err(Error::bad_database("Event needs a valid RoomId."))); + resolved_map.insert( + event_id, + Err(Error::bad_database("Event needs a valid RoomId.")), + ); continue; } }; - services().rooms.event_handler.acl_check(&sender_servername, &room_id)?; + services() + .rooms + .event_handler + .acl_check(&sender_servername, &room_id)?; let mutex = Arc::clone( - services().globals + services() + .globals .roomid_mutex_federation .write() .unwrap() @@ -683,16 +715,19 @@ pub async fn send_transaction_message_route( let start_time = Instant::now(); resolved_map.insert( event_id.clone(), - services().rooms.event_handler.handle_incoming_pdu( - &sender_servername, - &event_id, - &room_id, - value, - true, - 
&pub_key_map, - ) - .await - .map(|_| ()), + services() + .rooms + .event_handler + .handle_incoming_pdu( + &sender_servername, + &event_id, + &room_id, + value, + true, + &pub_key_map, + ) + .await + .map(|_| ()), ); drop(mutex_lock); @@ -727,7 +762,13 @@ pub async fn send_transaction_message_route( .event_ids .iter() .filter_map(|id| { - services().rooms.timeline.get_pdu_count(id).ok().flatten().map(|r| (id, r)) + services() + .rooms + .timeline + .get_pdu_count(id) + .ok() + .flatten() + .map(|r| (id, r)) }) .max_by_key(|(_, count)| *count) { @@ -744,11 +785,11 @@ pub async fn send_transaction_message_route( content: ReceiptEventContent(receipt_content), room_id: room_id.clone(), }; - services().rooms.edus.read_receipt.readreceipt_update( - &user_id, - &room_id, - event, - )?; + services() + .rooms + .edus + .read_receipt + .readreceipt_update(&user_id, &room_id, event)?; } else { // TODO fetch missing events info!("No known event ids in read receipt: {:?}", user_updates); @@ -757,7 +798,11 @@ pub async fn send_transaction_message_route( } } Edu::Typing(typing) => { - if services().rooms.state_cache.is_joined(&typing.user_id, &typing.room_id)? { + if services() + .rooms + .state_cache + .is_joined(&typing.user_id, &typing.room_id)? + { if typing.typing { services().rooms.edus.typing.typing_add( &typing.user_id, @@ -765,16 +810,16 @@ pub async fn send_transaction_message_route( 3000 + utils::millis_since_unix_epoch(), )?; } else { - services().rooms.edus.typing.typing_remove( - &typing.user_id, - &typing.room_id, - )?; + services() + .rooms + .edus + .typing + .typing_remove(&typing.user_id, &typing.room_id)?; } } } Edu::DeviceListUpdate(DeviceListUpdateContent { user_id, .. }) => { - services().users - .mark_device_key_update(&user_id)?; + services().users.mark_device_key_update(&user_id)?; } Edu::DirectToDevice(DirectDeviceContent { sender, @@ -810,7 +855,9 @@ pub async fn send_transaction_message_route( } DeviceIdOrAllDevices::AllDevices => { - for target_device_id in services().users.all_device_ids(target_user_id) { + for target_device_id in + services().users.all_device_ids(target_user_id) + { services().users.add_to_device_event( &sender, target_user_id, @@ -830,7 +877,8 @@ pub async fn send_transaction_message_route( } // Save transaction id with empty data - services().transaction_ids + services() + .transaction_ids .add_txnid(&sender, None, &message_id, &[])?; } Edu::SigningKeyUpdate(SigningKeyUpdateContent { @@ -854,7 +902,12 @@ pub async fn send_transaction_message_route( } } - Ok(send_transaction_message::v1::Response { pdus: resolved_map.into_iter().map(|(e, r)| (e, r.map_err(|e| e.to_string()))).collect() }) + Ok(send_transaction_message::v1::Response { + pdus: resolved_map + .into_iter() + .map(|(e, r)| (e, r.map_err(|e| e.to_string()))) + .collect(), + }) } /// # `GET /_matrix/federation/v1/event/{eventId}` @@ -875,7 +928,8 @@ pub async fn get_event_route( .expect("server is authenticated"); let event = services() - .rooms.timeline + .rooms + .timeline .get_pdu_json(&body.event_id)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; @@ -887,7 +941,11 @@ pub async fn get_event_route( let room_id = <&RoomId>::try_from(room_id_str) .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - if !services().rooms.state_cache.server_in_room(sender_servername, room_id)? { + if !services() + .rooms + .state_cache + .server_in_room(sender_servername, room_id)? 
+ { return Err(Error::BadRequest( ErrorKind::Forbidden, "Server is not in room", @@ -916,14 +974,21 @@ pub async fn get_missing_events_route( .as_ref() .expect("server is authenticated"); - if !services().rooms.state_cache.server_in_room(sender_servername, &body.room_id)? { + if !services() + .rooms + .state_cache + .server_in_room(sender_servername, &body.room_id)? + { return Err(Error::BadRequest( ErrorKind::Forbidden, "Server is not in room", )); } - services().rooms.event_handler.acl_check(&sender_servername, &body.room_id)?; + services() + .rooms + .event_handler + .acl_check(&sender_servername, &body.room_id)?; let mut queued_events = body.latest_events.clone(); let mut events = Vec::new(); @@ -988,17 +1053,25 @@ pub async fn get_event_authorization_route( .as_ref() .expect("server is authenticated"); - if !services().rooms.state_cache.server_in_room(sender_servername, &body.room_id)? { + if !services() + .rooms + .state_cache + .server_in_room(sender_servername, &body.room_id)? + { return Err(Error::BadRequest( ErrorKind::Forbidden, "Server is not in room.", )); } - services().rooms.event_handler.acl_check(&sender_servername, &body.room_id)?; + services() + .rooms + .event_handler + .acl_check(&sender_servername, &body.room_id)?; let event = services() - .rooms.timeline + .rooms + .timeline .get_pdu_json(&body.event_id)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; @@ -1010,7 +1083,11 @@ pub async fn get_event_authorization_route( let room_id = <&RoomId>::try_from(room_id_str) .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - let auth_chain_ids = services().rooms.auth_chain.get_auth_chain(room_id, vec![Arc::from(&*body.event_id)]).await?; + let auth_chain_ids = services() + .rooms + .auth_chain + .get_auth_chain(room_id, vec![Arc::from(&*body.event_id)]) + .await?; Ok(get_event_authorization::v1::Response { auth_chain: auth_chain_ids @@ -1035,17 +1112,25 @@ pub async fn get_room_state_route( .as_ref() .expect("server is authenticated"); - if !services().rooms.state_cache.server_in_room(sender_servername, &body.room_id)? { + if !services() + .rooms + .state_cache + .server_in_room(sender_servername, &body.room_id)? + { return Err(Error::BadRequest( ErrorKind::Forbidden, "Server is not in room.", )); } - services().rooms.event_handler.acl_check(&sender_servername, &body.room_id)?; + services() + .rooms + .event_handler + .acl_check(&sender_servername, &body.room_id)?; let shortstatehash = services() - .rooms.state_accessor + .rooms + .state_accessor .pdu_shortstatehash(&body.event_id)? .ok_or(Error::BadRequest( ErrorKind::NotFound, @@ -1053,26 +1138,39 @@ pub async fn get_room_state_route( ))?; let pdus = services() - .rooms.state_accessor + .rooms + .state_accessor .state_full_ids(shortstatehash) .await? 
.into_iter() .map(|(_, id)| { PduEvent::convert_to_outgoing_federation_event( - services().rooms.timeline.get_pdu_json(&id).unwrap().unwrap(), + services() + .rooms + .timeline + .get_pdu_json(&id) + .unwrap() + .unwrap(), ) }) .collect(); - let auth_chain_ids = - services().rooms.auth_chain.get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)]).await?; + let auth_chain_ids = services() + .rooms + .auth_chain + .get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)]) + .await?; Ok(get_room_state::v1::Response { auth_chain: auth_chain_ids .map(|id| { - services().rooms.timeline.get_pdu_json(&id).map(|maybe_json| { - PduEvent::convert_to_outgoing_federation_event(maybe_json.unwrap()) - }) + services() + .rooms + .timeline + .get_pdu_json(&id) + .map(|maybe_json| { + PduEvent::convert_to_outgoing_federation_event(maybe_json.unwrap()) + }) }) .filter_map(|r| r.ok()) .collect(), @@ -1095,17 +1193,25 @@ pub async fn get_room_state_ids_route( .as_ref() .expect("server is authenticated"); - if !services().rooms.state_cache.server_in_room(sender_servername, &body.room_id)? { + if !services() + .rooms + .state_cache + .server_in_room(sender_servername, &body.room_id)? + { return Err(Error::BadRequest( ErrorKind::Forbidden, "Server is not in room.", )); } - services().rooms.event_handler.acl_check(&sender_servername, &body.room_id)?; + services() + .rooms + .event_handler + .acl_check(&sender_servername, &body.room_id)?; let shortstatehash = services() - .rooms.state_accessor + .rooms + .state_accessor .pdu_shortstatehash(&body.event_id)? .ok_or(Error::BadRequest( ErrorKind::NotFound, @@ -1113,15 +1219,19 @@ pub async fn get_room_state_ids_route( ))?; let pdu_ids = services() - .rooms.state_accessor + .rooms + .state_accessor .state_full_ids(shortstatehash) .await? 
.into_iter() .map(|(_, id)| (*id).to_owned()) .collect(); - let auth_chain_ids = - services().rooms.auth_chain.get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)]).await?; + let auth_chain_ids = services() + .rooms + .auth_chain + .get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)]) + .await?; Ok(get_room_state_ids::v1::Response { auth_chain_ids: auth_chain_ids.map(|id| (*id).to_owned()).collect(), @@ -1151,10 +1261,14 @@ pub async fn create_join_event_template_route( .as_ref() .expect("server is authenticated"); - services().rooms.event_handler.acl_check(&sender_servername, &body.room_id)?; + services() + .rooms + .event_handler + .acl_check(&sender_servername, &body.room_id)?; let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -1164,9 +1278,11 @@ pub async fn create_join_event_template_route( let state_lock = mutex_state.lock().await; // TODO: Conduit does not implement restricted join rules yet, we always reject - let join_rules_event = - services().rooms.state_accessor - .room_state_get(&body.room_id, &StateEventType::RoomJoinRules, "")?; + let join_rules_event = services().rooms.state_accessor.room_state_get( + &body.room_id, + &StateEventType::RoomJoinRules, + "", + )?; let join_rules_event_content: Option = join_rules_event .as_ref() @@ -1212,13 +1328,18 @@ pub async fn create_join_event_template_route( }) .expect("member event is valid value"); - let (pdu, pdu_json) = services().rooms.timeline.create_hash_and_sign_event(PduBuilder { - event_type: RoomEventType::RoomMember, - content, - unsigned: None, - state_key: Some(body.user_id.to_string()), - redacts: None, - }, &body.user_id, &body.room_id, &state_lock)?; + let (pdu, pdu_json) = services().rooms.timeline.create_hash_and_sign_event( + PduBuilder { + event_type: RoomEventType::RoomMember, + content, + unsigned: None, + state_key: Some(body.user_id.to_string()), + redacts: None, + }, + &body.user_id, + &body.room_id, + &state_lock, + )?; drop(state_lock); @@ -1244,12 +1365,17 @@ async fn create_join_event( )); } - services().rooms.event_handler.acl_check(&sender_servername, room_id)?; + services() + .rooms + .event_handler + .acl_check(&sender_servername, room_id)?; // TODO: Conduit does not implement restricted join rules yet, we always reject - let join_rules_event = services() - .rooms.state_accessor - .room_state_get(room_id, &StateEventType::RoomJoinRules, "")?; + let join_rules_event = services().rooms.state_accessor.room_state_get( + room_id, + &StateEventType::RoomJoinRules, + "", + )?; let join_rules_event_content: Option = join_rules_event .as_ref() @@ -1275,7 +1401,8 @@ async fn create_join_event( // We need to return the state prior to joining, let's keep a reference to that here let shortstatehash = services() - .rooms.state + .rooms + .state .get_room_shortstatehash(room_id)? 
.ok_or(Error::BadRequest( ErrorKind::NotFound, @@ -1307,7 +1434,8 @@ async fn create_join_event( .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; let mutex = Arc::clone( - services().globals + services() + .globals .roomid_mutex_federation .write() .unwrap() @@ -1315,7 +1443,10 @@ async fn create_join_event( .or_default(), ); let mutex_lock = mutex.lock().await; - let pdu_id: Vec = services().rooms.event_handler.handle_incoming_pdu(&origin, &event_id, room_id, value, true, &pub_key_map) + let pdu_id: Vec = services() + .rooms + .event_handler + .handle_incoming_pdu(&origin, &event_id, room_id, value, true, &pub_key_map) .await? .ok_or(Error::BadRequest( ErrorKind::InvalidParam, @@ -1323,12 +1454,19 @@ async fn create_join_event( ))?; drop(mutex_lock); - let state_ids = services().rooms.state_accessor.state_full_ids(shortstatehash).await?; - let auth_chain_ids = services().rooms.auth_chain.get_auth_chain( - room_id, - state_ids.iter().map(|(_, id)| id.clone()).collect(), - ) - .await?; + let state_ids = services() + .rooms + .state_accessor + .state_full_ids(shortstatehash) + .await?; + let auth_chain_ids = services() + .rooms + .auth_chain + .get_auth_chain( + room_id, + state_ids.iter().map(|(_, id)| id.clone()).collect(), + ) + .await?; let servers = services() .rooms @@ -1399,9 +1537,16 @@ pub async fn create_invite_route( .as_ref() .expect("server is authenticated"); - services().rooms.event_handler.acl_check(&sender_servername, &body.room_id)?; + services() + .rooms + .event_handler + .acl_check(&sender_servername, &body.room_id)?; - if !services().globals.supported_room_versions().contains(&body.room_version) { + if !services() + .globals + .supported_room_versions() + .contains(&body.room_version) + { return Err(Error::BadRequest( ErrorKind::IncompatibleRoomVersion { room_version: body.room_version.clone(), @@ -1549,7 +1694,8 @@ pub async fn get_room_information_route( let room_id = services() .rooms - .alias.resolve_local_alias(&body.room_alias)? + .alias + .resolve_local_alias(&body.room_alias)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "Room alias not found.", @@ -1576,7 +1722,9 @@ pub async fn get_profile_information_route( let mut blurhash = None; match &body.field { - Some(ProfileField::DisplayName) => displayname = services().users.displayname(&body.user_id)?, + Some(ProfileField::DisplayName) => { + displayname = services().users.displayname(&body.user_id)? + } Some(ProfileField::AvatarUrl) => { avatar_url = services().users.avatar_url(&body.user_id)?; blurhash = services().users.blurhash(&body.user_id)? @@ -1600,18 +1748,14 @@ pub async fn get_profile_information_route( /// # `POST /_matrix/federation/v1/user/keys/query` /// /// Gets devices and identity keys for the given users. 
-pub async fn get_keys_route( - body: Ruma, -) -> Result { +pub async fn get_keys_route(body: Ruma) -> Result { if !services().globals.allow_federation() { return Err(Error::bad_config("Federation is disabled.")); } - let result = get_keys_helper( - None, - &body.device_keys, - |u| Some(u.server_name()) == body.sender_servername.as_deref(), - ) + let result = get_keys_helper(None, &body.device_keys, |u| { + Some(u.server_name()) == body.sender_servername.as_deref() + }) .await?; Ok(get_keys::v1::Response { diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index 1388dc38..07277287 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -1,4 +1,4 @@ -use super::{super::Config, watchers::Watchers, KvTree, KeyValueDatabaseEngine}; +use super::{super::Config, watchers::Watchers, KeyValueDatabaseEngine, KvTree}; use crate::{utils, Result}; use std::{ future::Future, diff --git a/src/database/key_value/account_data.rs b/src/database/key_value/account_data.rs index 5674ac07..7d2a870e 100644 --- a/src/database/key_value/account_data.rs +++ b/src/database/key_value/account_data.rs @@ -1,9 +1,15 @@ use std::collections::HashMap; -use ruma::{UserId, DeviceId, signatures::CanonicalJsonValue, api::client::{uiaa::UiaaInfo, error::ErrorKind}, events::{RoomAccountDataEventType, AnyEphemeralRoomEvent}, serde::Raw, RoomId}; -use serde::{Serialize, de::DeserializeOwned}; - -use crate::{Result, database::KeyValueDatabase, service, Error, utils, services}; +use ruma::{ + api::client::{error::ErrorKind, uiaa::UiaaInfo}, + events::{AnyEphemeralRoomEvent, RoomAccountDataEventType}, + serde::Raw, + signatures::CanonicalJsonValue, + DeviceId, RoomId, UserId, +}; +use serde::{de::DeserializeOwned, Serialize}; + +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; impl service::account_data::Data for KeyValueDatabase { /// Places one event in the account data of the user and removes the previous entry. 
diff --git a/src/database/key_value/appservice.rs b/src/database/key_value/appservice.rs index f427ba71..9a821a65 100644 --- a/src/database/key_value/appservice.rs +++ b/src/database/key_value/appservice.rs @@ -55,10 +55,13 @@ impl service::appservice::Data for KeyValueDatabase { } fn iter_ids<'a>(&'a self) -> Result> + 'a>> { - Ok(Box::new(self.id_appserviceregistrations.iter().map(|(id, _)| { - utils::string_from_bytes(&id) - .map_err(|_| Error::bad_database("Invalid id bytes in id_appserviceregistrations.")) - }))) + Ok(Box::new(self.id_appserviceregistrations.iter().map( + |(id, _)| { + utils::string_from_bytes(&id).map_err(|_| { + Error::bad_database("Invalid id bytes in id_appserviceregistrations.") + }) + }, + ))) } fn all(&self) -> Result> { diff --git a/src/database/key_value/globals.rs b/src/database/key_value/globals.rs index 199cbf64..fafaf49e 100644 --- a/src/database/key_value/globals.rs +++ b/src/database/key_value/globals.rs @@ -2,9 +2,13 @@ use std::collections::BTreeMap; use async_trait::async_trait; use futures_util::{stream::FuturesUnordered, StreamExt}; -use ruma::{signatures::Ed25519KeyPair, UserId, DeviceId, ServerName, api::federation::discovery::{ServerSigningKeys, VerifyKey}, ServerSigningKeyId, MilliSecondsSinceUnixEpoch}; +use ruma::{ + api::federation::discovery::{ServerSigningKeys, VerifyKey}, + signatures::Ed25519KeyPair, + DeviceId, MilliSecondsSinceUnixEpoch, ServerName, ServerSigningKeyId, UserId, +}; -use crate::{Result, service, database::KeyValueDatabase, Error, utils, services}; +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; pub const COUNTER: &[u8] = b"c"; @@ -35,28 +39,24 @@ impl service::globals::Data for KeyValueDatabase { // Return when *any* user changed his key // TODO: only send for user they share a room with - futures.push( - self.todeviceid_events - .watch_prefix(&userdeviceid_prefix), - ); + futures.push(self.todeviceid_events.watch_prefix(&userdeviceid_prefix)); futures.push(self.userroomid_joined.watch_prefix(&userid_prefix)); - futures.push( - self.userroomid_invitestate - .watch_prefix(&userid_prefix), - ); + futures.push(self.userroomid_invitestate.watch_prefix(&userid_prefix)); futures.push(self.userroomid_leftstate.watch_prefix(&userid_prefix)); futures.push( self.userroomid_notificationcount .watch_prefix(&userid_prefix), ); - futures.push( - self.userroomid_highlightcount - .watch_prefix(&userid_prefix), - ); + futures.push(self.userroomid_highlightcount.watch_prefix(&userid_prefix)); // Events for rooms we are in - for room_id in services().rooms.state_cache.rooms_joined(user_id).filter_map(|r| r.ok()) { + for room_id in services() + .rooms + .state_cache + .rooms_joined(user_id) + .filter_map(|r| r.ok()) + { let short_roomid = services() .rooms .short @@ -75,15 +75,9 @@ impl service::globals::Data for KeyValueDatabase { futures.push(self.pduid_pdu.watch_prefix(&short_roomid)); // EDUs - futures.push( - self.roomid_lasttypingupdate - .watch_prefix(&roomid_bytes), - ); + futures.push(self.roomid_lasttypingupdate.watch_prefix(&roomid_bytes)); - futures.push( - self.readreceiptid_readreceipt - .watch_prefix(&roomid_prefix), - ); + futures.push(self.readreceiptid_readreceipt.watch_prefix(&roomid_prefix)); // Key changes futures.push(self.keychangeid_userid.watch_prefix(&roomid_prefix)); @@ -110,10 +104,7 @@ impl service::globals::Data for KeyValueDatabase { futures.push(self.keychangeid_userid.watch_prefix(&userid_prefix)); // One time keys - futures.push( - self.userid_lastonetimekeyupdate - 
.watch_prefix(&userid_bytes), - ); + futures.push(self.userid_lastonetimekeyupdate.watch_prefix(&userid_bytes)); futures.push(Box::pin(services().globals.rotate.watch())); @@ -238,10 +229,7 @@ impl service::globals::Data for KeyValueDatabase { } fn bump_database_version(&self, new_version: u64) -> Result<()> { - self.global - .insert(b"version", &new_version.to_be_bytes())?; + self.global.insert(b"version", &new_version.to_be_bytes())?; Ok(()) } - - } diff --git a/src/database/key_value/key_backups.rs b/src/database/key_value/key_backups.rs index 8171451c..0738f730 100644 --- a/src/database/key_value/key_backups.rs +++ b/src/database/key_value/key_backups.rs @@ -1,8 +1,15 @@ use std::collections::BTreeMap; -use ruma::{UserId, serde::Raw, api::client::{backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, error::ErrorKind}, RoomId}; +use ruma::{ + api::client::{ + backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, + error::ErrorKind, + }, + serde::Raw, + RoomId, UserId, +}; -use crate::{Result, service, database::KeyValueDatabase, services, Error, utils}; +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; impl service::key_backups::Data for KeyValueDatabase { fn create_backup( @@ -118,11 +125,7 @@ impl service::key_backups::Data for KeyValueDatabase { .transpose() } - fn get_backup( - &self, - user_id: &UserId, - version: &str, - ) -> Result>> { + fn get_backup(&self, user_id: &UserId, version: &str) -> Result>> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(version.as_bytes()); @@ -322,12 +325,7 @@ impl service::key_backups::Data for KeyValueDatabase { Ok(()) } - fn delete_room_keys( - &self, - user_id: &UserId, - version: &str, - room_id: &RoomId, - ) -> Result<()> { + fn delete_room_keys(&self, user_id: &UserId, version: &str, room_id: &RoomId) -> Result<()> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(version.as_bytes()); diff --git a/src/database/key_value/media.rs b/src/database/key_value/media.rs index f0244872..de96ace1 100644 --- a/src/database/key_value/media.rs +++ b/src/database/key_value/media.rs @@ -1,9 +1,16 @@ use ruma::api::client::error::ErrorKind; -use crate::{database::KeyValueDatabase, service, Error, utils, Result}; +use crate::{database::KeyValueDatabase, service, utils, Error, Result}; impl service::media::Data for KeyValueDatabase { - fn create_file_metadata(&self, mxc: String, width: u32, height: u32, content_disposition: Option<&str>, content_type: Option<&str>) -> Result> { + fn create_file_metadata( + &self, + mxc: String, + width: u32, + height: u32, + content_disposition: Option<&str>, + content_type: Option<&str>, + ) -> Result> { let mut key = mxc.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(&width.to_be_bytes()); @@ -28,14 +35,23 @@ impl service::media::Data for KeyValueDatabase { Ok(key) } - fn search_file_metadata(&self, mxc: String, width: u32, height: u32) -> Result<(Option, Option, Vec)> { + fn search_file_metadata( + &self, + mxc: String, + width: u32, + height: u32, + ) -> Result<(Option, Option, Vec)> { let mut prefix = mxc.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(&0_u32.to_be_bytes()); // Width = 0 if it's not a thumbnail prefix.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail prefix.push(0xff); - let (key, _) = self.mediaid_file.scan_prefix(prefix).next().ok_or(Error::BadRequest(ErrorKind::NotFound, "Media not found"))?; + let (key, _) = self + .mediaid_file + 
.scan_prefix(prefix) + .next() + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Media not found"))?; let mut parts = key.rsplit(|&b| b == 0xff); @@ -57,9 +73,7 @@ impl service::media::Data for KeyValueDatabase { } else { Some( utils::string_from_bytes(content_disposition_bytes).map_err(|_| { - Error::bad_database( - "Content Disposition in mediaid_file is invalid unicode.", - ) + Error::bad_database("Content Disposition in mediaid_file is invalid unicode.") })?, ) }; diff --git a/src/database/key_value/pusher.rs b/src/database/key_value/pusher.rs index b05e47be..15f4e26e 100644 --- a/src/database/key_value/pusher.rs +++ b/src/database/key_value/pusher.rs @@ -1,6 +1,9 @@ -use ruma::{UserId, api::client::push::{set_pusher, get_pushers}}; +use ruma::{ + api::client::push::{get_pushers, set_pusher}, + UserId, +}; -use crate::{service, database::KeyValueDatabase, Error, Result}; +use crate::{database::KeyValueDatabase, service, Error, Result}; impl service::pusher::Data for KeyValueDatabase { fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> { @@ -48,10 +51,7 @@ impl service::pusher::Data for KeyValueDatabase { .collect() } - fn get_pusher_senderkeys<'a>( - &'a self, - sender: &UserId, - ) -> Box>> { + fn get_pusher_senderkeys<'a>(&'a self, sender: &UserId) -> Box>> { let mut prefix = sender.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/key_value/rooms/alias.rs b/src/database/key_value/rooms/alias.rs index 0aa8dd48..112d6eb0 100644 --- a/src/database/key_value/rooms/alias.rs +++ b/src/database/key_value/rooms/alias.rs @@ -1,13 +1,9 @@ -use ruma::{RoomId, RoomAliasId, api::client::error::ErrorKind}; +use ruma::{api::client::error::ErrorKind, RoomAliasId, RoomId}; -use crate::{service, database::KeyValueDatabase, utils, Error, services, Result}; +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; impl service::rooms::alias::Data for KeyValueDatabase { - fn set_alias( - &self, - alias: &RoomAliasId, - room_id: &RoomId - ) -> Result<()> { + fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()> { self.alias_roomid .insert(alias.alias().as_bytes(), room_id.as_bytes())?; let mut aliasid = room_id.as_bytes().to_vec(); @@ -17,10 +13,7 @@ impl service::rooms::alias::Data for KeyValueDatabase { Ok(()) } - fn remove_alias( - &self, - alias: &RoomAliasId, - ) -> Result<()> { + fn remove_alias(&self, alias: &RoomAliasId) -> Result<()> { if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { let mut prefix = room_id.to_vec(); prefix.push(0xff); @@ -38,10 +31,7 @@ impl service::rooms::alias::Data for KeyValueDatabase { Ok(()) } - fn resolve_local_alias( - &self, - alias: &RoomAliasId - ) -> Result>> { + fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result>> { self.alias_roomid .get(alias.alias().as_bytes())? 
.map(|bytes| { diff --git a/src/database/key_value/rooms/auth_chain.rs b/src/database/key_value/rooms/auth_chain.rs index 49d39560..60057ac1 100644 --- a/src/database/key_value/rooms/auth_chain.rs +++ b/src/database/key_value/rooms/auth_chain.rs @@ -1,6 +1,6 @@ use std::{collections::HashSet, mem::size_of, sync::Arc}; -use crate::{service, database::KeyValueDatabase, Result, utils}; +use crate::{database::KeyValueDatabase, service, utils, Result}; impl service::rooms::auth_chain::Data for KeyValueDatabase { fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result>>> { @@ -12,14 +12,13 @@ impl service::rooms::auth_chain::Data for KeyValueDatabase { // We only save auth chains for single events in the db if key.len() == 1 { // Check DB cache - let chain = self.shorteventid_authchain + let chain = self + .shorteventid_authchain .get(&key[0].to_be_bytes())? .map(|chain| { chain .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) + .map(|chunk| utils::u64_from_bytes(chunk).expect("byte length is correct")) .collect() }); @@ -37,7 +36,6 @@ impl service::rooms::auth_chain::Data for KeyValueDatabase { } Ok(None) - } fn cache_auth_chain(&self, key: Vec, auth_chain: Arc>) -> Result<()> { @@ -53,7 +51,10 @@ impl service::rooms::auth_chain::Data for KeyValueDatabase { } // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, auth_chain); + self.auth_chain_cache + .lock() + .unwrap() + .insert(key, auth_chain); Ok(()) } diff --git a/src/database/key_value/rooms/directory.rs b/src/database/key_value/rooms/directory.rs index 727004e7..661c202d 100644 --- a/src/database/key_value/rooms/directory.rs +++ b/src/database/key_value/rooms/directory.rs @@ -1,6 +1,6 @@ use ruma::RoomId; -use crate::{service, database::KeyValueDatabase, utils, Error, Result}; +use crate::{database::KeyValueDatabase, service, utils, Error, Result}; impl service::rooms::directory::Data for KeyValueDatabase { fn set_public(&self, room_id: &RoomId) -> Result<()> { diff --git a/src/database/key_value/rooms/edus/mod.rs b/src/database/key_value/rooms/edus/mod.rs index b5007f89..6c652918 100644 --- a/src/database/key_value/rooms/edus/mod.rs +++ b/src/database/key_value/rooms/edus/mod.rs @@ -1,7 +1,7 @@ mod presence; -mod typing; mod read_receipt; +mod typing; -use crate::{service, database::KeyValueDatabase}; +use crate::{database::KeyValueDatabase, service}; impl service::rooms::edus::Data for KeyValueDatabase {} diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs index 1477c28b..fdd51ce1 100644 --- a/src/database/key_value/rooms/edus/presence.rs +++ b/src/database/key_value/rooms/edus/presence.rs @@ -1,8 +1,8 @@ use std::collections::HashMap; -use ruma::{UserId, RoomId, events::presence::PresenceEvent, presence::PresenceState, UInt}; +use ruma::{events::presence::PresenceEvent, presence::PresenceState, RoomId, UInt, UserId}; -use crate::{service, database::KeyValueDatabase, utils, Error, services, Result}; +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; impl service::rooms::edus::presence::Data for KeyValueDatabase { fn update_presence( diff --git a/src/database/key_value/rooms/edus/read_receipt.rs b/src/database/key_value/rooms/edus/read_receipt.rs index a12e2653..c78f0f51 100644 --- a/src/database/key_value/rooms/edus/read_receipt.rs +++ b/src/database/key_value/rooms/edus/read_receipt.rs @@ -1,8 +1,10 @@ use std::mem; -use ruma::{UserId, RoomId, 
events::receipt::ReceiptEvent, serde::Raw, signatures::CanonicalJsonObject}; +use ruma::{ + events::receipt::ReceiptEvent, serde::Raw, signatures::CanonicalJsonObject, RoomId, UserId, +}; -use crate::{database::KeyValueDatabase, service, utils, Error, services, Result}; +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { fn readreceipt_update( @@ -50,13 +52,15 @@ impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { &'a self, room_id: &RoomId, since: u64, - ) -> Box, - u64, - Raw, - )>, - >> { + ) -> Box< + dyn Iterator< + Item = Result<( + Box, + u64, + Raw, + )>, + >, + > { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); let prefix2 = prefix.clone(); @@ -64,42 +68,44 @@ impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { let mut first_possible_edu = prefix.clone(); first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - Box::new(self.readreceiptid_readreceipt - .iter_from(&first_possible_edu, false) - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(move |(k, v)| { - let count = - utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::()]) - .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; - let user_id = UserId::parse( - utils::string_from_bytes(&k[prefix.len() + mem::size_of::() + 1..]) - .map_err(|_| { - Error::bad_database("Invalid readreceiptid userid bytes in db.") - })?, - ) + Box::new( + self.readreceiptid_readreceipt + .iter_from(&first_possible_edu, false) + .take_while(move |(k, _)| k.starts_with(&prefix2)) + .map(move |(k, v)| { + let count = utils::u64_from_bytes( + &k[prefix.len()..prefix.len() + mem::size_of::()], + ) + .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; + let user_id = UserId::parse( + utils::string_from_bytes(&k[prefix.len() + mem::size_of::() + 1..]) + .map_err(|_| { + Error::bad_database("Invalid readreceiptid userid bytes in db.") + })?, + ) .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?; - let mut json = serde_json::from_slice::(&v).map_err(|_| { - Error::bad_database("Read receipt in roomlatestid_roomlatest is invalid json.") - })?; - json.remove("room_id"); - - Ok(( - user_id, - count, - Raw::from_json( - serde_json::value::to_raw_value(&json).expect("json is valid raw value"), - ), - )) - })) + let mut json = + serde_json::from_slice::(&v).map_err(|_| { + Error::bad_database( + "Read receipt in roomlatestid_roomlatest is invalid json.", + ) + })?; + json.remove("room_id"); + + Ok(( + user_id, + count, + Raw::from_json( + serde_json::value::to_raw_value(&json) + .expect("json is valid raw value"), + ), + )) + }), + ) } - fn private_read_set( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - ) -> Result<()> { + fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()> { let mut key = room_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(user_id.as_bytes()); diff --git a/src/database/key_value/rooms/edus/typing.rs b/src/database/key_value/rooms/edus/typing.rs index b7d35968..7b211e7c 100644 --- a/src/database/key_value/rooms/edus/typing.rs +++ b/src/database/key_value/rooms/edus/typing.rs @@ -1,16 +1,11 @@ use std::collections::HashSet; -use ruma::{UserId, RoomId}; +use ruma::{RoomId, UserId}; -use crate::{database::KeyValueDatabase, service, utils, Error, services, Result}; +use 
crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; impl service::rooms::edus::typing::Data for KeyValueDatabase { - fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - ) -> Result<()> { + fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -30,11 +25,7 @@ impl service::rooms::edus::typing::Data for KeyValueDatabase { Ok(()) } - fn typing_remove( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result<()> { + fn typing_remove(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -53,17 +44,16 @@ impl service::rooms::edus::typing::Data for KeyValueDatabase { } if found_outdated { - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &services().globals.next_count()?.to_be_bytes())?; + self.roomid_lasttypingupdate.insert( + room_id.as_bytes(), + &services().globals.next_count()?.to_be_bytes(), + )?; } Ok(()) } - fn last_typing_update( - &self, - room_id: &RoomId, - ) -> Result { + fn last_typing_update(&self, room_id: &RoomId) -> Result { Ok(self .roomid_lasttypingupdate .get(room_id.as_bytes())? @@ -76,10 +66,7 @@ impl service::rooms::edus::typing::Data for KeyValueDatabase { .unwrap_or(0)) } - fn typings_all( - &self, - room_id: &RoomId, - ) -> Result>> { + fn typings_all(&self, room_id: &RoomId) -> Result>> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -89,7 +76,7 @@ impl service::rooms::edus::typing::Data for KeyValueDatabase { let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| { Error::bad_database("User ID in typingid_userid is invalid unicode.") })?) - .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?; + .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?; user_ids.insert(user_id); } diff --git a/src/database/key_value/rooms/lazy_load.rs b/src/database/key_value/rooms/lazy_load.rs index 133e1d04..a19d52cb 100644 --- a/src/database/key_value/rooms/lazy_load.rs +++ b/src/database/key_value/rooms/lazy_load.rs @@ -1,6 +1,6 @@ -use ruma::{UserId, DeviceId, RoomId}; +use ruma::{DeviceId, RoomId, UserId}; -use crate::{service, database::KeyValueDatabase, Result}; +use crate::{database::KeyValueDatabase, service, Result}; impl service::rooms::lazy_loading::Data for KeyValueDatabase { fn lazy_load_was_sent_before( diff --git a/src/database/key_value/rooms/metadata.rs b/src/database/key_value/rooms/metadata.rs index 72f62514..63a6b1aa 100644 --- a/src/database/key_value/rooms/metadata.rs +++ b/src/database/key_value/rooms/metadata.rs @@ -1,6 +1,6 @@ use ruma::RoomId; -use crate::{service, database::KeyValueDatabase, Result, services}; +use crate::{database::KeyValueDatabase, service, services, Result}; impl service::rooms::metadata::Data for KeyValueDatabase { fn exists(&self, room_id: &RoomId) -> Result { diff --git a/src/database/key_value/rooms/outlier.rs b/src/database/key_value/rooms/outlier.rs index aa975449..2ecaadbb 100644 --- a/src/database/key_value/rooms/outlier.rs +++ b/src/database/key_value/rooms/outlier.rs @@ -1,6 +1,6 @@ -use ruma::{EventId, signatures::CanonicalJsonObject}; +use ruma::{signatures::CanonicalJsonObject, EventId}; -use crate::{service, database::KeyValueDatabase, PduEvent, Error, Result}; +use crate::{database::KeyValueDatabase, service, Error, PduEvent, Result}; impl service::rooms::outlier::Data for KeyValueDatabase { fn 
get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { diff --git a/src/database/key_value/rooms/pdu_metadata.rs b/src/database/key_value/rooms/pdu_metadata.rs index f3ac414f..76ec7346 100644 --- a/src/database/key_value/rooms/pdu_metadata.rs +++ b/src/database/key_value/rooms/pdu_metadata.rs @@ -1,8 +1,8 @@ use std::sync::Arc; -use ruma::{RoomId, EventId}; +use ruma::{EventId, RoomId}; -use crate::{service, database::KeyValueDatabase, Result}; +use crate::{database::KeyValueDatabase, service, Result}; impl service::rooms::pdu_metadata::Data for KeyValueDatabase { fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { diff --git a/src/database/key_value/rooms/search.rs b/src/database/key_value/rooms/search.rs index 41df5441..79e6a326 100644 --- a/src/database/key_value/rooms/search.rs +++ b/src/database/key_value/rooms/search.rs @@ -2,7 +2,7 @@ use std::mem::size_of; use ruma::RoomId; -use crate::{service, database::KeyValueDatabase, utils, Result, services}; +use crate::{database::KeyValueDatabase, service, services, utils, Result}; impl service::rooms::search::Data for KeyValueDatabase { fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: String) -> Result<()> { @@ -27,7 +27,9 @@ impl service::rooms::search::Data for KeyValueDatabase { room_id: &RoomId, search_string: &str, ) -> Result>>, Vec)>> { - let prefix = services().rooms.short + let prefix = services() + .rooms + .short .get_shortroomid(room_id)? .expect("room exists") .to_be_bytes() @@ -63,10 +65,10 @@ impl service::rooms::search::Data for KeyValueDatabase { }; let mapped = common_elements.map(move |id| { - let mut pduid = prefix_clone.clone(); - pduid.extend_from_slice(&id); - pduid - }); + let mut pduid = prefix_clone.clone(); + pduid.extend_from_slice(&id); + pduid + }); Ok(Some((Box::new(mapped), words))) } diff --git a/src/database/key_value/rooms/short.rs b/src/database/key_value/rooms/short.rs index ecd12dad..c0223170 100644 --- a/src/database/key_value/rooms/short.rs +++ b/src/database/key_value/rooms/short.rs @@ -1,14 +1,11 @@ use std::sync::Arc; -use ruma::{EventId, events::StateEventType, RoomId}; +use ruma::{events::StateEventType, EventId, RoomId}; -use crate::{Result, database::KeyValueDatabase, service, utils, Error, services}; +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; impl service::rooms::short::Data for KeyValueDatabase { - fn get_or_create_shorteventid( - &self, - event_id: &EventId, - ) -> Result { + fn get_or_create_shorteventid(&self, event_id: &EventId) -> Result { if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { return Ok(*short); } @@ -180,10 +177,7 @@ impl service::rooms::short::Data for KeyValueDatabase { } /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash( - &self, - state_hash: &[u8], - ) -> Result<(u64, bool)> { + fn get_or_create_shortstatehash(&self, state_hash: &[u8]) -> Result<(u64, bool)> { Ok(match self.statehash_shortstatehash.get(state_hash)? { Some(shortstatehash) => ( utils::u64_from_bytes(&shortstatehash) @@ -209,10 +203,7 @@ impl service::rooms::short::Data for KeyValueDatabase { .transpose() } - fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - ) -> Result { + fn get_or_create_shortroomid(&self, room_id: &RoomId) -> Result { Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? 
{ Some(short) => utils::u64_from_bytes(&short) .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, diff --git a/src/database/key_value/rooms/state.rs b/src/database/key_value/rooms/state.rs index 90ac0d55..80a74589 100644 --- a/src/database/key_value/rooms/state.rs +++ b/src/database/key_value/rooms/state.rs @@ -1,10 +1,10 @@ -use ruma::{RoomId, EventId}; -use tokio::sync::MutexGuard; -use std::sync::Arc; +use ruma::{EventId, RoomId}; use std::collections::HashSet; use std::fmt::Debug; +use std::sync::Arc; +use tokio::sync::MutexGuard; -use crate::{service, database::KeyValueDatabase, utils, Error, Result}; +use crate::{database::KeyValueDatabase, service, utils, Error, Result}; impl service::rooms::state::Data for KeyValueDatabase { fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { @@ -17,9 +17,12 @@ impl service::rooms::state::Data for KeyValueDatabase { }) } - fn set_room_state(&self, room_id: &RoomId, new_shortstatehash: u64, + fn set_room_state( + &self, + room_id: &RoomId, + new_shortstatehash: u64, _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { + ) -> Result<()> { self.roomid_shortstatehash .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; Ok(()) diff --git a/src/database/key_value/rooms/state_accessor.rs b/src/database/key_value/rooms/state_accessor.rs index 4d5bd4a1..39c261f3 100644 --- a/src/database/key_value/rooms/state_accessor.rs +++ b/src/database/key_value/rooms/state_accessor.rs @@ -1,13 +1,18 @@ -use std::{collections::{BTreeMap, HashMap}, sync::Arc}; +use std::{ + collections::{BTreeMap, HashMap}, + sync::Arc, +}; -use crate::{database::KeyValueDatabase, service, PduEvent, Error, utils, Result, services}; +use crate::{database::KeyValueDatabase, service, services, utils, Error, PduEvent, Result}; use async_trait::async_trait; -use ruma::{EventId, events::StateEventType, RoomId}; +use ruma::{events::StateEventType, EventId, RoomId}; #[async_trait] impl service::rooms::state_accessor::Data for KeyValueDatabase { async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = services().rooms.state_compressor + let full_state = services() + .rooms + .state_compressor .load_shortstatehash_info(shortstatehash)? .pop() .expect("there is always one layer") @@ -15,7 +20,10 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { let mut result = BTreeMap::new(); let mut i = 0; for compressed in full_state.into_iter() { - let parsed = services().rooms.state_compressor.parse_compressed_state_event(compressed)?; + let parsed = services() + .rooms + .state_compressor + .parse_compressed_state_event(compressed)?; result.insert(parsed.0, parsed.1); i += 1; @@ -30,7 +38,9 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { &self, shortstatehash: u64, ) -> Result>> { - let full_state = services().rooms.state_compressor + let full_state = services() + .rooms + .state_compressor .load_shortstatehash_info(shortstatehash)? .pop() .expect("there is always one layer") @@ -39,7 +49,10 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { let mut result = HashMap::new(); let mut i = 0; for compressed in full_state { - let (_, eventid) = services().rooms.state_compressor.parse_compressed_state_event(compressed)?; + let (_, eventid) = services() + .rooms + .state_compressor + .parse_compressed_state_event(compressed)?; if let Some(pdu) = services().rooms.timeline.get_pdu(&eventid)? 
{ result.insert( ( @@ -69,11 +82,17 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { event_type: &StateEventType, state_key: &str, ) -> Result>> { - let shortstatekey = match services().rooms.short.get_shortstatekey(event_type, state_key)? { + let shortstatekey = match services() + .rooms + .short + .get_shortstatekey(event_type, state_key)? + { Some(s) => s, None => return Ok(None), }; - let full_state = services().rooms.state_compressor + let full_state = services() + .rooms + .state_compressor .load_shortstatehash_info(shortstatehash)? .pop() .expect("there is always one layer") @@ -82,7 +101,10 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { .into_iter() .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) .and_then(|compressed| { - services().rooms.state_compressor.parse_compressed_state_event(compressed) + services() + .rooms + .state_compressor + .parse_compressed_state_event(compressed) .ok() .map(|(_, id)| id) })) @@ -96,7 +118,9 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { state_key: &str, ) -> Result>> { self.state_get_id(shortstatehash, event_type, state_key)? - .map_or(Ok(None), |event_id| services().rooms.timeline.get_pdu(&event_id)) + .map_or(Ok(None), |event_id| { + services().rooms.timeline.get_pdu(&event_id) + }) } /// Returns the state hash for this pdu. @@ -122,7 +146,9 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { &self, room_id: &RoomId, ) -> Result>> { - if let Some(current_shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)? { + if let Some(current_shortstatehash) = + services().rooms.state.get_room_shortstatehash(room_id)? + { self.state_full(current_shortstatehash).await } else { Ok(HashMap::new()) @@ -136,7 +162,9 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { event_type: &StateEventType, state_key: &str, ) -> Result>> { - if let Some(current_shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)? { + if let Some(current_shortstatehash) = + services().rooms.state.get_room_shortstatehash(room_id)? + { self.state_get_id(current_shortstatehash, event_type, state_key) } else { Ok(None) @@ -150,7 +178,9 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { event_type: &StateEventType, state_key: &str, ) -> Result>> { - if let Some(current_shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)? { + if let Some(current_shortstatehash) = + services().rooms.state.get_room_shortstatehash(room_id)? 
+ { self.state_get(current_shortstatehash, event_type, state_key) } else { Ok(None) diff --git a/src/database/key_value/rooms/state_cache.rs b/src/database/key_value/rooms/state_cache.rs index 4043bc40..4ca6ac40 100644 --- a/src/database/key_value/rooms/state_cache.rs +++ b/src/database/key_value/rooms/state_cache.rs @@ -1,9 +1,13 @@ use std::{collections::HashSet, sync::Arc}; use regex::Regex; -use ruma::{UserId, RoomId, events::{AnyStrippedStateEvent, AnySyncStateEvent}, serde::Raw, ServerName}; +use ruma::{ + events::{AnyStrippedStateEvent, AnySyncStateEvent}, + serde::Raw, + RoomId, ServerName, UserId, +}; -use crate::{service, database::KeyValueDatabase, services, Result, Error, utils}; +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; impl service::rooms::state_cache::Data for KeyValueDatabase { fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { @@ -31,8 +35,13 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { Ok(()) } - - fn mark_as_invited(&self, user_id: &UserId, room_id: &RoomId, last_state: Option>>) -> Result<()> { + + fn mark_as_invited( + &self, + user_id: &UserId, + room_id: &RoomId, + last_state: Option>>, + ) -> Result<()> { let mut roomuser_id = room_id.as_bytes().to_vec(); roomuser_id.push(0xff); roomuser_id.extend_from_slice(user_id.as_bytes()); @@ -46,8 +55,10 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { &serde_json::to_vec(&last_state.unwrap_or_default()) .expect("state to bytes always works"), )?; - self.roomuserid_invitecount - .insert(&roomuser_id, &services().globals.next_count()?.to_be_bytes())?; + self.roomuserid_invitecount.insert( + &roomuser_id, + &services().globals.next_count()?.to_be_bytes(), + )?; self.userroomid_joined.remove(&userroom_id)?; self.roomuserid_joined.remove(&roomuser_id)?; self.userroomid_leftstate.remove(&userroom_id)?; @@ -69,8 +80,10 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { &userroom_id, &serde_json::to_vec(&Vec::>::new()).unwrap(), )?; // TODO - self.roomuserid_leftcount - .insert(&roomuser_id, &services().globals.next_count()?.to_be_bytes())?; + self.roomuserid_leftcount.insert( + &roomuser_id, + &services().globals.next_count()?.to_be_bytes(), + )?; self.userroomid_joined.remove(&userroom_id)?; self.roomuserid_joined.remove(&roomuser_id)?; self.userroomid_invitestate.remove(&userroom_id)?; @@ -324,21 +337,25 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); - Box::new(self.roomuseroncejoinedids - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), + Box::new( + self.roomuseroncejoinedids + .scan_prefix(prefix) + .map(|(key, _)| { + UserId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database( + "User ID in room_useroncejoined is invalid unicode.", + ) + })?, ) - .map_err(|_| { - Error::bad_database("User ID in room_useroncejoined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) - })) + .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) + }), + ) } /// Returns an iterator over all invited members of a room. 
@@ -350,21 +367,23 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); - Box::new(self.roomuserid_invitecount - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), + Box::new( + self.roomuserid_invitecount + .scan_prefix(prefix) + .map(|(key, _)| { + UserId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("User ID in roomuserid_invited is invalid unicode.") + })?, ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) - })) + .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) + }), + ) } #[tracing::instrument(skip(self))] @@ -403,21 +422,23 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { &'a self, user_id: &UserId, ) -> Box>> + 'a> { - Box::new(self.userroomid_joined - .scan_prefix(user_id.as_bytes().to_vec()) - .map(|(key, _)| { - RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), + Box::new( + self.userroomid_joined + .scan_prefix(user_id.as_bytes().to_vec()) + .map(|(key, _)| { + RoomId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("Room ID in userroomid_joined is invalid unicode.") + })?, ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) - })) + .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) + }), + ) } /// Returns an iterator over all rooms a user was invited to. 
@@ -429,26 +450,31 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); - Box::new(self.userroomid_invitestate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), + Box::new( + self.userroomid_invitestate + .scan_prefix(prefix) + .map(|(key, state)| { + let room_id = RoomId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("Room ID in userroomid_invited is invalid unicode.") + })?, ) .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; + Error::bad_database("Room ID in userroomid_invited is invalid.") + })?; - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; + let state = serde_json::from_slice(&state).map_err(|_| { + Error::bad_database("Invalid state in userroomid_invitestate.") + })?; - Ok((room_id, state)) - })) + Ok((room_id, state)) + }), + ) } #[tracing::instrument(skip(self))] @@ -502,26 +528,31 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); - Box::new(self.userroomid_leftstate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), + Box::new( + self.userroomid_leftstate + .scan_prefix(prefix) + .map(|(key, state)| { + let room_id = RoomId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("Room ID in userroomid_invited is invalid unicode.") + })?, ) .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; + Error::bad_database("Room ID in userroomid_invited is invalid.") + })?; - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; + let state = serde_json::from_slice(&state).map_err(|_| { + Error::bad_database("Invalid state in userroomid_leftstate.") + })?; - Ok((room_id, state)) - })) + Ok((room_id, state)) + }), + ) } #[tracing::instrument(skip(self))] diff --git a/src/database/key_value/rooms/state_compressor.rs b/src/database/key_value/rooms/state_compressor.rs index aee1890c..d0a9be48 100644 --- a/src/database/key_value/rooms/state_compressor.rs +++ b/src/database/key_value/rooms/state_compressor.rs @@ -1,6 +1,10 @@ use std::{collections::HashSet, mem::size_of}; -use crate::{service::{self, rooms::state_compressor::data::StateDiff}, database::KeyValueDatabase, Error, utils, Result}; +use crate::{ + database::KeyValueDatabase, + service::{self, rooms::state_compressor::data::StateDiff}, + utils, Error, Result, +}; impl service::rooms::state_compressor::Data for KeyValueDatabase { fn get_statediff(&self, shortstatehash: u64) -> Result { @@ -10,11 +14,7 @@ impl service::rooms::state_compressor::Data for KeyValueDatabase { .ok_or_else(|| Error::bad_database("State hash does not exist"))?; let parent = 
utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - let parent = if parent != 0 { - Some(parent) - } else { - None - }; + let parent = if parent != 0 { Some(parent) } else { None }; let mut add_mode = true; let mut added = HashSet::new(); @@ -35,7 +35,11 @@ impl service::rooms::state_compressor::Data for KeyValueDatabase { i += 2 * size_of::(); } - Ok(StateDiff { parent, added, removed }) + Ok(StateDiff { + parent, + added, + removed, + }) } fn save_statediff(&self, shortstatehash: u64, diff: StateDiff) -> Result<()> { diff --git a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs index 17231867..5d684a1b 100644 --- a/src/database/key_value/rooms/timeline.rs +++ b/src/database/key_value/rooms/timeline.rs @@ -1,13 +1,17 @@ use std::{collections::hash_map, mem::size_of, sync::Arc}; -use ruma::{UserId, RoomId, api::client::error::ErrorKind, EventId, signatures::CanonicalJsonObject}; +use ruma::{ + api::client::error::ErrorKind, signatures::CanonicalJsonObject, EventId, RoomId, UserId, +}; use tracing::error; -use crate::{service, database::KeyValueDatabase, utils, Error, PduEvent, Result, services}; +use crate::{database::KeyValueDatabase, service, services, utils, Error, PduEvent, Result}; impl service::rooms::timeline::Data for KeyValueDatabase { fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = services().rooms.short + let prefix = services() + .rooms + .short .get_shortroomid(room_id)? .expect("room exists") .to_be_bytes() @@ -82,10 +86,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { } /// Returns the json of a pdu. - fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result> { + fn get_non_outlier_pdu_json(&self, event_id: &EventId) -> Result> { self.eventid_pduid .get(event_id.as_bytes())? .map(|pduid| { @@ -187,10 +188,17 @@ impl service::rooms::timeline::Data for KeyValueDatabase { .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) } - fn append_pdu(&self, pdu_id: &[u8], pdu: &PduEvent, json: &CanonicalJsonObject, count: u64) -> Result<()> { + fn append_pdu( + &self, + pdu_id: &[u8], + pdu: &PduEvent, + json: &CanonicalJsonObject, + count: u64, + ) -> Result<()> { self.pduid_pdu.insert( pdu_id, - &serde_json::to_vec(json).expect("CanonicalJsonObject is always a valid"))?; + &serde_json::to_vec(json).expect("CanonicalJsonObject is always a valid"), + )?; self.lasttimelinecount_cache .lock() @@ -209,7 +217,8 @@ impl service::rooms::timeline::Data for KeyValueDatabase { if self.pduid_pdu.get(pdu_id)?.is_some() { self.pduid_pdu.insert( pdu_id, - &serde_json::to_vec(pdu).expect("CanonicalJsonObject is always a valid"))?; + &serde_json::to_vec(pdu).expect("CanonicalJsonObject is always a valid"), + )?; Ok(()) } else { Err(Error::BadRequest( @@ -227,7 +236,9 @@ impl service::rooms::timeline::Data for KeyValueDatabase { room_id: &RoomId, since: u64, ) -> Result, PduEvent)>>>> { - let prefix = services().rooms.short + let prefix = services() + .rooms + .short .get_shortroomid(room_id)? 
.expect("room exists") .to_be_bytes() @@ -239,18 +250,19 @@ impl service::rooms::timeline::Data for KeyValueDatabase { let user_id = user_id.to_owned(); - Ok(Box::new(self - .pduid_pdu - .iter_from(&first_pdu_id, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - }))) + Ok(Box::new( + self.pduid_pdu + .iter_from(&first_pdu_id, false) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .map(move |(pdu_id, v)| { + let mut pdu = serde_json::from_slice::(&v) + .map_err(|_| Error::bad_database("PDU in db is invalid."))?; + if pdu.sender != user_id { + pdu.remove_transaction_id()?; + } + Ok((pdu_id, pdu)) + }), + )) } /// Returns an iterator over all events and their tokens in a room that happened before the @@ -262,7 +274,9 @@ impl service::rooms::timeline::Data for KeyValueDatabase { until: u64, ) -> Result, PduEvent)>>>> { // Create the first part of the full pdu id - let prefix = services().rooms.short + let prefix = services() + .rooms + .short .get_shortroomid(room_id)? .expect("room exists") .to_be_bytes() @@ -275,18 +289,19 @@ impl service::rooms::timeline::Data for KeyValueDatabase { let user_id = user_id.to_owned(); - Ok(Box::new(self - .pduid_pdu - .iter_from(current, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - }))) + Ok(Box::new( + self.pduid_pdu + .iter_from(current, true) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .map(move |(pdu_id, v)| { + let mut pdu = serde_json::from_slice::(&v) + .map_err(|_| Error::bad_database("PDU in db is invalid."))?; + if pdu.sender != user_id { + pdu.remove_transaction_id()?; + } + Ok((pdu_id, pdu)) + }), + )) } fn pdus_after<'a>( @@ -296,7 +311,9 @@ impl service::rooms::timeline::Data for KeyValueDatabase { from: u64, ) -> Result, PduEvent)>>>> { // Create the first part of the full pdu id - let prefix = services().rooms.short + let prefix = services() + .rooms + .short .get_shortroomid(room_id)? 
.expect("room exists") .to_be_bytes() @@ -309,21 +326,27 @@ impl service::rooms::timeline::Data for KeyValueDatabase { let user_id = user_id.to_owned(); - Ok(Box::new(self - .pduid_pdu - .iter_from(current, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - }))) + Ok(Box::new( + self.pduid_pdu + .iter_from(current, false) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .map(move |(pdu_id, v)| { + let mut pdu = serde_json::from_slice::(&v) + .map_err(|_| Error::bad_database("PDU in db is invalid."))?; + if pdu.sender != user_id { + pdu.remove_transaction_id()?; + } + Ok((pdu_id, pdu)) + }), + )) } - fn increment_notification_counts(&self, room_id: &RoomId, notifies: Vec>, highlights: Vec>) -> Result<()> { + fn increment_notification_counts( + &self, + room_id: &RoomId, + notifies: Vec>, + highlights: Vec>, + ) -> Result<()> { let notifies_batch = Vec::new(); let highlights_batch = Vec::new(); for user in notifies { diff --git a/src/database/key_value/rooms/user.rs b/src/database/key_value/rooms/user.rs index 3759bda7..78c78e19 100644 --- a/src/database/key_value/rooms/user.rs +++ b/src/database/key_value/rooms/user.rs @@ -1,6 +1,6 @@ -use ruma::{UserId, RoomId}; +use ruma::{RoomId, UserId}; -use crate::{service, database::KeyValueDatabase, utils, Error, Result, services}; +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; impl service::rooms::user::Data for KeyValueDatabase { fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { @@ -50,7 +50,11 @@ impl service::rooms::user::Data for KeyValueDatabase { token: u64, shortstatehash: u64, ) -> Result<()> { - let shortroomid = services().rooms.short.get_shortroomid(room_id)?.expect("room exists"); + let shortroomid = services() + .rooms + .short + .get_shortroomid(room_id)? + .expect("room exists"); let mut key = shortroomid.to_be_bytes().to_vec(); key.extend_from_slice(&token.to_be_bytes()); @@ -60,7 +64,11 @@ impl service::rooms::user::Data for KeyValueDatabase { } fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = services().rooms.short.get_shortroomid(room_id)?.expect("room exists"); + let shortroomid = services() + .rooms + .short + .get_shortroomid(room_id)? + .expect("room exists"); let mut key = shortroomid.to_be_bytes().to_vec(); key.extend_from_slice(&token.to_be_bytes()); @@ -102,13 +110,15 @@ impl service::rooms::user::Data for KeyValueDatabase { }); // We use the default compare function because keys are sorted correctly (not reversed) - Ok(Box::new(Box::new(utils::common_elements(iterators, Ord::cmp) - .expect("users is not empty") - .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { - Error::bad_database("Invalid RoomId bytes in userroomid_joined") - })?) - .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - })))) + Ok(Box::new(Box::new( + utils::common_elements(iterators, Ord::cmp) + .expect("users is not empty") + .map(|bytes| { + RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { + Error::bad_database("Invalid RoomId bytes in userroomid_joined") + })?) 
+ .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) + }), + ))) } } diff --git a/src/database/key_value/transaction_ids.rs b/src/database/key_value/transaction_ids.rs index a63b3c5d..2ea6ad4a 100644 --- a/src/database/key_value/transaction_ids.rs +++ b/src/database/key_value/transaction_ids.rs @@ -1,6 +1,6 @@ -use ruma::{UserId, DeviceId, TransactionId}; +use ruma::{DeviceId, TransactionId, UserId}; -use crate::{service, database::KeyValueDatabase, Result}; +use crate::{database::KeyValueDatabase, service, Result}; impl service::transaction_ids::Data for KeyValueDatabase { fn add_txnid( diff --git a/src/database/key_value/uiaa.rs b/src/database/key_value/uiaa.rs index cf242dec..8a9f1762 100644 --- a/src/database/key_value/uiaa.rs +++ b/src/database/key_value/uiaa.rs @@ -1,4 +1,8 @@ -use ruma::{UserId, DeviceId, signatures::CanonicalJsonValue, api::client::{uiaa::UiaaInfo, error::ErrorKind}}; +use ruma::{ + api::client::{error::ErrorKind, uiaa::UiaaInfo}, + signatures::CanonicalJsonValue, + DeviceId, UserId, +}; use crate::{database::KeyValueDatabase, service, Error, Result}; diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs index 55a518d4..15699a16 100644 --- a/src/database/key_value/users.rs +++ b/src/database/key_value/users.rs @@ -1,9 +1,20 @@ -use std::{mem::size_of, collections::BTreeMap}; - -use ruma::{api::client::{filter::IncomingFilterDefinition, error::ErrorKind, device::Device}, UserId, RoomAliasId, MxcUri, DeviceId, MilliSecondsSinceUnixEpoch, DeviceKeyId, encryption::{OneTimeKey, CrossSigningKey, DeviceKeys}, serde::Raw, events::{AnyToDeviceEvent, StateEventType}, DeviceKeyAlgorithm, UInt}; +use std::{collections::BTreeMap, mem::size_of}; + +use ruma::{ + api::client::{device::Device, error::ErrorKind, filter::IncomingFilterDefinition}, + encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, + events::{AnyToDeviceEvent, StateEventType}, + serde::Raw, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, MxcUri, RoomAliasId, + UInt, UserId, +}; use tracing::warn; -use crate::{service::{self, users::clean_signatures}, database::KeyValueDatabase, Error, utils, services, Result}; +use crate::{ + database::KeyValueDatabase, + service::{self, users::clean_signatures}, + services, utils, Error, Result, +}; impl service::users::Data for KeyValueDatabase { /// Check if a user has an account on this homeserver. @@ -274,18 +285,21 @@ impl service::users::Data for KeyValueDatabase { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); // All devices have metadata - Box::new(self.userdeviceid_metadata - .scan_prefix(prefix) - .map(|(bytes, _)| { - Ok(utils::string_from_bytes( - bytes - .rsplit(|&b| b == 0xff) - .next() - .ok_or_else(|| Error::bad_database("UserDevice ID in db is invalid."))?, - ) - .map_err(|_| Error::bad_database("Device ID in userdeviceid_metadata is invalid."))? - .into()) - })) + Box::new( + self.userdeviceid_metadata + .scan_prefix(prefix) + .map(|(bytes, _)| { + Ok(utils::string_from_bytes( + bytes.rsplit(|&b| b == 0xff).next().ok_or_else(|| { + Error::bad_database("UserDevice ID in db is invalid.") + })?, + ) + .map_err(|_| { + Error::bad_database("Device ID in userdeviceid_metadata is invalid.") + })? + .into()) + }), + ) } /// Replaces the access token of one device. 
@@ -341,8 +355,10 @@ impl service::users::Data for KeyValueDatabase { &serde_json::to_vec(&one_time_key_value).expect("OneTimeKey::to_vec always works"), )?; - self.userid_lastonetimekeyupdate - .insert(user_id.as_bytes(), &services().globals.next_count()?.to_be_bytes())?; + self.userid_lastonetimekeyupdate.insert( + user_id.as_bytes(), + &services().globals.next_count()?.to_be_bytes(), + )?; Ok(()) } @@ -372,8 +388,10 @@ impl service::users::Data for KeyValueDatabase { prefix.extend_from_slice(key_algorithm.as_ref().as_bytes()); prefix.push(b':'); - self.userid_lastonetimekeyupdate - .insert(user_id.as_bytes(), &services().globals.next_count()?.to_be_bytes())?; + self.userid_lastonetimekeyupdate.insert( + user_id.as_bytes(), + &services().globals.next_count()?.to_be_bytes(), + )?; self.onetimekeyid_onetimekeys .scan_prefix(prefix) @@ -617,38 +635,47 @@ impl service::users::Data for KeyValueDatabase { let to = to.unwrap_or(u64::MAX); - Box::new(self.keychangeid_userid - .iter_from(&start, false) - .take_while(move |(k, _)| { - k.starts_with(&prefix) - && if let Some(current) = k.splitn(2, |&b| b == 0xff).nth(1) { - if let Ok(c) = utils::u64_from_bytes(current) { - c <= to + Box::new( + self.keychangeid_userid + .iter_from(&start, false) + .take_while(move |(k, _)| { + k.starts_with(&prefix) + && if let Some(current) = k.splitn(2, |&b| b == 0xff).nth(1) { + if let Ok(c) = utils::u64_from_bytes(current) { + c <= to + } else { + warn!("BadDatabase: Could not parse keychangeid_userid bytes"); + false + } } else { - warn!("BadDatabase: Could not parse keychangeid_userid bytes"); + warn!("BadDatabase: Could not parse keychangeid_userid"); false } - } else { - warn!("BadDatabase: Could not parse keychangeid_userid"); - false - } - }) - .map(|(_, bytes)| { - UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("User ID in devicekeychangeid_userid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("User ID in devicekeychangeid_userid is invalid.")) - })) + }) + .map(|(_, bytes)| { + UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database( + "User ID in devicekeychangeid_userid is invalid unicode.", + ) + })?) + .map_err(|_| { + Error::bad_database("User ID in devicekeychangeid_userid is invalid.") + }) + }), + ) } - fn mark_device_key_update( - &self, - user_id: &UserId, - ) -> Result<()> { + fn mark_device_key_update(&self, user_id: &UserId) -> Result<()> { let count = services().globals.next_count()?.to_be_bytes(); - for room_id in services().rooms.state_cache.rooms_joined(user_id).filter_map(|r| r.ok()) { + for room_id in services() + .rooms + .state_cache + .rooms_joined(user_id) + .filter_map(|r| r.ok()) + { // Don't send key updates to unencrypted rooms - if services().rooms + if services() + .rooms .state_accessor .room_state_get(&room_id, &StateEventType::RoomEncryption, "")? .is_none() @@ -883,20 +910,19 @@ impl service::users::Data for KeyValueDatabase { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); - Box::new(self.userdeviceid_metadata - .scan_prefix(key) - .map(|(_, bytes)| { - serde_json::from_slice::(&bytes) - .map_err(|_| Error::bad_database("Device in userdeviceid_metadata is invalid.")) - })) + Box::new( + self.userdeviceid_metadata + .scan_prefix(key) + .map(|(_, bytes)| { + serde_json::from_slice::(&bytes).map_err(|_| { + Error::bad_database("Device in userdeviceid_metadata is invalid.") + }) + }), + ) } /// Creates a new sync filter. Returns the filter id. 
- fn create_filter( - &self, - user_id: &UserId, - filter: &IncomingFilterDefinition, - ) -> Result { + fn create_filter(&self, user_id: &UserId, filter: &IncomingFilterDefinition) -> Result { let filter_id = utils::random_string(4); let mut key = user_id.as_bytes().to_vec(); diff --git a/src/database/mod.rs b/src/database/mod.rs index 68684677..8a7c78e7 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -1,8 +1,16 @@ pub mod abstraction; pub mod key_value; -use crate::{utils, Config, Error, Result, service::{users, globals, uiaa, rooms::{self, state_compressor::CompressedStateEvent}, account_data, media, key_backups, transaction_ids, sending, appservice, pusher}, services, PduEvent, Services, SERVICES}; +use crate::{ + service::{ + account_data, appservice, globals, key_backups, media, pusher, + rooms::{self, state_compressor::CompressedStateEvent}, + sending, transaction_ids, uiaa, users, + }, + services, utils, Config, Error, PduEvent, Result, Services, SERVICES, +}; use abstraction::KeyValueDatabaseEngine; +use abstraction::KvTree; use directories::ProjectDirs; use futures_util::{stream::FuturesUnordered, StreamExt}; use lru_cache::LruCache; @@ -12,7 +20,8 @@ use ruma::{ GlobalAccountDataEvent, GlobalAccountDataEventType, StateEventType, }, push::Ruleset, - DeviceId, EventId, RoomId, UserId, signatures::CanonicalJsonValue, + signatures::CanonicalJsonValue, + DeviceId, EventId, RoomId, UserId, }; use std::{ collections::{BTreeMap, HashMap, HashSet}, @@ -25,7 +34,6 @@ use std::{ }; use tokio::sync::{mpsc, OwnedRwLockReadGuard, RwLock as TokioRwLock, Semaphore}; use tracing::{debug, error, info, warn}; -use abstraction::KvTree; pub struct KeyValueDatabase { _db: Arc, @@ -65,9 +73,9 @@ pub struct KeyValueDatabase { pub(super) readreceiptid_readreceipt: Arc, // ReadReceiptId = RoomId + Count + UserId pub(super) roomuserid_privateread: Arc, // RoomUserId = Room + User, PrivateRead = Count pub(super) roomuserid_lastprivatereadupdate: Arc, // LastPrivateReadUpdate = Count - pub(super) typingid_userid: Arc, // TypingId = RoomId + TimeoutTime + Count + pub(super) typingid_userid: Arc, // TypingId = RoomId + TimeoutTime + Count pub(super) roomid_lasttypingupdate: Arc, // LastRoomTypingUpdate = Count - pub(super) presenceid_presence: Arc, // PresenceId = RoomId + Count + UserId + pub(super) presenceid_presence: Arc, // PresenceId = RoomId + Count + UserId pub(super) userid_lastpresenceupdate: Arc, // LastPresenceUpdate = Count //pub rooms: rooms::Rooms, @@ -279,127 +287,126 @@ impl KeyValueDatabase { let db = Arc::new(Self { _db: builder.clone(), - userid_password: builder.open_tree("userid_password")?, - userid_displayname: builder.open_tree("userid_displayname")?, - userid_avatarurl: builder.open_tree("userid_avatarurl")?, - userid_blurhash: builder.open_tree("userid_blurhash")?, - userdeviceid_token: builder.open_tree("userdeviceid_token")?, - userdeviceid_metadata: builder.open_tree("userdeviceid_metadata")?, - userid_devicelistversion: builder.open_tree("userid_devicelistversion")?, - token_userdeviceid: builder.open_tree("token_userdeviceid")?, - onetimekeyid_onetimekeys: builder.open_tree("onetimekeyid_onetimekeys")?, - userid_lastonetimekeyupdate: builder.open_tree("userid_lastonetimekeyupdate")?, - keychangeid_userid: builder.open_tree("keychangeid_userid")?, - keyid_key: builder.open_tree("keyid_key")?, - userid_masterkeyid: builder.open_tree("userid_masterkeyid")?, - userid_selfsigningkeyid: builder.open_tree("userid_selfsigningkeyid")?, - userid_usersigningkeyid: 
builder.open_tree("userid_usersigningkeyid")?, - userfilterid_filter: builder.open_tree("userfilterid_filter")?, - todeviceid_events: builder.open_tree("todeviceid_events")?, - - userdevicesessionid_uiaainfo: builder.open_tree("userdevicesessionid_uiaainfo")?, - userdevicesessionid_uiaarequest: RwLock::new(BTreeMap::new()), - readreceiptid_readreceipt: builder.open_tree("readreceiptid_readreceipt")?, - roomuserid_privateread: builder.open_tree("roomuserid_privateread")?, // "Private" read receipt - roomuserid_lastprivatereadupdate: builder - .open_tree("roomuserid_lastprivatereadupdate")?, - typingid_userid: builder.open_tree("typingid_userid")?, - roomid_lasttypingupdate: builder.open_tree("roomid_lasttypingupdate")?, - presenceid_presence: builder.open_tree("presenceid_presence")?, - userid_lastpresenceupdate: builder.open_tree("userid_lastpresenceupdate")?, - pduid_pdu: builder.open_tree("pduid_pdu")?, - eventid_pduid: builder.open_tree("eventid_pduid")?, - roomid_pduleaves: builder.open_tree("roomid_pduleaves")?, - - alias_roomid: builder.open_tree("alias_roomid")?, - aliasid_alias: builder.open_tree("aliasid_alias")?, - publicroomids: builder.open_tree("publicroomids")?, - - tokenids: builder.open_tree("tokenids")?, - - roomserverids: builder.open_tree("roomserverids")?, - serverroomids: builder.open_tree("serverroomids")?, - userroomid_joined: builder.open_tree("userroomid_joined")?, - roomuserid_joined: builder.open_tree("roomuserid_joined")?, - roomid_joinedcount: builder.open_tree("roomid_joinedcount")?, - roomid_invitedcount: builder.open_tree("roomid_invitedcount")?, - roomuseroncejoinedids: builder.open_tree("roomuseroncejoinedids")?, - userroomid_invitestate: builder.open_tree("userroomid_invitestate")?, - roomuserid_invitecount: builder.open_tree("roomuserid_invitecount")?, - userroomid_leftstate: builder.open_tree("userroomid_leftstate")?, - roomuserid_leftcount: builder.open_tree("roomuserid_leftcount")?, - - disabledroomids: builder.open_tree("disabledroomids")?, - - lazyloadedids: builder.open_tree("lazyloadedids")?, - - userroomid_notificationcount: builder.open_tree("userroomid_notificationcount")?, - userroomid_highlightcount: builder.open_tree("userroomid_highlightcount")?, - - statekey_shortstatekey: builder.open_tree("statekey_shortstatekey")?, - shortstatekey_statekey: builder.open_tree("shortstatekey_statekey")?, - - shorteventid_authchain: builder.open_tree("shorteventid_authchain")?, - - roomid_shortroomid: builder.open_tree("roomid_shortroomid")?, - - shortstatehash_statediff: builder.open_tree("shortstatehash_statediff")?, - eventid_shorteventid: builder.open_tree("eventid_shorteventid")?, - shorteventid_eventid: builder.open_tree("shorteventid_eventid")?, - shorteventid_shortstatehash: builder.open_tree("shorteventid_shortstatehash")?, - roomid_shortstatehash: builder.open_tree("roomid_shortstatehash")?, - roomsynctoken_shortstatehash: builder.open_tree("roomsynctoken_shortstatehash")?, - statehash_shortstatehash: builder.open_tree("statehash_shortstatehash")?, - - eventid_outlierpdu: builder.open_tree("eventid_outlierpdu")?, - softfailedeventids: builder.open_tree("softfailedeventids")?, - - referencedevents: builder.open_tree("referencedevents")?, - roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, - roomusertype_roomuserdataid: builder.open_tree("roomusertype_roomuserdataid")?, - mediaid_file: builder.open_tree("mediaid_file")?, - backupid_algorithm: builder.open_tree("backupid_algorithm")?, - backupid_etag: 
builder.open_tree("backupid_etag")?, - backupkeyid_backup: builder.open_tree("backupkeyid_backup")?, - userdevicetxnid_response: builder.open_tree("userdevicetxnid_response")?, - servername_educount: builder.open_tree("servername_educount")?, - servernameevent_data: builder.open_tree("servernameevent_data")?, - servercurrentevent_data: builder.open_tree("servercurrentevent_data")?, - id_appserviceregistrations: builder.open_tree("id_appserviceregistrations")?, - senderkey_pusher: builder.open_tree("senderkey_pusher")?, - global: builder.open_tree("global")?, - server_signingkeys: builder.open_tree("server_signingkeys")?, - - cached_registrations: Arc::new(RwLock::new(HashMap::new())), - pdu_cache: Mutex::new(LruCache::new( - config - .pdu_cache_capacity - .try_into() - .expect("pdu cache capacity fits into usize"), - )), - auth_chain_cache: Mutex::new(LruCache::new( - (100_000.0 * config.conduit_cache_capacity_modifier) as usize, - )), - shorteventid_cache: Mutex::new(LruCache::new( - (100_000.0 * config.conduit_cache_capacity_modifier) as usize, - )), - eventidshort_cache: Mutex::new(LruCache::new( - (100_000.0 * config.conduit_cache_capacity_modifier) as usize, - )), - shortstatekey_cache: Mutex::new(LruCache::new( - (100_000.0 * config.conduit_cache_capacity_modifier) as usize, - )), - statekeyshort_cache: Mutex::new(LruCache::new( - (100_000.0 * config.conduit_cache_capacity_modifier) as usize, - )), - our_real_users_cache: RwLock::new(HashMap::new()), - appservice_in_room_cache: RwLock::new(HashMap::new()), - lazy_load_waiting: Mutex::new(HashMap::new()), - stateinfo_cache: Mutex::new(LruCache::new( - (100.0 * config.conduit_cache_capacity_modifier) as usize, - )), - lasttimelinecount_cache: Mutex::new(HashMap::new()), - + userid_password: builder.open_tree("userid_password")?, + userid_displayname: builder.open_tree("userid_displayname")?, + userid_avatarurl: builder.open_tree("userid_avatarurl")?, + userid_blurhash: builder.open_tree("userid_blurhash")?, + userdeviceid_token: builder.open_tree("userdeviceid_token")?, + userdeviceid_metadata: builder.open_tree("userdeviceid_metadata")?, + userid_devicelistversion: builder.open_tree("userid_devicelistversion")?, + token_userdeviceid: builder.open_tree("token_userdeviceid")?, + onetimekeyid_onetimekeys: builder.open_tree("onetimekeyid_onetimekeys")?, + userid_lastonetimekeyupdate: builder.open_tree("userid_lastonetimekeyupdate")?, + keychangeid_userid: builder.open_tree("keychangeid_userid")?, + keyid_key: builder.open_tree("keyid_key")?, + userid_masterkeyid: builder.open_tree("userid_masterkeyid")?, + userid_selfsigningkeyid: builder.open_tree("userid_selfsigningkeyid")?, + userid_usersigningkeyid: builder.open_tree("userid_usersigningkeyid")?, + userfilterid_filter: builder.open_tree("userfilterid_filter")?, + todeviceid_events: builder.open_tree("todeviceid_events")?, + + userdevicesessionid_uiaainfo: builder.open_tree("userdevicesessionid_uiaainfo")?, + userdevicesessionid_uiaarequest: RwLock::new(BTreeMap::new()), + readreceiptid_readreceipt: builder.open_tree("readreceiptid_readreceipt")?, + roomuserid_privateread: builder.open_tree("roomuserid_privateread")?, // "Private" read receipt + roomuserid_lastprivatereadupdate: builder + .open_tree("roomuserid_lastprivatereadupdate")?, + typingid_userid: builder.open_tree("typingid_userid")?, + roomid_lasttypingupdate: builder.open_tree("roomid_lasttypingupdate")?, + presenceid_presence: builder.open_tree("presenceid_presence")?, + userid_lastpresenceupdate: 
builder.open_tree("userid_lastpresenceupdate")?, + pduid_pdu: builder.open_tree("pduid_pdu")?, + eventid_pduid: builder.open_tree("eventid_pduid")?, + roomid_pduleaves: builder.open_tree("roomid_pduleaves")?, + + alias_roomid: builder.open_tree("alias_roomid")?, + aliasid_alias: builder.open_tree("aliasid_alias")?, + publicroomids: builder.open_tree("publicroomids")?, + + tokenids: builder.open_tree("tokenids")?, + + roomserverids: builder.open_tree("roomserverids")?, + serverroomids: builder.open_tree("serverroomids")?, + userroomid_joined: builder.open_tree("userroomid_joined")?, + roomuserid_joined: builder.open_tree("roomuserid_joined")?, + roomid_joinedcount: builder.open_tree("roomid_joinedcount")?, + roomid_invitedcount: builder.open_tree("roomid_invitedcount")?, + roomuseroncejoinedids: builder.open_tree("roomuseroncejoinedids")?, + userroomid_invitestate: builder.open_tree("userroomid_invitestate")?, + roomuserid_invitecount: builder.open_tree("roomuserid_invitecount")?, + userroomid_leftstate: builder.open_tree("userroomid_leftstate")?, + roomuserid_leftcount: builder.open_tree("roomuserid_leftcount")?, + + disabledroomids: builder.open_tree("disabledroomids")?, + + lazyloadedids: builder.open_tree("lazyloadedids")?, + + userroomid_notificationcount: builder.open_tree("userroomid_notificationcount")?, + userroomid_highlightcount: builder.open_tree("userroomid_highlightcount")?, + + statekey_shortstatekey: builder.open_tree("statekey_shortstatekey")?, + shortstatekey_statekey: builder.open_tree("shortstatekey_statekey")?, + + shorteventid_authchain: builder.open_tree("shorteventid_authchain")?, + + roomid_shortroomid: builder.open_tree("roomid_shortroomid")?, + + shortstatehash_statediff: builder.open_tree("shortstatehash_statediff")?, + eventid_shorteventid: builder.open_tree("eventid_shorteventid")?, + shorteventid_eventid: builder.open_tree("shorteventid_eventid")?, + shorteventid_shortstatehash: builder.open_tree("shorteventid_shortstatehash")?, + roomid_shortstatehash: builder.open_tree("roomid_shortstatehash")?, + roomsynctoken_shortstatehash: builder.open_tree("roomsynctoken_shortstatehash")?, + statehash_shortstatehash: builder.open_tree("statehash_shortstatehash")?, + + eventid_outlierpdu: builder.open_tree("eventid_outlierpdu")?, + softfailedeventids: builder.open_tree("softfailedeventids")?, + + referencedevents: builder.open_tree("referencedevents")?, + roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, + roomusertype_roomuserdataid: builder.open_tree("roomusertype_roomuserdataid")?, + mediaid_file: builder.open_tree("mediaid_file")?, + backupid_algorithm: builder.open_tree("backupid_algorithm")?, + backupid_etag: builder.open_tree("backupid_etag")?, + backupkeyid_backup: builder.open_tree("backupkeyid_backup")?, + userdevicetxnid_response: builder.open_tree("userdevicetxnid_response")?, + servername_educount: builder.open_tree("servername_educount")?, + servernameevent_data: builder.open_tree("servernameevent_data")?, + servercurrentevent_data: builder.open_tree("servercurrentevent_data")?, + id_appserviceregistrations: builder.open_tree("id_appserviceregistrations")?, + senderkey_pusher: builder.open_tree("senderkey_pusher")?, + global: builder.open_tree("global")?, + server_signingkeys: builder.open_tree("server_signingkeys")?, + + cached_registrations: Arc::new(RwLock::new(HashMap::new())), + pdu_cache: Mutex::new(LruCache::new( + config + .pdu_cache_capacity + .try_into() + .expect("pdu cache capacity fits into usize"), + )), + 
auth_chain_cache: Mutex::new(LruCache::new( + (100_000.0 * config.conduit_cache_capacity_modifier) as usize, + )), + shorteventid_cache: Mutex::new(LruCache::new( + (100_000.0 * config.conduit_cache_capacity_modifier) as usize, + )), + eventidshort_cache: Mutex::new(LruCache::new( + (100_000.0 * config.conduit_cache_capacity_modifier) as usize, + )), + shortstatekey_cache: Mutex::new(LruCache::new( + (100_000.0 * config.conduit_cache_capacity_modifier) as usize, + )), + statekeyshort_cache: Mutex::new(LruCache::new( + (100_000.0 * config.conduit_cache_capacity_modifier) as usize, + )), + our_real_users_cache: RwLock::new(HashMap::new()), + appservice_in_room_cache: RwLock::new(HashMap::new()), + lazy_load_waiting: Mutex::new(HashMap::new()), + stateinfo_cache: Mutex::new(LruCache::new( + (100.0 * config.conduit_cache_capacity_modifier) as usize, + )), + lasttimelinecount_cache: Mutex::new(HashMap::new()), }); let services_raw = Box::new(Services::build(Arc::clone(&db), config)?); @@ -407,7 +414,6 @@ impl KeyValueDatabase { // This is the first and only time we initialize the SERVICE static *SERVICES.write().unwrap() = Some(Box::leak(services_raw)); - // Matrix resource ownership is based on the server name; changing it // requires recreating the database from scratch. if services().users.count()? > 0 { @@ -570,7 +576,10 @@ impl KeyValueDatabase { let states_parents = last_roomsstatehash.map_or_else( || Ok(Vec::new()), |&last_roomsstatehash| { - services().rooms.state_compressor.load_shortstatehash_info(dbg!(last_roomsstatehash)) + services() + .rooms + .state_compressor + .load_shortstatehash_info(dbg!(last_roomsstatehash)) }, )?; @@ -643,14 +652,15 @@ impl KeyValueDatabase { current_state = HashSet::new(); current_sstatehash = Some(sstatehash); - let event_id = db - .shorteventid_eventid - .get(&seventid) - .unwrap() - .unwrap(); + let event_id = db.shorteventid_eventid.get(&seventid).unwrap().unwrap(); let string = utils::string_from_bytes(&event_id).unwrap(); let event_id = <&EventId>::try_from(string.as_str()).unwrap(); - let pdu = services().rooms.timeline.get_pdu(event_id).unwrap().unwrap(); + let pdu = services() + .rooms + .timeline + .get_pdu(event_id) + .unwrap() + .unwrap(); if Some(&pdu.room_id) != current_room.as_ref() { current_room = Some(pdu.room_id.clone()); @@ -764,8 +774,7 @@ impl KeyValueDatabase { .peekable(); while iter.peek().is_some() { - db.tokenids - .insert_batch(&mut iter.by_ref().take(1000))?; + db.tokenids.insert_batch(&mut iter.by_ref().take(1000))?; println!("smaller batch done"); } @@ -803,8 +812,7 @@ impl KeyValueDatabase { // Force E2EE device list updates so we can send them over federation for user_id in services().users.iter().filter_map(|r| r.ok()) { - services().users - .mark_device_key_update(&user_id)?; + services().users.mark_device_key_update(&user_id)?; } services().globals.bump_database_version(10)?; @@ -825,7 +833,8 @@ impl KeyValueDatabase { info!( "Loaded {} database with version {}", - services().globals.config.database_backend, latest_database_version + services().globals.config.database_backend, + latest_database_version ); } else { services() @@ -837,7 +846,8 @@ impl KeyValueDatabase { warn!( "Created new {} database with version {}", - services().globals.config.database_backend, latest_database_version + services().globals.config.database_backend, + latest_database_version ); } @@ -862,9 +872,7 @@ impl KeyValueDatabase { } }; - services() - .sending - .start_handler(sending_receiver); + 
services().sending.start_handler(sending_receiver); Self::start_cleanup_task().await; @@ -898,7 +906,8 @@ impl KeyValueDatabase { use std::time::{Duration, Instant}; - let timer_interval = Duration::from_secs(services().globals.config.cleanup_second_interval as u64); + let timer_interval = + Duration::from_secs(services().globals.config.cleanup_second_interval as u64); tokio::spawn(async move { let mut i = interval(timer_interval); @@ -937,8 +946,10 @@ fn set_emergency_access() -> Result { let conduit_user = UserId::parse_with_server_name("conduit", services().globals.server_name()) .expect("@conduit:server_name is a valid UserId"); - services().users - .set_password(&conduit_user, services().globals.emergency_password().as_deref())?; + services().users.set_password( + &conduit_user, + services().globals.emergency_password().as_deref(), + )?; let (ruleset, res) = match services().globals.emergency_password() { Some(_) => (Ruleset::server_default(&conduit_user), Ok(true)), @@ -951,7 +962,8 @@ fn set_emergency_access() -> Result { GlobalAccountDataEventType::PushRules.to_string().into(), &serde_json::to_value(&GlobalAccountDataEvent { content: PushRulesEventContent { global: ruleset }, - }).expect("to json value always works"), + }) + .expect("to json value always works"), )?; res diff --git a/src/lib.rs b/src/lib.rs index c103d529..e6421e8e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -7,22 +7,27 @@ #![allow(clippy::suspicious_else_formatting)] #![deny(clippy::dbg_macro)] +pub mod api; mod config; mod database; mod service; -pub mod api; mod utils; -use std::{cell::Cell, sync::{RwLock, Arc}}; +use std::{ + cell::Cell, + sync::{Arc, RwLock}, +}; +pub use api::ruma_wrapper::{Ruma, RumaResponse}; pub use config::Config; +pub use service::{pdu::PduEvent, Services}; pub use utils::error::{Error, Result}; -pub use service::{Services, pdu::PduEvent}; -pub use api::ruma_wrapper::{Ruma, RumaResponse}; pub static SERVICES: RwLock> = RwLock::new(None); pub fn services<'a>() -> &'static Services { - &SERVICES.read().unwrap().expect("SERVICES should be initialized when this is called") + &SERVICES + .read() + .unwrap() + .expect("SERVICES should be initialized when this is called") } - diff --git a/src/service/account_data/data.rs b/src/service/account_data/data.rs index 65780a69..c7c92981 100644 --- a/src/service/account_data/data.rs +++ b/src/service/account_data/data.rs @@ -1,7 +1,11 @@ use std::collections::HashMap; -use ruma::{UserId, RoomId, events::{RoomAccountDataEventType, AnyEphemeralRoomEvent}, serde::Raw}; use crate::Result; +use ruma::{ + events::{AnyEphemeralRoomEvent, RoomAccountDataEventType}, + serde::Raw, + RoomId, UserId, +}; pub trait Data: Send + Sync { /// Places one event in the account data of the user and removes the previous entry. 
diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index 1289f7a3..5bf167d1 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -3,9 +3,7 @@ mod data; pub use data::Data; use ruma::{ - api::client::{ - error::ErrorKind, - }, + api::client::error::ErrorKind, events::{AnyEphemeralRoomEvent, RoomAccountDataEventType}, serde::Raw, signatures::CanonicalJsonValue, diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 0b14314f..db596a35 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -28,7 +28,15 @@ use ruma::{ use serde_json::value::to_raw_value; use tokio::sync::{mpsc, MutexGuard, RwLock, RwLockReadGuard}; -use crate::{Result, services, Error, api::{server_server, client_server::{AUTO_GEN_PASSWORD_LENGTH, leave_all_rooms}}, PduEvent, utils::{HtmlEscape, self}}; +use crate::{ + api::{ + client_server::{leave_all_rooms, AUTO_GEN_PASSWORD_LENGTH}, + server_server, + }, + services, + utils::{self, HtmlEscape}, + Error, PduEvent, Result, +}; use super::pdu::PduBuilder; @@ -153,7 +161,6 @@ enum AdminCommand { EnableRoom { room_id: Box }, } - #[derive(Debug)] pub enum AdminRoomEvent { ProcessMessage(String), @@ -166,16 +173,14 @@ pub struct Service { } impl Service { - pub fn start_handler( - &self, - mut receiver: mpsc::UnboundedReceiver, - ) { + pub fn start_handler(&self, mut receiver: mpsc::UnboundedReceiver) { tokio::spawn(async move { // TODO: Use futures when we have long admin commands //let mut futures = FuturesUnordered::new(); - let conduit_user = UserId::parse(format!("@conduit:{}", services().globals.server_name())) - .expect("@conduit:server_name is valid"); + let conduit_user = + UserId::parse(format!("@conduit:{}", services().globals.server_name())) + .expect("@conduit:server_name is valid"); let conduit_room = services() .rooms @@ -193,7 +198,8 @@ impl Service { mutex_lock: &MutexGuard<'_, ()>| { services() .rooms - .timeline.build_and_append_pdu( + .timeline + .build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomMessage, content: to_raw_value(&message) @@ -316,9 +322,11 @@ impl Service { ) -> Result { let reply_message_content = match command { AdminCommand::RegisterAppservice => { - if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" { + if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" + { let appservice_config = body[1..body.len() - 1].join("\n"); - let parsed_config = serde_yaml::from_str::(&appservice_config); + let parsed_config = + serde_yaml::from_str::(&appservice_config); match parsed_config { Ok(yaml) => match services().appservice.register_appservice(yaml) { Ok(id) => RoomMessageEventContent::text_plain(format!( @@ -343,7 +351,10 @@ impl Service { } AdminCommand::UnregisterAppservice { appservice_identifier, - } => match services().appservice.unregister_appservice(&appservice_identifier) { + } => match services() + .appservice + .unregister_appservice(&appservice_identifier) + { Ok(()) => RoomMessageEventContent::text_plain("Appservice unregistered."), Err(e) => RoomMessageEventContent::text_plain(format!( "Failed to unregister appservice: {}", @@ -351,7 +362,11 @@ impl Service { )), }, AdminCommand::ListAppservices => { - if let Ok(appservices) = services().appservice.iter_ids().map(|ids| ids.collect::>()) { + if let Ok(appservices) = services() + .appservice + .iter_ids() + .map(|ids| ids.collect::>()) + { let count = appservices.len(); let output = format!( "Appservices ({}): {}", @@ 
-399,7 +414,11 @@ impl Service { Err(e) => RoomMessageEventContent::text_plain(e.to_string()), }, AdminCommand::IncomingFederation => { - let map = services().globals.roomid_federationhandletime.read().unwrap(); + let map = services() + .globals + .roomid_federationhandletime + .read() + .unwrap(); let mut msg: String = format!("Handling {} incoming pdus:\n", map.len()); for (r, (e, i)) in map.iter() { @@ -426,7 +445,10 @@ impl Service { Error::bad_database("Invalid room id field in event in database") })?; let start = Instant::now(); - let count = services().rooms.auth_chain.get_auth_chain(room_id, vec![event_id]) + let count = services() + .rooms + .auth_chain + .get_auth_chain(room_id, vec![event_id]) .await? .count(); let elapsed = start.elapsed(); @@ -439,7 +461,8 @@ impl Service { } } AdminCommand::ParsePdu => { - if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" { + if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" + { let string = body[1..body.len() - 1].join("\n"); match serde_json::from_str(&string) { Ok(value) => { @@ -477,15 +500,18 @@ impl Service { } AdminCommand::GetPdu { event_id } => { let mut outlier = false; - let mut pdu_json = services().rooms.timeline.get_non_outlier_pdu_json(&event_id)?; + let mut pdu_json = services() + .rooms + .timeline + .get_non_outlier_pdu_json(&event_id)?; if pdu_json.is_none() { outlier = true; pdu_json = services().rooms.timeline.get_pdu_json(&event_id)?; } match pdu_json { Some(json) => { - let json_text = - serde_json::to_string_pretty(&json).expect("canonical json is valid json"); + let json_text = serde_json::to_string_pretty(&json) + .expect("canonical json is valid json"); RoomMessageEventContent::text_html( format!( "{}\n```json\n{}\n```", @@ -539,8 +565,11 @@ impl Service { if !services().users.exists(&user_id)? || services().users.is_deactivated(&user_id)? 
|| user_id - == UserId::parse_with_server_name("conduit", services().globals.server_name()) - .expect("conduit user exists") + == UserId::parse_with_server_name( + "conduit", + services().globals.server_name(), + ) + .expect("conduit user exists") { return Ok(RoomMessageEventContent::text_plain( "The specified user does not exist or is deactivated!", @@ -549,7 +578,10 @@ impl Service { let new_password = utils::random_string(AUTO_GEN_PASSWORD_LENGTH); - match services().users.set_password(&user_id, Some(new_password.as_str())) { + match services() + .users + .set_password(&user_id, Some(new_password.as_str())) + { Ok(()) => RoomMessageEventContent::text_plain(format!( "Successfully reset the password for user {}: {}", user_id, new_password @@ -590,7 +622,8 @@ impl Service { // Default to pretty displayname let displayname = format!("{} ⚡️", user_id.localpart()); - services().users + services() + .users .set_displayname(&user_id, Some(displayname.clone()))?; // Initial account data @@ -604,7 +637,8 @@ impl Service { content: ruma::events::push_rules::PushRulesEventContent { global: ruma::push::Ruleset::server_default(&user_id), }, - }).expect("to json value always works"), + }) + .expect("to json value always works"), )?; // we dont add a device since we're not the user, just the creator @@ -651,7 +685,8 @@ impl Service { } } AdminCommand::DeactivateAll { leave_rooms, force } => { - if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" { + if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" + { let usernames = body.clone().drain(1..body.len() - 1).collect::>(); let mut user_ids: Vec<&UserId> = Vec::new(); @@ -672,17 +707,15 @@ impl Service { let mut admins = Vec::new(); if !force { - user_ids.retain(|&user_id| { - match services().users.is_admin(user_id) { - Ok(is_admin) => match is_admin { - true => { - admins.push(user_id.localpart()); - false - } - false => true, - }, - Err(_) => false, - } + user_ids.retain(|&user_id| match services().users.is_admin(user_id) { + Ok(is_admin) => match is_admin { + true => { + admins.push(user_id.localpart()); + false + } + false => true, + }, + Err(_) => false, }) } @@ -783,8 +816,8 @@ impl Service { } else { // Wrap the usage line in a code block, and add a yaml block example // This makes the usage of e.g. `register-appservice` more accurate - let re = - Regex::new("(?m)^USAGE:\n (.*?)\n\n").expect("Regex compilation should not fail"); + let re = Regex::new("(?m)^USAGE:\n (.*?)\n\n") + .expect("Regex compilation should not fail"); re.replace_all(&text, "USAGE:\n
                <pre>$1[nobr]\n[commandbodyblock]</pre>
                ") .replace("[commandbodyblock]", &command_body) }; @@ -808,7 +841,8 @@ impl Service { services().rooms.short.get_or_create_shortroomid(&room_id)?; let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -818,8 +852,9 @@ impl Service { let state_lock = mutex_state.lock().await; // Create a user for the server - let conduit_user = UserId::parse_with_server_name("conduit", services().globals.server_name()) - .expect("@conduit:server_name is valid"); + let conduit_user = + UserId::parse_with_server_name("conduit", services().globals.server_name()) + .expect("@conduit:server_name is valid"); services().users.create(&conduit_user, None)?; @@ -1002,9 +1037,10 @@ impl Service { user_id: &UserId, displayname: String, ) -> Result<()> { - let admin_room_alias: Box = format!("#admins:{}", services().globals.server_name()) - .try_into() - .expect("#admins:server_name is a valid alias name"); + let admin_room_alias: Box = + format!("#admins:{}", services().globals.server_name()) + .try_into() + .expect("#admins:server_name is a valid alias name"); let room_id = services() .rooms .alias @@ -1012,7 +1048,8 @@ impl Service { .expect("Admin room must exist"); let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -1022,8 +1059,9 @@ impl Service { let state_lock = mutex_state.lock().await; // Use the server user to grant the new admin's power level - let conduit_user = UserId::parse_with_server_name("conduit", services().globals.server_name()) - .expect("@conduit:server_name is valid"); + let conduit_user = + UserId::parse_with_server_name("conduit", services().globals.server_name()) + .expect("@conduit:server_name is valid"); // Invite and join the real user services().rooms.timeline.build_and_append_pdu( diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index 0f74b2a7..407ff1c4 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -1,7 +1,11 @@ use std::collections::BTreeMap; use async_trait::async_trait; -use ruma::{signatures::Ed25519KeyPair, DeviceId, UserId, ServerName, api::federation::discovery::{ServerSigningKeys, VerifyKey}, ServerSigningKeyId}; +use ruma::{ + api::federation::discovery::{ServerSigningKeys, VerifyKey}, + signatures::Ed25519KeyPair, + DeviceId, ServerName, ServerSigningKeyId, UserId, +}; use crate::Result; diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index de8d1aa7..23a61599 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -4,7 +4,7 @@ pub use data::Data; use crate::api::server_server::FedDest; use crate::service::*; -use crate::{Config, utils, Error, Result}; +use crate::{utils, Config, Error, Result}; use ruma::{ api::{ client::sync::sync_events, @@ -89,12 +89,8 @@ impl Default for RotationHandler { } } - impl Service { - pub fn load( - db: Arc, - config: Config, - ) -> Result { + pub fn load(db: Arc, config: Config) -> Result { let keypair = db.load_keypair(); let keypair = match keypair { diff --git a/src/service/key_backups/data.rs b/src/service/key_backups/data.rs index 226b1e16..f711e5d9 100644 --- a/src/service/key_backups/data.rs +++ b/src/service/key_backups/data.rs @@ -1,7 +1,11 @@ use std::collections::BTreeMap; -use ruma::{api::client::backup::{BackupAlgorithm, RoomKeyBackup, KeyBackupData}, serde::Raw, UserId, RoomId}; use crate::Result; +use ruma::{ + api::client::backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, + serde::Raw, + 
RoomId, UserId, +}; pub trait Data: Send + Sync { fn create_backup( @@ -21,16 +25,10 @@ pub trait Data: Send + Sync { fn get_latest_backup_version(&self, user_id: &UserId) -> Result>; - fn get_latest_backup( - &self, - user_id: &UserId, - ) -> Result)>>; + fn get_latest_backup(&self, user_id: &UserId) + -> Result)>>; - fn get_backup( - &self, - user_id: &UserId, - version: &str, - ) -> Result>>; + fn get_backup(&self, user_id: &UserId, version: &str) -> Result>>; fn add_key( &self, @@ -68,12 +66,7 @@ pub trait Data: Send + Sync { fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()>; - fn delete_room_keys( - &self, - user_id: &UserId, - version: &str, - room_id: &RoomId, - ) -> Result<()>; + fn delete_room_keys(&self, user_id: &UserId, version: &str, room_id: &RoomId) -> Result<()>; fn delete_room_key( &self, diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs index a3bed714..41ec1c1b 100644 --- a/src/service/key_backups/mod.rs +++ b/src/service/key_backups/mod.rs @@ -1,7 +1,7 @@ mod data; pub use data::Data; -use crate::{utils, Error, Result, services}; +use crate::{services, utils, Error, Result}; use ruma::{ api::client::{ backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, @@ -65,7 +65,8 @@ impl Service { session_id: &str, key_data: &Raw, ) -> Result<()> { - self.db.add_key(user_id, version, room_id, session_id, key_data) + self.db + .add_key(user_id, version, room_id, session_id, key_data) } pub fn count_keys(&self, user_id: &UserId, version: &str) -> Result { @@ -123,6 +124,7 @@ impl Service { room_id: &RoomId, session_id: &str, ) -> Result<()> { - self.db.delete_room_key(user_id, version, room_id, session_id) + self.db + .delete_room_key(user_id, version, room_id, session_id) } } diff --git a/src/service/media/data.rs b/src/service/media/data.rs index 2e24049a..75a682cb 100644 --- a/src/service/media/data.rs +++ b/src/service/media/data.rs @@ -1,8 +1,20 @@ use crate::Result; pub trait Data: Send + Sync { - fn create_file_metadata(&self, mxc: String, width: u32, height: u32, content_disposition: Option<&str>, content_type: Option<&str>) -> Result>; + fn create_file_metadata( + &self, + mxc: String, + width: u32, + height: u32, + content_disposition: Option<&str>, + content_type: Option<&str>, + ) -> Result>; /// Returns content_disposition, content_type and the metadata key. 
- fn search_file_metadata(&self, mxc: String, width: u32, height: u32) -> Result<(Option, Option, Vec)>; + fn search_file_metadata( + &self, + mxc: String, + width: u32, + height: u32, + ) -> Result<(Option, Option, Vec)>; } diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index d3dd2bdc..ea276c04 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -1,8 +1,8 @@ mod data; pub use data::Data; +use crate::{services, utils, Error, Result}; use image::{imageops::FilterType, GenericImageView}; -use crate::{utils, Error, Result, services}; use std::{mem, sync::Arc}; use tokio::{ fs::File, @@ -29,7 +29,9 @@ impl Service { file: &[u8], ) -> Result<()> { // Width, Height = 0 if it's not a thumbnail - let key = self.db.create_file_metadata(mxc, 0, 0, content_disposition, content_type)?; + let key = self + .db + .create_file_metadata(mxc, 0, 0, content_disposition, content_type)?; let path = services().globals.get_media_file(&key); let mut f = File::create(path).await?; @@ -48,7 +50,9 @@ impl Service { height: u32, file: &[u8], ) -> Result<()> { - let key = self.db.create_file_metadata(mxc, width, height, content_disposition, content_type)?; + let key = + self.db + .create_file_metadata(mxc, width, height, content_disposition, content_type)?; let path = services().globals.get_media_file(&key); let mut f = File::create(path).await?; @@ -59,12 +63,13 @@ impl Service { /// Downloads a file. pub async fn get(&self, mxc: String) -> Result> { - if let Ok((content_disposition, content_type, key)) = self.db.search_file_metadata(mxc, 0, 0) { + if let Ok((content_disposition, content_type, key)) = + self.db.search_file_metadata(mxc, 0, 0) + { let path = services().globals.get_media_file(&key); let mut file = Vec::new(); File::open(path).await?.read_to_end(&mut file).await?; - Ok(Some(FileMeta { content_disposition, content_type, @@ -108,7 +113,9 @@ impl Service { .thumbnail_properties(width, height) .unwrap_or((0, 0, false)); // 0, 0 because that's the original file - if let Ok((content_disposition, content_type, key)) = self.db.search_file_metadata(mxc.clone(), width, height) { + if let Ok((content_disposition, content_type, key)) = + self.db.search_file_metadata(mxc.clone(), width, height) + { // Using saved thumbnail let path = services().globals.get_media_file(&key); let mut file = Vec::new(); @@ -119,7 +126,9 @@ impl Service { content_type, file: file.to_vec(), })) - } else if let Ok((content_disposition, content_type, key)) = self.db.search_file_metadata(mxc.clone(), 0, 0) { + } else if let Ok((content_disposition, content_type, key)) = + self.db.search_file_metadata(mxc.clone(), 0, 0) + { // Generate a thumbnail let path = services().globals.get_media_file(&key); let mut file = Vec::new(); @@ -180,7 +189,13 @@ impl Service { thumbnail.write_to(&mut thumbnail_bytes, image::ImageOutputFormat::Png)?; // Save thumbnail in database so we don't have to generate it again next time - let thumbnail_key = self.db.create_file_metadata(mxc, width, height, content_disposition.as_deref(), content_type.as_deref())?; + let thumbnail_key = self.db.create_file_metadata( + mxc, + width, + height, + content_disposition.as_deref(), + content_type.as_deref(), + )?; let path = services().globals.get_media_file(&thumbnail_key); let mut f = File::create(path).await?; diff --git a/src/service/mod.rs b/src/service/mod.rs index daf43293..dbddf405 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -5,7 +5,7 @@ use std::{ use lru_cache::LruCache; -use crate::{Result, Config}; +use 
crate::{Config, Result}; pub mod account_data; pub mod admin; @@ -49,7 +49,8 @@ impl Services { + key_backups::Data + media::Data, >( - db: Arc, config: Config + db: Arc, + config: Config, ) -> Result { Ok(Self { appservice: appservice::Service { db: db.clone() }, @@ -76,30 +77,26 @@ impl Services { state: rooms::state::Service { db: db.clone() }, state_accessor: rooms::state_accessor::Service { db: db.clone() }, state_cache: rooms::state_cache::Service { db: db.clone() }, - state_compressor: rooms::state_compressor::Service { db: db.clone(), stateinfo_cache: Mutex::new(LruCache::new((100.0 * config.conduit_cache_capacity_modifier) as usize,)) }, - timeline: rooms::timeline::Service { db: db.clone(), lasttimelinecount_cache: Mutex::new(HashMap::new()) }, + state_compressor: rooms::state_compressor::Service { + db: db.clone(), + stateinfo_cache: Mutex::new(LruCache::new( + (100.0 * config.conduit_cache_capacity_modifier) as usize, + )), + }, + timeline: rooms::timeline::Service { + db: db.clone(), + lasttimelinecount_cache: Mutex::new(HashMap::new()), + }, user: rooms::user::Service { db: db.clone() }, }, - transaction_ids: transaction_ids::Service { - db: db.clone() - }, - uiaa: uiaa::Service { - db: db.clone() - }, - users: users::Service { - db: db.clone() - }, - account_data: account_data::Service { - db: db.clone() - }, + transaction_ids: transaction_ids::Service { db: db.clone() }, + uiaa: uiaa::Service { db: db.clone() }, + users: users::Service { db: db.clone() }, + account_data: account_data::Service { db: db.clone() }, admin: admin::Service { sender: todo!() }, globals: globals::Service::load(db.clone(), config)?, - key_backups: key_backups::Service { - db: db.clone() - }, - media: media::Service { - db: db.clone() - }, + key_backups: key_backups::Service { db: db.clone() }, + media: media::Service { db: db.clone() }, sending: sending::Service { maximum_requests: todo!(), sender: todo!(), diff --git a/src/service/pdu.rs b/src/service/pdu.rs index 3be3300c..724b2b21 100644 --- a/src/service/pdu.rs +++ b/src/service/pdu.rs @@ -1,4 +1,4 @@ -use crate::{Error, services}; +use crate::{services, Error}; use ruma::{ events::{ room::member::RoomMemberEventContent, AnyEphemeralRoomEvent, AnyRoomEvent, AnyStateEvent, diff --git a/src/service/pusher/data.rs b/src/service/pusher/data.rs index 305a5383..243b77f7 100644 --- a/src/service/pusher/data.rs +++ b/src/service/pusher/data.rs @@ -1,5 +1,8 @@ -use ruma::{UserId, api::client::push::{set_pusher, get_pushers}}; use crate::Result; +use ruma::{ + api::client::push::{get_pushers, set_pusher}, + UserId, +}; pub trait Data: Send + Sync { fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()>; @@ -8,8 +11,5 @@ pub trait Data: Send + Sync { fn get_pushers(&self, sender: &UserId) -> Result>; - fn get_pusher_senderkeys<'a>( - &'a self, - sender: &UserId, - ) -> Box>>; + fn get_pusher_senderkeys<'a>(&'a self, sender: &UserId) -> Box>>; } diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index e65c57ab..78d5f26c 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -79,7 +79,11 @@ impl Service { //*reqwest_request.timeout_mut() = Some(Duration::from_secs(5)); let url = reqwest_request.url().clone(); - let response = services().globals.default_client().execute(reqwest_request).await; + let response = services() + .globals + .default_client() + .execute(reqwest_request) + .await; match response { Ok(mut response) => { @@ -196,7 +200,8 @@ impl Service { let ctx = PushConditionRoomCtx { 
room_id: room_id.to_owned(), member_count: 10_u32.into(), // TODO: get member count efficiently - user_display_name: services().users + user_display_name: services() + .users .displayname(user)? .unwrap_or_else(|| user.localpart().to_owned()), users_power_levels: power_levels.users.clone(), @@ -276,10 +281,10 @@ impl Service { let user_name = services().users.displayname(&event.sender)?; notifi.sender_display_name = user_name.as_deref(); - let room_name = if let Some(room_name_pdu) = - services().rooms + let room_name = if let Some(room_name_pdu) = services() + .rooms .state_accessor - .room_state_get(&event.room_id, &StateEventType::RoomName, "")? + .room_state_get(&event.room_id, &StateEventType::RoomName, "")? { serde_json::from_str::(room_name_pdu.content.get()) .map_err(|_| Error::bad_database("Invalid room name event in database."))? @@ -290,11 +295,8 @@ impl Service { notifi.room_name = room_name.as_deref(); - self.send_request( - url, - send_event_notification::v1::Request::new(notifi), - ) - .await?; + self.send_request(url, send_event_notification::v1::Request::new(notifi)) + .await?; } // TODO: email diff --git a/src/service/rooms/alias/data.rs b/src/service/rooms/alias/data.rs index 26bffae2..90205f93 100644 --- a/src/service/rooms/alias/data.rs +++ b/src/service/rooms/alias/data.rs @@ -1,25 +1,15 @@ -use ruma::{RoomId, RoomAliasId}; use crate::Result; +use ruma::{RoomAliasId, RoomId}; pub trait Data: Send + Sync { /// Creates or updates the alias to the given room id. - fn set_alias( - &self, - alias: &RoomAliasId, - room_id: &RoomId - ) -> Result<()>; + fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()>; /// Forgets about an alias. Returns an error if the alias did not exist. - fn remove_alias( - &self, - alias: &RoomAliasId, - ) -> Result<()>; + fn remove_alias(&self, alias: &RoomAliasId) -> Result<()>; /// Looks up the roomid for the given alias. 
- fn resolve_local_alias( - &self, - alias: &RoomAliasId, - ) -> Result>>; + fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result>>; /// Returns all local aliases that point to the given room fn local_aliases_for_room( diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index 65fb3677..6a3cf4e0 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -3,8 +3,8 @@ use std::sync::Arc; pub use data::Data; -use ruma::{RoomAliasId, RoomId}; use crate::Result; +use ruma::{RoomAliasId, RoomId}; pub struct Service { db: Arc, @@ -12,19 +12,12 @@ pub struct Service { impl Service { #[tracing::instrument(skip(self))] - pub fn set_alias( - &self, - alias: &RoomAliasId, - room_id: &RoomId, - ) -> Result<()> { + pub fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()> { self.db.set_alias(alias, room_id) } #[tracing::instrument(skip(self))] - pub fn remove_alias( - &self, - alias: &RoomAliasId, - ) -> Result<()> { + pub fn remove_alias(&self, alias: &RoomAliasId) -> Result<()> { self.db.remove_alias(alias) } diff --git a/src/service/rooms/auth_chain/data.rs b/src/service/rooms/auth_chain/data.rs index 13fac2dc..e8c379fc 100644 --- a/src/service/rooms/auth_chain/data.rs +++ b/src/service/rooms/auth_chain/data.rs @@ -1,7 +1,11 @@ -use std::{collections::HashSet, sync::Arc}; use crate::Result; +use std::{collections::HashSet, sync::Arc}; pub trait Data: Send + Sync { - fn get_cached_eventid_authchain(&self, shorteventid: &[u64]) -> Result>>>; - fn cache_auth_chain(&self, shorteventid: Vec, auth_chain: Arc>) -> Result<()>; + fn get_cached_eventid_authchain( + &self, + shorteventid: &[u64], + ) -> Result>>>; + fn cache_auth_chain(&self, shorteventid: Vec, auth_chain: Arc>) + -> Result<()>; } diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index e35094bb..ed06385d 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -1,11 +1,14 @@ mod data; -use std::{sync::Arc, collections::{HashSet, BTreeSet}}; +use std::{ + collections::{BTreeSet, HashSet}, + sync::Arc, +}; pub use data::Data; -use ruma::{RoomId, EventId, api::client::error::ErrorKind}; +use ruma::{api::client::error::ErrorKind, EventId, RoomId}; use tracing::log::warn; -use crate::{Result, services, Error}; +use crate::{services, Error, Result}; pub struct Service { db: Arc, @@ -56,7 +59,11 @@ impl Service { } let chunk_key: Vec = chunk.iter().map(|(short, _)| short).copied().collect(); - if let Some(cached) = services().rooms.auth_chain.get_cached_eventid_authchain(&chunk_key)? { + if let Some(cached) = services() + .rooms + .auth_chain + .get_cached_eventid_authchain(&chunk_key)? + { hits += 1; full_auth_chain.extend(cached.iter().copied()); continue; @@ -68,13 +75,18 @@ impl Service { let mut misses2 = 0; let mut i = 0; for (sevent_id, event_id) in chunk { - if let Some(cached) = services().rooms.auth_chain.get_cached_eventid_authchain(&[sevent_id])? { + if let Some(cached) = services() + .rooms + .auth_chain + .get_cached_eventid_authchain(&[sevent_id])? 
+ { hits2 += 1; chunk_cache.extend(cached.iter().copied()); } else { misses2 += 1; let auth_chain = Arc::new(self.get_auth_chain_inner(room_id, &event_id)?); - services().rooms + services() + .rooms .auth_chain .cache_auth_chain(vec![sevent_id], Arc::clone(&auth_chain))?; println!( @@ -97,8 +109,10 @@ impl Service { misses2 ); let chunk_cache = Arc::new(chunk_cache); - services().rooms - .auth_chain.cache_auth_chain(chunk_key, Arc::clone(&chunk_cache))?; + services() + .rooms + .auth_chain + .cache_auth_chain(chunk_key, Arc::clone(&chunk_cache))?; full_auth_chain.extend(chunk_cache.iter()); } @@ -115,11 +129,7 @@ impl Service { } #[tracing::instrument(skip(self, event_id))] - fn get_auth_chain_inner( - &self, - room_id: &RoomId, - event_id: &EventId, - ) -> Result> { + fn get_auth_chain_inner(&self, room_id: &RoomId, event_id: &EventId) -> Result> { let mut todo = vec![Arc::from(event_id)]; let mut found = HashSet::new(); @@ -131,7 +141,8 @@ impl Service { } for auth_event in &pdu.auth_events { let sauthevent = services() - .rooms.short + .rooms + .short .get_or_create_shorteventid(auth_event)?; if !found.contains(&sauthevent) { diff --git a/src/service/rooms/directory/data.rs b/src/service/rooms/directory/data.rs index b4e020d7..fb523cf8 100644 --- a/src/service/rooms/directory/data.rs +++ b/src/service/rooms/directory/data.rs @@ -1,5 +1,5 @@ -use ruma::RoomId; use crate::Result; +use ruma::RoomId; pub trait Data: Send + Sync { /// Adds the room to the public room directory diff --git a/src/service/rooms/edus/presence/data.rs b/src/service/rooms/edus/presence/data.rs index f7592555..f3784040 100644 --- a/src/service/rooms/edus/presence/data.rs +++ b/src/service/rooms/edus/presence/data.rs @@ -1,7 +1,7 @@ use std::collections::HashMap; -use ruma::{UserId, RoomId, events::presence::PresenceEvent}; use crate::Result; +use ruma::{events::presence::PresenceEvent, RoomId, UserId}; pub trait Data: Send + Sync { /// Adds a presence event which will be saved until a new event replaces it. diff --git a/src/service/rooms/edus/presence/mod.rs b/src/service/rooms/edus/presence/mod.rs index d6578977..636bd910 100644 --- a/src/service/rooms/edus/presence/mod.rs +++ b/src/service/rooms/edus/presence/mod.rs @@ -2,7 +2,7 @@ mod data; use std::{collections::HashMap, sync::Arc}; pub use data::Data; -use ruma::{RoomId, UserId, events::presence::PresenceEvent}; +use ruma::{events::presence::PresenceEvent, RoomId, UserId}; use crate::Result; diff --git a/src/service/rooms/edus/read_receipt/data.rs b/src/service/rooms/edus/read_receipt/data.rs index 5ebd89d6..734c68d5 100644 --- a/src/service/rooms/edus/read_receipt/data.rs +++ b/src/service/rooms/edus/read_receipt/data.rs @@ -1,5 +1,5 @@ -use ruma::{RoomId, events::receipt::ReceiptEvent, UserId, serde::Raw}; use crate::Result; +use ruma::{events::receipt::ReceiptEvent, serde::Raw, RoomId, UserId}; pub trait Data: Send + Sync { /// Replaces the previous read receipt. @@ -15,13 +15,15 @@ pub trait Data: Send + Sync { &self, room_id: &RoomId, since: u64, - ) -> Box, - u64, - Raw, - )>, - >>; + ) -> Box< + dyn Iterator< + Item = Result<( + Box, + u64, + Raw, + )>, + >, + >; /// Sets a private read marker at `count`. 
fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()>; diff --git a/src/service/rooms/edus/read_receipt/mod.rs b/src/service/rooms/edus/read_receipt/mod.rs index 17708772..35fee1a5 100644 --- a/src/service/rooms/edus/read_receipt/mod.rs +++ b/src/service/rooms/edus/read_receipt/mod.rs @@ -3,8 +3,8 @@ use std::sync::Arc; pub use data::Data; -use ruma::{RoomId, UserId, events::receipt::ReceiptEvent, serde::Raw}; use crate::Result; +use ruma::{events::receipt::ReceiptEvent, serde::Raw, RoomId, UserId}; pub struct Service { db: Arc, diff --git a/src/service/rooms/edus/typing/data.rs b/src/service/rooms/edus/typing/data.rs index 426d4e06..50b6d13e 100644 --- a/src/service/rooms/edus/typing/data.rs +++ b/src/service/rooms/edus/typing/data.rs @@ -1,6 +1,6 @@ -use std::collections::HashSet; use crate::Result; -use ruma::{UserId, RoomId}; +use ruma::{RoomId, UserId}; +use std::collections::HashSet; pub trait Data: Send + Sync { /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is diff --git a/src/service/rooms/edus/typing/mod.rs b/src/service/rooms/edus/typing/mod.rs index 37520560..91892df6 100644 --- a/src/service/rooms/edus/typing/mod.rs +++ b/src/service/rooms/edus/typing/mod.rs @@ -2,7 +2,7 @@ mod data; use std::sync::Arc; pub use data::Data; -use ruma::{UserId, RoomId, events::SyncEphemeralRoomEvent}; +use ruma::{events::SyncEphemeralRoomEvent, RoomId, UserId}; use crate::Result; diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index d6ec8e95..689f6780 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -1,22 +1,33 @@ /// An async function that can recursively call itself. type AsyncRecursiveType<'a, T> = Pin + 'a + Send>>; -use ruma::{RoomVersionId, signatures::CanonicalJsonObject, api::federation::discovery::{get_server_keys, get_remote_server_keys}}; -use tokio::sync::Semaphore; +use ruma::{ + api::federation::discovery::{get_remote_server_keys, get_server_keys}, + signatures::CanonicalJsonObject, + RoomVersionId, +}; use std::{ collections::{btree_map, hash_map, BTreeMap, HashMap, HashSet}, pin::Pin, sync::{Arc, RwLock, RwLockWriteGuard}, time::{Duration, Instant, SystemTime}, }; +use tokio::sync::Semaphore; -use futures_util::{Future, stream::FuturesUnordered, StreamExt}; +use futures_util::{stream::FuturesUnordered, Future, StreamExt}; use ruma::{ api::{ client::error::ErrorKind, - federation::{event::{get_event, get_room_state_ids}, membership::create_join_event, discovery::get_remote_server_keys_batch::{v2::QueryCriteria, self}}, + federation::{ + discovery::get_remote_server_keys_batch::{self, v2::QueryCriteria}, + event::{get_event, get_room_state_ids}, + membership::create_join_event, + }, + }, + events::{ + room::{create::RoomCreateEventContent, server_acl::RoomServerAclEventContent}, + StateEventType, }, - events::{room::{create::RoomCreateEventContent, server_acl::RoomServerAclEventContent}, StateEventType}, int, serde::Base64, signatures::CanonicalJsonValue, @@ -24,9 +35,9 @@ use ruma::{ uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, ServerSigningKeyId, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; -use tracing::{error, info, trace, warn, debug}; +use tracing::{debug, error, info, trace, warn}; -use crate::{service::*, services, Result, Error, PduEvent}; +use crate::{service::*, services, Error, PduEvent, Result}; pub struct Service; @@ -72,10 +83,7 @@ impl Service { )); } - if 
services() - .rooms - .metadata - .is_disabled(room_id)? { + if services().rooms.metadata.is_disabled(room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "Federation of this room is currently disabled on this server.", @@ -94,7 +102,8 @@ impl Service { .ok_or_else(|| Error::bad_database("Failed to find create event in db."))?; let first_pdu_in_room = services() - .rooms.timeline + .rooms + .timeline .first_pdu_in_room(room_id)? .ok_or_else(|| Error::bad_database("Failed to find first pdu in db."))?; @@ -113,21 +122,20 @@ impl Service { } // 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline events - let (sorted_prev_events, mut eventid_info) = self.fetch_unknown_prev_events( - origin, - &create_event, - room_id, - pub_key_map, - incoming_pdu.prev_events.clone(), - ).await?; + let (sorted_prev_events, mut eventid_info) = self + .fetch_unknown_prev_events( + origin, + &create_event, + room_id, + pub_key_map, + incoming_pdu.prev_events.clone(), + ) + .await?; let mut errors = 0; for prev_id in dbg!(sorted_prev_events) { // Check for disabled again because it might have changed - if services() - .rooms - .metadata - .is_disabled(room_id)? { + if services().rooms.metadata.is_disabled(room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, "Federation of this room is currently disabled on this server.", @@ -224,15 +232,18 @@ impl Service { .write() .unwrap() .insert(room_id.to_owned(), (event_id.to_owned(), start_time)); - let r = services().rooms.event_handler.upgrade_outlier_to_timeline_pdu( - incoming_pdu, - val, - &create_event, - origin, - room_id, - pub_key_map, - ) - .await; + let r = services() + .rooms + .event_handler + .upgrade_outlier_to_timeline_pdu( + incoming_pdu, + val, + &create_event, + origin, + room_id, + pub_key_map, + ) + .await; services() .globals .roomid_federationhandletime @@ -252,8 +263,7 @@ impl Service { room_id: &'a RoomId, value: BTreeMap, pub_key_map: &'a RwLock>>, - ) -> AsyncRecursiveType<'a, Result<(Arc, BTreeMap)>> - { + ) -> AsyncRecursiveType<'a, Result<(Arc, BTreeMap)>> { Box::pin(async move { // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json @@ -282,14 +292,22 @@ impl Service { Err(e) => { // Drop warn!("Dropping bad event {}: {}", event_id, e); - return Err(Error::BadRequest(ErrorKind::InvalidParam, "Signature verification failed")); + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Signature verification failed", + )); } Ok(ruma::signatures::Verified::Signatures) => { // Redact warn!("Calculated hash does not match: {}", event_id); match ruma::signatures::redact(&value, room_version_id) { Ok(obj) => obj, - Err(_) => return Err(Error::BadRequest(ErrorKind::InvalidParam, "Redaction failed")), + Err(_) => { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Redaction failed", + )) + } } } Ok(ruma::signatures::Verified::All) => value, @@ -376,7 +394,8 @@ impl Service { &incoming_pdu, None::, // TODO: third party invite |k, s| auth_events.get(&(k.to_string().into(), s.to_owned())), - ).map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed"))? + ) + .map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed"))? { return Err(Error::BadRequest( ErrorKind::InvalidParam, @@ -415,9 +434,13 @@ impl Service { if services() .rooms - .pdu_metadata.is_event_soft_failed(&incoming_pdu.event_id)? + .pdu_metadata + .is_event_soft_failed(&incoming_pdu.event_id)? 
{ - return Err(Error::BadRequest(ErrorKind::InvalidParam, "Event has been soft failed")); + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Event has been soft failed", + )); } info!("Upgrading {} to timeline pdu", incoming_pdu.event_id); @@ -448,7 +471,13 @@ impl Service { .pdu_shortstatehash(prev_event)?; let state = if let Some(shortstatehash) = prev_event_sstatehash { - Some(services().rooms.state_accessor.state_full_ids(shortstatehash).await) + Some( + services() + .rooms + .state_accessor + .state_full_ids(shortstatehash) + .await, + ) } else { None }; @@ -466,10 +495,10 @@ impl Service { })?; if let Some(state_key) = &prev_pdu.state_key { - let shortstatekey = services() - .rooms - .short - .get_or_create_shortstatekey(&prev_pdu.kind.to_string().into(), state_key)?; + let shortstatekey = services().rooms.short.get_or_create_shortstatekey( + &prev_pdu.kind.to_string().into(), + state_key, + )?; state.insert(shortstatekey, Arc::from(prev_event)); // Now it's the state after the pdu @@ -483,21 +512,25 @@ impl Service { let mut okay = true; for prev_eventid in &incoming_pdu.prev_events { - let prev_event = if let Ok(Some(pdu)) = services().rooms.timeline.get_pdu(prev_eventid) { - pdu - } else { - okay = false; - break; - }; - - let sstatehash = - if let Ok(Some(s)) = services().rooms.state_accessor.pdu_shortstatehash(prev_eventid) { - s + let prev_event = + if let Ok(Some(pdu)) = services().rooms.timeline.get_pdu(prev_eventid) { + pdu } else { okay = false; break; }; + let sstatehash = if let Ok(Some(s)) = services() + .rooms + .state_accessor + .pdu_shortstatehash(prev_eventid) + { + s + } else { + okay = false; + break; + }; + extremity_sstatehashes.insert(sstatehash, prev_event); } @@ -513,13 +546,10 @@ impl Service { .await?; if let Some(state_key) = &prev_event.state_key { - let shortstatekey = services() - .rooms - .short - .get_or_create_shortstatekey( - &prev_event.kind.to_string().into(), - state_key, - )?; + let shortstatekey = services().rooms.short.get_or_create_shortstatekey( + &prev_event.kind.to_string().into(), + state_key, + )?; leaf_state.insert(shortstatekey, Arc::from(&*prev_event.event_id)); // Now it's the state after the pdu } @@ -528,7 +558,8 @@ impl Service { let mut starting_events = Vec::with_capacity(leaf_state.len()); for (k, id) in leaf_state { - if let Ok((ty, st_key)) = services().rooms.short.get_statekey_from_short(k) { + if let Ok((ty, st_key)) = services().rooms.short.get_statekey_from_short(k) + { // FIXME: Undo .to_string().into() when StateMap // is updated to use StateEventType state.insert((ty.to_string().into(), st_key), id.clone()); @@ -567,10 +598,8 @@ impl Service { new_state .into_iter() .map(|((event_type, state_key), event_id)| { - let shortstatekey = services() - .rooms - .short - .get_or_create_shortstatekey( + let shortstatekey = + services().rooms.short.get_or_create_shortstatekey( &event_type.to_string().into(), &state_key, )?; @@ -618,15 +647,14 @@ impl Service { let mut state: BTreeMap<_, Arc> = BTreeMap::new(); for (pdu, _) in state_vec { - let state_key = pdu - .state_key - .clone() - .ok_or_else(|| Error::bad_database("Found non-state pdu in state events."))?; + let state_key = pdu.state_key.clone().ok_or_else(|| { + Error::bad_database("Found non-state pdu in state events.") + })?; - let shortstatekey = services() - .rooms - .short - .get_or_create_shortstatekey(&pdu.kind.to_string().into(), &state_key)?; + let shortstatekey = services().rooms.short.get_or_create_shortstatekey( + &pdu.kind.to_string().into(), + 
&state_key, + )?; match state.entry(shortstatekey) { btree_map::Entry::Vacant(v) => { @@ -648,7 +676,9 @@ impl Service { if state.get(&create_shortstatekey).map(|id| id.as_ref()) != Some(&create_event.event_id) { - return Err(Error::bad_database("Incoming event refers to wrong create event.")); + return Err(Error::bad_database( + "Incoming event refers to wrong create event.", + )); } state_at_incoming_event = Some(state); @@ -683,7 +713,9 @@ impl Service { .map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed."))?; if !check_result { - return Err(Error::bad_database("Event has failed auth check with state at the event.")); + return Err(Error::bad_database( + "Event has failed auth check with state at the event.", + )); } info!("Auth check succeeded"); @@ -703,10 +735,7 @@ impl Service { // Now we calculate the set of extremities this room has after the incoming event has been // applied. We start with the previous extremities (aka leaves) info!("Calculating extremities"); - let mut extremities = services() - .rooms - .state - .get_forward_extremities(room_id)?; + let mut extremities = services().rooms.state.get_forward_extremities(room_id)?; // Remove any forward extremities that are referenced by this incoming event's prev_events for prev_event in &incoming_pdu.prev_events { @@ -716,8 +745,15 @@ impl Service { } // Only keep those extremities were not referenced yet - extremities - .retain(|id| !matches!(services().rooms.pdu_metadata.is_event_referenced(room_id, id), Ok(true))); + extremities.retain(|id| { + !matches!( + services() + .rooms + .pdu_metadata + .is_event_referenced(room_id, id), + Ok(true) + ) + }); info!("Compressing state at event"); let state_ids_compressed = state_at_incoming_event @@ -733,23 +769,21 @@ impl Service { // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it info!("Starting soft fail auth check"); - let auth_events = services() - .rooms - .state - .get_auth_events( - room_id, - &incoming_pdu.kind, - &incoming_pdu.sender, - incoming_pdu.state_key.as_deref(), - &incoming_pdu.content, - )?; + let auth_events = services().rooms.state.get_auth_events( + room_id, + &incoming_pdu.kind, + &incoming_pdu.sender, + incoming_pdu.state_key.as_deref(), + &incoming_pdu.content, + )?; let soft_fail = !state_res::event_auth::auth_check( &room_version, &incoming_pdu, None::, |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ).map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed."))?; + ) + .map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed."))?; if soft_fail { services().rooms.timeline.append_incoming_pdu( @@ -767,7 +801,10 @@ impl Service { .rooms .pdu_metadata .mark_event_soft_failed(&incoming_pdu.event_id)?; - return Err(Error::BadRequest(ErrorKind::InvalidParam, "Event has been soft failed")); + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Event has been soft failed", + )); } if incoming_pdu.state_key.is_some() { @@ -789,15 +826,12 @@ impl Service { info!("Loading extremities"); for id in dbg!(&extremities) { - match services() - .rooms - .timeline - .get_pdu(id)? - { + match services().rooms.timeline.get_pdu(id)? { Some(leaf_pdu) => { extremity_sstatehashes.insert( services() - .rooms.state_accessor + .rooms + .state_accessor .pdu_shortstatehash(&leaf_pdu.event_id)? 
.ok_or_else(|| { error!( @@ -829,10 +863,10 @@ impl Service { // We also add state after incoming event to the fork states let mut state_after = state_at_incoming_event.clone(); if let Some(state_key) = &incoming_pdu.state_key { - let shortstatekey = services() - .rooms - .short - .get_or_create_shortstatekey(&incoming_pdu.kind.to_string().into(), state_key)?; + let shortstatekey = services().rooms.short.get_or_create_shortstatekey( + &incoming_pdu.kind.to_string().into(), + state_key, + )?; state_after.insert(shortstatekey, Arc::from(&*incoming_pdu.event_id)); } @@ -921,10 +955,10 @@ impl Service { state .into_iter() .map(|((event_type, state_key), event_id)| { - let shortstatekey = services() - .rooms - .short - .get_or_create_shortstatekey(&event_type.to_string().into(), &state_key)?; + let shortstatekey = services().rooms.short.get_or_create_shortstatekey( + &event_type.to_string().into(), + &state_key, + )?; services() .rooms .state_compressor @@ -936,7 +970,10 @@ impl Service { // Set the new room state to the resolved state if update_state { info!("Forcing new room state"); - let sstatehash = services().rooms.state_compressor.save_state(room_id, new_room_state)?; + let sstatehash = services() + .rooms + .state_compressor + .save_state(room_id, new_room_state)?; services() .rooms .state @@ -951,15 +988,14 @@ impl Service { // We use the `state_at_event` instead of `state_after` so we accurately // represent the state for this event. - let pdu_id = services().rooms.timeline - .append_incoming_pdu( - &incoming_pdu, - val, - extremities.iter().map(|e| (**e).to_owned()).collect(), - state_ids_compressed, - soft_fail, - &state_lock, - )?; + let pdu_id = services().rooms.timeline.append_incoming_pdu( + &incoming_pdu, + val, + extremities.iter().map(|e| (**e).to_owned()).collect(), + state_ids_compressed, + soft_fail, + &state_lock, + )?; info!("Appended incoming pdu"); @@ -1141,8 +1177,10 @@ impl Service { room_id: &RoomId, pub_key_map: &RwLock>>, initial_set: Vec>, - ) -> Result<(Vec>, HashMap, -(Arc, BTreeMap)>)> { + ) -> Result<( + Vec>, + HashMap, (Arc, BTreeMap)>, + )> { let mut graph: HashMap, _> = HashMap::new(); let mut eventid_info = HashMap::new(); let mut todo_outlier_stack: Vec> = initial_set; @@ -1223,7 +1261,8 @@ impl Service { .map_or_else(|| uint!(0), |info| info.0.origin_server_ts), ), )) - }).map_err(|_| Error::bad_database("Error sorting prev events"))?; + }) + .map_err(|_| Error::bad_database("Error sorting prev events"))?; Ok((sorted, eventid_info)) } @@ -1253,13 +1292,16 @@ impl Service { let signature_ids = signature_object.keys().cloned().collect::>(); - let fetch_res = self.fetch_signing_keys( - signature_server.as_str().try_into().map_err(|_| { - Error::BadServerResponse("Invalid servername in signatures of server response pdu.") - })?, - signature_ids, - ) - .await; + let fetch_res = self + .fetch_signing_keys( + signature_server.as_str().try_into().map_err(|_| { + Error::BadServerResponse( + "Invalid servername in signatures of server response pdu.", + ) + })?, + signature_ids, + ) + .await; let keys = match fetch_res { Ok(keys) => keys, @@ -1336,8 +1378,9 @@ impl Service { let signature_ids = signature_object.keys().cloned().collect::>(); - let contains_all_ids = - |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); + let contains_all_ids = |keys: &BTreeMap| { + signature_ids.iter().all(|id| keys.contains_key(id)) + }; let origin = <&ServerName>::try_from(signature_server.as_str()).map_err(|_| { Error::BadServerResponse("Invalid servername 
in signatures of server response pdu.") @@ -1373,8 +1416,10 @@ impl Service { room_version: &RoomVersionId, pub_key_map: &RwLock>>, ) -> Result<()> { - let mut servers: BTreeMap, BTreeMap, QueryCriteria>> = - BTreeMap::new(); + let mut servers: BTreeMap< + Box, + BTreeMap, QueryCriteria>, + > = BTreeMap::new(); { let mut pkm = pub_key_map @@ -1440,11 +1485,9 @@ impl Service { .into_iter() .map(|(server, _)| async move { ( - services().sending - .send_federation_request( - &server, - get_server_keys::v2::Request::new(), - ) + services() + .sending + .send_federation_request(&server, get_server_keys::v2::Request::new()) .await, server, ) @@ -1472,10 +1515,11 @@ impl Service { /// Returns Ok if the acl allows the server pub fn acl_check(&self, server_name: &ServerName, room_id: &RoomId) -> Result<()> { - let acl_event = match services() - .rooms.state_accessor - .room_state_get(room_id, &StateEventType::RoomServerAcl, "")? - { + let acl_event = match services().rooms.state_accessor.room_state_get( + room_id, + &StateEventType::RoomServerAcl, + "", + )? { Some(acl) => acl, None => return Ok(()), }; @@ -1587,7 +1631,9 @@ impl Service { .ok() .and_then(|resp| resp.server_key.deserialize().ok()) { - services().globals.add_signing_key(origin, server_key.clone())?; + services() + .globals + .add_signing_key(origin, server_key.clone())?; result.extend( server_key diff --git a/src/service/rooms/lazy_loading/data.rs b/src/service/rooms/lazy_loading/data.rs index 524071c3..9af8e21b 100644 --- a/src/service/rooms/lazy_loading/data.rs +++ b/src/service/rooms/lazy_loading/data.rs @@ -1,5 +1,5 @@ -use ruma::{RoomId, DeviceId, UserId}; use crate::Result; +use ruma::{DeviceId, RoomId, UserId}; pub trait Data: Send + Sync { fn lazy_load_was_sent_before( @@ -15,7 +15,7 @@ pub trait Data: Send + Sync { user_id: &UserId, device_id: &DeviceId, room_id: &RoomId, - confirmed_user_ids: &mut dyn Iterator, + confirmed_user_ids: &mut dyn Iterator, ) -> Result<()>; fn lazy_load_reset( diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index 760fffee..a01ce9ba 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -1,15 +1,19 @@ mod data; -use std::{collections::{HashSet, HashMap}, sync::{Mutex, Arc}}; +use std::{ + collections::{HashMap, HashSet}, + sync::{Arc, Mutex}, +}; pub use data::Data; -use ruma::{DeviceId, UserId, RoomId}; +use ruma::{DeviceId, RoomId, UserId}; use crate::Result; pub struct Service { db: Arc, - lazy_load_waiting: Mutex, Box, Box, u64), HashSet>>>, + lazy_load_waiting: + Mutex, Box, Box, u64), HashSet>>>, } impl Service { @@ -21,7 +25,8 @@ impl Service { room_id: &RoomId, ll_user: &UserId, ) -> Result { - self.db.lazy_load_was_sent_before(user_id, device_id, room_id, ll_user) + self.db + .lazy_load_was_sent_before(user_id, device_id, room_id, ll_user) } #[tracing::instrument(skip(self))] @@ -58,7 +63,12 @@ impl Service { room_id.to_owned(), since, )) { - self.db.lazy_load_confirm_delivery(user_id, device_id, room_id, &mut user_ids.iter().map(|&u| &*u))?; + self.db.lazy_load_confirm_delivery( + user_id, + device_id, + room_id, + &mut user_ids.iter().map(|&u| &*u), + )?; } else { // Ignore } diff --git a/src/service/rooms/metadata/data.rs b/src/service/rooms/metadata/data.rs index bc31ee88..27e7eb98 100644 --- a/src/service/rooms/metadata/data.rs +++ b/src/service/rooms/metadata/data.rs @@ -1,5 +1,5 @@ -use ruma::RoomId; use crate::Result; +use ruma::RoomId; pub trait Data: Send + Sync { fn exists(&self, room_id: 
&RoomId) -> Result; diff --git a/src/service/rooms/mod.rs b/src/service/rooms/mod.rs index f1b0badf..8956e4d8 100644 --- a/src/service/rooms/mod.rs +++ b/src/service/rooms/mod.rs @@ -16,7 +16,25 @@ pub mod state_compressor; pub mod timeline; pub mod user; -pub trait Data: alias::Data + auth_chain::Data + directory::Data + edus::Data + lazy_loading::Data + metadata::Data + outlier::Data + pdu_metadata::Data + search::Data + short::Data + state::Data + state_accessor::Data + state_cache::Data + state_compressor::Data + timeline::Data + user::Data {} +pub trait Data: + alias::Data + + auth_chain::Data + + directory::Data + + edus::Data + + lazy_loading::Data + + metadata::Data + + outlier::Data + + pdu_metadata::Data + + search::Data + + short::Data + + state::Data + + state_accessor::Data + + state_cache::Data + + state_compressor::Data + + timeline::Data + + user::Data +{ +} pub struct Service { pub alias: alias::Service, diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs index d36adc4c..6404d8a1 100644 --- a/src/service/rooms/outlier/mod.rs +++ b/src/service/rooms/outlier/mod.rs @@ -2,9 +2,9 @@ mod data; use std::sync::Arc; pub use data::Data; -use ruma::{EventId, signatures::CanonicalJsonObject}; +use ruma::{signatures::CanonicalJsonObject, EventId}; -use crate::{Result, PduEvent}; +use crate::{PduEvent, Result}; pub struct Service { db: Arc, diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs index 9bc49cfb..b157938f 100644 --- a/src/service/rooms/pdu_metadata/data.rs +++ b/src/service/rooms/pdu_metadata/data.rs @@ -1,7 +1,7 @@ use std::sync::Arc; -use ruma::{EventId, RoomId}; use crate::Result; +use ruma::{EventId, RoomId}; pub trait Data: Send + Sync { fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()>; diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs index 4724f857..70443389 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -2,7 +2,7 @@ mod data; use std::sync::Arc; pub use data::Data; -use ruma::{RoomId, EventId}; +use ruma::{EventId, RoomId}; use crate::Result; diff --git a/src/service/rooms/search/data.rs b/src/service/rooms/search/data.rs index 0c14ffe6..59652e02 100644 --- a/src/service/rooms/search/data.rs +++ b/src/service/rooms/search/data.rs @@ -1,5 +1,5 @@ -use ruma::RoomId; use crate::Result; +use ruma::RoomId; pub trait Data: Send + Sync { fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: String) -> Result<()>; diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index ec1ad537..0ef96342 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -12,7 +12,12 @@ pub struct Service { impl Service { #[tracing::instrument(skip(self))] - pub fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: String) -> Result<()> { + pub fn index_pdu<'a>( + &self, + shortroomid: u64, + pdu_id: &[u8], + message_body: String, + ) -> Result<()> { self.db.index_pdu(shortroomid, pdu_id, message_body) } diff --git a/src/service/rooms/short/data.rs b/src/service/rooms/short/data.rs index 07a27121..652c525b 100644 --- a/src/service/rooms/short/data.rs +++ b/src/service/rooms/short/data.rs @@ -1,13 +1,10 @@ use std::sync::Arc; -use ruma::{EventId, events::StateEventType, RoomId}; use crate::Result; +use ruma::{events::StateEventType, EventId, RoomId}; pub trait Data: Send + Sync { - fn get_or_create_shorteventid( - &self, - 
event_id: &EventId, - ) -> Result; + fn get_or_create_shorteventid(&self, event_id: &EventId) -> Result; fn get_shortstatekey( &self, @@ -26,15 +23,9 @@ pub trait Data: Send + Sync { fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)>; /// Returns (shortstatehash, already_existed) - fn get_or_create_shortstatehash( - &self, - state_hash: &[u8], - ) -> Result<(u64, bool)>; + fn get_or_create_shortstatehash(&self, state_hash: &[u8]) -> Result<(u64, bool)>; fn get_shortroomid(&self, room_id: &RoomId) -> Result>; - fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - ) -> Result; + fn get_or_create_shortroomid(&self, room_id: &RoomId) -> Result; } diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index 08ce5c5a..1d2e0407 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -2,19 +2,16 @@ mod data; use std::sync::Arc; pub use data::Data; -use ruma::{EventId, events::StateEventType, RoomId}; +use ruma::{events::StateEventType, EventId, RoomId}; -use crate::{Result, Error, utils, services}; +use crate::{services, utils, Error, Result}; pub struct Service { db: Arc, } impl Service { - pub fn get_or_create_shorteventid( - &self, - event_id: &EventId, - ) -> Result { + pub fn get_or_create_shorteventid(&self, event_id: &EventId) -> Result { self.db.get_or_create_shorteventid(event_id) } @@ -43,10 +40,7 @@ impl Service { } /// Returns (shortstatehash, already_existed) - pub fn get_or_create_shortstatehash( - &self, - state_hash: &[u8], - ) -> Result<(u64, bool)> { + pub fn get_or_create_shortstatehash(&self, state_hash: &[u8]) -> Result<(u64, bool)> { self.db.get_or_create_shortstatehash(state_hash) } @@ -54,10 +48,7 @@ impl Service { self.db.get_shortroomid(room_id) } - pub fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - ) -> Result { + pub fn get_or_create_shortroomid(&self, room_id: &RoomId) -> Result { self.db.get_or_create_shortroomid(room_id) } } diff --git a/src/service/rooms/state/data.rs b/src/service/rooms/state/data.rs index 8eca21d1..3aa49146 100644 --- a/src/service/rooms/state/data.rs +++ b/src/service/rooms/state/data.rs @@ -1,7 +1,7 @@ -use std::sync::Arc; -use std::collections::HashSet; use crate::Result; use ruma::{EventId, RoomId}; +use std::collections::HashSet; +use std::sync::Arc; use tokio::sync::MutexGuard; pub trait Data: Send + Sync { @@ -9,7 +9,10 @@ pub trait Data: Send + Sync { fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result>; /// Update the current state of the room. - fn set_room_state(&self, room_id: &RoomId, new_shortstatehash: u64, + fn set_room_state( + &self, + room_id: &RoomId, + new_shortstatehash: u64, _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()>; @@ -20,7 +23,8 @@ pub trait Data: Send + Sync { fn get_forward_extremities(&self, room_id: &RoomId) -> Result>>; /// Replace the forward extremities of the room. 
- fn set_forward_extremities<'a>(&self, + fn set_forward_extremities<'a>( + &self, room_id: &RoomId, event_ids: Vec>, _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 57a0e773..2dff4b71 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -1,13 +1,24 @@ mod data; -use std::{collections::{HashSet, HashMap}, sync::Arc}; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; pub use data::Data; -use ruma::{RoomId, events::{room::{member::MembershipState, create::RoomCreateEventContent}, AnyStrippedStateEvent, StateEventType, RoomEventType}, UserId, EventId, serde::Raw, RoomVersionId, state_res::{StateMap, self}}; +use ruma::{ + events::{ + room::{create::RoomCreateEventContent, member::MembershipState}, + AnyStrippedStateEvent, RoomEventType, StateEventType, + }, + serde::Raw, + state_res::{self, StateMap}, + EventId, RoomId, RoomVersionId, UserId, +}; use serde::Deserialize; use tokio::sync::MutexGuard; use tracing::warn; -use crate::{Result, services, PduEvent, Error, utils::calculate_hash}; +use crate::{services, utils::calculate_hash, Error, PduEvent, Result}; use super::state_compressor::CompressedStateEvent; @@ -25,7 +36,8 @@ impl Service { statediffremoved: HashSet, ) -> Result<()> { let mutex_state = Arc::clone( - services().globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -35,7 +47,10 @@ impl Service { let state_lock = mutex_state.lock().await; for event_id in statediffnew.into_iter().filter_map(|new| { - services().rooms.state_compressor.parse_compressed_state_event(new) + services() + .rooms + .state_compressor + .parse_compressed_state_event(new) .ok() .map(|(_, id)| id) }) { @@ -75,7 +90,14 @@ impl Service { Err(_) => continue, }; - services().rooms.state_cache.update_membership(room_id, &user_id, membership, &pdu.sender, None, false)?; + services().rooms.state_cache.update_membership( + room_id, + &user_id, + membership, + &pdu.sender, + None, + false, + )?; } services().rooms.state_cache.update_joined_count(room_id)?; @@ -98,7 +120,10 @@ impl Service { room_id: &RoomId, state_ids_compressed: HashSet, ) -> Result { - let shorteventid = services().rooms.short.get_or_create_shorteventid(event_id)?; + let shorteventid = services() + .rooms + .short + .get_or_create_shorteventid(event_id)?; let previous_shortstatehash = self.db.get_room_shortstatehash(room_id)?; @@ -109,12 +134,21 @@ impl Service { .collect::>(), ); - let (shortstatehash, already_existed) = - services().rooms.short.get_or_create_shortstatehash(&state_hash)?; + let (shortstatehash, already_existed) = services() + .rooms + .short + .get_or_create_shortstatehash(&state_hash)?; if !already_existed { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| services().rooms.state_compressor.load_shortstatehash_info(p))?; + let states_parents = previous_shortstatehash.map_or_else( + || Ok(Vec::new()), + |p| { + services() + .rooms + .state_compressor + .load_shortstatehash_info(p) + }, + )?; let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() { @@ -152,11 +186,11 @@ impl Service { /// This adds all current state events (not including the incoming event) /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. 
#[tracing::instrument(skip(self, new_pdu))] - pub fn append_to_state( - &self, - new_pdu: &PduEvent, - ) -> Result { - let shorteventid = services().rooms.short.get_or_create_shorteventid(&new_pdu.event_id)?; + pub fn append_to_state(&self, new_pdu: &PduEvent) -> Result { + let shorteventid = services() + .rooms + .short + .get_or_create_shorteventid(&new_pdu.event_id)?; let previous_shortstatehash = self.get_room_shortstatehash(&new_pdu.room_id)?; @@ -165,15 +199,25 @@ impl Service { } if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| services().rooms.state_compressor.load_shortstatehash_info(p))?; - - let shortstatekey = services().rooms.short.get_or_create_shortstatekey( - &new_pdu.kind.to_string().into(), - state_key, + let states_parents = previous_shortstatehash.map_or_else( + || Ok(Vec::new()), + |p| { + services() + .rooms + .state_compressor + .load_shortstatehash_info(p) + }, )?; - let new = services().rooms.state_compressor.compress_state_event(shortstatekey, &new_pdu.event_id)?; + let shortstatekey = services() + .rooms + .short + .get_or_create_shortstatekey(&new_pdu.kind.to_string().into(), state_key)?; + + let new = services() + .rooms + .state_compressor + .compress_state_event(shortstatekey, &new_pdu.event_id)?; let replaces = states_parents .last() @@ -220,14 +264,18 @@ impl Service { ) -> Result>> { let mut state = Vec::new(); // Add recommended events - if let Some(e) = - services().rooms.state_accessor.room_state_get(&invite_event.room_id, &StateEventType::RoomCreate, "")? - { + if let Some(e) = services().rooms.state_accessor.room_state_get( + &invite_event.room_id, + &StateEventType::RoomCreate, + "", + )? { state.push(e.to_stripped_state_event()); } - if let Some(e) = - services().rooms.state_accessor.room_state_get(&invite_event.room_id, &StateEventType::RoomJoinRules, "")? - { + if let Some(e) = services().rooms.state_accessor.room_state_get( + &invite_event.room_id, + &StateEventType::RoomJoinRules, + "", + )? { state.push(e.to_stripped_state_event()); } if let Some(e) = services().rooms.state_accessor.room_state_get( @@ -237,14 +285,18 @@ impl Service { )? { state.push(e.to_stripped_state_event()); } - if let Some(e) = - services().rooms.state_accessor.room_state_get(&invite_event.room_id, &StateEventType::RoomAvatar, "")? - { + if let Some(e) = services().rooms.state_accessor.room_state_get( + &invite_event.room_id, + &StateEventType::RoomAvatar, + "", + )? { state.push(e.to_stripped_state_event()); } - if let Some(e) = - services().rooms.state_accessor.room_state_get(&invite_event.room_id, &StateEventType::RoomName, "")? - { + if let Some(e) = services().rooms.state_accessor.room_state_get( + &invite_event.room_id, + &StateEventType::RoomName, + "", + )? { state.push(e.to_stripped_state_event()); } if let Some(e) = services().rooms.state_accessor.room_state_get( @@ -260,16 +312,23 @@ impl Service { } #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64, + pub fn set_room_state( + &self, + room_id: &RoomId, + shortstatehash: u64, mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex - ) -> Result<()> { + ) -> Result<()> { self.db.set_room_state(room_id, shortstatehash, mutex_lock) } /// Returns the room's version. 
#[tracing::instrument(skip(self))] pub fn get_room_version(&self, room_id: &RoomId) -> Result { - let create_event = services().rooms.state_accessor.room_state_get(room_id, &StateEventType::RoomCreate, "")?; + let create_event = services().rooms.state_accessor.room_state_get( + room_id, + &StateEventType::RoomCreate, + "", + )?; let create_event_content: Option = create_event .as_ref() @@ -294,12 +353,14 @@ impl Service { self.db.get_forward_extremities(room_id) } - pub fn set_forward_extremities<'a>(&self, + pub fn set_forward_extremities<'a>( + &self, room_id: &RoomId, event_ids: Vec>, state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()> { - self.db.set_forward_extremities(room_id, event_ids, state_lock) + self.db + .set_forward_extremities(room_id, event_ids, state_lock) } /// This fetches auth events from the current state. @@ -312,12 +373,13 @@ impl Service { state_key: Option<&str>, content: &serde_json::value::RawValue, ) -> Result>> { - let shortstatehash = - if let Some(current_shortstatehash) = services().rooms.state.get_room_shortstatehash(room_id)? { - current_shortstatehash - } else { - return Ok(HashMap::new()); - }; + let shortstatehash = if let Some(current_shortstatehash) = + services().rooms.state.get_room_shortstatehash(room_id)? + { + current_shortstatehash + } else { + return Ok(HashMap::new()); + }; let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) .expect("content is a valid JSON object"); @@ -325,14 +387,19 @@ impl Service { let mut sauthevents = auth_events .into_iter() .filter_map(|(event_type, state_key)| { - services().rooms.short.get_shortstatekey(&event_type.to_string().into(), &state_key) + services() + .rooms + .short + .get_shortstatekey(&event_type.to_string().into(), &state_key) .ok() .flatten() .map(|s| (s, (event_type, state_key))) }) .collect::>(); - let full_state = services().rooms.state_compressor + let full_state = services() + .rooms + .state_compressor .load_shortstatehash_info(shortstatehash)? 
.pop() .expect("there is always one layer") @@ -340,11 +407,25 @@ impl Service { Ok(full_state .into_iter() - .filter_map(|compressed| services().rooms.state_compressor.parse_compressed_state_event(compressed).ok()) + .filter_map(|compressed| { + services() + .rooms + .state_compressor + .parse_compressed_state_event(compressed) + .ok() + }) .filter_map(|(shortstatekey, event_id)| { sauthevents.remove(&shortstatekey).map(|k| (k, event_id)) }) - .filter_map(|(k, event_id)| services().rooms.timeline.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu))) + .filter_map(|(k, event_id)| { + services() + .rooms + .timeline + .get_pdu(&event_id) + .ok() + .flatten() + .map(|pdu| (k, pdu)) + }) .collect()) } } diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs index 14f96bc8..340b19c3 100644 --- a/src/service/rooms/state_accessor/data.rs +++ b/src/service/rooms/state_accessor/data.rs @@ -1,9 +1,12 @@ -use std::{sync::Arc, collections::{HashMap, BTreeMap}}; +use std::{ + collections::{BTreeMap, HashMap}, + sync::Arc, +}; use async_trait::async_trait; -use ruma::{EventId, events::StateEventType, RoomId}; +use ruma::{events::StateEventType, EventId, RoomId}; -use crate::{Result, PduEvent}; +use crate::{PduEvent, Result}; #[async_trait] pub trait Data: Send + Sync { diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index a0f5523b..e179d70f 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -1,10 +1,13 @@ mod data; -use std::{sync::Arc, collections::{HashMap, BTreeMap}}; +use std::{ + collections::{BTreeMap, HashMap}, + sync::Arc, +}; pub use data::Data; -use ruma::{events::StateEventType, RoomId, EventId}; +use ruma::{events::StateEventType, EventId, RoomId}; -use crate::{Result, PduEvent}; +use crate::{PduEvent, Result}; pub struct Service { db: Arc, diff --git a/src/service/rooms/state_cache/data.rs b/src/service/rooms/state_cache/data.rs index 950143ff..a6b06a53 100644 --- a/src/service/rooms/state_cache/data.rs +++ b/src/service/rooms/state_cache/data.rs @@ -1,12 +1,21 @@ use std::{collections::HashSet, sync::Arc}; -use ruma::{UserId, RoomId, serde::Raw, events::{AnyStrippedStateEvent, AnySyncStateEvent}, ServerName}; use crate::Result; +use ruma::{ + events::{AnyStrippedStateEvent, AnySyncStateEvent}, + serde::Raw, + RoomId, ServerName, UserId, +}; pub trait Data: Send + Sync { fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; fn mark_as_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; - fn mark_as_invited(&self, user_id: &UserId, room_id: &RoomId, last_state: Option>>) -> Result<()>; + fn mark_as_invited( + &self, + user_id: &UserId, + room_id: &RoomId, + last_state: Option>>, + ) -> Result<()>; fn mark_as_left(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; fn update_joined_count(&self, room_id: &RoomId) -> Result<()>; diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 69bd8328..04eb9afb 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -9,8 +9,8 @@ use ruma::{ ignored_user_list::IgnoredUserListEvent, room::{create::RoomCreateEventContent, member::MembershipState}, tag::{TagEvent, TagEventContent}, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, - RoomAccountDataEventType, StateEventType, RoomAccountDataEvent, RoomAccountDataEventContent, + AnyStrippedStateEvent, AnySyncStateEvent, 
GlobalAccountDataEventType, RoomAccountDataEvent, + RoomAccountDataEventContent, RoomAccountDataEventType, StateEventType, }, serde::Raw, RoomId, ServerName, UserId, @@ -97,8 +97,9 @@ impl Service { RoomAccountDataEventType::Tag, )? .map(|event| { - serde_json::from_str(event.get()) - .map_err(|_| Error::bad_database("Invalid account data event in db.")) + serde_json::from_str(event.get()).map_err(|_| { + Error::bad_database("Invalid account data event in db.") + }) }) { services() @@ -113,16 +114,19 @@ impl Service { }; // Copy direct chat flag - if let Some(mut direct_event) = services().account_data.get( - None, - user_id, - GlobalAccountDataEventType::Direct.to_string().into(), - )? + if let Some(mut direct_event) = services() + .account_data + .get( + None, + user_id, + GlobalAccountDataEventType::Direct.to_string().into(), + )? .map(|event| { - serde_json::from_str::(event.get()) - .map_err(|_| Error::bad_database("Invalid account data event in db.")) + serde_json::from_str::(event.get()).map_err(|_| { + Error::bad_database("Invalid account data event in db.") + }) }) - { + { let direct_event = direct_event?; let mut room_ids_updated = false; @@ -138,7 +142,8 @@ impl Service { None, user_id, GlobalAccountDataEventType::Direct.to_string().into(), - &serde_json::to_value(&direct_event).expect("to json always works"), + &serde_json::to_value(&direct_event) + .expect("to json always works"), )?; } }; @@ -158,10 +163,11 @@ impl Service { .to_string() .into(), )? - .map(|event| { - serde_json::from_str::(event.get()) - .map_err(|_| Error::bad_database("Invalid account data event in db.")) - }).transpose()? + .map(|event| { + serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db.")) + }) + .transpose()? .map_or(false, |ignored| { ignored .content diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index 5f2cf02d..f7c6dba0 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -1,11 +1,15 @@ pub mod data; -use std::{mem::size_of, sync::{Arc, Mutex}, collections::HashSet}; +use std::{ + collections::HashSet, + mem::size_of, + sync::{Arc, Mutex}, +}; pub use data::Data; use lru_cache::LruCache; use ruma::{EventId, RoomId}; -use crate::{Result, utils, services}; +use crate::{services, utils, Result}; use self::data::StateDiff; @@ -23,7 +27,6 @@ pub struct Service { )>, >, >, - } pub type CompressedStateEvent = [u8; 2 * size_of::()]; @@ -51,7 +54,11 @@ impl Service { return Ok(r.clone()); } - let StateDiff { parent, added, removed } = self.db.get_statediff(shortstatehash)?; + let StateDiff { + parent, + added, + removed, + } = self.db.get_statediff(shortstatehash)?; if let Some(parent) = parent { let mut response = self.load_shortstatehash_info(parent)?; @@ -81,7 +88,9 @@ impl Service { ) -> Result { let mut v = shortstatekey.to_be_bytes().to_vec(); v.extend_from_slice( - &services().rooms.short + &services() + .rooms + .short .get_or_create_shorteventid(event_id)? 
.to_be_bytes(), ); @@ -175,7 +184,14 @@ impl Service { if parent_states.is_empty() { // There is no parent layer, create a new state - self.db.save_statediff(shortstatehash, StateDiff { parent: None, added: statediffnew, removed: statediffremoved })?; + self.db.save_statediff( + shortstatehash, + StateDiff { + parent: None, + added: statediffnew, + removed: statediffremoved, + }, + )?; return Ok(()); }; @@ -217,7 +233,14 @@ impl Service { )?; } else { // Diff small enough, we add diff as layer on top of parent - self.db.save_statediff(shortstatehash, StateDiff { parent: Some(parent.0), added: statediffnew, removed: statediffremoved })?; + self.db.save_statediff( + shortstatehash, + StateDiff { + parent: Some(parent.0), + added: statediffnew, + removed: statediffremoved, + }, + )?; } Ok(()) @@ -228,8 +251,7 @@ impl Service { &self, room_id: &RoomId, new_state_ids_compressed: HashSet, - ) -> Result - { + ) -> Result { let previous_shortstatehash = services().rooms.state.get_room_shortstatehash(room_id)?; let state_hash = utils::calculate_hash( @@ -239,8 +261,10 @@ impl Service { .collect::>(), ); - let (new_shortstatehash, already_existed) = - services().rooms.short.get_or_create_shortstatehash(&state_hash)?; + let (new_shortstatehash, already_existed) = services() + .rooms + .short + .get_or_create_shortstatehash(&state_hash)?; if Some(new_shortstatehash) == previous_shortstatehash { return Ok(new_shortstatehash); diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index 20eae7f1..4ae8ce96 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -1,8 +1,8 @@ use std::sync::Arc; -use ruma::{signatures::CanonicalJsonObject, EventId, UserId, RoomId}; +use ruma::{signatures::CanonicalJsonObject, EventId, RoomId, UserId}; -use crate::{Result, PduEvent}; +use crate::{PduEvent, Result}; pub trait Data: Send + Sync { fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>>; @@ -15,10 +15,7 @@ pub trait Data: Send + Sync { fn get_pdu_json(&self, event_id: &EventId) -> Result>; /// Returns the json of a pdu. - fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result>; + fn get_non_outlier_pdu_json(&self, event_id: &EventId) -> Result>; /// Returns the pdu's id. fn get_pdu_id(&self, event_id: &EventId) -> Result>>; @@ -45,7 +42,13 @@ pub trait Data: Send + Sync { fn pdu_count(&self, pdu_id: &[u8]) -> Result; /// Adds a new pdu to the timeline - fn append_pdu(&self, pdu_id: &[u8], pdu: &PduEvent, json: &CanonicalJsonObject, count: u64) -> Result<()>; + fn append_pdu( + &self, + pdu_id: &[u8], + pdu: &PduEvent, + json: &CanonicalJsonObject, + count: u64, + ) -> Result<()>; /// Removes a pdu and creates a new one with the same id. 
fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()>; @@ -75,5 +78,10 @@ pub trait Data: Send + Sync { from: u64, ) -> Result, PduEvent)>>>>; - fn increment_notification_counts(&self, room_id: &RoomId, notifies: Vec>, highlights: Vec>) -> Result<()>; + fn increment_notification_counts( + &self, + room_id: &RoomId, + notifies: Vec>, + highlights: Vec>, + ) -> Result<()>; } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index f25550d5..b71dacb5 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1,9 +1,9 @@ mod data; use std::borrow::Cow; use std::collections::HashMap; -use std::sync::{Arc, Mutex}; -use std::{iter, collections::HashSet}; use std::fmt::Debug; +use std::sync::{Arc, Mutex}; +use std::{collections::HashSet, iter}; pub use data::Data; use regex::Regex; @@ -11,13 +11,27 @@ use ruma::events::room::power_levels::RoomPowerLevelsEventContent; use ruma::push::Ruleset; use ruma::signatures::CanonicalJsonValue; use ruma::state_res::RoomVersion; -use ruma::{EventId, signatures::CanonicalJsonObject, push::{Action, Tweak}, events::{push_rules::PushRulesEvent, GlobalAccountDataEventType, RoomEventType, room::{member::MembershipState, create::RoomCreateEventContent}, StateEventType}, UserId, RoomAliasId, RoomId, uint, state_res, api::client::error::ErrorKind, serde::to_canonical_value, ServerName}; +use ruma::{ + api::client::error::ErrorKind, + events::{ + push_rules::PushRulesEvent, + room::{create::RoomCreateEventContent, member::MembershipState}, + GlobalAccountDataEventType, RoomEventType, StateEventType, + }, + push::{Action, Tweak}, + serde::to_canonical_value, + signatures::CanonicalJsonObject, + state_res, uint, EventId, RoomAliasId, RoomId, ServerName, UserId, +}; use serde::Deserialize; use serde_json::value::to_raw_value; use tokio::sync::MutexGuard; -use tracing::{warn, error}; +use tracing::{error, warn}; -use crate::{services, Result, service::pdu::{PduBuilder, EventHash}, Error, PduEvent, utils}; +use crate::{ + service::pdu::{EventHash, PduBuilder}, + services, utils, Error, PduEvent, Result, +}; use super::state_compressor::CompressedStateEvent; @@ -135,7 +149,11 @@ impl Service { leaves: Vec>, state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result> { - let shortroomid = services().rooms.short.get_shortroomid(&pdu.room_id)?.expect("room exists"); + let shortroomid = services() + .rooms + .short + .get_shortroomid(&pdu.room_id)? + .expect("room exists"); // Make unsigned fields correct. This is not properly documented in the spec, but state // events need to have previous content in the unsigned field, so clients can easily @@ -145,8 +163,15 @@ impl Service { .entry("unsigned".to_owned()) .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) { - if let Some(shortstatehash) = services().rooms.state_accessor.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = services().rooms.state_accessor + if let Some(shortstatehash) = services() + .rooms + .state_accessor + .pdu_shortstatehash(&pdu.event_id) + .unwrap() + { + if let Some(prev_state) = services() + .rooms + .state_accessor .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) .unwrap() { @@ -165,11 +190,18 @@ impl Service { } // We must keep track of all events that have been referenced. 
- services().rooms.pdu_metadata.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - services().rooms.state.set_forward_extremities(&pdu.room_id, leaves, state_lock)?; + services() + .rooms + .pdu_metadata + .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; + services() + .rooms + .state + .set_forward_extremities(&pdu.room_id, leaves, state_lock)?; let mutex_insert = Arc::clone( - services().globals + services() + .globals .roomid_mutex_insert .write() .unwrap() @@ -181,9 +213,15 @@ impl Service { let count1 = services().globals.next_count()?; // Mark as read first so the sending client doesn't get a notification even if appending // fails - services().rooms.edus.read_receipt + services() + .rooms + .edus + .read_receipt .private_read_set(&pdu.room_id, &pdu.sender, count1)?; - services().rooms.user.reset_notification_counts(&pdu.sender, &pdu.room_id)?; + services() + .rooms + .user + .reset_notification_counts(&pdu.sender, &pdu.room_id)?; let count2 = services().globals.next_count()?; let mut pdu_id = shortroomid.to_be_bytes().to_vec(); @@ -211,7 +249,12 @@ impl Service { let mut notifies = Vec::new(); let mut highlights = Vec::new(); - for user in services().rooms.state_cache.get_our_real_users(&pdu.room_id)?.into_iter() { + for user in services() + .rooms + .state_cache + .get_our_real_users(&pdu.room_id)? + .into_iter() + { // Don't notify the user of their own events if &user == &pdu.sender { continue; @@ -224,8 +267,11 @@ impl Service { &user, GlobalAccountDataEventType::PushRules.to_string().into(), )? - .map(|event| serde_json::from_str::(event.get()) - .map_err(|_| Error::bad_database("Invalid push rules event in db."))).transpose()? + .map(|event| { + serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid push rules event in db.")) + }) + .transpose()? .map(|ev: PushRulesEvent| ev.content.global) .unwrap_or_else(|| Ruleset::server_default(&user)); @@ -263,7 +309,8 @@ impl Service { } } - self.db.increment_notification_counts(&pdu.room_id, notifies, highlights); + self.db + .increment_notification_counts(&pdu.room_id, notifies, highlights); match pdu.kind { RoomEventType::RoomRedaction => { @@ -315,7 +362,10 @@ impl Service { .map_err(|_| Error::bad_database("Invalid content in pdu."))?; if let Some(body) = content.body { - services().rooms.search.index_pdu(shortroomid, &pdu_id, body)?; + services() + .rooms + .search + .index_pdu(shortroomid, &pdu_id, body)?; let admin_room = services().rooms.alias.resolve_local_alias( <&RoomAliasId>::try_from( @@ -329,8 +379,8 @@ impl Service { // This will evaluate to false if the emergency password is set up so that // the administrator can execute commands as conduit - let from_conduit = - pdu.sender == server_user && services().globals.emergency_password().is_none(); + let from_conduit = pdu.sender == server_user + && services().globals.emergency_password().is_none(); if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { services().admin.process_message(body.to_string()); @@ -341,8 +391,14 @@ impl Service { } for appservice in services().appservice.all()? { - if services().rooms.state_cache.appservice_in_room(&pdu.room_id, &appservice)? { - services().sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + if services() + .rooms + .state_cache + .appservice_in_room(&pdu.room_id, &appservice)? 
+ { + services() + .sending + .send_pdu_appservice(&appservice.0, &pdu_id)?; continue; } @@ -359,11 +415,14 @@ impl Service { .get("sender_localpart") .and_then(|string| string.as_str()) .and_then(|string| { - UserId::parse_with_server_name(string, services().globals.server_name()).ok() + UserId::parse_with_server_name(string, services().globals.server_name()) + .ok() }) { if state_key_uid == &appservice_uid { - services().sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + services() + .sending + .send_pdu_appservice(&appservice.0, &pdu_id)?; continue; } } @@ -402,7 +461,10 @@ impl Service { .map_or(false, |state_key| users.is_match(state_key)) }; let matching_aliases = |aliases: &Regex| { - services().rooms.alias.local_aliases_for_room(&pdu.room_id) + services() + .rooms + .alias + .local_aliases_for_room(&pdu.room_id) .filter_map(|r| r.ok()) .any(|room_alias| aliases.is_match(room_alias.as_str())) }; @@ -411,21 +473,22 @@ impl Service { || rooms.map_or(false, |rooms| rooms.contains(&pdu.room_id.as_str().into())) || users.iter().any(matching_users) { - services().sending.send_pdu_appservice(&appservice.0, &pdu_id)?; + services() + .sending + .send_pdu_appservice(&appservice.0, &pdu_id)?; } } } - Ok(pdu_id) } pub fn create_hash_and_sign_event( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + &self, + pdu_builder: PduBuilder, + sender: &UserId, + room_id: &RoomId, + _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<(PduEvent, CanonicalJsonObject)> { let PduBuilder { event_type, @@ -443,10 +506,11 @@ impl Service { .take(20) .collect(); - let create_event = services() - .rooms - .state_accessor - .room_state_get(room_id, &StateEventType::RoomCreate, "")?; + let create_event = services().rooms.state_accessor.room_state_get( + room_id, + &StateEventType::RoomCreate, + "", + )?; let create_event_content: Option = create_event .as_ref() @@ -464,11 +528,15 @@ impl Service { .map_or(services().globals.default_room_version(), |create_event| { create_event.room_version }); - let room_version = - RoomVersion::new(&room_version_id).expect("room version is supported"); - - let auth_events = - services().rooms.state.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; + let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); + + let auth_events = services().rooms.state.get_auth_events( + room_id, + &event_type, + sender, + state_key.as_deref(), + &content, + )?; // Our depth is the maximum depth of prev_events + 1 let depth = prev_events @@ -481,9 +549,11 @@ impl Service { let mut unsigned = unsigned.unwrap_or_default(); if let Some(state_key) = &state_key { - if let Some(prev_pdu) = - services().rooms.state_accessor.room_state_get(room_id, &event_type.to_string().into(), state_key)? - { + if let Some(prev_pdu) = services().rooms.state_accessor.room_state_get( + room_id, + &event_type.to_string().into(), + state_key, + )? 
{ unsigned.insert( "prev_content".to_owned(), serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), @@ -589,7 +659,10 @@ impl Service { ); // Generate short event id - let _shorteventid = services().rooms.short.get_or_create_shorteventid(&pdu.event_id)?; + let _shorteventid = services() + .rooms + .short + .get_or_create_shorteventid(&pdu.event_id)?; Ok((pdu, pdu_json)) } @@ -604,7 +677,8 @@ impl Service { room_id: &RoomId, state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result> { - let (pdu, pdu_json) = self.create_hash_and_sign_event(pdu_builder, sender, room_id, &state_lock)?; + let (pdu, pdu_json) = + self.create_hash_and_sign_event(pdu_builder, sender, room_id, &state_lock)?; // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. @@ -621,10 +695,17 @@ impl Service { // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist - services().rooms.state.set_room_state(room_id, statehashid, state_lock)?; + services() + .rooms + .state + .set_room_state(room_id, statehashid, state_lock)?; - let mut servers: HashSet> = - services().rooms.state_cache.room_servers(room_id).filter_map(|r| r.ok()).collect(); + let mut servers: HashSet> = services() + .rooms + .state_cache + .room_servers(room_id) + .filter_map(|r| r.ok()) + .collect(); // In case we are kicking or banning a user, we need to inform their server of the change if pdu.kind == RoomEventType::RoomMember { @@ -666,13 +747,23 @@ impl Service { )?; if soft_fail { - services().rooms.pdu_metadata + services() + .rooms + .pdu_metadata .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - services().rooms.state.set_forward_extremities(&pdu.room_id, new_room_leaves, state_lock)?; + services().rooms.state.set_forward_extremities( + &pdu.room_id, + new_room_leaves, + state_lock, + )?; return Ok(None); } - let pdu_id = services().rooms.timeline.append_pdu(pdu, pdu_json, new_room_leaves, state_lock)?; + let pdu_id = + services() + .rooms + .timeline + .append_pdu(pdu, pdu_json, new_room_leaves, state_lock)?; Ok(Some(pdu_id)) } diff --git a/src/service/rooms/user/data.rs b/src/service/rooms/user/data.rs index 6b7ebc72..fcaff5ac 100644 --- a/src/service/rooms/user/data.rs +++ b/src/service/rooms/user/data.rs @@ -1,5 +1,5 @@ -use ruma::{UserId, RoomId}; use crate::Result; +use ruma::{RoomId, UserId}; pub trait Data: Send + Sync { fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs index 394a550a..1caa4b3f 100644 --- a/src/service/rooms/user/mod.rs +++ b/src/service/rooms/user/mod.rs @@ -29,7 +29,8 @@ impl Service { token: u64, shortstatehash: u64, ) -> Result<()> { - self.db.associate_token_shortstatehash(room_id, token, shortstatehash) + self.db + .associate_token_shortstatehash(room_id, token, shortstatehash) } pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index b3350959..e09d423a 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -6,7 +6,10 @@ use std::{ }; use crate::{ - utils::{self, calculate_hash}, Error, PduEvent, Result, services, api::{server_server, appservice_server}, + api::{appservice_server, server_server}, + services, + utils::{self, 
calculate_hash}, + Error, PduEvent, Result, }; use federation::transactions::send_transaction_message; use futures_util::{stream::FuturesUnordered, StreamExt}; @@ -88,10 +91,7 @@ enum TransactionStatus { } impl Service { - pub fn start_handler( - &self, - mut receiver: mpsc::UnboundedReceiver<(Vec, Vec)>, - ) { + pub fn start_handler(&self, mut receiver: mpsc::UnboundedReceiver<(Vec, Vec)>) { tokio::spawn(async move { let mut futures = FuturesUnordered::new(); @@ -119,7 +119,11 @@ impl Service { "Dropping some current events: {:?} {:?} {:?}", key, outgoing_kind, event ); - services().sending.servercurrentevent_data.remove(&key).unwrap(); + services() + .sending + .servercurrentevent_data + .remove(&key) + .unwrap(); continue; } @@ -129,10 +133,7 @@ impl Service { for (outgoing_kind, events) in initial_transactions { current_transaction_status .insert(outgoing_kind.get_prefix(), TransactionStatus::Running); - futures.push(Self::handle_events( - outgoing_kind.clone(), - events, - )); + futures.push(Self::handle_events(outgoing_kind.clone(), events)); } loop { @@ -246,7 +247,11 @@ impl Service { if retry { // We retry the previous transaction - for (key, value) in services().sending.servercurrentevent_data.scan_prefix(prefix) { + for (key, value) in services() + .sending + .servercurrentevent_data + .scan_prefix(prefix) + { if let Ok((_, e)) = Self::parse_servercurrentevent(&key, value) { events.push(e); } @@ -258,7 +263,8 @@ impl Service { } else { &[][..] }; - services().sending + services() + .sending .servercurrentevent_data .insert(&full_key, value)?; @@ -273,7 +279,8 @@ impl Service { if let Ok((select_edus, last_count)) = Self::select_edus(server_name) { events.extend(select_edus.into_iter().map(SendingEventType::Edu)); - services().sending + services() + .sending .servername_educount .insert(server_name.as_bytes(), &last_count.to_be_bytes())?; } @@ -302,7 +309,8 @@ impl Service { let room_id = room_id?; // Look for device list updates in this room device_list_changes.extend( - services().users + services() + .users .keys_changed(&room_id.to_string(), since, None) .filter_map(|r| r.ok()) .filter(|user_id| user_id.server_name() == services().globals.server_name()), @@ -502,7 +510,8 @@ impl Service { let permit = services().sending.maximum_requests.acquire().await; let response = appservice_server::send_request( - services().appservice + services() + .appservice .get_registration(&id) .map_err(|e| (kind.clone(), e))? 
.ok_or_else(|| { @@ -621,16 +630,12 @@ impl Service { let permit = services().sending.maximum_requests.acquire().await; - let _response = services().pusher.send_push_notice( - &userid, - unread, - &pusher, - rules_for_user, - &pdu, - ) - .await - .map(|_response| kind.clone()) - .map_err(|e| (kind.clone(), e)); + let _response = services() + .pusher + .send_push_notice(&userid, unread, &pusher, rules_for_user, &pdu) + .await + .map(|_response| kind.clone()) + .map_err(|e| (kind.clone(), e)); drop(permit); } diff --git a/src/service/transaction_ids/data.rs b/src/service/transaction_ids/data.rs index c5ff05c0..74855318 100644 --- a/src/service/transaction_ids/data.rs +++ b/src/service/transaction_ids/data.rs @@ -1,5 +1,5 @@ -use ruma::{DeviceId, UserId, TransactionId}; use crate::Result; +use ruma::{DeviceId, TransactionId, UserId}; pub trait Data: Send + Sync { fn add_txnid( diff --git a/src/service/transaction_ids/mod.rs b/src/service/transaction_ids/mod.rs index 8d5fd0af..a473e2b1 100644 --- a/src/service/transaction_ids/mod.rs +++ b/src/service/transaction_ids/mod.rs @@ -3,8 +3,8 @@ use std::sync::Arc; pub use data::Data; -use ruma::{UserId, DeviceId, TransactionId}; use crate::Result; +use ruma::{DeviceId, TransactionId, UserId}; pub struct Service { db: Arc, diff --git a/src/service/uiaa/data.rs b/src/service/uiaa/data.rs index 091f0641..3b7eb2b7 100644 --- a/src/service/uiaa/data.rs +++ b/src/service/uiaa/data.rs @@ -1,5 +1,5 @@ -use ruma::{api::client::uiaa::UiaaInfo, DeviceId, UserId, signatures::CanonicalJsonValue}; use crate::Result; +use ruma::{api::client::uiaa::UiaaInfo, signatures::CanonicalJsonValue, DeviceId, UserId}; pub trait Data: Send + Sync { fn set_uiaa_request( diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index 5444118f..8f3b3b8b 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -3,10 +3,17 @@ use std::sync::Arc; pub use data::Data; -use ruma::{api::client::{uiaa::{UiaaInfo, IncomingAuthData, IncomingPassword, AuthType, IncomingUserIdentifier}, error::ErrorKind}, DeviceId, UserId, signatures::CanonicalJsonValue}; +use ruma::{ + api::client::{ + error::ErrorKind, + uiaa::{AuthType, IncomingAuthData, IncomingPassword, IncomingUserIdentifier, UiaaInfo}, + }, + signatures::CanonicalJsonValue, + DeviceId, UserId, +}; use tracing::error; -use crate::{Result, utils, Error, services, api::client_server::SESSION_ID_LENGTH}; +use crate::{api::client_server::SESSION_ID_LENGTH, services, utils, Error, Result}; pub struct Service { db: Arc, @@ -68,11 +75,11 @@ impl Service { } }; - let user_id = - UserId::parse_with_server_name(username.clone(), services().globals.server_name()) - .map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "User ID is invalid.") - })?; + let user_id = UserId::parse_with_server_name( + username.clone(), + services().globals.server_name(), + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "User ID is invalid."))?; // Check if password is correct if let Some(hash) = services().users.password_hash(&user_id)? 
{ diff --git a/src/service/users/data.rs b/src/service/users/data.rs index 7eb0cebd..9f315d3b 100644 --- a/src/service/users/data.rs +++ b/src/service/users/data.rs @@ -1,6 +1,12 @@ -use std::collections::BTreeMap; use crate::Result; -use ruma::{UserId, DeviceId, DeviceKeyAlgorithm, DeviceKeyId, serde::Raw, encryption::{OneTimeKey, DeviceKeys, CrossSigningKey}, UInt, events::AnyToDeviceEvent, api::client::{device::Device, filter::IncomingFilterDefinition}, MxcUri}; +use ruma::{ + api::client::{device::Device, filter::IncomingFilterDefinition}, + encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, + events::AnyToDeviceEvent, + serde::Raw, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MxcUri, UInt, UserId, +}; +use std::collections::BTreeMap; pub trait Data: Send + Sync { /// Check if a user has an account on this homeserver. @@ -127,10 +133,7 @@ pub trait Data: Send + Sync { to: Option, ) -> Box>>>; - fn mark_device_key_update( - &self, - user_id: &UserId, - ) -> Result<()>; + fn mark_device_key_update(&self, user_id: &UserId) -> Result<()>; fn get_device_keys( &self, @@ -182,11 +185,8 @@ pub trait Data: Send + Sync { ) -> Result<()>; /// Get device metadata. - fn get_device_metadata( - &self, - user_id: &UserId, - device_id: &DeviceId, - ) -> Result>; + fn get_device_metadata(&self, user_id: &UserId, device_id: &DeviceId) + -> Result>; fn get_devicelist_version(&self, user_id: &UserId) -> Result>; @@ -196,11 +196,7 @@ pub trait Data: Send + Sync { ) -> Box>>; /// Creates a new sync filter. Returns the filter id. - fn create_filter( - &self, - user_id: &UserId, - filter: &IncomingFilterDefinition, - ) -> Result; + fn create_filter(&self, user_id: &UserId, filter: &IncomingFilterDefinition) -> Result; fn get_filter( &self, diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 826e0494..0b83460c 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -2,9 +2,15 @@ mod data; use std::{collections::BTreeMap, mem, sync::Arc}; pub use data::Data; -use ruma::{UserId, MxcUri, DeviceId, DeviceKeyId, serde::Raw, encryption::{OneTimeKey, CrossSigningKey, DeviceKeys}, DeviceKeyAlgorithm, UInt, events::AnyToDeviceEvent, api::client::{device::Device, filter::IncomingFilterDefinition, error::ErrorKind}, RoomAliasId}; +use ruma::{ + api::client::{device::Device, error::ErrorKind, filter::IncomingFilterDefinition}, + encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, + events::AnyToDeviceEvent, + serde::Raw, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MxcUri, RoomAliasId, UInt, UserId, +}; -use crate::{Result, Error, services}; +use crate::{services, Error, Result}; pub struct Service { db: Arc, @@ -22,15 +28,20 @@ impl Service { } /// Check if a user is an admin - pub fn is_admin( - &self, - user_id: &UserId, - ) -> Result { - let admin_room_alias_id = RoomAliasId::parse(format!("#admins:{}", services().globals.server_name())) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; - let admin_room_id = services().rooms.alias.resolve_local_alias(&admin_room_alias_id)?.unwrap(); - - services().rooms.state_cache.is_joined(user_id, &admin_room_id) + pub fn is_admin(&self, user_id: &UserId) -> Result { + let admin_room_alias_id = + RoomAliasId::parse(format!("#admins:{}", services().globals.server_name())) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; + let admin_room_id = services() + .rooms + .alias + .resolve_local_alias(&admin_room_alias_id)? 
+ .unwrap(); + + services() + .rooms + .state_cache + .is_joined(user_id, &admin_room_id) } /// Create a new user account on this homeserver. @@ -39,7 +50,6 @@ impl Service { Ok(()) } - /// Returns the number of users registered on this server. pub fn count(&self) -> Result { self.db.count() @@ -118,7 +128,8 @@ impl Service { token: &str, initial_device_display_name: Option, ) -> Result<()> { - self.db.create_device(user_id, device_id, token, initial_device_display_name) + self.db + .create_device(user_id, device_id, token, initial_device_display_name) } /// Removes a device from a user. @@ -146,7 +157,8 @@ impl Service { one_time_key_key: &DeviceKeyId, one_time_key_value: &Raw, ) -> Result<()> { - self.db.add_one_time_key(user_id, device_id, one_time_key_key, one_time_key_value) + self.db + .add_one_time_key(user_id, device_id, one_time_key_key, one_time_key_value) } pub fn last_one_time_keys_update(&self, user_id: &UserId) -> Result { @@ -186,7 +198,8 @@ impl Service { self_signing_key: &Option>, user_signing_key: &Option>, ) -> Result<()> { - self.db.add_cross_signing_keys(user_id, master_key, self_signing_key, user_signing_key) + self.db + .add_cross_signing_keys(user_id, master_key, self_signing_key, user_signing_key) } pub fn sign_key( @@ -208,10 +221,7 @@ impl Service { self.db.keys_changed(user_or_room_id, from, to) } - pub fn mark_device_key_update( - &self, - user_id: &UserId, - ) -> Result<()> { + pub fn mark_device_key_update(&self, user_id: &UserId) -> Result<()> { self.db.mark_device_key_update(user_id) } @@ -251,7 +261,13 @@ impl Service { event_type: &str, content: serde_json::Value, ) -> Result<()> { - self.db.add_to_device_event(sender, target_user_id, target_device_id, event_type, content) + self.db.add_to_device_event( + sender, + target_user_id, + target_device_id, + event_type, + content, + ) } pub fn get_to_device_events( diff --git a/src/utils/mod.rs b/src/utils/mod.rs index 0ee3ae84..9202eebb 100644 --- a/src/utils/mod.rs +++ b/src/utils/mod.rs @@ -78,7 +78,6 @@ pub fn calculate_hash(keys: &[&[u8]]) -> Vec { hash.as_ref().to_owned() } - pub fn common_elements( mut iterators: impl Iterator>>, check_order: impl Fn(&[u8], &[u8]) -> Ordering, From f47a5cd5d5ce20b5996d28e37415771ee6d8a34b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 5 Oct 2022 20:41:05 +0200 Subject: [PATCH 392/445] cargo fix --- src/api/client_server/account.rs | 7 +++---- src/api/client_server/membership.rs | 7 +------ src/api/client_server/tag.rs | 6 +++--- src/api/ruma_wrapper/axum.rs | 2 +- src/api/server_server.rs | 27 +++++++++----------------- src/database/key_value/account_data.rs | 8 +++----- src/database/key_value/rooms/state.rs | 2 +- src/database/key_value/users.rs | 2 +- src/database/mod.rs | 9 +++------ src/lib.rs | 3 +-- src/service/account_data/mod.rs | 11 ++++------- src/service/admin/mod.rs | 3 +-- src/service/globals/mod.rs | 6 +++--- src/service/key_backups/mod.rs | 3 +-- src/service/media/mod.rs | 4 ++-- src/service/mod.rs | 2 +- src/service/rooms/event_handler/mod.rs | 2 +- src/service/rooms/short/mod.rs | 2 +- src/service/rooms/state_cache/mod.rs | 10 ++++------ src/service/rooms/timeline/mod.rs | 6 +++--- src/service/sending/mod.rs | 6 +++--- 21 files changed, 50 insertions(+), 78 deletions(-) diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index 28d6c07f..e27d295e 100644 --- a/src/api/client_server/account.rs +++ b/src/api/client_server/account.rs @@ -1,4 +1,4 @@ -use std::sync::Arc; + use super::{DEVICE_ID_LENGTH, 
SESSION_ID_LENGTH, TOKEN_LENGTH}; use crate::{api::client_server, services, utils, Error, Result, Ruma}; @@ -13,14 +13,13 @@ use ruma::{ }, events::{ room::{ - member::{MembershipState, RoomMemberEventContent}, message::RoomMessageEventContent, }, - GlobalAccountDataEventType, RoomEventType, + GlobalAccountDataEventType, }, push, UserId, }; -use serde_json::value::to_raw_value; + use tracing::{info, warn}; use register::RegistrationKind; diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index 5de8ce1e..8ccaa89c 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -12,26 +12,21 @@ use ruma::{ }, events::{ room::{ - create::RoomCreateEventContent, member::{MembershipState, RoomMemberEventContent}, }, RoomEventType, StateEventType, }, - serde::{to_canonical_value, Base64, CanonicalJsonObject, CanonicalJsonValue}, - state_res::{self, RoomVersion}, - uint, EventId, RoomId, RoomVersionId, ServerName, UserId, + serde::{to_canonical_value, Base64, CanonicalJsonObject, CanonicalJsonValue}, EventId, RoomId, RoomVersionId, ServerName, UserId, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, - iter, sync::{Arc, RwLock}, time::{Duration, Instant}, }; use tracing::{debug, error, warn}; use crate::{ - api::{client_server, server_server}, service::pdu::{gen_event_id_canonical_json, PduBuilder}, services, utils, Error, PduEvent, Result, Ruma, }; diff --git a/src/api/client_server/tag.rs b/src/api/client_server/tag.rs index cb46d9c6..c87e2335 100644 --- a/src/api/client_server/tag.rs +++ b/src/api/client_server/tag.rs @@ -62,7 +62,7 @@ pub async fn delete_tag_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let mut event = services().account_data.get( + let event = services().account_data.get( Some(&body.room_id), sender_user, RoomAccountDataEventType::Tag, @@ -103,13 +103,13 @@ pub async fn get_tags_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let mut event = services().account_data.get( + let event = services().account_data.get( Some(&body.room_id), sender_user, RoomAccountDataEventType::Tag, )?; - let mut tags_event = event + let tags_event = event .map(|e| { serde_json::from_str(e.get()) .map_err(|_| Error::bad_database("Invalid account data event in db.")) diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index ee8c9e70..2d986a5c 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -24,7 +24,7 @@ use serde::Deserialize; use tracing::{debug, error, warn}; use super::{Ruma, RumaResponse}; -use crate::{api::server_server, services, Error, Result}; +use crate::{services, Error, Result}; #[async_trait] impl FromRequest for Ruma diff --git a/src/api/server_server.rs b/src/api/server_server.rs index dba44893..c832b0d4 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -4,10 +4,10 @@ use crate::{ services, utils, Error, PduEvent, Result, Ruma, }; use axum::{response::IntoResponse, Json}; -use futures_util::{stream::FuturesUnordered, StreamExt}; +use futures_util::{StreamExt}; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION}; -use regex::Regex; + use ruma::{ api::{ client::error::{Error as RumaError, ErrorKind}, @@ -16,8 +16,7 @@ use ruma::{ device::get_devices::{self, v1::UserDevice}, directory::{get_public_rooms, 
get_public_rooms_filtered}, discovery::{ - get_remote_server_keys, get_remote_server_keys_batch, - get_remote_server_keys_batch::v2::QueryCriteria, get_server_keys, + get_server_keys, get_server_version, ServerSigningKeys, VerifyKey, }, event::{get_event, get_missing_events, get_room_state, get_room_state_ids}, @@ -40,36 +39,28 @@ use ruma::{ events::{ receipt::{ReceiptEvent, ReceiptEventContent}, room::{ - create::RoomCreateEventContent, join_rules::{JoinRule, RoomJoinRulesEventContent}, member::{MembershipState, RoomMemberEventContent}, - server_acl::RoomServerAclEventContent, }, RoomEventType, StateEventType, }, - int, receipt::ReceiptType, serde::{Base64, JsonObject, Raw}, - signatures::{CanonicalJsonObject, CanonicalJsonValue}, - state_res::{self, RoomVersion, StateMap}, - to_device::DeviceIdOrAllDevices, - uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, ServerName, + signatures::{CanonicalJsonValue}, + to_device::DeviceIdOrAllDevices, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, ServerSigningKeyId, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ - collections::{btree_map, hash_map, BTreeMap, BTreeSet, HashMap, HashSet}, + collections::{BTreeMap}, fmt::Debug, - future::Future, mem, net::{IpAddr, SocketAddr}, - ops::Deref, - pin::Pin, - sync::{Arc, RwLock, RwLockWriteGuard}, + sync::{Arc, RwLock}, time::{Duration, Instant, SystemTime}, }; -use tokio::sync::{MutexGuard, Semaphore}; -use tracing::{debug, error, info, trace, warn}; + +use tracing::{info, warn}; /// Wraps either an literal IP address plus port, or a hostname plus complement /// (colon-plus-port if it was specified). diff --git a/src/database/key_value/account_data.rs b/src/database/key_value/account_data.rs index 7d2a870e..0e8029ff 100644 --- a/src/database/key_value/account_data.rs +++ b/src/database/key_value/account_data.rs @@ -1,13 +1,11 @@ use std::collections::HashMap; use ruma::{ - api::client::{error::ErrorKind, uiaa::UiaaInfo}, + api::client::{error::ErrorKind}, events::{AnyEphemeralRoomEvent, RoomAccountDataEventType}, - serde::Raw, - signatures::CanonicalJsonValue, - DeviceId, RoomId, UserId, + serde::Raw, RoomId, UserId, }; -use serde::{de::DeserializeOwned, Serialize}; + use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; diff --git a/src/database/key_value/rooms/state.rs b/src/database/key_value/rooms/state.rs index 80a74589..dbc1398c 100644 --- a/src/database/key_value/rooms/state.rs +++ b/src/database/key_value/rooms/state.rs @@ -1,6 +1,6 @@ use ruma::{EventId, RoomId}; use std::collections::HashSet; -use std::fmt::Debug; + use std::sync::Arc; use tokio::sync::MutexGuard; diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs index 15699a16..791e2498 100644 --- a/src/database/key_value/users.rs +++ b/src/database/key_value/users.rs @@ -5,7 +5,7 @@ use ruma::{ encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::{AnyToDeviceEvent, StateEventType}, serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, MxcUri, RoomAliasId, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, MxcUri, UInt, UserId, }; use tracing::warn; diff --git a/src/database/mod.rs b/src/database/mod.rs index 8a7c78e7..c4e64af8 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -3,16 +3,14 @@ pub mod key_value; use crate::{ service::{ - account_data, appservice, globals, key_backups, media, pusher, - rooms::{self, state_compressor::CompressedStateEvent}, - 
sending, transaction_ids, uiaa, users, + rooms::{state_compressor::CompressedStateEvent}, }, services, utils, Config, Error, PduEvent, Result, Services, SERVICES, }; use abstraction::KeyValueDatabaseEngine; use abstraction::KvTree; use directories::ProjectDirs; -use futures_util::{stream::FuturesUnordered, StreamExt}; +use futures_util::{StreamExt}; use lru_cache::LruCache; use ruma::{ events::{ @@ -28,11 +26,10 @@ use std::{ fs::{self, remove_dir_all}, io::Write, mem::size_of, - ops::Deref, path::Path, sync::{Arc, Mutex, RwLock}, }; -use tokio::sync::{mpsc, OwnedRwLockReadGuard, RwLock as TokioRwLock, Semaphore}; +use tokio::sync::{mpsc}; use tracing::{debug, error, info, warn}; pub struct KeyValueDatabase { diff --git a/src/lib.rs b/src/lib.rs index e6421e8e..0afc75f1 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -14,8 +14,7 @@ mod service; mod utils; use std::{ - cell::Cell, - sync::{Arc, RwLock}, + sync::{RwLock}, }; pub use api::ruma_wrapper::{Ruma, RumaResponse}; diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index 5bf167d1..60a53080 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -3,17 +3,14 @@ mod data; pub use data::Data; use ruma::{ - api::client::error::ErrorKind, events::{AnyEphemeralRoomEvent, RoomAccountDataEventType}, - serde::Raw, - signatures::CanonicalJsonValue, - DeviceId, RoomId, UserId, + serde::Raw, RoomId, UserId, }; -use serde::{de::DeserializeOwned, Serialize}; + use std::{collections::HashMap, sync::Arc}; -use tracing::error; -use crate::{service::*, services, utils, Error, Result}; + +use crate::{Result}; pub struct Service { db: Arc, diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index db596a35..8725e674 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -26,12 +26,11 @@ use ruma::{ EventId, RoomAliasId, RoomId, RoomName, RoomVersionId, ServerName, UserId, }; use serde_json::value::to_raw_value; -use tokio::sync::{mpsc, MutexGuard, RwLock, RwLockReadGuard}; +use tokio::sync::{mpsc, MutexGuard}; use crate::{ api::{ client_server::{leave_all_rooms, AUTO_GEN_PASSWORD_LENGTH}, - server_server, }, services, utils::{self, HtmlEscape}, diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 23a61599..6e03c156 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -2,15 +2,15 @@ mod data; pub use data::Data; use crate::api::server_server::FedDest; -use crate::service::*; -use crate::{utils, Config, Error, Result}; + +use crate::{Config, Error, Result}; use ruma::{ api::{ client::sync::sync_events, federation::discovery::{ServerSigningKeys, VerifyKey}, }, - DeviceId, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, ServerName, + DeviceId, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; use std::{ diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs index 41ec1c1b..31652d2f 100644 --- a/src/service/key_backups/mod.rs +++ b/src/service/key_backups/mod.rs @@ -1,11 +1,10 @@ mod data; pub use data::Data; -use crate::{services, utils, Error, Result}; +use crate::{Result}; use ruma::{ api::client::{ backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, - error::ErrorKind, }, serde::Raw, RoomId, UserId, diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index ea276c04..61a733a7 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -1,9 +1,9 @@ mod data; pub use data::Data; -use crate::{services, utils, Error, Result}; +use 
crate::{services, Result}; use image::{imageops::FilterType, GenericImageView}; -use std::{mem, sync::Arc}; +use std::{sync::Arc}; use tokio::{ fs::File, io::{AsyncReadExt, AsyncWriteExt}, diff --git a/src/service/mod.rs b/src/service/mod.rs index dbddf405..e1c6f7a4 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -1,5 +1,5 @@ use std::{ - collections::{BTreeMap, HashMap}, + collections::{HashMap}, sync::{Arc, Mutex}, }; diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 689f6780..12320388 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -34,7 +34,7 @@ use ruma::{ state_res::{self, RoomVersion, StateMap}, uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, ServerSigningKeyId, }; -use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; +use serde_json::value::{RawValue as RawJsonValue}; use tracing::{debug, error, info, trace, warn}; use crate::{service::*, services, Error, PduEvent, Result}; diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index 1d2e0407..efa4362a 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -4,7 +4,7 @@ use std::sync::Arc; pub use data::Data; use ruma::{events::StateEventType, EventId, RoomId}; -use crate::{services, utils, Error, Result}; +use crate::{Result}; pub struct Service { db: Arc, diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 04eb9afb..608dbcaa 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -2,21 +2,19 @@ mod data; use std::{collections::HashSet, sync::Arc}; pub use data::Data; -use regex::Regex; + use ruma::{ events::{ - direct::{DirectEvent, DirectEventContent}, + direct::{DirectEvent}, ignored_user_list::IgnoredUserListEvent, room::{create::RoomCreateEventContent, member::MembershipState}, - tag::{TagEvent, TagEventContent}, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, RoomAccountDataEvent, - RoomAccountDataEventContent, RoomAccountDataEventType, StateEventType, + AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, RoomAccountDataEventType, StateEventType, }, serde::Raw, RoomId, ServerName, UserId, }; -use crate::{services, utils, Error, Result}; +use crate::{services, Error, Result}; pub struct Service { db: Arc, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index b71dacb5..73f1451b 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -1,9 +1,9 @@ mod data; -use std::borrow::Cow; + use std::collections::HashMap; -use std::fmt::Debug; + use std::sync::{Arc, Mutex}; -use std::{collections::HashSet, iter}; +use std::{collections::HashSet}; pub use data::Data; use regex::Regex; diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index e09d423a..e5e8cffd 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -13,7 +13,7 @@ use crate::{ }; use federation::transactions::send_transaction_message; use futures_util::{stream::FuturesUnordered, StreamExt}; -use ring::digest; + use ruma::{ api::{ appservice, @@ -33,7 +33,7 @@ use ruma::{ }; use tokio::{ select, - sync::{mpsc, RwLock, Semaphore}, + sync::{mpsc, Semaphore}, }; use tracing::{error, warn}; @@ -297,7 +297,7 @@ impl Service { .sending .servername_educount .get(server.as_bytes())? 
- .map_or(Ok(0), |bytes| { + .map_or(Ok(0), |&bytes| { utils::u64_from_bytes(&bytes) .map_err(|_| Error::bad_database("Invalid u64 in servername_educount.")) })?; From d5b4754cf47982c91898bde9a9bb61a8cbf6ab40 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 8 Oct 2022 13:02:52 +0200 Subject: [PATCH 393/445] 0 errors left! --- src/api/client_server/membership.rs | 13 +- src/api/server_server.rs | 2 +- src/database/key_value/media.rs | 4 +- src/database/key_value/mod.rs | 2 +- src/database/key_value/pusher.rs | 21 +- src/database/key_value/rooms/alias.rs | 6 +- src/database/key_value/rooms/directory.rs | 2 +- .../key_value/rooms/edus/read_receipt.rs | 2 +- src/database/key_value/rooms/metadata.rs | 14 +- src/database/key_value/rooms/search.rs | 4 +- src/database/key_value/rooms/timeline.rs | 10 +- src/database/key_value/rooms/user.rs | 2 +- src/database/key_value/sending.rs | 203 +++++++++ src/database/key_value/users.rs | 55 +-- src/database/mod.rs | 36 +- src/lib.rs | 1 + src/main.rs | 17 +- src/service/account_data/mod.rs | 2 +- src/service/admin/mod.rs | 140 +++--- src/service/appservice/mod.rs | 2 +- src/service/globals/mod.rs | 6 +- src/service/key_backups/mod.rs | 2 +- src/service/media/mod.rs | 2 +- src/service/mod.rs | 72 +-- src/service/pusher/data.rs | 4 +- src/service/pusher/mod.rs | 12 +- src/service/rooms/alias/data.rs | 6 +- src/service/rooms/alias/mod.rs | 4 +- src/service/rooms/auth_chain/mod.rs | 2 +- src/service/rooms/directory/data.rs | 2 +- src/service/rooms/directory/mod.rs | 2 +- src/service/rooms/edus/mod.rs | 2 +- src/service/rooms/edus/presence/mod.rs | 2 +- src/service/rooms/edus/read_receipt/data.rs | 6 +- src/service/rooms/edus/read_receipt/mod.rs | 2 +- src/service/rooms/edus/typing/mod.rs | 2 +- src/service/rooms/event_handler/mod.rs | 4 +- src/service/rooms/lazy_loading/mod.rs | 6 +- src/service/rooms/metadata/data.rs | 1 + src/service/rooms/metadata/mod.rs | 6 +- src/service/rooms/outlier/mod.rs | 2 +- src/service/rooms/pdu_metadata/mod.rs | 2 +- src/service/rooms/search/data.rs | 4 +- src/service/rooms/search/mod.rs | 4 +- src/service/rooms/short/mod.rs | 2 +- src/service/rooms/state/mod.rs | 6 +- src/service/rooms/state_accessor/mod.rs | 2 +- src/service/rooms/state_cache/mod.rs | 6 +- src/service/rooms/state_compressor/mod.rs | 2 +- src/service/rooms/timeline/data.rs | 6 +- src/service/rooms/timeline/mod.rs | 28 +- src/service/rooms/user/data.rs | 2 +- src/service/rooms/user/mod.rs | 2 +- src/service/sending/data.rs | 29 ++ src/service/sending/mod.rs | 425 ++++++------------ src/service/transaction_ids/mod.rs | 2 +- src/service/uiaa/mod.rs | 2 +- src/service/users/data.rs | 14 +- src/service/users/mod.rs | 14 +- 59 files changed, 665 insertions(+), 572 deletions(-) create mode 100644 src/database/key_value/sending.rs create mode 100644 src/service/sending/data.rs diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index 8ccaa89c..0aae9959 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -670,7 +670,7 @@ async fn join_room_by_id_helper( .add_pdu_outlier(&event_id, &value)?; } - let shortstatehash = services().rooms.state.set_event_state( + let statehash_before_join = services().rooms.state.set_event_state( event_id, room_id, state @@ -684,8 +684,15 @@ async fn join_room_by_id_helper( .collect::>()?, )?; + services() + .rooms + .state + .set_room_state(room_id, statehash_before_join, &state_lock)?; + // We append to state before appending the pdu, so we 
don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. + let statehash_after_join = services().rooms.state.append_to_state(&parsed_pdu)?; + services().rooms.timeline.append_pdu( &parsed_pdu, join_event, @@ -698,9 +705,7 @@ async fn join_room_by_id_helper( services() .rooms .state - .set_room_state(room_id, shortstatehash, &state_lock)?; - - let statehashid = services().rooms.state.append_to_state(&parsed_pdu)?; + .set_room_state(room_id, statehash_after_join, &state_lock)?; } else { let event = RoomMemberEventContent { membership: MembershipState::Join, diff --git a/src/api/server_server.rs b/src/api/server_server.rs index c832b0d4..bcf893c6 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -1319,7 +1319,7 @@ pub async fn create_join_event_template_route( }) .expect("member event is valid value"); - let (pdu, pdu_json) = services().rooms.timeline.create_hash_and_sign_event( + let (_pdu, pdu_json) = services().rooms.timeline.create_hash_and_sign_event( PduBuilder { event_type: RoomEventType::RoomMember, content, diff --git a/src/database/key_value/media.rs b/src/database/key_value/media.rs index de96ace1..6abe5ba5 100644 --- a/src/database/key_value/media.rs +++ b/src/database/key_value/media.rs @@ -43,8 +43,8 @@ impl service::media::Data for KeyValueDatabase { ) -> Result<(Option, Option, Vec)> { let mut prefix = mxc.as_bytes().to_vec(); prefix.push(0xff); - prefix.extend_from_slice(&0_u32.to_be_bytes()); // Width = 0 if it's not a thumbnail - prefix.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail + prefix.extend_from_slice(&width.to_be_bytes()); + prefix.extend_from_slice(&height.to_be_bytes()); prefix.push(0xff); let (key, _) = self diff --git a/src/database/key_value/mod.rs b/src/database/key_value/mod.rs index efb85509..c4496af8 100644 --- a/src/database/key_value/mod.rs +++ b/src/database/key_value/mod.rs @@ -7,7 +7,7 @@ mod media; //mod pdu; mod pusher; mod rooms; -//mod sending; +mod sending; mod transaction_ids; mod uiaa; mod users; diff --git a/src/database/key_value/pusher.rs b/src/database/key_value/pusher.rs index 15f4e26e..1468a553 100644 --- a/src/database/key_value/pusher.rs +++ b/src/database/key_value/pusher.rs @@ -3,7 +3,7 @@ use ruma::{ UserId, }; -use crate::{database::KeyValueDatabase, service, Error, Result}; +use crate::{database::KeyValueDatabase, service, Error, Result, utils}; impl service::pusher::Data for KeyValueDatabase { fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> { @@ -28,9 +28,13 @@ impl service::pusher::Data for KeyValueDatabase { Ok(()) } - fn get_pusher(&self, senderkey: &[u8]) -> Result> { + fn get_pusher(&self, sender: &UserId, pushkey: &str) -> Result> { + let mut senderkey = sender.as_bytes().to_vec(); + senderkey.push(0xff); + senderkey.extend_from_slice(pushkey.as_bytes()); + self.senderkey_pusher - .get(senderkey)? + .get(&senderkey)? 
.map(|push| { serde_json::from_slice(&*push) .map_err(|_| Error::bad_database("Invalid Pusher in db.")) @@ -51,10 +55,17 @@ impl service::pusher::Data for KeyValueDatabase { .collect() } - fn get_pusher_senderkeys<'a>(&'a self, sender: &UserId) -> Box>> { + fn get_pushkeys<'a>(&'a self, sender: &UserId) -> Box> + 'a> { let mut prefix = sender.as_bytes().to_vec(); prefix.push(0xff); - Box::new(self.senderkey_pusher.scan_prefix(prefix).map(|(k, _)| k)) + Box::new(self.senderkey_pusher.scan_prefix(prefix).map(|(k, _)| { + let mut parts = k.splitn(2, |&b| b == 0xff); + let _senderkey = parts.next(); + let push_key = parts.next().ok_or_else(|| Error::bad_database("Invalid senderkey_pusher in db"))?; + let push_key_string = utils::string_from_bytes(push_key).map_err(|_| Error::bad_database("Invalid pusher bytes in senderkey_pusher"))?; + + Ok(push_key_string) + })) } } diff --git a/src/database/key_value/rooms/alias.rs b/src/database/key_value/rooms/alias.rs index 112d6eb0..f3de89da 100644 --- a/src/database/key_value/rooms/alias.rs +++ b/src/database/key_value/rooms/alias.rs @@ -43,10 +43,10 @@ impl service::rooms::alias::Data for KeyValueDatabase { .transpose() } - fn local_aliases_for_room( - &self, + fn local_aliases_for_room<'a>( + &'a self, room_id: &RoomId, - ) -> Box>>> { + ) -> Box>> + 'a> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/key_value/rooms/directory.rs b/src/database/key_value/rooms/directory.rs index 661c202d..212ced91 100644 --- a/src/database/key_value/rooms/directory.rs +++ b/src/database/key_value/rooms/directory.rs @@ -15,7 +15,7 @@ impl service::rooms::directory::Data for KeyValueDatabase { Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) } - fn public_rooms(&self) -> Box>>> { + fn public_rooms<'a>(&'a self) -> Box>> + 'a> { Box::new(self.publicroomids.iter().map(|(bytes, _)| { RoomId::parse( utils::string_from_bytes(&bytes).map_err(|_| { diff --git a/src/database/key_value/rooms/edus/read_receipt.rs b/src/database/key_value/rooms/edus/read_receipt.rs index c78f0f51..19c1ced7 100644 --- a/src/database/key_value/rooms/edus/read_receipt.rs +++ b/src/database/key_value/rooms/edus/read_receipt.rs @@ -59,7 +59,7 @@ impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { u64, Raw, )>, - >, + > + 'a, > { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/key_value/rooms/metadata.rs b/src/database/key_value/rooms/metadata.rs index 63a6b1aa..2ec18bed 100644 --- a/src/database/key_value/rooms/metadata.rs +++ b/src/database/key_value/rooms/metadata.rs @@ -1,6 +1,6 @@ use ruma::RoomId; -use crate::{database::KeyValueDatabase, service, services, Result}; +use crate::{database::KeyValueDatabase, service, services, Result, utils, Error}; impl service::rooms::metadata::Data for KeyValueDatabase { fn exists(&self, room_id: &RoomId) -> Result { @@ -18,6 +18,18 @@ impl service::rooms::metadata::Data for KeyValueDatabase { .is_some()) } + fn iter_ids<'a>(&'a self) -> Box>> + 'a> { + Box::new(self.roomid_shortroomid.iter().map(|(bytes, _)| { + RoomId::parse( + utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Room ID in publicroomids is invalid unicode.") + })?, + ) + .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) + })) + + } + fn is_disabled(&self, room_id: &RoomId) -> Result { Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) } diff --git a/src/database/key_value/rooms/search.rs b/src/database/key_value/rooms/search.rs 
index 79e6a326..8aa7a639 100644 --- a/src/database/key_value/rooms/search.rs +++ b/src/database/key_value/rooms/search.rs @@ -5,7 +5,7 @@ use ruma::RoomId; use crate::{database::KeyValueDatabase, service, services, utils, Result}; impl service::rooms::search::Data for KeyValueDatabase { - fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: String) -> Result<()> { + fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> { let mut batch = message_body .split_terminator(|c: char| !c.is_alphanumeric()) .filter(|s| !s.is_empty()) @@ -26,7 +26,7 @@ impl service::rooms::search::Data for KeyValueDatabase { &'a self, room_id: &RoomId, search_string: &str, - ) -> Result>>, Vec)>> { + ) -> Result>+ 'a>, Vec)>> { let prefix = services() .rooms .short diff --git a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs index 5d684a1b..1660a9ec 100644 --- a/src/database/key_value/rooms/timeline.rs +++ b/src/database/key_value/rooms/timeline.rs @@ -235,7 +235,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { user_id: &UserId, room_id: &RoomId, since: u64, - ) -> Result, PduEvent)>>>> { + ) -> Result, PduEvent)>> + 'a>> { let prefix = services() .rooms .short @@ -272,7 +272,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { user_id: &UserId, room_id: &RoomId, until: u64, - ) -> Result, PduEvent)>>>> { + ) -> Result, PduEvent)>> + 'a>> { // Create the first part of the full pdu id let prefix = services() .rooms @@ -309,7 +309,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { user_id: &UserId, room_id: &RoomId, from: u64, - ) -> Result, PduEvent)>>>> { + ) -> Result, PduEvent)>> + 'a>> { // Create the first part of the full pdu id let prefix = services() .rooms @@ -347,8 +347,8 @@ impl service::rooms::timeline::Data for KeyValueDatabase { notifies: Vec>, highlights: Vec>, ) -> Result<()> { - let notifies_batch = Vec::new(); - let highlights_batch = Vec::new(); + let mut notifies_batch = Vec::new(); + let mut highlights_batch = Vec::new(); for user in notifies { let mut userroom_id = user.as_bytes().to_vec(); userroom_id.push(0xff); diff --git a/src/database/key_value/rooms/user.rs b/src/database/key_value/rooms/user.rs index 78c78e19..9230e611 100644 --- a/src/database/key_value/rooms/user.rs +++ b/src/database/key_value/rooms/user.rs @@ -86,7 +86,7 @@ impl service::rooms::user::Data for KeyValueDatabase { fn get_shared_rooms<'a>( &'a self, users: Vec>, - ) -> Result>>>> { + ) -> Result>> + 'a>> { let iterators = users.into_iter().map(move |user_id| { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/key_value/sending.rs b/src/database/key_value/sending.rs new file mode 100644 index 00000000..d84bd494 --- /dev/null +++ b/src/database/key_value/sending.rs @@ -0,0 +1,203 @@ +use ruma::{ServerName, UserId}; + +use crate::{ + database::KeyValueDatabase, + service::{ + self, + sending::{OutgoingKind, SendingEventType}, + }, + utils, Error, Result, +}; + +impl service::sending::Data for KeyValueDatabase { + fn active_requests<'a>( + &'a self, + ) -> Box, OutgoingKind, SendingEventType)>> + 'a> { + Box::new( + self.servercurrentevent_data + .iter() + .map(|(key, v)| parse_servercurrentevent(&key, v).map(|(k, e)| (key, k, e))), + ) + } + + fn active_requests_for<'a>( + &'a self, + outgoing_kind: &OutgoingKind, + ) -> Box, SendingEventType)>> + 'a> { + let prefix = outgoing_kind.get_prefix(); + Box::new( + self.servercurrentevent_data + 
.scan_prefix(prefix) + .map(|(key, v)| parse_servercurrentevent(&key, v).map(|(_, e)| (key, e))), + ) + } + + fn delete_active_request(&self, key: Vec) -> Result<()> { + self.servercurrentevent_data.remove(&key) + } + + fn delete_all_active_requests_for(&self, outgoing_kind: &OutgoingKind) -> Result<()> { + let prefix = outgoing_kind.get_prefix(); + for (key, _) in self.servercurrentevent_data.scan_prefix(prefix.clone()) { + self.servercurrentevent_data.remove(&key)?; + } + + Ok(()) + } + + fn delete_all_requests_for(&self, outgoing_kind: &OutgoingKind) -> Result<()> { + let prefix = outgoing_kind.get_prefix(); + for (key, _) in self.servercurrentevent_data.scan_prefix(prefix.clone()) { + self.servercurrentevent_data.remove(&key).unwrap(); + } + + for (key, _) in self.servernameevent_data.scan_prefix(prefix.clone()) { + self.servernameevent_data.remove(&key).unwrap(); + } + + Ok(()) + } + + fn queue_requests( + &self, + requests: &[(&OutgoingKind, SendingEventType)], + ) -> Result>> { + let mut batch = Vec::new(); + let mut keys = Vec::new(); + for (outgoing_kind, event) in requests { + let mut key = outgoing_kind.get_prefix(); + key.push(0xff); + key.extend_from_slice(if let SendingEventType::Pdu(value) = &event { + &**value + } else { + &[] + }); + let value = if let SendingEventType::Edu(value) = &event { + &**value + } else { + &[] + }; + batch.push((key.clone(), value.to_owned())); + keys.push(key); + } + self.servernameevent_data + .insert_batch(&mut batch.into_iter())?; + Ok(keys) + } + + fn queued_requests<'a>( + &'a self, + outgoing_kind: &OutgoingKind, + ) -> Box)>> + 'a> { + let prefix = outgoing_kind.get_prefix(); + return Box::new( + self.servernameevent_data + .scan_prefix(prefix.clone()) + .map(|(k, v)| parse_servercurrentevent(&k, v).map(|(_, ev)| (ev, k))), + ); + } + + fn mark_as_active(&self, events: &[(SendingEventType, Vec)]) -> Result<()> { + for (e, key) in events { + let value = if let SendingEventType::Edu(value) = &e { + &**value + } else { + &[] + }; + self.servercurrentevent_data.insert(key, value)?; + self.servernameevent_data.remove(key)?; + } + + Ok(()) + } + + fn set_latest_educount(&self, server_name: &ServerName, last_count: u64) -> Result<()> { + self.servername_educount + .insert(server_name.as_bytes(), &last_count.to_be_bytes()) + } + + fn get_latest_educount(&self, server_name: &ServerName) -> Result { + self.servername_educount + .get(server_name.as_bytes())? 
+ .map_or(Ok(0), |bytes| { + utils::u64_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Invalid u64 in servername_educount.")) + }) + } +} + +#[tracing::instrument(skip(key))] +fn parse_servercurrentevent( + key: &[u8], + value: Vec, +) -> Result<(OutgoingKind, SendingEventType)> { + // Appservices start with a plus + Ok::<_, Error>(if key.starts_with(b"+") { + let mut parts = key[1..].splitn(2, |&b| b == 0xff); + + let server = parts.next().expect("splitn always returns one element"); + let event = parts + .next() + .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; + let server = utils::string_from_bytes(server).map_err(|_| { + Error::bad_database("Invalid server bytes in server_currenttransaction") + })?; + + ( + OutgoingKind::Appservice(server), + if value.is_empty() { + SendingEventType::Pdu(event.to_vec()) + } else { + SendingEventType::Edu(value) + }, + ) + } else if key.starts_with(b"$") { + let mut parts = key[1..].splitn(3, |&b| b == 0xff); + + let user = parts.next().expect("splitn always returns one element"); + let user_string = utils::string_from_bytes(&user) + .map_err(|_| Error::bad_database("Invalid user string in servercurrentevent"))?; + let user_id = UserId::parse(user_string) + .map_err(|_| Error::bad_database("Invalid user id in servercurrentevent"))?; + + let pushkey = parts + .next() + .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; + let pushkey_string = utils::string_from_bytes(pushkey) + .map_err(|_| Error::bad_database("Invalid pushkey in servercurrentevent"))?; + + let event = parts + .next() + .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; + ( + OutgoingKind::Push(user_id, pushkey_string), + if value.is_empty() { + SendingEventType::Pdu(event.to_vec()) + } else { + // I'm pretty sure this should never be called + SendingEventType::Edu(value) + }, + ) + } else { + let mut parts = key.splitn(2, |&b| b == 0xff); + + let server = parts.next().expect("splitn always returns one element"); + let event = parts + .next() + .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; + let server = utils::string_from_bytes(server).map_err(|_| { + Error::bad_database("Invalid server bytes in server_currenttransaction") + })?; + + ( + OutgoingKind::Normal(ServerName::parse(server).map_err(|_| { + Error::bad_database("Invalid server string in server_currenttransaction") + })?), + if value.is_empty() { + SendingEventType::Pdu(event.to_vec()) + } else { + SendingEventType::Edu(value) + }, + ) + }) +} diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs index 791e2498..86689f85 100644 --- a/src/database/key_value/users.rs +++ b/src/database/key_value/users.rs @@ -67,7 +67,7 @@ impl service::users::Data for KeyValueDatabase { } /// Returns an iterator over all users on this homeserver. 
- fn iter(&self) -> Box>>> { + fn iter<'a>(&'a self) -> Box>> + 'a> { Box::new(self.userid_password.iter().map(|(bytes, _)| { UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("User ID in userid_password is invalid unicode.") @@ -83,33 +83,11 @@ impl service::users::Data for KeyValueDatabase { let users: Vec = self .userid_password .iter() - .filter_map(|(username, pw)| self.get_username_with_valid_password(&username, &pw)) + .filter_map(|(username, pw)| get_username_with_valid_password(&username, &pw)) .collect(); Ok(users) } - /// Will only return with Some(username) if the password was not empty and the - /// username could be successfully parsed. - /// If utils::string_from_bytes(...) returns an error that username will be skipped - /// and the error will be logged. - fn get_username_with_valid_password(&self, username: &[u8], password: &[u8]) -> Option { - // A valid password is not empty - if password.is_empty() { - None - } else { - match utils::string_from_bytes(username) { - Ok(u) => Some(u), - Err(e) => { - warn!( - "Failed to parse username while calling get_local_users(): {}", - e.to_string() - ); - None - } - } - } - } - /// Returns the password hash for the given user. fn password_hash(&self, user_id: &UserId) -> Result> { self.userid_password @@ -281,7 +259,7 @@ impl service::users::Data for KeyValueDatabase { fn all_device_ids<'a>( &'a self, user_id: &UserId, - ) -> Box>>> { + ) -> Box>> + 'a> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); // All devices have metadata @@ -626,7 +604,7 @@ impl service::users::Data for KeyValueDatabase { user_or_room_id: &str, from: u64, to: Option, - ) -> Box>>> { + ) -> Box>> + 'a> { let mut prefix = user_or_room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -906,7 +884,7 @@ impl service::users::Data for KeyValueDatabase { fn all_devices_metadata<'a>( &'a self, user_id: &UserId, - ) -> Box>> { + ) -> Box> + 'a> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); @@ -956,3 +934,26 @@ impl service::users::Data for KeyValueDatabase { } } } + +/// Will only return with Some(username) if the password was not empty and the +/// username could be successfully parsed. +/// If utils::string_from_bytes(...) returns an error that username will be skipped +/// and the error will be logged. +fn get_username_with_valid_password(username: &[u8], password: &[u8]) -> Option { + // A valid password is not empty + if password.is_empty() { + None + } else { + match utils::string_from_bytes(username) { + Ok(u) => Some(u), + Err(e) => { + warn!( + "Failed to parse username while calling get_local_users(): {}", + e.to_string() + ); + None + } + } + } +} + diff --git a/src/database/mod.rs b/src/database/mod.rs index c4e64af8..191cd62f 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -166,19 +166,6 @@ pub struct KeyValueDatabase { pub(super) shortstatekey_cache: Mutex>, pub(super) our_real_users_cache: RwLock, Arc>>>>, pub(super) appservice_in_room_cache: RwLock, HashMap>>, - pub(super) lazy_load_waiting: - Mutex, Box, Box, u64), HashSet>>>, - pub(super) stateinfo_cache: Mutex< - LruCache< - u64, - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - >, - >, pub(super) lasttimelinecount_cache: Mutex, u64>>, } @@ -279,10 +266,7 @@ impl KeyValueDatabase { eprintln!("ERROR: Max request size is less than 1KB. 
Please increase it."); } - let (admin_sender, admin_receiver) = mpsc::unbounded_channel(); - let (sending_sender, sending_receiver) = mpsc::unbounded_channel(); - - let db = Arc::new(Self { + let db_raw = Box::new(Self { _db: builder.clone(), userid_password: builder.open_tree("userid_password")?, userid_displayname: builder.open_tree("userid_displayname")?, @@ -399,14 +383,12 @@ impl KeyValueDatabase { )), our_real_users_cache: RwLock::new(HashMap::new()), appservice_in_room_cache: RwLock::new(HashMap::new()), - lazy_load_waiting: Mutex::new(HashMap::new()), - stateinfo_cache: Mutex::new(LruCache::new( - (100.0 * config.conduit_cache_capacity_modifier) as usize, - )), lasttimelinecount_cache: Mutex::new(HashMap::new()), }); - let services_raw = Box::new(Services::build(Arc::clone(&db), config)?); + let db = Box::leak(db_raw); + + let services_raw = Box::new(Services::build(db, config)?); // This is the first and only time we initialize the SERVICE static *SERVICES.write().unwrap() = Some(Box::leak(services_raw)); @@ -851,8 +833,6 @@ impl KeyValueDatabase { // This data is probably outdated db.presenceid_presence.clear()?; - services().admin.start_handler(admin_receiver); - // Set emergency access for the conduit user match set_emergency_access() { Ok(pwd_set) => { @@ -869,19 +849,11 @@ impl KeyValueDatabase { } }; - services().sending.start_handler(sending_receiver); - Self::start_cleanup_task().await; Ok(()) } - #[cfg(feature = "conduit_bin")] - pub async fn on_shutdown() { - info!(target: "shutdown-sync", "Received shutdown notification, notifying sync helpers..."); - services().globals.rotate.fire(); - } - #[tracing::instrument(skip(self))] pub fn flush(&self) -> Result<()> { let start = std::time::Instant::now(); diff --git a/src/lib.rs b/src/lib.rs index 0afc75f1..9c397c08 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -21,6 +21,7 @@ pub use api::ruma_wrapper::{Ruma, RumaResponse}; pub use config::Config; pub use service::{pdu::PduEvent, Services}; pub use utils::error::{Error, Result}; +pub use database::KeyValueDatabase; pub static SERVICES: RwLock> = RwLock::new(None); diff --git a/src/main.rs b/src/main.rs index d5b2731e..71eaa660 100644 --- a/src/main.rs +++ b/src/main.rs @@ -17,6 +17,7 @@ use axum::{ Router, }; use axum_server::{bind, bind_rustls, tls_rustls::RustlsConfig, Handle as ServerHandle}; +use conduit::api::{client_server, server_server}; use figment::{ providers::{Env, Format, Toml}, Figment, @@ -34,7 +35,7 @@ use tower_http::{ trace::TraceLayer, ServiceBuilderExt as _, }; -use tracing::warn; +use tracing::{warn, info}; use tracing_subscriber::{prelude::*, EnvFilter}; pub use conduit::*; // Re-export everything from the library crate @@ -69,7 +70,7 @@ async fn main() { config.warn_deprecated(); - if let Err(e) = KeyValueDatabase::load_or_create(&config).await { + if let Err(e) = KeyValueDatabase::load_or_create(config).await { eprintln!( "The database couldn't be loaded or created. 
The following error occured: {}", e @@ -77,6 +78,8 @@ async fn main() { std::process::exit(1); }; + let config = &services().globals.config; + let start = async { run_server().await.unwrap(); }; @@ -119,7 +122,7 @@ async fn main() { } async fn run_server() -> io::Result<()> { - let config = DB.globals.config; + let config = &services().globals.config; let addr = SocketAddr::from((config.address, config.port)); let x_requested_with = HeaderName::from_static("x-requested-with"); @@ -156,8 +159,7 @@ async fn run_server() -> io::Result<()> { header::AUTHORIZATION, ]) .max_age(Duration::from_secs(86400)), - ) - .add_extension(db.clone()); + ); let app = routes().layer(middlewares).into_make_service(); let handle = ServerHandle::new(); @@ -174,8 +176,9 @@ async fn run_server() -> io::Result<()> { } } - // After serve exits and before exiting, shutdown the DB - Database::on_shutdown(db).await; + // On shutdown + info!(target: "shutdown-sync", "Received shutdown notification, notifying sync helpers..."); + services().globals.rotate.fire(); Ok(()) } diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index 60a53080..975c8203 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -13,7 +13,7 @@ use std::{collections::HashMap, sync::Arc}; use crate::{Result}; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 8725e674..2c776611 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -172,74 +172,82 @@ pub struct Service { } impl Service { - pub fn start_handler(&self, mut receiver: mpsc::UnboundedReceiver) { - tokio::spawn(async move { - // TODO: Use futures when we have long admin commands - //let mut futures = FuturesUnordered::new(); + pub fn build() -> Arc { + let (sender, receiver) = mpsc::unbounded_channel(); + let self1 = Arc::new(Self { sender }); + let self2 = Arc::clone(&self1); - let conduit_user = - UserId::parse(format!("@conduit:{}", services().globals.server_name())) - .expect("@conduit:server_name is valid"); + tokio::spawn(async move { self2.start_handler(receiver).await; }); - let conduit_room = services() + self1 + } + + async fn start_handler(&self, mut receiver: mpsc::UnboundedReceiver) { + // TODO: Use futures when we have long admin commands + //let mut futures = FuturesUnordered::new(); + + let conduit_user = + UserId::parse(format!("@conduit:{}", services().globals.server_name())) + .expect("@conduit:server_name is valid"); + + let conduit_room = services() + .rooms + .alias + .resolve_local_alias( + format!("#admins:{}", services().globals.server_name()) + .as_str() + .try_into() + .expect("#admins:server_name is a valid room alias"), + ) + .expect("Database data for admin room alias must be valid") + .expect("Admin room must exist"); + + let send_message = |message: RoomMessageEventContent, + mutex_lock: &MutexGuard<'_, ()>| { + services() .rooms - .alias - .resolve_local_alias( - format!("#admins:{}", services().globals.server_name()) - .as_str() - .try_into() - .expect("#admins:server_name is a valid room alias"), + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomMessage, + content: to_raw_value(&message) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: None, + redacts: None, + }, + &conduit_user, + &conduit_room, + mutex_lock, ) - .expect("Database data for admin room alias must be valid") - .expect("Admin room must exist"); + .unwrap(); 
+ }; - let send_message = |message: RoomMessageEventContent, - mutex_lock: &MutexGuard<'_, ()>| { - services() - .rooms - .timeline - .build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMessage, - content: to_raw_value(&message) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: None, - redacts: None, - }, - &conduit_user, - &conduit_room, - mutex_lock, - ) - .unwrap(); - }; - - loop { - tokio::select! { - Some(event) = receiver.recv() => { - let message_content = match event { - AdminRoomEvent::SendMessage(content) => content, - AdminRoomEvent::ProcessMessage(room_message) => self.process_admin_message(room_message).await - }; - - let mutex_state = Arc::clone( - services().globals - .roomid_mutex_state - .write() - .unwrap() - .entry(conduit_room.to_owned()) - .or_default(), - ); - - let state_lock = mutex_state.lock().await; - - send_message(message_content, &state_lock); - - drop(state_lock); - } + loop { + tokio::select! { + Some(event) = receiver.recv() => { + let message_content = match event { + AdminRoomEvent::SendMessage(content) => content, + AdminRoomEvent::ProcessMessage(room_message) => self.process_admin_message(room_message).await + }; + + let mutex_state = Arc::clone( + services().globals + .roomid_mutex_state + .write() + .unwrap() + .entry(conduit_room.to_owned()) + .or_default(), + ); + + let state_lock = mutex_state.lock().await; + + send_message(message_content, &state_lock); + + drop(state_lock); } } - }); + } } pub fn process_message(&self, room_message: String) { @@ -382,9 +390,7 @@ impl Service { } } AdminCommand::ListRooms => { - todo!(); - /* - let room_ids = services().rooms.iter_ids(); + let room_ids = services().rooms.metadata.iter_ids(); let output = format!( "Rooms:\n{}", room_ids @@ -393,6 +399,7 @@ impl Service { + "\tMembers: " + &services() .rooms + .state_cache .room_joined_count(&id) .ok() .flatten() @@ -402,7 +409,6 @@ impl Service { .join("\n") ); RoomMessageEventContent::text_plain(output) - */ } AdminCommand::ListLocalUsers => match services().users.list_local_users() { Ok(users) => { @@ -648,11 +654,11 @@ impl Service { )) } AdminCommand::DisableRoom { room_id } => { - services().rooms.metadata.disable_room(&room_id, true); + services().rooms.metadata.disable_room(&room_id, true)?; RoomMessageEventContent::text_plain("Room disabled.") } AdminCommand::EnableRoom { room_id } => { - services().rooms.metadata.disable_room(&room_id, false); + services().rooms.metadata.disable_room(&room_id, false)?; RoomMessageEventContent::text_plain("Room enabled.") } AdminCommand::DeactivateUser { diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index ad5ab4aa..20ba08ad 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -6,7 +6,7 @@ pub use data::Data; use crate::Result; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 6e03c156..477b269d 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -35,7 +35,7 @@ type SyncHandle = ( ); pub struct Service { - pub db: Arc, + pub db: &'static dyn Data, pub actual_destination_cache: Arc>, // actual_destination, host pub tls_name_override: Arc>, @@ -90,14 +90,14 @@ impl Default for RotationHandler { } impl Service { - pub fn load(db: Arc, config: Config) -> Result { + pub fn load(db: &'static dyn Data, config: Config) -> Result { let keypair = db.load_keypair(); let keypair = match keypair { Ok(k) => 
k, Err(e) => { error!("Keypair invalid. Deleting..."); - db.remove_keypair(); + db.remove_keypair()?; return Err(e); } }; diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs index 31652d2f..5d0ad599 100644 --- a/src/service/key_backups/mod.rs +++ b/src/service/key_backups/mod.rs @@ -12,7 +12,7 @@ use ruma::{ use std::{collections::BTreeMap, sync::Arc}; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index 61a733a7..29648577 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -16,7 +16,7 @@ pub struct FileMeta { } pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { diff --git a/src/service/mod.rs b/src/service/mod.rs index e1c6f7a4..e8696e79 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -29,11 +29,11 @@ pub struct Services { pub uiaa: uiaa::Service, pub users: users::Service, pub account_data: account_data::Service, - pub admin: admin::Service, + pub admin: Arc, pub globals: globals::Service, pub key_backups: key_backups::Service, pub media: media::Service, - pub sending: sending::Service, + pub sending: Arc, } impl Services { @@ -47,60 +47,60 @@ impl Services { + account_data::Data + globals::Data + key_backups::Data - + media::Data, + + media::Data + + sending::Data + + 'static >( - db: Arc, + db: &'static D, config: Config, ) -> Result { Ok(Self { - appservice: appservice::Service { db: db.clone() }, - pusher: pusher::Service { db: db.clone() }, + appservice: appservice::Service { db }, + pusher: pusher::Service { db }, rooms: rooms::Service { - alias: rooms::alias::Service { db: db.clone() }, - auth_chain: rooms::auth_chain::Service { db: db.clone() }, - directory: rooms::directory::Service { db: db.clone() }, + alias: rooms::alias::Service { db }, + auth_chain: rooms::auth_chain::Service { db }, + directory: rooms::directory::Service { db }, edus: rooms::edus::Service { - presence: rooms::edus::presence::Service { db: db.clone() }, - read_receipt: rooms::edus::read_receipt::Service { db: db.clone() }, - typing: rooms::edus::typing::Service { db: db.clone() }, + presence: rooms::edus::presence::Service { db }, + read_receipt: rooms::edus::read_receipt::Service { db }, + typing: rooms::edus::typing::Service { db }, }, event_handler: rooms::event_handler::Service, lazy_loading: rooms::lazy_loading::Service { - db: db.clone(), + db, lazy_load_waiting: Mutex::new(HashMap::new()), }, - metadata: rooms::metadata::Service { db: db.clone() }, - outlier: rooms::outlier::Service { db: db.clone() }, - pdu_metadata: rooms::pdu_metadata::Service { db: db.clone() }, - search: rooms::search::Service { db: db.clone() }, - short: rooms::short::Service { db: db.clone() }, - state: rooms::state::Service { db: db.clone() }, - state_accessor: rooms::state_accessor::Service { db: db.clone() }, - state_cache: rooms::state_cache::Service { db: db.clone() }, + metadata: rooms::metadata::Service { db }, + outlier: rooms::outlier::Service { db }, + pdu_metadata: rooms::pdu_metadata::Service { db }, + search: rooms::search::Service { db }, + short: rooms::short::Service { db }, + state: rooms::state::Service { db }, + state_accessor: rooms::state_accessor::Service { db }, + state_cache: rooms::state_cache::Service { db }, state_compressor: rooms::state_compressor::Service { - db: db.clone(), + db, stateinfo_cache: Mutex::new(LruCache::new( (100.0 * config.conduit_cache_capacity_modifier) as usize, )), }, timeline: 
rooms::timeline::Service { - db: db.clone(), + db, lasttimelinecount_cache: Mutex::new(HashMap::new()), }, - user: rooms::user::Service { db: db.clone() }, - }, - transaction_ids: transaction_ids::Service { db: db.clone() }, - uiaa: uiaa::Service { db: db.clone() }, - users: users::Service { db: db.clone() }, - account_data: account_data::Service { db: db.clone() }, - admin: admin::Service { sender: todo!() }, - globals: globals::Service::load(db.clone(), config)?, - key_backups: key_backups::Service { db: db.clone() }, - media: media::Service { db: db.clone() }, - sending: sending::Service { - maximum_requests: todo!(), - sender: todo!(), + user: rooms::user::Service { db }, }, + transaction_ids: transaction_ids::Service { db }, + uiaa: uiaa::Service { db }, + users: users::Service { db }, + account_data: account_data::Service { db }, + admin: admin::Service::build(), + key_backups: key_backups::Service { db }, + media: media::Service { db }, + sending: sending::Service::build(db, &config), + + globals: globals::Service::load(db, config)?, }) } } diff --git a/src/service/pusher/data.rs b/src/service/pusher/data.rs index 243b77f7..cb8768d8 100644 --- a/src/service/pusher/data.rs +++ b/src/service/pusher/data.rs @@ -7,9 +7,9 @@ use ruma::{ pub trait Data: Send + Sync { fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()>; - fn get_pusher(&self, senderkey: &[u8]) -> Result>; + fn get_pusher(&self, sender: &UserId, pushkey: &str) -> Result>; fn get_pushers(&self, sender: &UserId) -> Result>; - fn get_pusher_senderkeys<'a>(&'a self, sender: &UserId) -> Box>>; + fn get_pushkeys<'a>(&'a self, sender: &UserId) -> Box> + 'a>; } diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 78d5f26c..3b12f38b 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -26,7 +26,7 @@ use std::{fmt::Debug, mem}; use tracing::{error, info, warn}; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { @@ -34,19 +34,19 @@ impl Service { self.db.set_pusher(sender, pusher) } - pub fn get_pusher(&self, senderkey: &[u8]) -> Result> { - self.db.get_pusher(senderkey) + pub fn get_pusher(&self, sender: &UserId, pushkey: &str) -> Result> { + self.db.get_pusher(sender, pushkey) } pub fn get_pushers(&self, sender: &UserId) -> Result> { self.db.get_pushers(sender) } - pub fn get_pusher_senderkeys<'a>( + pub fn get_pushkeys<'a>( &'a self, sender: &UserId, - ) -> impl Iterator> + 'a { - self.db.get_pusher_senderkeys(sender) + ) -> Box>> { + self.db.get_pushkeys(sender) } #[tracing::instrument(skip(self, destination, request))] diff --git a/src/service/rooms/alias/data.rs b/src/service/rooms/alias/data.rs index 90205f93..6299add7 100644 --- a/src/service/rooms/alias/data.rs +++ b/src/service/rooms/alias/data.rs @@ -12,8 +12,8 @@ pub trait Data: Send + Sync { fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result>>; /// Returns all local aliases that point to the given room - fn local_aliases_for_room( - &self, + fn local_aliases_for_room<'a>( + &'a self, room_id: &RoomId, - ) -> Box>>>; + ) -> Box>> + 'a>; } diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index 6a3cf4e0..e76589ab 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -7,7 +7,7 @@ use crate::Result; use ruma::{RoomAliasId, RoomId}; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { @@ -30,7 +30,7 @@ impl Service { pub fn local_aliases_for_room<'a>( &'a self, room_id: &RoomId, - ) -> 
impl Iterator>> + 'a { + ) -> Box>> + 'a> { self.db.local_aliases_for_room(room_id) } } diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs index ed06385d..d3b6e401 100644 --- a/src/service/rooms/auth_chain/mod.rs +++ b/src/service/rooms/auth_chain/mod.rs @@ -11,7 +11,7 @@ use tracing::log::warn; use crate::{services, Error, Result}; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { diff --git a/src/service/rooms/directory/data.rs b/src/service/rooms/directory/data.rs index fb523cf8..320c6db1 100644 --- a/src/service/rooms/directory/data.rs +++ b/src/service/rooms/directory/data.rs @@ -12,5 +12,5 @@ pub trait Data: Send + Sync { fn is_public_room(&self, room_id: &RoomId) -> Result; /// Returns the unsorted public room directory - fn public_rooms(&self) -> Box>>>; + fn public_rooms<'a>(&'a self) -> Box>> + 'a>; } diff --git a/src/service/rooms/directory/mod.rs b/src/service/rooms/directory/mod.rs index e85afef6..9e5e8156 100644 --- a/src/service/rooms/directory/mod.rs +++ b/src/service/rooms/directory/mod.rs @@ -7,7 +7,7 @@ use ruma::RoomId; use crate::Result; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { diff --git a/src/service/rooms/edus/mod.rs b/src/service/rooms/edus/mod.rs index 8552363e..cf7a3591 100644 --- a/src/service/rooms/edus/mod.rs +++ b/src/service/rooms/edus/mod.rs @@ -2,7 +2,7 @@ pub mod presence; pub mod read_receipt; pub mod typing; -pub trait Data: presence::Data + read_receipt::Data + typing::Data {} +pub trait Data: presence::Data + read_receipt::Data + typing::Data + 'static {} pub struct Service { pub presence: presence::Service, diff --git a/src/service/rooms/edus/presence/mod.rs b/src/service/rooms/edus/presence/mod.rs index 636bd910..9cce9d8c 100644 --- a/src/service/rooms/edus/presence/mod.rs +++ b/src/service/rooms/edus/presence/mod.rs @@ -7,7 +7,7 @@ use ruma::{events::presence::PresenceEvent, RoomId, UserId}; use crate::Result; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { diff --git a/src/service/rooms/edus/read_receipt/data.rs b/src/service/rooms/edus/read_receipt/data.rs index 734c68d5..9a02ee40 100644 --- a/src/service/rooms/edus/read_receipt/data.rs +++ b/src/service/rooms/edus/read_receipt/data.rs @@ -11,8 +11,8 @@ pub trait Data: Send + Sync { ) -> Result<()>; /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. - fn readreceipts_since( - &self, + fn readreceipts_since<'a>( + &'a self, room_id: &RoomId, since: u64, ) -> Box< @@ -22,7 +22,7 @@ pub trait Data: Send + Sync { u64, Raw, )>, - >, + > + 'a, >; /// Sets a private read marker at `count`. 
diff --git a/src/service/rooms/edus/read_receipt/mod.rs b/src/service/rooms/edus/read_receipt/mod.rs index 35fee1a5..8d6eaafd 100644 --- a/src/service/rooms/edus/read_receipt/mod.rs +++ b/src/service/rooms/edus/read_receipt/mod.rs @@ -7,7 +7,7 @@ use crate::Result; use ruma::{events::receipt::ReceiptEvent, serde::Raw, RoomId, UserId}; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { diff --git a/src/service/rooms/edus/typing/mod.rs b/src/service/rooms/edus/typing/mod.rs index 91892df6..fc06fe4a 100644 --- a/src/service/rooms/edus/typing/mod.rs +++ b/src/service/rooms/edus/typing/mod.rs @@ -7,7 +7,7 @@ use ruma::{events::SyncEphemeralRoomEvent, RoomId, UserId}; use crate::Result; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 12320388..0c0bd2ce 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -256,7 +256,7 @@ impl Service { #[tracing::instrument(skip(self, create_event, value, pub_key_map))] fn handle_outlier_pdu<'a>( - &self, + &'a self, origin: &'a ServerName, create_event: &'a PduEvent, event_id: &'a EventId, @@ -1015,7 +1015,7 @@ impl Service { /// d. TODO: Ask other servers over federation? #[tracing::instrument(skip_all)] pub(crate) fn fetch_and_handle_outliers<'a>( - &self, + &'a self, origin: &'a ServerName, events: &'a [Arc], create_event: &'a PduEvent, diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index a01ce9ba..2ed0bed0 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -10,9 +10,9 @@ use ruma::{DeviceId, RoomId, UserId}; use crate::Result; pub struct Service { - db: Arc, + pub db: &'static dyn Data, - lazy_load_waiting: + pub lazy_load_waiting: Mutex, Box, Box, u64), HashSet>>>, } @@ -67,7 +67,7 @@ impl Service { user_id, device_id, room_id, - &mut user_ids.iter().map(|&u| &*u), + &mut user_ids.iter().map(|u| &**u), )?; } else { // Ignore diff --git a/src/service/rooms/metadata/data.rs b/src/service/rooms/metadata/data.rs index 27e7eb98..df416dac 100644 --- a/src/service/rooms/metadata/data.rs +++ b/src/service/rooms/metadata/data.rs @@ -3,6 +3,7 @@ use ruma::RoomId; pub trait Data: Send + Sync { fn exists(&self, room_id: &RoomId) -> Result; + fn iter_ids<'a>(&'a self) -> Box>> + 'a>; fn is_disabled(&self, room_id: &RoomId) -> Result; fn disable_room(&self, room_id: &RoomId, disabled: bool) -> Result<()>; } diff --git a/src/service/rooms/metadata/mod.rs b/src/service/rooms/metadata/mod.rs index b6cccd15..df9f40a8 100644 --- a/src/service/rooms/metadata/mod.rs +++ b/src/service/rooms/metadata/mod.rs @@ -7,7 +7,7 @@ use ruma::RoomId; use crate::Result; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { @@ -17,6 +17,10 @@ impl Service { self.db.exists(room_id) } + pub fn iter_ids<'a>(&'a self) -> Box>> + 'a> { + self.db.iter_ids() + } + pub fn is_disabled(&self, room_id: &RoomId) -> Result { self.db.is_disabled(room_id) } diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs index 6404d8a1..443abd19 100644 --- a/src/service/rooms/outlier/mod.rs +++ b/src/service/rooms/outlier/mod.rs @@ -7,7 +7,7 @@ use ruma::{signatures::CanonicalJsonObject, EventId}; use crate::{PduEvent, Result}; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { diff --git a/src/service/rooms/pdu_metadata/mod.rs 
b/src/service/rooms/pdu_metadata/mod.rs index 70443389..b816678c 100644 --- a/src/service/rooms/pdu_metadata/mod.rs +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -7,7 +7,7 @@ use ruma::{EventId, RoomId}; use crate::Result; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { diff --git a/src/service/rooms/search/data.rs b/src/service/rooms/search/data.rs index 59652e02..bd7d61bb 100644 --- a/src/service/rooms/search/data.rs +++ b/src/service/rooms/search/data.rs @@ -2,11 +2,11 @@ use crate::Result; use ruma::RoomId; pub trait Data: Send + Sync { - fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: String) -> Result<()>; + fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()>; fn search_pdus<'a>( &'a self, room_id: &RoomId, search_string: &str, - ) -> Result>>, Vec)>>; + ) -> Result>+ 'a>, Vec)>>; } diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index 0ef96342..1d8d01e1 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -7,7 +7,7 @@ use crate::Result; use ruma::RoomId; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { @@ -16,7 +16,7 @@ impl Service { &self, shortroomid: u64, pdu_id: &[u8], - message_body: String, + message_body: &str, ) -> Result<()> { self.db.index_pdu(shortroomid, pdu_id, message_body) } diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index efa4362a..d847dea2 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -7,7 +7,7 @@ use ruma::{events::StateEventType, EventId, RoomId}; use crate::{Result}; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 2dff4b71..614236ca 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -23,7 +23,7 @@ use crate::{services, utils::calculate_hash, Error, PduEvent, Result}; use super::state_compressor::CompressedStateEvent; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { @@ -33,7 +33,7 @@ impl Service { room_id: &RoomId, shortstatehash: u64, statediffnew: HashSet, - statediffremoved: HashSet, + _statediffremoved: HashSet, ) -> Result<()> { let mutex_state = Arc::clone( services() @@ -102,7 +102,7 @@ impl Service { services().rooms.state_cache.update_joined_count(room_id)?; - self.db.set_room_state(room_id, shortstatehash, &state_lock); + self.db.set_room_state(room_id, shortstatehash, &state_lock)?; drop(state_lock); diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index e179d70f..1a9c4a9e 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -10,7 +10,7 @@ use ruma::{events::StateEventType, EventId, RoomId}; use crate::{PduEvent, Result}; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 608dbcaa..cf4c6655 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -17,7 +17,7 @@ use ruma::{ use crate::{services, Error, Result}; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { @@ -112,7 +112,7 @@ impl Service { }; // Copy direct chat flag - if let Some(mut direct_event) = services() + if let Some(direct_event) = services() .account_data .get( None, @@ -125,7 +125,7 
@@ impl Service { }) }) { - let direct_event = direct_event?; + let mut direct_event = direct_event?; let mut room_ids_updated = false; for room_ids in direct_event.content.0.values_mut() { diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index f7c6dba0..b927cb72 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -14,7 +14,7 @@ use crate::{services, utils, Result}; use self::data::StateDiff; pub struct Service { - db: Arc, + pub db: &'static dyn Data, pub stateinfo_cache: Mutex< LruCache< diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index 4ae8ce96..095731ca 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -60,7 +60,7 @@ pub trait Data: Send + Sync { user_id: &UserId, room_id: &RoomId, since: u64, - ) -> Result, PduEvent)>>>>; + ) -> Result, PduEvent)>> + 'a>>; /// Returns an iterator over all events and their tokens in a room that happened before the /// event with id `until` in reverse-chronological order. @@ -69,14 +69,14 @@ pub trait Data: Send + Sync { user_id: &UserId, room_id: &RoomId, until: u64, - ) -> Result, PduEvent)>>>>; + ) -> Result, PduEvent)>> + 'a>>; fn pdus_after<'a>( &'a self, user_id: &UserId, room_id: &RoomId, from: u64, - ) -> Result, PduEvent)>>>>; + ) -> Result, PduEvent)>> + 'a>>; fn increment_notification_counts( &self, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 73f1451b..01c54a3a 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -36,9 +36,9 @@ use crate::{ use super::state_compressor::CompressedStateEvent; pub struct Service { - db: Arc, + pub db: &'static dyn Data, - pub(super) lasttimelinecount_cache: Mutex, u64>>, + pub lasttimelinecount_cache: Mutex, u64>>, } impl Service { @@ -253,10 +253,10 @@ impl Service { .rooms .state_cache .get_our_real_users(&pdu.room_id)? 
- .into_iter() + .iter() { // Don't notify the user of their own events - if &user == &pdu.sender { + if user == &pdu.sender { continue; } @@ -297,20 +297,20 @@ impl Service { } if notify { - notifies.push(user); + notifies.push(user.clone()); } if highlight { - highlights.push(user); + highlights.push(user.clone()); } - for senderkey in services().pusher.get_pusher_senderkeys(&user) { - services().sending.send_push_pdu(&*pdu_id, senderkey)?; + for push_key in services().pusher.get_pushkeys(&user) { + services().sending.send_push_pdu(&*pdu_id, &user, push_key?)?; } } self.db - .increment_notification_counts(&pdu.room_id, notifies, highlights); + .increment_notification_counts(&pdu.room_id, notifies, highlights)?; match pdu.kind { RoomEventType::RoomRedaction => { @@ -365,7 +365,7 @@ impl Service { services() .rooms .search - .index_pdu(shortroomid, &pdu_id, body)?; + .index_pdu(shortroomid, &pdu_id, &body)?; let admin_room = services().rooms.alias.resolve_local_alias( <&RoomAliasId>::try_from( @@ -398,7 +398,7 @@ impl Service { { services() .sending - .send_pdu_appservice(&appservice.0, &pdu_id)?; + .send_pdu_appservice(appservice.0, pdu_id.clone())?; continue; } @@ -422,7 +422,7 @@ impl Service { if state_key_uid == &appservice_uid { services() .sending - .send_pdu_appservice(&appservice.0, &pdu_id)?; + .send_pdu_appservice(appservice.0, pdu_id.clone())?; continue; } } @@ -475,7 +475,7 @@ impl Service { { services() .sending - .send_pdu_appservice(&appservice.0, &pdu_id)?; + .send_pdu_appservice(appservice.0, pdu_id.clone())?; } } } @@ -565,7 +565,7 @@ impl Service { } } - let pdu = PduEvent { + let mut pdu = PduEvent { event_id: ruma::event_id!("$thiswillbefilledinlater").into(), room_id: room_id.to_owned(), sender: sender.to_owned(), diff --git a/src/service/rooms/user/data.rs b/src/service/rooms/user/data.rs index fcaff5ac..7b7841fb 100644 --- a/src/service/rooms/user/data.rs +++ b/src/service/rooms/user/data.rs @@ -20,5 +20,5 @@ pub trait Data: Send + Sync { fn get_shared_rooms<'a>( &'a self, users: Vec>, - ) -> Result>>>>; + ) -> Result>> + 'a>>; } diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs index 1caa4b3f..0148399b 100644 --- a/src/service/rooms/user/mod.rs +++ b/src/service/rooms/user/mod.rs @@ -7,7 +7,7 @@ use ruma::{RoomId, UserId}; use crate::Result; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { diff --git a/src/service/sending/data.rs b/src/service/sending/data.rs new file mode 100644 index 00000000..2e574e23 --- /dev/null +++ b/src/service/sending/data.rs @@ -0,0 +1,29 @@ +use ruma::ServerName; + +use crate::Result; + +use super::{OutgoingKind, SendingEventType}; + +pub trait Data: Send + Sync { + fn active_requests<'a>( + &'a self, + ) -> Box, OutgoingKind, SendingEventType)>> + 'a>; + fn active_requests_for<'a>( + &'a self, + outgoing_kind: &OutgoingKind, + ) -> Box, SendingEventType)>> + 'a>; + fn delete_active_request(&self, key: Vec) -> Result<()>; + fn delete_all_active_requests_for(&self, outgoing_kind: &OutgoingKind) -> Result<()>; + fn delete_all_requests_for(&self, outgoing_kind: &OutgoingKind) -> Result<()>; + fn queue_requests( + &self, + requests: &[(&OutgoingKind, SendingEventType)], + ) -> Result>>; + fn queued_requests<'a>( + &'a self, + outgoing_kind: &OutgoingKind, + ) -> Box)>> + 'a>; + fn mark_as_active(&self, events: &[(SendingEventType, Vec)]) -> Result<()>; + fn set_latest_educount(&self, server_name: &ServerName, educount: u64) -> Result<()>; + fn get_latest_educount(&self, server_name: 
&ServerName) -> Result; +} diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index e5e8cffd..cb16e70d 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -1,15 +1,19 @@ +mod data; + +pub use data::Data; + use std::{ collections::{BTreeMap, HashMap, HashSet}, fmt::Debug, sync::Arc, - time::{Duration, Instant}, + time::{Duration, Instant}, iter, }; use crate::{ api::{appservice_server, server_server}, services, utils::{self, calculate_hash}, - Error, PduEvent, Result, + Error, PduEvent, Result, Config, }; use federation::transactions::send_transaction_message; use futures_util::{stream::FuturesUnordered, StreamExt}; @@ -40,7 +44,7 @@ use tracing::{error, warn}; #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum OutgoingKind { Appservice(String), - Push(Vec, Vec), // user and pushkey + Push(Box, String), // user and pushkey Normal(Box), } @@ -55,9 +59,9 @@ impl OutgoingKind { } OutgoingKind::Push(user, pushkey) => { let mut p = b"$".to_vec(); - p.extend_from_slice(user); + p.extend_from_slice(user.as_bytes()); p.push(0xff); - p.extend_from_slice(pushkey); + p.extend_from_slice(pushkey.as_bytes()); p } OutgoingKind::Normal(server) => { @@ -74,14 +78,16 @@ impl OutgoingKind { #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum SendingEventType { - Pdu(Vec), - Edu(Vec), + Pdu(Vec), // pduid + Edu(Vec), // pdu json } pub struct Service { + db: &'static dyn Data, + /// The state for a given state hash. pub(super) maximum_requests: Arc, - pub sender: mpsc::UnboundedSender<(Vec, Vec)>, + pub sender: mpsc::UnboundedSender<(OutgoingKind, SendingEventType, Vec)>, } enum TransactionStatus { @@ -91,131 +97,113 @@ enum TransactionStatus { } impl Service { - pub fn start_handler(&self, mut receiver: mpsc::UnboundedReceiver<(Vec, Vec)>) { + pub fn build(db: &'static dyn Data, config: &Config) -> Arc { + let (sender, receiver) = mpsc::unbounded_channel(); + + let self1 = Arc::new(Self { db, sender, maximum_requests: Arc::new(Semaphore::new(config.max_concurrent_requests as usize)) }); + let self2 = Arc::clone(&self1); + tokio::spawn(async move { - let mut futures = FuturesUnordered::new(); + self2.start_handler(receiver).await.unwrap(); + }); - let mut current_transaction_status = HashMap::, TransactionStatus>::new(); + self1 + } - // Retry requests we could not finish yet - let mut initial_transactions = HashMap::>::new(); + async fn start_handler(&self, mut receiver: mpsc::UnboundedReceiver<(OutgoingKind, SendingEventType, Vec)>) -> Result<()> { + let mut futures = FuturesUnordered::new(); - for (key, outgoing_kind, event) in services() - .sending - .servercurrentevent_data - .iter() - .filter_map(|(key, v)| { - Self::parse_servercurrentevent(&key, v) - .ok() - .map(|(k, e)| (key, k, e)) - }) - { - let entry = initial_transactions - .entry(outgoing_kind.clone()) - .or_insert_with(Vec::new); - - if entry.len() > 30 { - warn!( - "Dropping some current events: {:?} {:?} {:?}", - key, outgoing_kind, event - ); - services() - .sending - .servercurrentevent_data - .remove(&key) - .unwrap(); - continue; - } + let mut current_transaction_status = HashMap::::new(); - entry.push(event); - } + // Retry requests we could not finish yet + let mut initial_transactions = HashMap::>::new(); - for (outgoing_kind, events) in initial_transactions { - current_transaction_status - .insert(outgoing_kind.get_prefix(), TransactionStatus::Running); - futures.push(Self::handle_events(outgoing_kind.clone(), events)); + for (key, outgoing_kind, event) in 
self.db.active_requests().filter_map(|r| r.ok()) + { + let entry = initial_transactions + .entry(outgoing_kind.clone()) + .or_insert_with(Vec::new); + + if entry.len() > 30 { + warn!( + "Dropping some current events: {:?} {:?} {:?}", + key, outgoing_kind, event + ); + self.db.delete_active_request(key)?; + continue; } - loop { - select! { - Some(response) = futures.next() => { - match response { - Ok(outgoing_kind) => { - let prefix = outgoing_kind.get_prefix(); - for (key, _) in services().sending.servercurrentevent_data - .scan_prefix(prefix.clone()) - { - services().sending.servercurrentevent_data.remove(&key).unwrap(); - } - - // Find events that have been added since starting the last request - let new_events: Vec<_> = services().sending.servernameevent_data - .scan_prefix(prefix.clone()) - .filter_map(|(k, v)| { - Self::parse_servercurrentevent(&k, v).ok().map(|ev| (ev, k)) - }) - .take(30) - .collect(); - - // TODO: find edus - - if !new_events.is_empty() { - // Insert pdus we found - for (e, key) in &new_events { - let value = if let SendingEventType::Edu(value) = &e.1 { &**value } else { &[] }; - services().sending.servercurrentevent_data.insert(key, value).unwrap(); - services().sending.servernameevent_data.remove(key).unwrap(); - } - - futures.push( - Self::handle_events( - outgoing_kind.clone(), - new_events.into_iter().map(|(event, _)| event.1).collect(), - ) - ); - } else { - current_transaction_status.remove(&prefix); - } - } - Err((outgoing_kind, _)) => { - current_transaction_status.entry(outgoing_kind.get_prefix()).and_modify(|e| *e = match e { - TransactionStatus::Running => TransactionStatus::Failed(1, Instant::now()), - TransactionStatus::Retrying(n) => TransactionStatus::Failed(*n+1, Instant::now()), - TransactionStatus::Failed(_, _) => { - error!("Request that was not even running failed?!"); - return - }, - }); - } - }; - }, - Some((key, value)) = receiver.recv() => { - if let Ok((outgoing_kind, event)) = Self::parse_servercurrentevent(&key, value) { - if let Ok(Some(events)) = Self::select_events( - &outgoing_kind, - vec![(event, key)], - &mut current_transaction_status, - ) { - futures.push(Self::handle_events(outgoing_kind, events)); + entry.push(event); + } + + for (outgoing_kind, events) in initial_transactions { + current_transaction_status + .insert(outgoing_kind.clone(), TransactionStatus::Running); + futures.push(Self::handle_events(outgoing_kind.clone(), events)); + } + + loop { + select! 
{ + Some(response) = futures.next() => { + match response { + Ok(outgoing_kind) => { + self.db.delete_all_active_requests_for(&outgoing_kind)?; + + // Find events that have been added since starting the last request + let new_events = self.db.queued_requests(&outgoing_kind).filter_map(|r| r.ok()).take(30).collect::>(); + + // TODO: find edus + + if !new_events.is_empty() { + // Insert pdus we found + self.db.mark_as_active(&new_events)?; + + futures.push( + Self::handle_events( + outgoing_kind.clone(), + new_events.into_iter().map(|(event, _)| event).collect(), + ) + ); + } else { + current_transaction_status.remove(&outgoing_kind); } } + Err((outgoing_kind, _)) => { + current_transaction_status.entry(outgoing_kind).and_modify(|e| *e = match e { + TransactionStatus::Running => TransactionStatus::Failed(1, Instant::now()), + TransactionStatus::Retrying(n) => TransactionStatus::Failed(*n+1, Instant::now()), + TransactionStatus::Failed(_, _) => { + error!("Request that was not even running failed?!"); + return + }, + }); + } + }; + }, + Some((outgoing_kind, event, key)) = receiver.recv() => { + if let Ok(Some(events)) = self.select_events( + &outgoing_kind, + vec![(event, key)], + &mut current_transaction_status, + ) { + futures.push(Self::handle_events(outgoing_kind, events)); } } } - }); + } } - #[tracing::instrument(skip(outgoing_kind, new_events, current_transaction_status))] + #[tracing::instrument(skip(self, outgoing_kind, new_events, current_transaction_status))] fn select_events( + &self, outgoing_kind: &OutgoingKind, new_events: Vec<(SendingEventType, Vec)>, // Events we want to send: event and full key - current_transaction_status: &mut HashMap, TransactionStatus>, + current_transaction_status: &mut HashMap, ) -> Result>> { let mut retry = false; let mut allow = true; - let prefix = outgoing_kind.get_prefix(); - let entry = current_transaction_status.entry(prefix.clone()); + let entry = current_transaction_status.entry(outgoing_kind.clone()); entry .and_modify(|e| match e { @@ -247,42 +235,20 @@ impl Service { if retry { // We retry the previous transaction - for (key, value) in services() - .sending - .servercurrentevent_data - .scan_prefix(prefix) - { - if let Ok((_, e)) = Self::parse_servercurrentevent(&key, value) { - events.push(e); - } + for (_, e) in self.db.active_requests_for(outgoing_kind).filter_map(|r| r.ok()) { + events.push(e); } } else { - for (e, full_key) in new_events { - let value = if let SendingEventType::Edu(value) = &e { - &**value - } else { - &[][..] 
- }; - services() - .sending - .servercurrentevent_data - .insert(&full_key, value)?; - - // If it was a PDU we have to unqueue it - // TODO: don't try to unqueue EDUs - services().sending.servernameevent_data.remove(&full_key)?; - + self.db.mark_as_active(&new_events)?; + for (e, _) in new_events { events.push(e); } if let OutgoingKind::Normal(server_name) = outgoing_kind { - if let Ok((select_edus, last_count)) = Self::select_edus(server_name) { + if let Ok((select_edus, last_count)) = self.select_edus(server_name) { events.extend(select_edus.into_iter().map(SendingEventType::Edu)); - services() - .sending - .servername_educount - .insert(server_name.as_bytes(), &last_count.to_be_bytes())?; + self.db.set_latest_educount(server_name, last_count)?; } } } @@ -290,22 +256,15 @@ impl Service { Ok(Some(events)) } - #[tracing::instrument(skip(server))] - pub fn select_edus(server: &ServerName) -> Result<(Vec>, u64)> { + #[tracing::instrument(skip(self, server_name))] + pub fn select_edus(&self, server_name: &ServerName) -> Result<(Vec>, u64)> { // u64: count of last edu - let since = services() - .sending - .servername_educount - .get(server.as_bytes())? - .map_or(Ok(0), |&bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid u64 in servername_educount.")) - })?; + let since = self.db.get_latest_educount(server_name)?; let mut events = Vec::new(); let mut max_edu_count = since; let mut device_list_changes = HashSet::new(); - 'outer: for room_id in services().rooms.server_rooms(server) { + 'outer: for room_id in services().rooms.state_cache.server_rooms(server_name) { let room_id = room_id?; // Look for device list updates in this room device_list_changes.extend( @@ -317,7 +276,7 @@ impl Service { ); // Look for read receipts in this room - for r in services().rooms.edus.readreceipts_since(&room_id, since) { + for r in services().rooms.edus.read_receipt.readreceipts_since(&room_id, since) { let (user_id, count, read_receipt) = r?; if count > max_edu_count { @@ -395,14 +354,12 @@ impl Service { Ok((events, max_edu_count)) } - #[tracing::instrument(skip(self, pdu_id, senderkey))] - pub fn send_push_pdu(&self, pdu_id: &[u8], senderkey: Vec) -> Result<()> { - let mut key = b"$".to_vec(); - key.extend_from_slice(&senderkey); - key.push(0xff); - key.extend_from_slice(pdu_id); - self.servernameevent_data.insert(&key, &[])?; - self.sender.send((key, vec![])).unwrap(); + #[tracing::instrument(skip(self, pdu_id, user, pushkey))] + pub fn send_push_pdu(&self, pdu_id: &[u8], user: &UserId, pushkey: String) -> Result<()> { + let outgoing_kind = OutgoingKind::Push(user.to_owned(), pushkey); + let event = SendingEventType::Pdu(pdu_id.to_owned()); + let keys = self.db.queue_requests(&[(&outgoing_kind, event.clone())])?; + self.sender.send((outgoing_kind, event, keys.into_iter().next().unwrap())).unwrap(); Ok(()) } @@ -413,17 +370,11 @@ impl Service { servers: I, pdu_id: &[u8], ) -> Result<()> { - let mut batch = servers.map(|server| { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(pdu_id); - - self.sender.send((key.clone(), vec![])).unwrap(); - - (key, Vec::new()) - }); - - self.servernameevent_data.insert_batch(&mut batch)?; + let requests = servers.into_iter().map(|server| (OutgoingKind::Normal(server), SendingEventType::Pdu(pdu_id.to_owned()))).collect::>(); + let keys = self.db.queue_requests(&requests.iter().map(|(o, e)| (o, e.clone())).collect::>())?; + for ((outgoing_kind, event), key) in requests.into_iter().zip(keys) { + 
self.sender.send((outgoing_kind.to_owned(), event, key)).unwrap(); + } Ok(()) } @@ -435,23 +386,20 @@ impl Service { serialized: Vec, id: u64, ) -> Result<()> { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(&id.to_be_bytes()); - self.servernameevent_data.insert(&key, &serialized)?; - self.sender.send((key, serialized)).unwrap(); + let outgoing_kind = OutgoingKind::Normal(server.to_owned()); + let event = SendingEventType::Edu(serialized); + let keys = self.db.queue_requests(&[(&outgoing_kind, event.clone())])?; + self.sender.send((outgoing_kind, event, keys.into_iter().next().unwrap())).unwrap(); Ok(()) } #[tracing::instrument(skip(self))] - pub fn send_pdu_appservice(&self, appservice_id: &str, pdu_id: &[u8]) -> Result<()> { - let mut key = b"+".to_vec(); - key.extend_from_slice(appservice_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(pdu_id); - self.servernameevent_data.insert(&key, &[])?; - self.sender.send((key, vec![])).unwrap(); + pub fn send_pdu_appservice(&self, appservice_id: String, pdu_id: Vec) -> Result<()> { + let outgoing_kind = OutgoingKind::Appservice(appservice_id); + let event = SendingEventType::Pdu(pdu_id); + let keys = self.db.queue_requests(&[(&outgoing_kind, event.clone())])?; + self.sender.send((outgoing_kind, event, keys.into_iter().next().unwrap())).unwrap(); Ok(()) } @@ -460,18 +408,8 @@ impl Service { /// Used for instance after we remove an appservice registration /// #[tracing::instrument(skip(self))] - pub fn cleanup_events(&self, key_id: &str) -> Result<()> { - let mut prefix = b"+".to_vec(); - prefix.extend_from_slice(key_id.as_bytes()); - prefix.push(0xff); - - for (key, _) in self.servercurrentevent_data.scan_prefix(prefix.clone()) { - self.servercurrentevent_data.remove(&key).unwrap(); - } - - for (key, _) in self.servernameevent_data.scan_prefix(prefix.clone()) { - self.servernameevent_data.remove(&key).unwrap(); - } + pub fn cleanup_events(&self, appservice_id: String) -> Result<()> { + self.db.delete_all_requests_for(&OutgoingKind::Appservice(appservice_id))?; Ok(()) } @@ -488,7 +426,7 @@ impl Service { for event in &events { match event { SendingEventType::Pdu(pdu_id) => { - pdu_jsons.push(services().rooms + pdu_jsons.push(services().rooms.timeline .get_pdu_from_id(pdu_id) .map_err(|e| (kind.clone(), e))? .ok_or_else(|| { @@ -525,7 +463,7 @@ impl Service { appservice::event::push_events::v1::Request { events: &pdu_jsons, txn_id: (&*base64::encode_config( - Self::calculate_hash( + calculate_hash( &events .iter() .map(|e| match e { @@ -546,7 +484,7 @@ impl Service { response } - OutgoingKind::Push(user, pushkey) => { + OutgoingKind::Push(userid, pushkey) => { let mut pdus = Vec::new(); for event in &events { @@ -554,6 +492,7 @@ impl Service { SendingEventType::Pdu(pdu_id) => { pdus.push( services().rooms + .timeline .get_pdu_from_id(pdu_id) .map_err(|e| (kind.clone(), e))? .ok_or_else(|| { @@ -584,27 +523,10 @@ impl Service { } } - let userid = UserId::parse(utils::string_from_bytes(user).map_err(|_| { - ( - kind.clone(), - Error::bad_database("Invalid push user string in db."), - ) - })?) - .map_err(|_| { - ( - kind.clone(), - Error::bad_database("Invalid push user id in db."), - ) - })?; - - let mut senderkey = user.clone(); - senderkey.push(0xff); - senderkey.extend_from_slice(pushkey); - let pusher = match services() .pusher - .get_pusher(&senderkey) - .map_err(|e| (OutgoingKind::Push(user.clone(), pushkey.clone()), e))? 
+ .get_pusher(&userid, pushkey) + .map_err(|e| (OutgoingKind::Push(userid.clone(), pushkey.clone()), e))? { Some(pusher) => pusher, None => continue, @@ -618,11 +540,13 @@ impl Service { GlobalAccountDataEventType::PushRules.to_string().into(), ) .unwrap_or_default() + .and_then(|event| serde_json::from_str::(event.get()).ok()) .map(|ev: PushRulesEvent| ev.content.global) .unwrap_or_else(|| push::Ruleset::server_default(&userid)); let unread: UInt = services() .rooms + .user .notification_count(&userid, &pdu.room_id) .map_err(|e| (kind.clone(), e))? .try_into() @@ -639,7 +563,7 @@ impl Service { drop(permit); } - Ok(OutgoingKind::Push(user.clone(), pushkey.clone())) + Ok(OutgoingKind::Push(userid.clone(), pushkey.clone())) } OutgoingKind::Normal(server) => { let mut edu_jsons = Vec::new(); @@ -651,6 +575,7 @@ impl Service { // TODO: check room version and remove event_id if needed let raw = PduEvent::convert_to_outgoing_federation_event( services().rooms + .timeline .get_pdu_json_from_id(pdu_id) .map_err(|e| (OutgoingKind::Normal(server.clone()), e))? .ok_or_else(|| { @@ -713,72 +638,6 @@ impl Service { } } - #[tracing::instrument(skip(key))] - fn parse_servercurrentevent( - key: &[u8], - value: Vec, - ) -> Result<(OutgoingKind, SendingEventType)> { - // Appservices start with a plus - Ok::<_, Error>(if key.starts_with(b"+") { - let mut parts = key[1..].splitn(2, |&b| b == 0xff); - - let server = parts.next().expect("splitn always returns one element"); - let event = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; - let server = utils::string_from_bytes(server).map_err(|_| { - Error::bad_database("Invalid server bytes in server_currenttransaction") - })?; - - ( - OutgoingKind::Appservice(server), - if value.is_empty() { - SendingEventType::Pdu(event.to_vec()) - } else { - SendingEventType::Edu(value) - }, - ) - } else if key.starts_with(b"$") { - let mut parts = key[1..].splitn(3, |&b| b == 0xff); - - let user = parts.next().expect("splitn always returns one element"); - let pushkey = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; - let event = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; - ( - OutgoingKind::Push(user.to_vec(), pushkey.to_vec()), - if value.is_empty() { - SendingEventType::Pdu(event.to_vec()) - } else { - SendingEventType::Edu(value) - }, - ) - } else { - let mut parts = key.splitn(2, |&b| b == 0xff); - - let server = parts.next().expect("splitn always returns one element"); - let event = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; - let server = utils::string_from_bytes(server).map_err(|_| { - Error::bad_database("Invalid server bytes in server_currenttransaction") - })?; - - ( - OutgoingKind::Normal(ServerName::parse(server).map_err(|_| { - Error::bad_database("Invalid server string in server_currenttransaction") - })?), - if value.is_empty() { - SendingEventType::Pdu(event.to_vec()) - } else { - SendingEventType::Edu(value) - }, - ) - }) - } #[tracing::instrument(skip(self, destination, request))] pub async fn send_federation_request( diff --git a/src/service/transaction_ids/mod.rs b/src/service/transaction_ids/mod.rs index a473e2b1..509b65c0 100644 --- a/src/service/transaction_ids/mod.rs +++ b/src/service/transaction_ids/mod.rs @@ -7,7 +7,7 @@ use crate::Result; use ruma::{DeviceId, TransactionId, UserId}; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl 
Service { diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index 8f3b3b8b..f8addcc5 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -16,7 +16,7 @@ use tracing::error; use crate::{api::client_server::SESSION_ID_LENGTH, services, utils, Error, Result}; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { diff --git a/src/service/users/data.rs b/src/service/users/data.rs index 9f315d3b..9537ed2a 100644 --- a/src/service/users/data.rs +++ b/src/service/users/data.rs @@ -22,19 +22,13 @@ pub trait Data: Send + Sync { fn find_from_token(&self, token: &str) -> Result, String)>>; /// Returns an iterator over all users on this homeserver. - fn iter(&self) -> Box>>>; + fn iter<'a>(&'a self) -> Box>> + 'a>; /// Returns a list of local users as list of usernames. /// /// A user account is considered `local` if the length of it's password is greater then zero. fn list_local_users(&self) -> Result>; - /// Will only return with Some(username) if the password was not empty and the - /// username could be successfully parsed. - /// If utils::string_from_bytes(...) returns an error that username will be skipped - /// and the error will be logged. - fn get_username_with_valid_password(&self, username: &[u8], password: &[u8]) -> Option; - /// Returns the password hash for the given user. fn password_hash(&self, user_id: &UserId) -> Result>; @@ -75,7 +69,7 @@ pub trait Data: Send + Sync { fn all_device_ids<'a>( &'a self, user_id: &UserId, - ) -> Box>>>; + ) -> Box>> + 'a>; /// Replaces the access token of one device. fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()>; @@ -131,7 +125,7 @@ pub trait Data: Send + Sync { user_or_room_id: &str, from: u64, to: Option, - ) -> Box>>>; + ) -> Box>> + 'a>; fn mark_device_key_update(&self, user_id: &UserId) -> Result<()>; @@ -193,7 +187,7 @@ pub trait Data: Send + Sync { fn all_devices_metadata<'a>( &'a self, user_id: &UserId, - ) -> Box>>; + ) -> Box> + 'a>; /// Creates a new sync filter. Returns the filter id. fn create_filter(&self, user_id: &UserId, filter: &IncomingFilterDefinition) -> Result; diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 0b83460c..e3419e75 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -13,7 +13,7 @@ use ruma::{ use crate::{services, Error, Result}; pub struct Service { - db: Arc, + pub db: &'static dyn Data, } impl Service { @@ -72,14 +72,6 @@ impl Service { self.db.list_local_users() } - /// Will only return with Some(username) if the password was not empty and the - /// username could be successfully parsed. - /// If utils::string_from_bytes(...) returns an error that username will be skipped - /// and the error will be logged. - fn get_username_with_valid_password(&self, username: &[u8], password: &[u8]) -> Option { - self.db.get_username_with_valid_password(username, password) - } - /// Returns the password hash for the given user. 
pub fn password_hash(&self, user_id: &UserId) -> Result> { self.db.password_hash(user_id) @@ -275,7 +267,7 @@ impl Service { user_id: &UserId, device_id: &DeviceId, ) -> Result>> { - self.get_to_device_events(user_id, device_id) + self.db.get_to_device_events(user_id, device_id) } pub fn remove_to_device_events( @@ -302,7 +294,7 @@ impl Service { user_id: &UserId, device_id: &DeviceId, ) -> Result> { - self.get_device_metadata(user_id, device_id) + self.db.get_device_metadata(user_id, device_id) } pub fn get_devicelist_version(&self, user_id: &UserId) -> Result> { From 7822a385bbbdc608501a93499b83a3ca89fafb0f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 8 Oct 2022 13:03:07 +0200 Subject: [PATCH 394/445] cargo fmt --- src/api/client_server/account.rs | 9 +-- src/api/client_server/membership.rs | 7 +- src/api/server_server.rs | 15 ++-- src/database/key_value/account_data.rs | 6 +- src/database/key_value/pusher.rs | 20 ++++-- .../key_value/rooms/edus/read_receipt.rs | 12 ++-- src/database/key_value/rooms/metadata.rs | 3 +- src/database/key_value/rooms/search.rs | 2 +- src/database/key_value/users.rs | 4 +- src/database/mod.rs | 10 ++- src/lib.rs | 6 +- src/main.rs | 2 +- src/service/account_data/mod.rs | 6 +- src/service/admin/mod.rs | 16 ++--- src/service/globals/mod.rs | 4 +- src/service/key_backups/mod.rs | 6 +- src/service/media/mod.rs | 2 +- src/service/mod.rs | 4 +- src/service/pusher/data.rs | 6 +- src/service/pusher/mod.rs | 11 +-- src/service/rooms/edus/read_receipt/data.rs | 12 ++-- src/service/rooms/event_handler/mod.rs | 2 +- src/service/rooms/search/data.rs | 2 +- src/service/rooms/search/mod.rs | 7 +- src/service/rooms/short/mod.rs | 2 +- src/service/rooms/state/mod.rs | 3 +- src/service/rooms/state_cache/mod.rs | 5 +- src/service/rooms/timeline/mod.rs | 6 +- src/service/sending/mod.rs | 72 ++++++++++++++----- 29 files changed, 143 insertions(+), 119 deletions(-) diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index e27d295e..58624a28 100644 --- a/src/api/client_server/account.rs +++ b/src/api/client_server/account.rs @@ -1,5 +1,3 @@ - - use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; use crate::{api::client_server, services, utils, Error, Result, Ruma}; use ruma::{ @@ -11,12 +9,7 @@ use ruma::{ error::ErrorKind, uiaa::{AuthFlow, AuthType, UiaaInfo}, }, - events::{ - room::{ - message::RoomMessageEventContent, - }, - GlobalAccountDataEventType, - }, + events::{room::message::RoomMessageEventContent, GlobalAccountDataEventType}, push, UserId, }; diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index 0aae9959..d971e6b7 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -11,12 +11,11 @@ use ruma::{ federation::{self, membership::create_invite}, }, events::{ - room::{ - member::{MembershipState, RoomMemberEventContent}, - }, + room::member::{MembershipState, RoomMemberEventContent}, RoomEventType, StateEventType, }, - serde::{to_canonical_value, Base64, CanonicalJsonObject, CanonicalJsonValue}, EventId, RoomId, RoomVersionId, ServerName, UserId, + serde::{to_canonical_value, Base64, CanonicalJsonObject, CanonicalJsonValue}, + EventId, RoomId, RoomVersionId, ServerName, UserId, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ diff --git a/src/api/server_server.rs b/src/api/server_server.rs index bcf893c6..66aac9e9 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -4,7 +4,7 @@ 
use crate::{ services, utils, Error, PduEvent, Result, Ruma, }; use axum::{response::IntoResponse, Json}; -use futures_util::{StreamExt}; +use futures_util::StreamExt; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION}; @@ -15,10 +15,7 @@ use ruma::{ authorization::get_event_authorization, device::get_devices::{self, v1::UserDevice}, directory::{get_public_rooms, get_public_rooms_filtered}, - discovery::{ - get_server_keys, - get_server_version, ServerSigningKeys, VerifyKey, - }, + discovery::{get_server_keys, get_server_version, ServerSigningKeys, VerifyKey}, event::{get_event, get_missing_events, get_room_state, get_room_state_ids}, keys::{claim_keys, get_keys}, membership::{ @@ -46,13 +43,13 @@ use ruma::{ }, receipt::ReceiptType, serde::{Base64, JsonObject, Raw}, - signatures::{CanonicalJsonValue}, - to_device::DeviceIdOrAllDevices, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, - ServerSigningKeyId, + signatures::CanonicalJsonValue, + to_device::DeviceIdOrAllDevices, + EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, ServerSigningKeyId, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ - collections::{BTreeMap}, + collections::BTreeMap, fmt::Debug, mem, net::{IpAddr, SocketAddr}, diff --git a/src/database/key_value/account_data.rs b/src/database/key_value/account_data.rs index 0e8029ff..e1eef966 100644 --- a/src/database/key_value/account_data.rs +++ b/src/database/key_value/account_data.rs @@ -1,12 +1,12 @@ use std::collections::HashMap; use ruma::{ - api::client::{error::ErrorKind}, + api::client::error::ErrorKind, events::{AnyEphemeralRoomEvent, RoomAccountDataEventType}, - serde::Raw, RoomId, UserId, + serde::Raw, + RoomId, UserId, }; - use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; impl service::account_data::Data for KeyValueDatabase { diff --git a/src/database/key_value/pusher.rs b/src/database/key_value/pusher.rs index 1468a553..42d4030b 100644 --- a/src/database/key_value/pusher.rs +++ b/src/database/key_value/pusher.rs @@ -3,7 +3,7 @@ use ruma::{ UserId, }; -use crate::{database::KeyValueDatabase, service, Error, Result, utils}; +use crate::{database::KeyValueDatabase, service, utils, Error, Result}; impl service::pusher::Data for KeyValueDatabase { fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> { @@ -28,7 +28,11 @@ impl service::pusher::Data for KeyValueDatabase { Ok(()) } - fn get_pusher(&self, sender: &UserId, pushkey: &str) -> Result> { + fn get_pusher( + &self, + sender: &UserId, + pushkey: &str, + ) -> Result> { let mut senderkey = sender.as_bytes().to_vec(); senderkey.push(0xff); senderkey.extend_from_slice(pushkey.as_bytes()); @@ -55,15 +59,21 @@ impl service::pusher::Data for KeyValueDatabase { .collect() } - fn get_pushkeys<'a>(&'a self, sender: &UserId) -> Box> + 'a> { + fn get_pushkeys<'a>( + &'a self, + sender: &UserId, + ) -> Box> + 'a> { let mut prefix = sender.as_bytes().to_vec(); prefix.push(0xff); Box::new(self.senderkey_pusher.scan_prefix(prefix).map(|(k, _)| { let mut parts = k.splitn(2, |&b| b == 0xff); let _senderkey = parts.next(); - let push_key = parts.next().ok_or_else(|| Error::bad_database("Invalid senderkey_pusher in db"))?; - let push_key_string = utils::string_from_bytes(push_key).map_err(|_| Error::bad_database("Invalid pusher bytes in senderkey_pusher"))?; + let push_key = parts + .next() + .ok_or_else(|| Error::bad_database("Invalid senderkey_pusher in db"))?; + let push_key_string 
= utils::string_from_bytes(push_key) + .map_err(|_| Error::bad_database("Invalid pusher bytes in senderkey_pusher"))?; Ok(push_key_string) })) diff --git a/src/database/key_value/rooms/edus/read_receipt.rs b/src/database/key_value/rooms/edus/read_receipt.rs index 19c1ced7..a8349f6e 100644 --- a/src/database/key_value/rooms/edus/read_receipt.rs +++ b/src/database/key_value/rooms/edus/read_receipt.rs @@ -54,12 +54,12 @@ impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { since: u64, ) -> Box< dyn Iterator< - Item = Result<( - Box, - u64, - Raw, - )>, - > + 'a, + Item = Result<( + Box, + u64, + Raw, + )>, + > + 'a, > { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/key_value/rooms/metadata.rs b/src/database/key_value/rooms/metadata.rs index 2ec18bed..0f61dbb4 100644 --- a/src/database/key_value/rooms/metadata.rs +++ b/src/database/key_value/rooms/metadata.rs @@ -1,6 +1,6 @@ use ruma::RoomId; -use crate::{database::KeyValueDatabase, service, services, Result, utils, Error}; +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; impl service::rooms::metadata::Data for KeyValueDatabase { fn exists(&self, room_id: &RoomId) -> Result { @@ -27,7 +27,6 @@ impl service::rooms::metadata::Data for KeyValueDatabase { ) .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) })) - } fn is_disabled(&self, room_id: &RoomId) -> Result { diff --git a/src/database/key_value/rooms/search.rs b/src/database/key_value/rooms/search.rs index 8aa7a639..788c2965 100644 --- a/src/database/key_value/rooms/search.rs +++ b/src/database/key_value/rooms/search.rs @@ -26,7 +26,7 @@ impl service::rooms::search::Data for KeyValueDatabase { &'a self, room_id: &RoomId, search_string: &str, - ) -> Result>+ 'a>, Vec)>> { + ) -> Result> + 'a>, Vec)>> { let prefix = services() .rooms .short diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs index 86689f85..8213c5d7 100644 --- a/src/database/key_value/users.rs +++ b/src/database/key_value/users.rs @@ -5,8 +5,7 @@ use ruma::{ encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::{AnyToDeviceEvent, StateEventType}, serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, MxcUri, - UInt, UserId, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, MxcUri, UInt, UserId, }; use tracing::warn; @@ -956,4 +955,3 @@ fn get_username_with_valid_password(username: &[u8], password: &[u8]) -> Option< } } } - diff --git a/src/database/mod.rs b/src/database/mod.rs index 191cd62f..977daf35 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -2,15 +2,13 @@ pub mod abstraction; pub mod key_value; use crate::{ - service::{ - rooms::{state_compressor::CompressedStateEvent}, - }, - services, utils, Config, Error, PduEvent, Result, Services, SERVICES, + service::rooms::state_compressor::CompressedStateEvent, services, utils, Config, Error, + PduEvent, Result, Services, SERVICES, }; use abstraction::KeyValueDatabaseEngine; use abstraction::KvTree; use directories::ProjectDirs; -use futures_util::{StreamExt}; +use futures_util::StreamExt; use lru_cache::LruCache; use ruma::{ events::{ @@ -29,7 +27,7 @@ use std::{ path::Path, sync::{Arc, Mutex, RwLock}, }; -use tokio::sync::{mpsc}; +use tokio::sync::mpsc; use tracing::{debug, error, info, warn}; pub struct KeyValueDatabase { diff --git a/src/lib.rs b/src/lib.rs index 9c397c08..541b8c8d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -13,15 +13,13 @@ mod 
database; mod service; mod utils; -use std::{ - sync::{RwLock}, -}; +use std::sync::RwLock; pub use api::ruma_wrapper::{Ruma, RumaResponse}; pub use config::Config; +pub use database::KeyValueDatabase; pub use service::{pdu::PduEvent, Services}; pub use utils::error::{Error, Result}; -pub use database::KeyValueDatabase; pub static SERVICES: RwLock> = RwLock::new(None); diff --git a/src/main.rs b/src/main.rs index 71eaa660..c7ef62d0 100644 --- a/src/main.rs +++ b/src/main.rs @@ -35,7 +35,7 @@ use tower_http::{ trace::TraceLayer, ServiceBuilderExt as _, }; -use tracing::{warn, info}; +use tracing::{info, warn}; use tracing_subscriber::{prelude::*, EnvFilter}; pub use conduit::*; // Re-export everything from the library crate diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index 975c8203..a4a678d6 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -4,13 +4,13 @@ pub use data::Data; use ruma::{ events::{AnyEphemeralRoomEvent, RoomAccountDataEventType}, - serde::Raw, RoomId, UserId, + serde::Raw, + RoomId, UserId, }; use std::{collections::HashMap, sync::Arc}; - -use crate::{Result}; +use crate::Result; pub struct Service { pub db: &'static dyn Data, diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 2c776611..8f33056f 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -29,9 +29,7 @@ use serde_json::value::to_raw_value; use tokio::sync::{mpsc, MutexGuard}; use crate::{ - api::{ - client_server::{leave_all_rooms, AUTO_GEN_PASSWORD_LENGTH}, - }, + api::client_server::{leave_all_rooms, AUTO_GEN_PASSWORD_LENGTH}, services, utils::{self, HtmlEscape}, Error, PduEvent, Result, @@ -177,7 +175,9 @@ impl Service { let self1 = Arc::new(Self { sender }); let self2 = Arc::clone(&self1); - tokio::spawn(async move { self2.start_handler(receiver).await; }); + tokio::spawn(async move { + self2.start_handler(receiver).await; + }); self1 } @@ -186,9 +186,8 @@ impl Service { // TODO: Use futures when we have long admin commands //let mut futures = FuturesUnordered::new(); - let conduit_user = - UserId::parse(format!("@conduit:{}", services().globals.server_name())) - .expect("@conduit:server_name is valid"); + let conduit_user = UserId::parse(format!("@conduit:{}", services().globals.server_name())) + .expect("@conduit:server_name is valid"); let conduit_room = services() .rooms @@ -202,8 +201,7 @@ impl Service { .expect("Database data for admin room alias must be valid") .expect("Admin room must exist"); - let send_message = |message: RoomMessageEventContent, - mutex_lock: &MutexGuard<'_, ()>| { + let send_message = |message: RoomMessageEventContent, mutex_lock: &MutexGuard<'_, ()>| { services() .rooms .timeline diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 477b269d..054df095 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -3,15 +3,13 @@ pub use data::Data; use crate::api::server_server::FedDest; - use crate::{Config, Error, Result}; use ruma::{ api::{ client::sync::sync_events, federation::discovery::{ServerSigningKeys, VerifyKey}, }, - DeviceId, EventId, RoomId, RoomVersionId, ServerName, - ServerSigningKeyId, UserId, + DeviceId, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, }; use std::{ collections::{BTreeMap, HashMap}, diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs index 5d0ad599..c8df0afc 100644 --- a/src/service/key_backups/mod.rs +++ b/src/service/key_backups/mod.rs @@ -1,11 +1,9 @@ mod 
data; pub use data::Data; -use crate::{Result}; +use crate::Result; use ruma::{ - api::client::{ - backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, - }, + api::client::backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, serde::Raw, RoomId, UserId, }; diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index 29648577..96e9aa34 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -3,7 +3,7 @@ pub use data::Data; use crate::{services, Result}; use image::{imageops::FilterType, GenericImageView}; -use std::{sync::Arc}; +use std::sync::Arc; use tokio::{ fs::File, io::{AsyncReadExt, AsyncWriteExt}, diff --git a/src/service/mod.rs b/src/service/mod.rs index e8696e79..385dcc69 100644 --- a/src/service/mod.rs +++ b/src/service/mod.rs @@ -1,5 +1,5 @@ use std::{ - collections::{HashMap}, + collections::HashMap, sync::{Arc, Mutex}, }; @@ -49,7 +49,7 @@ impl Services { + key_backups::Data + media::Data + sending::Data - + 'static + + 'static, >( db: &'static D, config: Config, diff --git a/src/service/pusher/data.rs b/src/service/pusher/data.rs index cb8768d8..e3171210 100644 --- a/src/service/pusher/data.rs +++ b/src/service/pusher/data.rs @@ -7,9 +7,11 @@ use ruma::{ pub trait Data: Send + Sync { fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()>; - fn get_pusher(&self, sender: &UserId, pushkey: &str) -> Result>; + fn get_pusher(&self, sender: &UserId, pushkey: &str) + -> Result>; fn get_pushers(&self, sender: &UserId) -> Result>; - fn get_pushkeys<'a>(&'a self, sender: &UserId) -> Box> + 'a>; + fn get_pushkeys<'a>(&'a self, sender: &UserId) + -> Box> + 'a>; } diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 3b12f38b..f8e5bca6 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -34,7 +34,11 @@ impl Service { self.db.set_pusher(sender, pusher) } - pub fn get_pusher(&self, sender: &UserId, pushkey: &str) -> Result> { + pub fn get_pusher( + &self, + sender: &UserId, + pushkey: &str, + ) -> Result> { self.db.get_pusher(sender, pushkey) } @@ -42,10 +46,7 @@ impl Service { self.db.get_pushers(sender) } - pub fn get_pushkeys<'a>( - &'a self, - sender: &UserId, - ) -> Box>> { + pub fn get_pushkeys<'a>(&'a self, sender: &UserId) -> Box>> { self.db.get_pushkeys(sender) } diff --git a/src/service/rooms/edus/read_receipt/data.rs b/src/service/rooms/edus/read_receipt/data.rs index 9a02ee40..800c035f 100644 --- a/src/service/rooms/edus/read_receipt/data.rs +++ b/src/service/rooms/edus/read_receipt/data.rs @@ -17,12 +17,12 @@ pub trait Data: Send + Sync { since: u64, ) -> Box< dyn Iterator< - Item = Result<( - Box, - u64, - Raw, - )>, - > + 'a, + Item = Result<( + Box, + u64, + Raw, + )>, + > + 'a, >; /// Sets a private read marker at `count`. 
diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 0c0bd2ce..e5f8424b 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -34,7 +34,7 @@ use ruma::{ state_res::{self, RoomVersion, StateMap}, uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, ServerSigningKeyId, }; -use serde_json::value::{RawValue as RawJsonValue}; +use serde_json::value::RawValue as RawJsonValue; use tracing::{debug, error, info, trace, warn}; use crate::{service::*, services, Error, PduEvent, Result}; diff --git a/src/service/rooms/search/data.rs b/src/service/rooms/search/data.rs index bd7d61bb..82c08004 100644 --- a/src/service/rooms/search/data.rs +++ b/src/service/rooms/search/data.rs @@ -8,5 +8,5 @@ pub trait Data: Send + Sync { &'a self, room_id: &RoomId, search_string: &str, - ) -> Result>+ 'a>, Vec)>>; + ) -> Result> + 'a>, Vec)>>; } diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index 1d8d01e1..80356303 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -12,12 +12,7 @@ pub struct Service { impl Service { #[tracing::instrument(skip(self))] - pub fn index_pdu<'a>( - &self, - shortroomid: u64, - pdu_id: &[u8], - message_body: &str, - ) -> Result<()> { + pub fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> { self.db.index_pdu(shortroomid, pdu_id, message_body) } diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs index d847dea2..45fadd74 100644 --- a/src/service/rooms/short/mod.rs +++ b/src/service/rooms/short/mod.rs @@ -4,7 +4,7 @@ use std::sync::Arc; pub use data::Data; use ruma::{events::StateEventType, EventId, RoomId}; -use crate::{Result}; +use crate::Result; pub struct Service { pub db: &'static dyn Data, diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 614236ca..7b8b0fde 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -102,7 +102,8 @@ impl Service { services().rooms.state_cache.update_joined_count(room_id)?; - self.db.set_room_state(room_id, shortstatehash, &state_lock)?; + self.db + .set_room_state(room_id, shortstatehash, &state_lock)?; drop(state_lock); diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index cf4c6655..2b4762ae 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -5,10 +5,11 @@ pub use data::Data; use ruma::{ events::{ - direct::{DirectEvent}, + direct::DirectEvent, ignored_user_list::IgnoredUserListEvent, room::{create::RoomCreateEventContent, member::MembershipState}, - AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, RoomAccountDataEventType, StateEventType, + AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, + RoomAccountDataEventType, StateEventType, }, serde::Raw, RoomId, ServerName, UserId, diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 01c54a3a..16f50d23 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -2,8 +2,8 @@ mod data; use std::collections::HashMap; +use std::collections::HashSet; use std::sync::{Arc, Mutex}; -use std::{collections::HashSet}; pub use data::Data; use regex::Regex; @@ -305,7 +305,9 @@ impl Service { } for push_key in services().pusher.get_pushkeys(&user) { - services().sending.send_push_pdu(&*pdu_id, &user, push_key?)?; + services() + .sending + 
.send_push_pdu(&*pdu_id, &user, push_key?)?; } } diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index cb16e70d..b67f1e28 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -5,15 +5,16 @@ pub use data::Data; use std::{ collections::{BTreeMap, HashMap, HashSet}, fmt::Debug, + iter, sync::Arc, - time::{Duration, Instant}, iter, + time::{Duration, Instant}, }; use crate::{ api::{appservice_server, server_server}, services, utils::{self, calculate_hash}, - Error, PduEvent, Result, Config, + Config, Error, PduEvent, Result, }; use federation::transactions::send_transaction_message; use futures_util::{stream::FuturesUnordered, StreamExt}; @@ -100,7 +101,11 @@ impl Service { pub fn build(db: &'static dyn Data, config: &Config) -> Arc { let (sender, receiver) = mpsc::unbounded_channel(); - let self1 = Arc::new(Self { db, sender, maximum_requests: Arc::new(Semaphore::new(config.max_concurrent_requests as usize)) }); + let self1 = Arc::new(Self { + db, + sender, + maximum_requests: Arc::new(Semaphore::new(config.max_concurrent_requests as usize)), + }); let self2 = Arc::clone(&self1); tokio::spawn(async move { @@ -110,7 +115,10 @@ impl Service { self1 } - async fn start_handler(&self, mut receiver: mpsc::UnboundedReceiver<(OutgoingKind, SendingEventType, Vec)>) -> Result<()> { + async fn start_handler( + &self, + mut receiver: mpsc::UnboundedReceiver<(OutgoingKind, SendingEventType, Vec)>, + ) -> Result<()> { let mut futures = FuturesUnordered::new(); let mut current_transaction_status = HashMap::::new(); @@ -118,8 +126,7 @@ impl Service { // Retry requests we could not finish yet let mut initial_transactions = HashMap::>::new(); - for (key, outgoing_kind, event) in self.db.active_requests().filter_map(|r| r.ok()) - { + for (key, outgoing_kind, event) in self.db.active_requests().filter_map(|r| r.ok()) { let entry = initial_transactions .entry(outgoing_kind.clone()) .or_insert_with(Vec::new); @@ -137,8 +144,7 @@ impl Service { } for (outgoing_kind, events) in initial_transactions { - current_transaction_status - .insert(outgoing_kind.clone(), TransactionStatus::Running); + current_transaction_status.insert(outgoing_kind.clone(), TransactionStatus::Running); futures.push(Self::handle_events(outgoing_kind.clone(), events)); } @@ -235,7 +241,11 @@ impl Service { if retry { // We retry the previous transaction - for (_, e) in self.db.active_requests_for(outgoing_kind).filter_map(|r| r.ok()) { + for (_, e) in self + .db + .active_requests_for(outgoing_kind) + .filter_map(|r| r.ok()) + { events.push(e); } } else { @@ -276,7 +286,12 @@ impl Service { ); // Look for read receipts in this room - for r in services().rooms.edus.read_receipt.readreceipts_since(&room_id, since) { + for r in services() + .rooms + .edus + .read_receipt + .readreceipts_since(&room_id, since) + { let (user_id, count, read_receipt) = r?; if count > max_edu_count { @@ -359,7 +374,9 @@ impl Service { let outgoing_kind = OutgoingKind::Push(user.to_owned(), pushkey); let event = SendingEventType::Pdu(pdu_id.to_owned()); let keys = self.db.queue_requests(&[(&outgoing_kind, event.clone())])?; - self.sender.send((outgoing_kind, event, keys.into_iter().next().unwrap())).unwrap(); + self.sender + .send((outgoing_kind, event, keys.into_iter().next().unwrap())) + .unwrap(); Ok(()) } @@ -370,10 +387,25 @@ impl Service { servers: I, pdu_id: &[u8], ) -> Result<()> { - let requests = servers.into_iter().map(|server| (OutgoingKind::Normal(server), SendingEventType::Pdu(pdu_id.to_owned()))).collect::>(); 
- let keys = self.db.queue_requests(&requests.iter().map(|(o, e)| (o, e.clone())).collect::>())?; + let requests = servers + .into_iter() + .map(|server| { + ( + OutgoingKind::Normal(server), + SendingEventType::Pdu(pdu_id.to_owned()), + ) + }) + .collect::>(); + let keys = self.db.queue_requests( + &requests + .iter() + .map(|(o, e)| (o, e.clone())) + .collect::>(), + )?; for ((outgoing_kind, event), key) in requests.into_iter().zip(keys) { - self.sender.send((outgoing_kind.to_owned(), event, key)).unwrap(); + self.sender + .send((outgoing_kind.to_owned(), event, key)) + .unwrap(); } Ok(()) @@ -389,7 +421,9 @@ impl Service { let outgoing_kind = OutgoingKind::Normal(server.to_owned()); let event = SendingEventType::Edu(serialized); let keys = self.db.queue_requests(&[(&outgoing_kind, event.clone())])?; - self.sender.send((outgoing_kind, event, keys.into_iter().next().unwrap())).unwrap(); + self.sender + .send((outgoing_kind, event, keys.into_iter().next().unwrap())) + .unwrap(); Ok(()) } @@ -399,7 +433,9 @@ impl Service { let outgoing_kind = OutgoingKind::Appservice(appservice_id); let event = SendingEventType::Pdu(pdu_id); let keys = self.db.queue_requests(&[(&outgoing_kind, event.clone())])?; - self.sender.send((outgoing_kind, event, keys.into_iter().next().unwrap())).unwrap(); + self.sender + .send((outgoing_kind, event, keys.into_iter().next().unwrap())) + .unwrap(); Ok(()) } @@ -409,7 +445,8 @@ impl Service { /// #[tracing::instrument(skip(self))] pub fn cleanup_events(&self, appservice_id: String) -> Result<()> { - self.db.delete_all_requests_for(&OutgoingKind::Appservice(appservice_id))?; + self.db + .delete_all_requests_for(&OutgoingKind::Appservice(appservice_id))?; Ok(()) } @@ -638,7 +675,6 @@ impl Service { } } - #[tracing::instrument(skip(self, destination, request))] pub async fn send_federation_request( &self, From 50b0eb9929104a1eed008cbf0a8965a802c20306 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 8 Oct 2022 13:04:55 +0200 Subject: [PATCH 395/445] cargo fix --- src/api/server_server.rs | 1 - src/database/mod.rs | 5 ++--- src/main.rs | 4 ++-- src/service/account_data/mod.rs | 2 +- src/service/appservice/mod.rs | 2 +- src/service/key_backups/mod.rs | 2 +- src/service/media/mod.rs | 2 +- src/service/pusher/mod.rs | 2 +- src/service/rooms/alias/mod.rs | 2 +- src/service/rooms/directory/mod.rs | 2 +- src/service/rooms/edus/presence/mod.rs | 2 +- src/service/rooms/edus/read_receipt/mod.rs | 2 +- src/service/rooms/edus/typing/mod.rs | 2 +- src/service/rooms/lazy_loading/mod.rs | 2 +- src/service/rooms/metadata/mod.rs | 2 +- src/service/rooms/outlier/mod.rs | 2 +- src/service/rooms/search/mod.rs | 2 +- src/service/rooms/user/mod.rs | 2 +- src/service/sending/mod.rs | 3 +-- src/service/transaction_ids/mod.rs | 2 +- src/service/uiaa/mod.rs | 2 +- src/service/users/mod.rs | 2 +- 22 files changed, 23 insertions(+), 26 deletions(-) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 66aac9e9..d54e1306 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -4,7 +4,6 @@ use crate::{ services, utils, Error, PduEvent, Result, Ruma, }; use axum::{response::IntoResponse, Json}; -use futures_util::StreamExt; use get_profile_information::v1::ProfileField; use http::header::{HeaderValue, AUTHORIZATION}; diff --git a/src/database/mod.rs b/src/database/mod.rs index 977daf35..882455f8 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -2,13 +2,12 @@ pub mod abstraction; pub mod key_value; use crate::{ - 
service::rooms::state_compressor::CompressedStateEvent, services, utils, Config, Error, + services, utils, Config, Error, PduEvent, Result, Services, SERVICES, }; use abstraction::KeyValueDatabaseEngine; use abstraction::KvTree; use directories::ProjectDirs; -use futures_util::StreamExt; use lru_cache::LruCache; use ruma::{ events::{ @@ -27,7 +26,7 @@ use std::{ path::Path, sync::{Arc, Mutex, RwLock}, }; -use tokio::sync::mpsc; + use tracing::{debug, error, info, warn}; pub struct KeyValueDatabase { diff --git a/src/main.rs b/src/main.rs index c7ef62d0..1aad62bd 100644 --- a/src/main.rs +++ b/src/main.rs @@ -7,7 +7,7 @@ #![allow(clippy::suspicious_else_formatting)] #![deny(clippy::dbg_macro)] -use std::{future::Future, io, net::SocketAddr, sync::Arc, time::Duration}; +use std::{future::Future, io, net::SocketAddr, time::Duration}; use axum::{ extract::{FromRequest, MatchedPath}, @@ -28,7 +28,7 @@ use http::{ }; use opentelemetry::trace::{FutureExt, Tracer}; use ruma::api::{client::error::ErrorKind, IncomingRequest}; -use tokio::{signal, sync::RwLock}; +use tokio::{signal}; use tower::ServiceBuilder; use tower_http::{ cors::{self, CorsLayer}, diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index a4a678d6..0387b139 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -8,7 +8,7 @@ use ruma::{ RoomId, UserId, }; -use std::{collections::HashMap, sync::Arc}; +use std::{collections::HashMap}; use crate::Result; diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index 20ba08ad..17402f4c 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -1,5 +1,5 @@ mod data; -use std::sync::Arc; + pub use data::Data; diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs index c8df0afc..51117cd5 100644 --- a/src/service/key_backups/mod.rs +++ b/src/service/key_backups/mod.rs @@ -7,7 +7,7 @@ use ruma::{ serde::Raw, RoomId, UserId, }; -use std::{collections::BTreeMap, sync::Arc}; +use std::{collections::BTreeMap}; pub struct Service { pub db: &'static dyn Data, diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index 96e9aa34..66841087 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -3,7 +3,7 @@ pub use data::Data; use crate::{services, Result}; use image::{imageops::FilterType, GenericImageView}; -use std::sync::Arc; + use tokio::{ fs::File, io::{AsyncReadExt, AsyncWriteExt}, diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index f8e5bca6..385a2071 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -21,7 +21,7 @@ use ruma::{ serde::Raw, uint, RoomId, UInt, UserId, }; -use std::sync::Arc; + use std::{fmt::Debug, mem}; use tracing::{error, info, warn}; diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index e76589ab..600a1201 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -1,5 +1,5 @@ mod data; -use std::sync::Arc; + pub use data::Data; diff --git a/src/service/rooms/directory/mod.rs b/src/service/rooms/directory/mod.rs index 9e5e8156..fcc0003d 100644 --- a/src/service/rooms/directory/mod.rs +++ b/src/service/rooms/directory/mod.rs @@ -1,5 +1,5 @@ mod data; -use std::sync::Arc; + pub use data::Data; use ruma::RoomId; diff --git a/src/service/rooms/edus/presence/mod.rs b/src/service/rooms/edus/presence/mod.rs index 9cce9d8c..0c3a3d6c 100644 --- a/src/service/rooms/edus/presence/mod.rs +++ b/src/service/rooms/edus/presence/mod.rs @@ -1,5 
+1,5 @@ mod data; -use std::{collections::HashMap, sync::Arc}; +use std::{collections::HashMap}; pub use data::Data; use ruma::{events::presence::PresenceEvent, RoomId, UserId}; diff --git a/src/service/rooms/edus/read_receipt/mod.rs b/src/service/rooms/edus/read_receipt/mod.rs index 8d6eaafd..3664fe9b 100644 --- a/src/service/rooms/edus/read_receipt/mod.rs +++ b/src/service/rooms/edus/read_receipt/mod.rs @@ -1,5 +1,5 @@ mod data; -use std::sync::Arc; + pub use data::Data; diff --git a/src/service/rooms/edus/typing/mod.rs b/src/service/rooms/edus/typing/mod.rs index fc06fe4a..3d8afe68 100644 --- a/src/service/rooms/edus/typing/mod.rs +++ b/src/service/rooms/edus/typing/mod.rs @@ -1,5 +1,5 @@ mod data; -use std::sync::Arc; + pub use data::Data; use ruma::{events::SyncEphemeralRoomEvent, RoomId, UserId}; diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index 2ed0bed0..4ef58fd0 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -1,7 +1,7 @@ mod data; use std::{ collections::{HashMap, HashSet}, - sync::{Arc, Mutex}, + sync::{Mutex}, }; pub use data::Data; diff --git a/src/service/rooms/metadata/mod.rs b/src/service/rooms/metadata/mod.rs index df9f40a8..1a36010f 100644 --- a/src/service/rooms/metadata/mod.rs +++ b/src/service/rooms/metadata/mod.rs @@ -1,5 +1,5 @@ mod data; -use std::sync::Arc; + pub use data::Data; use ruma::RoomId; diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs index 443abd19..2b5976ce 100644 --- a/src/service/rooms/outlier/mod.rs +++ b/src/service/rooms/outlier/mod.rs @@ -1,5 +1,5 @@ mod data; -use std::sync::Arc; + pub use data::Data; use ruma::{signatures::CanonicalJsonObject, EventId}; diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index 80356303..d15e42ef 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -1,5 +1,5 @@ mod data; -use std::sync::Arc; + pub use data::Data; diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs index 0148399b..b7e2c00f 100644 --- a/src/service/rooms/user/mod.rs +++ b/src/service/rooms/user/mod.rs @@ -1,5 +1,5 @@ mod data; -use std::sync::Arc; + pub use data::Data; use ruma::{RoomId, UserId}; diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index b67f1e28..f9e81d63 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -5,7 +5,6 @@ pub use data::Data; use std::{ collections::{BTreeMap, HashMap, HashSet}, fmt::Debug, - iter, sync::Arc, time::{Duration, Instant}, }; @@ -13,7 +12,7 @@ use std::{ use crate::{ api::{appservice_server, server_server}, services, - utils::{self, calculate_hash}, + utils::{calculate_hash}, Config, Error, PduEvent, Result, }; use federation::transactions::send_transaction_message; diff --git a/src/service/transaction_ids/mod.rs b/src/service/transaction_ids/mod.rs index 509b65c0..0cc30dbb 100644 --- a/src/service/transaction_ids/mod.rs +++ b/src/service/transaction_ids/mod.rs @@ -1,5 +1,5 @@ mod data; -use std::sync::Arc; + pub use data::Data; diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index f8addcc5..1170193f 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -1,5 +1,5 @@ mod data; -use std::sync::Arc; + pub use data::Data; diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index e3419e75..4bcb183b 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -1,5 +1,5 @@ mod data; -use 
std::{collections::BTreeMap, mem, sync::Arc}; +use std::{collections::BTreeMap, mem}; pub use data::Data; use ruma::{ From 8b5b7a1f637322e651c84d11a7b1616c7e29952e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 8 Oct 2022 13:57:01 +0200 Subject: [PATCH 396/445] fix: panic on launch Now we start the admin and sending threads at a later time. --- src/api/client_server/directory.rs | 11 +++++---- src/database/mod.rs | 9 ++++---- src/main.rs | 2 +- src/service/account_data/mod.rs | 2 +- src/service/admin/mod.rs | 20 ++++++++++------- src/service/appservice/mod.rs | 1 - src/service/key_backups/mod.rs | 2 +- src/service/rooms/alias/mod.rs | 1 - src/service/rooms/directory/mod.rs | 1 - src/service/rooms/edus/presence/mod.rs | 2 +- src/service/rooms/edus/read_receipt/mod.rs | 1 - src/service/rooms/edus/typing/mod.rs | 1 - src/service/rooms/lazy_loading/mod.rs | 2 +- src/service/rooms/metadata/mod.rs | 1 - src/service/rooms/outlier/mod.rs | 1 - src/service/rooms/search/mod.rs | 1 - src/service/rooms/user/mod.rs | 1 - src/service/sending/mod.rs | 26 +++++++++++----------- src/service/transaction_ids/mod.rs | 1 - src/service/uiaa/mod.rs | 1 - 20 files changed, 40 insertions(+), 47 deletions(-) diff --git a/src/api/client_server/directory.rs b/src/api/client_server/directory.rs index c1b0eda5..90f79a02 100644 --- a/src/api/client_server/directory.rs +++ b/src/api/client_server/directory.rs @@ -29,7 +29,7 @@ use ruma::{ }, ServerName, UInt, }; -use tracing::{info, warn}; +use tracing::{error, info, warn}; /// # `POST /_matrix/client/r0/publicRooms` /// @@ -279,15 +279,14 @@ pub(crate) async fn get_public_rooms_filtered_helper( JoinRule::Knock => Some(PublicRoomJoinRule::Knock), _ => None, }) - .map_err(|_| { - Error::bad_database("Invalid room join rule event in database.") + .map_err(|e| { + error!("Invalid room join rule event in database: {}", e); + Error::BadDatabase("Invalid room join rule event in database.") }) }) .transpose()? 
.flatten() - .ok_or(Error::bad_database( - "Invalid room join rule event in database.", - ))?, + .ok_or_else(|| Error::bad_database("Missing room join rule event for room."))?, room_id, }; Ok(chunk) diff --git a/src/database/mod.rs b/src/database/mod.rs index 882455f8..967ec885 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -1,10 +1,7 @@ pub mod abstraction; pub mod key_value; -use crate::{ - services, utils, Config, Error, - PduEvent, Result, Services, SERVICES, -}; +use crate::{services, utils, Config, Error, PduEvent, Result, Services, SERVICES}; use abstraction::KeyValueDatabaseEngine; use abstraction::KvTree; use directories::ProjectDirs; @@ -830,6 +827,8 @@ impl KeyValueDatabase { // This data is probably outdated db.presenceid_presence.clear()?; + services().admin.start_handler(); + // Set emergency access for the conduit user match set_emergency_access() { Ok(pwd_set) => { @@ -846,6 +845,8 @@ impl KeyValueDatabase { } }; + services().sending.start_handler(); + Self::start_cleanup_task().await; Ok(()) diff --git a/src/main.rs b/src/main.rs index 1aad62bd..ce7e5785 100644 --- a/src/main.rs +++ b/src/main.rs @@ -28,7 +28,7 @@ use http::{ }; use opentelemetry::trace::{FutureExt, Tracer}; use ruma::api::{client::error::ErrorKind, IncomingRequest}; -use tokio::{signal}; +use tokio::signal; use tower::ServiceBuilder; use tower_http::{ cors::{self, CorsLayer}, diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs index 0387b139..f9c49b1a 100644 --- a/src/service/account_data/mod.rs +++ b/src/service/account_data/mod.rs @@ -8,7 +8,7 @@ use ruma::{ RoomId, UserId, }; -use std::{collections::HashMap}; +use std::collections::HashMap; use crate::Result; diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 8f33056f..218a4ea4 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -26,7 +26,7 @@ use ruma::{ EventId, RoomAliasId, RoomId, RoomName, RoomVersionId, ServerName, UserId, }; use serde_json::value::to_raw_value; -use tokio::sync::{mpsc, MutexGuard}; +use tokio::sync::{mpsc, Mutex, MutexGuard}; use crate::{ api::client_server::{leave_all_rooms, AUTO_GEN_PASSWORD_LENGTH}, @@ -164,25 +164,29 @@ pub enum AdminRoomEvent { SendMessage(RoomMessageEventContent), } -#[derive(Clone)] pub struct Service { pub sender: mpsc::UnboundedSender, + receiver: Mutex>, } impl Service { pub fn build() -> Arc { let (sender, receiver) = mpsc::unbounded_channel(); - let self1 = Arc::new(Self { sender }); - let self2 = Arc::clone(&self1); + Arc::new(Self { + sender, + receiver: Mutex::new(receiver), + }) + } + pub fn start_handler(self: &Arc) { + let self2 = Arc::clone(&self); tokio::spawn(async move { - self2.start_handler(receiver).await; + self2.handler().await; }); - - self1 } - async fn start_handler(&self, mut receiver: mpsc::UnboundedReceiver) { + async fn handler(&self) { + let mut receiver = self.receiver.lock().await; // TODO: Use futures when we have long admin commands //let mut futures = FuturesUnordered::new(); diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs index 17402f4c..3052964d 100644 --- a/src/service/appservice/mod.rs +++ b/src/service/appservice/mod.rs @@ -1,6 +1,5 @@ mod data; - pub use data::Data; use crate::Result; diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs index 51117cd5..fef46130 100644 --- a/src/service/key_backups/mod.rs +++ b/src/service/key_backups/mod.rs @@ -7,7 +7,7 @@ use ruma::{ serde::Raw, RoomId, UserId, }; -use std::{collections::BTreeMap}; 
+use std::collections::BTreeMap; pub struct Service { pub db: &'static dyn Data, diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index 600a1201..6b52549a 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -1,6 +1,5 @@ mod data; - pub use data::Data; use crate::Result; diff --git a/src/service/rooms/directory/mod.rs b/src/service/rooms/directory/mod.rs index fcc0003d..0c1b2cd4 100644 --- a/src/service/rooms/directory/mod.rs +++ b/src/service/rooms/directory/mod.rs @@ -1,6 +1,5 @@ mod data; - pub use data::Data; use ruma::RoomId; diff --git a/src/service/rooms/edus/presence/mod.rs b/src/service/rooms/edus/presence/mod.rs index 0c3a3d6c..36814309 100644 --- a/src/service/rooms/edus/presence/mod.rs +++ b/src/service/rooms/edus/presence/mod.rs @@ -1,5 +1,5 @@ mod data; -use std::{collections::HashMap}; +use std::collections::HashMap; pub use data::Data; use ruma::{events::presence::PresenceEvent, RoomId, UserId}; diff --git a/src/service/rooms/edus/read_receipt/mod.rs b/src/service/rooms/edus/read_receipt/mod.rs index 3664fe9b..1b3ddb12 100644 --- a/src/service/rooms/edus/read_receipt/mod.rs +++ b/src/service/rooms/edus/read_receipt/mod.rs @@ -1,6 +1,5 @@ mod data; - pub use data::Data; use crate::Result; diff --git a/src/service/rooms/edus/typing/mod.rs b/src/service/rooms/edus/typing/mod.rs index 3d8afe68..d05ec900 100644 --- a/src/service/rooms/edus/typing/mod.rs +++ b/src/service/rooms/edus/typing/mod.rs @@ -1,6 +1,5 @@ mod data; - pub use data::Data; use ruma::{events::SyncEphemeralRoomEvent, RoomId, UserId}; diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index 4ef58fd0..b30bb9c1 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -1,7 +1,7 @@ mod data; use std::{ collections::{HashMap, HashSet}, - sync::{Mutex}, + sync::Mutex, }; pub use data::Data; diff --git a/src/service/rooms/metadata/mod.rs b/src/service/rooms/metadata/mod.rs index 1a36010f..c99ae4a2 100644 --- a/src/service/rooms/metadata/mod.rs +++ b/src/service/rooms/metadata/mod.rs @@ -1,6 +1,5 @@ mod data; - pub use data::Data; use ruma::RoomId; diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs index 2b5976ce..c84e975a 100644 --- a/src/service/rooms/outlier/mod.rs +++ b/src/service/rooms/outlier/mod.rs @@ -1,6 +1,5 @@ mod data; - pub use data::Data; use ruma::{signatures::CanonicalJsonObject, EventId}; diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs index d15e42ef..b6f35e79 100644 --- a/src/service/rooms/search/mod.rs +++ b/src/service/rooms/search/mod.rs @@ -1,6 +1,5 @@ mod data; - pub use data::Data; use crate::Result; diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs index b7e2c00f..479e5568 100644 --- a/src/service/rooms/user/mod.rs +++ b/src/service/rooms/user/mod.rs @@ -1,6 +1,5 @@ mod data; - pub use data::Data; use ruma::{RoomId, UserId}; diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index f9e81d63..60fc6f46 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -12,7 +12,7 @@ use std::{ use crate::{ api::{appservice_server, server_server}, services, - utils::{calculate_hash}, + utils::calculate_hash, Config, Error, PduEvent, Result, }; use federation::transactions::send_transaction_message; @@ -37,7 +37,7 @@ use ruma::{ }; use tokio::{ select, - sync::{mpsc, Semaphore}, + sync::{mpsc, Mutex, Semaphore}, }; use tracing::{error, warn}; @@ -88,6 
+88,7 @@ pub struct Service { /// The state for a given state hash. pub(super) maximum_requests: Arc, pub sender: mpsc::UnboundedSender<(OutgoingKind, SendingEventType, Vec)>, + receiver: Mutex)>>, } enum TransactionStatus { @@ -99,25 +100,24 @@ enum TransactionStatus { impl Service { pub fn build(db: &'static dyn Data, config: &Config) -> Arc { let (sender, receiver) = mpsc::unbounded_channel(); - - let self1 = Arc::new(Self { + Arc::new(Self { db, sender, + receiver: Mutex::new(receiver), maximum_requests: Arc::new(Semaphore::new(config.max_concurrent_requests as usize)), - }); - let self2 = Arc::clone(&self1); + }) + } + pub fn start_handler(self: &Arc) { + let self2 = Arc::clone(&self); tokio::spawn(async move { - self2.start_handler(receiver).await.unwrap(); + self2.handler().await.unwrap(); }); - - self1 } - async fn start_handler( - &self, - mut receiver: mpsc::UnboundedReceiver<(OutgoingKind, SendingEventType, Vec)>, - ) -> Result<()> { + async fn handler(&self) -> Result<()> { + let mut receiver = self.receiver.lock().await; + let mut futures = FuturesUnordered::new(); let mut current_transaction_status = HashMap::::new(); diff --git a/src/service/transaction_ids/mod.rs b/src/service/transaction_ids/mod.rs index 0cc30dbb..2fa3b02e 100644 --- a/src/service/transaction_ids/mod.rs +++ b/src/service/transaction_ids/mod.rs @@ -1,6 +1,5 @@ mod data; - pub use data::Data; use crate::Result; diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index 1170193f..e827cc89 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -1,6 +1,5 @@ mod data; - pub use data::Data; use ruma::{ From 25c3d89f281d101fcb904abd53d7c364cbe96e83 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 8 Oct 2022 15:32:34 +0200 Subject: [PATCH 397/445] Bump rust version for const fn RwLock::new --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index b88674dd..d5f3c9dd 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -7,7 +7,7 @@ homepage = "https://conduit.rs" repository = "https://gitlab.com/famedly/conduit" readme = "README.md" version = "0.3.0-next" -rust-version = "1.56" +rust-version = "1.63" edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html From 5a04559cb47ed14ce23f1a88a52c8c908a45001a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 9 Oct 2022 11:26:08 +0200 Subject: [PATCH 398/445] fix: maintain server list again --- src/database/key_value/rooms/state_cache.rs | 30 +++++++++++++++++++++ src/service/rooms/state_cache/mod.rs | 8 ------ 2 files changed, 30 insertions(+), 8 deletions(-) diff --git a/src/database/key_value/rooms/state_cache.rs b/src/database/key_value/rooms/state_cache.rs index 4ca6ac40..cbc05764 100644 --- a/src/database/key_value/rooms/state_cache.rs +++ b/src/database/key_value/rooms/state_cache.rs @@ -124,6 +124,36 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { .unwrap() .insert(room_id.to_owned(), Arc::new(real_users)); + for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { + if !joined_servers.remove(&old_joined_server) { + // Server not in room anymore + let mut roomserver_id = room_id.as_bytes().to_vec(); + roomserver_id.push(0xff); + roomserver_id.extend_from_slice(old_joined_server.as_bytes()); + + let mut serverroom_id = old_joined_server.as_bytes().to_vec(); + serverroom_id.push(0xff); + serverroom_id.extend_from_slice(room_id.as_bytes()); + + 
self.roomserverids.remove(&roomserver_id)?; + self.serverroomids.remove(&serverroom_id)?; + } + } + + // Now only new servers are in joined_servers anymore + for server in joined_servers { + let mut roomserver_id = room_id.as_bytes().to_vec(); + roomserver_id.push(0xff); + roomserver_id.extend_from_slice(server.as_bytes()); + + let mut serverroom_id = server.as_bytes().to_vec(); + serverroom_id.push(0xff); + serverroom_id.extend_from_slice(room_id.as_bytes()); + + self.roomserverids.insert(&roomserver_id, &[])?; + self.serverroomids.insert(&serverroom_id, &[])?; + } + self.appservice_in_room_cache .write() .unwrap() diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 2b4762ae..9431d3a1 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -39,14 +39,6 @@ impl Service { // TODO: displayname, avatar url } - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(user_id.server_name().as_bytes()); - - let mut serverroom_id = user_id.server_name().as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - match &membership { MembershipState::Join => { // Check if the user never joined this room From 1a7893dbbd22f311d9cbce071d77d5990a8a1711 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 9 Oct 2022 13:15:26 +0200 Subject: [PATCH 399/445] fix: update state_cache on join over federation --- src/api/client_server/membership.rs | 15 ++++++--------- .../key_value/rooms/state_accessor.rs | 6 +++--- src/service/rooms/event_handler/mod.rs | 7 ++----- src/service/rooms/state/data.rs | 2 +- src/service/rooms/state/mod.rs | 19 ++++--------------- src/service/rooms/state_compressor/mod.rs | 12 ++++++------ 6 files changed, 22 insertions(+), 39 deletions(-) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index d971e6b7..a91d079a 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -669,24 +669,21 @@ async fn join_room_by_id_helper( .add_pdu_outlier(&event_id, &value)?; } - let statehash_before_join = services().rooms.state.set_event_state( - event_id, + let (statehash_before_join, new, removed) = services().rooms.state_compressor.save_state( room_id, state .into_iter() - .map(|(k, id)| { - services() - .rooms - .state_compressor - .compress_state_event(k, &id) - }) + .map(|(k, id)| services().rooms.state_compressor.compress_state_event(k, &id)) .collect::>()?, )?; services() .rooms .state - .set_room_state(room_id, statehash_before_join, &state_lock)?; + .force_state(room_id, statehash_before_join, new, removed, &state_lock) + .await?; + + services().rooms.state_cache.update_joined_count(room_id)?; // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. 
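For readers following the join_room_by_id_helper change above: with this patch the federation join path no longer calls state.set_event_state. Instead, the compressed state is saved, the resulting diff against the previous room state is forced into the room, and the joined member/server caches are recalculated. A minimal sketch of the new sequence, using only names that appear in the hunks of this patch (error handling and the enclosing function are elided, so this is illustrative rather than an exact copy of the final code):

    // Compress the state events fetched during the federated join.
    let state_ids_compressed = state
        .into_iter()
        .map(|(k, id)| services().rooms.state_compressor.compress_state_event(k, &id))
        .collect::<Result<HashSet<_>>>()?;

    // save_state now returns the new shortstatehash *and* the diff
    // (added/removed compressed events) against the previous room state.
    let (statehash_before_join, new, removed) = services()
        .rooms
        .state_compressor
        .save_state(room_id, state_ids_compressed)?;

    // force_state applies that diff while the per-room state mutex is held.
    services()
        .rooms
        .state
        .force_state(room_id, statehash_before_join, new, removed, &state_lock)
        .await?;

    // Refresh the joined-member (and, together with the state_cache change a
    // couple of patches back, the server-list) caches from the updated state.
    services().rooms.state_cache.update_joined_count(room_id)?;

Because save_state reports exactly which compressed events were added and removed, force_state can work from that diff instead of re-deriving it, and update_joined_count then rebuilds the joined member and server caches from the new state, which is what keeps them consistent after a join over federation.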
diff --git a/src/database/key_value/rooms/state_accessor.rs b/src/database/key_value/rooms/state_accessor.rs index 39c261f3..70e59acb 100644 --- a/src/database/key_value/rooms/state_accessor.rs +++ b/src/database/key_value/rooms/state_accessor.rs @@ -23,7 +23,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { let parsed = services() .rooms .state_compressor - .parse_compressed_state_event(compressed)?; + .parse_compressed_state_event(&compressed)?; result.insert(parsed.0, parsed.1); i += 1; @@ -52,7 +52,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { let (_, eventid) = services() .rooms .state_compressor - .parse_compressed_state_event(compressed)?; + .parse_compressed_state_event(&compressed)?; if let Some(pdu) = services().rooms.timeline.get_pdu(&eventid)? { result.insert( ( @@ -104,7 +104,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { services() .rooms .state_compressor - .parse_compressed_state_event(compressed) + .parse_compressed_state_event(&compressed) .ok() .map(|(_, id)| id) })) diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index e5f8424b..cfe0fbf4 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -970,14 +970,11 @@ impl Service { // Set the new room state to the resolved state if update_state { info!("Forcing new room state"); - let sstatehash = services() + let (sstatehash, new, removed) = services() .rooms .state_compressor .save_state(room_id, new_room_state)?; - services() - .rooms - .state - .set_room_state(room_id, sstatehash, &state_lock)?; + services().rooms.state.force_state(room_id, sstatehash, new, removed, &state_lock).await?; } } diff --git a/src/service/rooms/state/data.rs b/src/service/rooms/state/data.rs index 3aa49146..8e80b5e3 100644 --- a/src/service/rooms/state/data.rs +++ b/src/service/rooms/state/data.rs @@ -8,7 +8,7 @@ pub trait Data: Send + Sync { /// Returns the last state hash key added to the db for the given room. fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result>; - /// Update the current state of the room. + /// Set the state hash to a new version, but does not update state_cache. fn set_room_state( &self, room_id: &RoomId, diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 7b8b0fde..15fa79b8 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -34,23 +34,13 @@ impl Service { shortstatehash: u64, statediffnew: HashSet, _statediffremoved: HashSet, + state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()> { - let mutex_state = Arc::clone( - services() - .globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.to_owned()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - for event_id in statediffnew.into_iter().filter_map(|new| { services() .rooms .state_compressor - .parse_compressed_state_event(new) + .parse_compressed_state_event(&new) .ok() .map(|(_, id)| id) }) { @@ -105,8 +95,6 @@ impl Service { self.db .set_room_state(room_id, shortstatehash, &state_lock)?; - drop(state_lock); - Ok(()) } @@ -312,6 +300,7 @@ impl Service { Ok(state) } + /// Set the state hash to a new version, but does not update state_cache. 
#[tracing::instrument(skip(self))] pub fn set_room_state( &self, @@ -412,7 +401,7 @@ impl Service { services() .rooms .state_compressor - .parse_compressed_state_event(compressed) + .parse_compressed_state_event(&compressed) .ok() }) .filter_map(|(shortstatekey, event_id)| { diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index b927cb72..bcd3b9a1 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -100,7 +100,7 @@ impl Service { /// Returns shortstatekey, event id pub fn parse_compressed_state_event( &self, - compressed_event: CompressedStateEvent, + compressed_event: &CompressedStateEvent, ) -> Result<(u64, Arc)> { Ok(( utils::u64_from_bytes(&compressed_event[0..size_of::()]) @@ -246,12 +246,12 @@ impl Service { Ok(()) } - /// Returns the new shortstatehash + /// Returns the new shortstatehash, and the state diff from the previous room state pub fn save_state( &self, room_id: &RoomId, new_state_ids_compressed: HashSet, - ) -> Result { + ) -> Result<(u64, HashSet, HashSet)> { let previous_shortstatehash = services().rooms.state.get_room_shortstatehash(room_id)?; let state_hash = utils::calculate_hash( @@ -267,7 +267,7 @@ impl Service { .get_or_create_shortstatehash(&state_hash)?; if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(new_shortstatehash); + return Ok((new_shortstatehash, HashSet::new(), HashSet::new())); } let states_parents = previous_shortstatehash @@ -295,12 +295,12 @@ impl Service { self.save_state_from_diff( new_shortstatehash, statediffnew.clone(), - statediffremoved, + statediffremoved.clone(), 2, // every state change is 2 event changes on average states_parents, )?; }; - Ok(new_shortstatehash) + Ok((new_shortstatehash, statediffnew, statediffremoved)) } } From 275c6b447d9a3a2bbdc579de77317f9b27d289fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 9 Oct 2022 15:34:36 +0200 Subject: [PATCH 400/445] Bump some dependencies --- Cargo.lock | 1078 +++++++++++++++------------------ Cargo.toml | 40 +- src/api/client_server/voip.rs | 2 +- src/main.rs | 2 +- src/service/globals/mod.rs | 6 +- src/service/media/mod.rs | 9 +- 6 files changed, 511 insertions(+), 626 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c074c760..2583c524 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8,12 +8,6 @@ version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" -[[package]] -name = "adler32" -version = "1.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" - [[package]] name = "ahash" version = "0.7.6" @@ -27,42 +21,33 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "0.7.18" +version = "0.7.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" +checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e" dependencies = [ "memchr", ] [[package]] name = "alloc-no-stdlib" -version = "2.0.3" +version = "2.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35ef4730490ad1c4eae5c4325b2a95f521d023e5c885853ff7aca0a6a1631db3" +checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3" [[package]] name = "alloc-stdlib" -version = "0.2.1" +version = "0.2.2" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "697ed7edc0f1711de49ce108c541623a0af97c6c60b2f6e2b65229847ac843c2" +checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece" dependencies = [ "alloc-no-stdlib", ] -[[package]] -name = "ansi_term" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" -dependencies = [ - "winapi", -] - [[package]] name = "arc-swap" -version = "1.5.0" +version = "1.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5d78ce20460b82d3fa150275ed9d55e21064fc7951177baacf86a145c4a4b1f" +checksum = "983cd8b9d4b02a6dc6ffa557262eb5858a27a0038ffffe21a0f133eaa819a164" [[package]] name = "arrayref" @@ -72,9 +57,9 @@ checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" [[package]] name = "arrayvec" -version = "0.5.2" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" +checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" [[package]] name = "assign" @@ -84,9 +69,9 @@ checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" [[package]] name = "async-compression" -version = "0.3.14" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "345fd392ab01f746c717b1357165b76f0b67a60192007b234058c9045fdcf695" +checksum = "942c7cd7ae39e91bde4820d74132e9862e62c2f386c3aa90ccf55949f5bad63a" dependencies = [ "brotli", "flate2", @@ -124,9 +109,9 @@ checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] name = "axum" -version = "0.5.8" +version = "0.5.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8b4d4f9a5ca8b1ab8de59e663e68c6207059239373ca72980f5be7ab81231f74" +checksum = "c9e3356844c4d6a6d6467b8da2cffb4a2820be256f50a3a386c9d152bab31043" dependencies = [ "async-trait", "axum-core", @@ -156,9 +141,9 @@ dependencies = [ [[package]] name = "axum-core" -version = "0.2.6" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf4d047478b986f14a13edad31a009e2e05cb241f9805d0d75e4cba4e129ad4d" +checksum = "d9f0c0a60006f2a293d82d571f635042a72edf927539b7685bd62d361963839b" dependencies = [ "async-trait", "bytes", @@ -166,13 +151,15 @@ dependencies = [ "http", "http-body", "mime", + "tower-layer", + "tower-service", ] [[package]] name = "axum-server" -version = "0.4.0" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "abf18303ef7e23b045301555bf8a0dfbc1444ea1a37b3c81757a32680ace4d7d" +checksum = "87ba6170b61f7b086609dabcae68d2e07352539c6ef04a7c82980bdfa01a159d" dependencies = [ "arc-swap", "bytes", @@ -182,18 +169,12 @@ dependencies = [ "hyper", "pin-project-lite", "rustls", - "rustls-pemfile 1.0.0", + "rustls-pemfile 1.0.1", "tokio", "tokio-rustls", "tower-service", ] -[[package]] -name = "base64" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" - [[package]] name = "base64" version = "0.13.0" @@ -211,9 +192,9 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.59.2" +version = "0.60.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" +checksum = "062dddbc1ba4aca46de6338e2bf87771414c335f7b2f2036e8f3e9befebf88e6" dependencies = [ "bitflags", "cexpr", @@ -236,9 +217,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "blake2b_simd" -version = "0.5.11" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afa748e348ad3be8263be728124b24a24f268266f6f5d58af9d75f6a40b5c587" +checksum = "72936ee4afc7f8f736d1c38383b56480b5497b4617b4a77bdbf1d2ababc76127" dependencies = [ "arrayref", "arrayvec", @@ -256,9 +237,9 @@ dependencies = [ [[package]] name = "block-buffer" -version = "0.10.2" +version = "0.10.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bf7fe51849ea569fd452f37822f606a5cabb684dc918707a0193fd4664ff324" +checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" dependencies = [ "generic-array", ] @@ -286,15 +267,15 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.10.0" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37ccbd214614c6783386c1af30caf03192f17891059cecc394b4fb119e363de3" +checksum = "c1ad822118d20d2c234f427000d5acc36eabe1e29a348c89b63dd60b13f28e5d" [[package]] name = "bytemuck" -version = "1.9.1" +version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdead85bdec19c194affaeeb670c0e41fe23de31459efd1c174d049269cf02cc" +checksum = "2f5715e491b5a1598fc2bef5a606847b5dc1d48ea625bd3c02c00de8285591da" [[package]] name = "byteorder" @@ -304,9 +285,20 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.1.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" +checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" + +[[package]] +name = "bzip2-sys" +version = "0.1.11+1.0.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc" +dependencies = [ + "cc", + "libc", + "pkg-config", +] [[package]] name = "cc" @@ -326,36 +318,17 @@ dependencies = [ "nom", ] -[[package]] -name = "cfg-if" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" - [[package]] name = "cfg-if" version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" -[[package]] -name = "chrono" -version = "0.4.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" -dependencies = [ - "libc", - "num-integer", - "num-traits", - "time", - "winapi", -] - [[package]] name = "clang-sys" -version = "1.3.3" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a050e2153c5be08febd6734e29298e844fdb0fa21aeddd63b4eb7baa106c69b" +checksum = "fa2e27ae6ab525c3d369ded447057bca5438d86dc3a68f6faafb8269ba82ebf3" dependencies = [ "glob", "libc", @@ -364,23 +337,21 @@ dependencies = [ [[package]] name = "clap" -version = "3.2.5" +version = "4.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"d53da17d37dba964b9b3ecb5c5a1f193a2762c700e6829201e645b9381c99dc7" +checksum = "4ed45cc2c62a3eff523e718d8576ba762c83a3146151093283ac62ae11933a73" dependencies = [ "bitflags", "clap_derive", "clap_lex", - "indexmap", "once_cell", - "textwrap", ] [[package]] name = "clap_derive" -version = "3.2.5" +version = "4.0.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c11d40217d16aee8508cc8e5fde8b4ff24639758608e5374e731b53f85749fb9" +checksum = "db342ce9fda24fb191e2ed4e102055a4d381c1086a06630174cd8da8d5d917ce" dependencies = [ "heck", "proc-macro-error", @@ -391,9 +362,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.2.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5538cd660450ebeb4234cfecf8f2284b844ffc4c50531e66d584ad5b91293613" +checksum = "0d4198f73e42b4936b35b5bb248d81d2b595ecb170da0bac7655c54eedfa8da8" dependencies = [ "os_str_bytes", ] @@ -411,7 +382,7 @@ dependencies = [ "async-trait", "axum", "axum-server", - "base64 0.13.0", + "base64", "bytes", "clap", "crossbeam", @@ -441,8 +412,7 @@ dependencies = [ "serde", "serde_json", "serde_yaml", - "sha-1 0.9.8", - "sled", + "sha-1", "thiserror", "thread_local", "threadpool", @@ -487,9 +457,9 @@ checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" [[package]] name = "cpufeatures" -version = "0.2.2" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59a6001667ab124aebae2a495118e11d30984c3a653e99d86d58971708cf5e4b" +checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" dependencies = [ "libc", ] @@ -515,117 +485,86 @@ version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] name = "crossbeam" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ae5588f6b3c3cb05239e90bd110f257254aecd01e4635400391aeae07497845" +checksum = "2801af0d36612ae591caa9568261fddce32ce6e08a7275ea334a06a4ad021a2c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crossbeam-channel", "crossbeam-deque", "crossbeam-epoch", - "crossbeam-queue 0.3.5", - "crossbeam-utils 0.8.9", + "crossbeam-queue", + "crossbeam-utils", ] [[package]] name = "crossbeam-channel" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c02a4d71819009c192cf4872265391563fd6a84c81ff2c0f2a7026ca4c1d85c" +checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" dependencies = [ - "cfg-if 1.0.0", - "crossbeam-utils 0.8.9", + "cfg-if", + "crossbeam-utils", ] [[package]] name = "crossbeam-deque" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" +checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crossbeam-epoch", - "crossbeam-utils 0.8.9", + "crossbeam-utils", ] [[package]] name = "crossbeam-epoch" -version = "0.9.9" +version = "0.9.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07db9d94cbd326813772c968ccd25999e5f8ae22f4f8d1b11effa37ef6ce281d" +checksum = "f916dfc5d356b0ed9dae65f1db9fc9770aa2851d2662b988ccf4fe3516e86348" dependencies = [ "autocfg", - "cfg-if 1.0.0", - 
"crossbeam-utils 0.8.9", + "cfg-if", + "crossbeam-utils", "memoffset", - "once_cell", "scopeguard", ] [[package]] name = "crossbeam-queue" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c979cd6cfe72335896575c6b5688da489e420d36a27a0b9eb0c73db574b4a4b" -dependencies = [ - "crossbeam-utils 0.6.6", -] - -[[package]] -name = "crossbeam-queue" -version = "0.3.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f25d8400f4a7a5778f0e4e52384a48cbd9b5c495d110786187fc750075277a2" -dependencies = [ - "cfg-if 1.0.0", - "crossbeam-utils 0.8.9", -] - -[[package]] -name = "crossbeam-utils" -version = "0.6.6" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04973fa96e96579258a5091af6003abde64af786b860f18622b82e026cca60e6" +checksum = "1cd42583b04998a5363558e5f9291ee5a5ff6b49944332103f251e7479a82aa7" dependencies = [ - "cfg-if 0.1.10", - "lazy_static", + "cfg-if", + "crossbeam-utils", ] [[package]] name = "crossbeam-utils" -version = "0.8.9" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ff1f980957787286a554052d03c7aee98d99cc32e09f6d45f0a814133c87978" +checksum = "edbafec5fa1f196ca66527c1b12c2ec4745ca14b50f1ad8f9f6f720b55d11fac" dependencies = [ - "cfg-if 1.0.0", - "once_cell", + "cfg-if", ] [[package]] name = "crypto-common" -version = "0.1.3" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57952ca27b5e3606ff4dd79b0020231aaf9d6aa76dc05fd30137538c50bd3ce8" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ "generic-array", "typenum", ] -[[package]] -name = "crypto-mac" -version = "0.11.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1d1a86f49236c215f271d40892d5fc950490551400b02ef360692c29815c714" -dependencies = [ - "generic-array", - "subtle", -] - [[package]] name = "curve25519-dalek" version = "3.2.1" @@ -645,16 +584,6 @@ version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" -[[package]] -name = "deflate" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73770f8e1fe7d64df17ca66ad28994a0a623ea497fa69486e14984e715c5d174" -dependencies = [ - "adler32", - "byteorder", -] - [[package]] name = "der" version = "0.4.5" @@ -675,12 +604,13 @@ dependencies = [ [[package]] name = "digest" -version = "0.10.3" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2fb860ca6fafa5552fb6d0e816a69c8e49f0908bf524e30a90d97c85892d506" +checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c" dependencies = [ - "block-buffer 0.10.2", + "block-buffer 0.10.3", "crypto-common", + "subtle", ] [[package]] @@ -728,9 +658,9 @@ dependencies = [ [[package]] name = "either" -version = "1.6.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" +checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" [[package]] name = "encoding_rs" @@ -738,14 +668,14 @@ version = "0.8.31" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9852635589dc9f9ea1b6fe9f05b50ef208c85c834a562f0c6abb1c475736ec2b" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] name = 
"enum-as-inner" -version = "0.3.4" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "570d109b813e904becc80d8d5da38376818a143348413f7149f1340fe04754d4" +checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" dependencies = [ "heck", "proc-macro2", @@ -767,9 +697,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "figment" -version = "0.10.6" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "790b4292c72618abbab50f787a477014fe15634f96291de45672ce46afe122df" +checksum = "4e56602b469b2201400dec66a66aec5a9b8761ee97cd1b8c96ab2483fcc16cc9" dependencies = [ "atomic", "pear", @@ -786,7 +716,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f82b0f4c27ad9f8bfd1f3208d882da2b09c301bc1c828fd3a00d0216d2fbbff6" dependencies = [ "crc32fast", - "miniz_oxide 0.5.3", + "miniz_oxide", ] [[package]] @@ -797,11 +727,10 @@ checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "form_urlencoded" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" +checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" dependencies = [ - "matches", "percent-encoding", ] @@ -823,9 +752,9 @@ checksum = "2022715d62ab30faffd124d40b76f4134a550a87792276512b18d63272333394" [[package]] name = "futures" -version = "0.3.21" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f73fe65f54d1e12b726f517d3e2135ca3125a437b6d998caf1962961f7172d9e" +checksum = "7f21eda599937fba36daeb58a22e8f5cee2d14c4a17b5b7739c7c8e5e3b8230c" dependencies = [ "futures-channel", "futures-core", @@ -838,9 +767,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.21" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3083ce4b914124575708913bca19bfe887522d6e2e6d0952943f5eac4a74010" +checksum = "30bdd20c28fadd505d0fd6712cdfcb0d4b5648baf45faef7f852afb2399bb050" dependencies = [ "futures-core", "futures-sink", @@ -848,15 +777,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.21" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c09fd04b7e4073ac7156a9539b57a484a8ea920f79c7c675d05d289ab6110d3" +checksum = "4e5aa3de05362c3fb88de6531e6296e85cde7739cccad4b9dfeeb7f6ebce56bf" [[package]] name = "futures-executor" -version = "0.3.21" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9420b90cfa29e327d0429f19be13e7ddb68fa1cccb09d65e5706b8c7a749b8a6" +checksum = "9ff63c23854bee61b6e9cd331d523909f238fc7636290b96826e9cfa5faa00ab" dependencies = [ "futures-core", "futures-task", @@ -865,15 +794,15 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.21" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc4045962a5a5e935ee2fdedaa4e08284547402885ab326734432bed5d12966b" +checksum = "bbf4d2a7a308fd4578637c0b17c7e1c7ba127b8f6ba00b29f717e9655d85eb68" [[package]] name = "futures-macro" -version = "0.3.21" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33c1e13800337f4d4d7a316bf45a567dbcb6ffe087f16424852d97e97a91f512" +checksum = 
"42cd15d1c7456c04dbdf7e88bcd69760d74f3a798d6444e16974b505b0e62f17" dependencies = [ "proc-macro2", "quote", @@ -882,21 +811,21 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.21" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21163e139fa306126e6eedaf49ecdb4588f939600f0b1e770f4205ee4b7fa868" +checksum = "21b20ba5a92e727ba30e72834706623d94ac93a725410b6a6b6fbc1b07f7ba56" [[package]] name = "futures-task" -version = "0.3.21" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57c66a976bf5909d801bbef33416c41372779507e7a6b3a5e25e4749c58f776a" +checksum = "a6508c467c73851293f390476d4491cf4d227dbabcd4170f3bb6044959b294f1" [[package]] name = "futures-util" -version = "0.3.21" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8b7abd5d659d9b90c8cba917f6ec750a74e2dc23902ef9cd4cc8c8b22e6036a" +checksum = "44fb6cb1be61cc1d2e43b262516aafcf63b241cffdb1d3fa115f91d9c7b09c90" dependencies = [ "futures-channel", "futures-core", @@ -910,20 +839,11 @@ dependencies = [ "slab", ] -[[package]] -name = "fxhash" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" -dependencies = [ - "byteorder", -] - [[package]] name = "generic-array" -version = "0.14.5" +version = "0.14.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd48d33ec7f05fbfa152300fdad764757cbded343c1aa1cff2fbaf4134851803" +checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" dependencies = [ "typenum", "version_check", @@ -935,7 +855,7 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "wasi 0.9.0+wasi-snapshot-preview1", ] @@ -946,16 +866,16 @@ version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "wasi 0.11.0+wasi-snapshot-preview1", ] [[package]] name = "gif" -version = "0.11.3" +version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3a7187e78088aead22ceedeee99779455b23fc231fe13ec443f99bb71694e5b" +checksum = "3edd93c6756b4dfaf2709eafcc345ba2636565295c198a9cfbf75fa5e3e00b06" dependencies = [ "color_quant", "weezl", @@ -969,9 +889,9 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "h2" -version = "0.3.13" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "37a82c6d637fc9515a4694bbf1cb2457b79d81ce52b3108bdeea58b07dd34a57" +checksum = "5ca32592cf21ac7ccab1825cd87f6c9b3d9022c44d086172ed0966bec8af30be" dependencies = [ "bytes", "fnv", @@ -988,42 +908,36 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.11.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ "ahash", ] -[[package]] -name = "hashbrown" -version = "0.12.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"db0d4cf898abf0081f964436dc980e96670a0f36863e4b83aaacdb65c9d7ccc3" - [[package]] name = "hashlink" -version = "0.7.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7249a3129cbc1ffccd74857f81464a323a152173cdb134e0fd81bc803b29facf" +checksum = "69fe1fcf8b4278d860ad0548329f892a3631fb63f82574df68275f34cdbe0ffa" dependencies = [ - "hashbrown 0.11.2", + "hashbrown", ] [[package]] name = "headers" -version = "0.3.7" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4cff78e5788be1e0ab65b04d306b2ed5092c815ec97ec70f4ebd5aee158aa55d" +checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" dependencies = [ - "base64 0.13.0", + "base64", "bitflags", "bytes", "headers-core", "http", "httpdate", "mime", - "sha-1 0.10.0", + "sha1", ] [[package]] @@ -1088,12 +1002,11 @@ dependencies = [ [[package]] name = "hmac" -version = "0.11.0" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a2a2320eb7ec0ebe8da8f744d7812d9fc4cb4d09344ac01898dbcb6a20ae69b" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" dependencies = [ - "crypto-mac", - "digest 0.9.0", + "digest 0.10.5", ] [[package]] @@ -1137,9 +1050,9 @@ checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" [[package]] name = "httparse" -version = "1.7.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "496ce29bb5a52785b44e0f7ca2847ae0bb839c9bd28f69acac9b99d461c0c04c" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" @@ -1149,9 +1062,9 @@ checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "hyper" -version = "0.14.19" +version = "0.14.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42dc3c131584288d375f2d07f822b0cb012d8c6fb899a5b9fdb3cb7eb9b6004f" +checksum = "02c929dc5c39e335a03c405292728118860721b10190d98c2a0f0efd5baafbac" dependencies = [ "bytes", "futures-channel", @@ -1164,7 +1077,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.4", + "socket2", "tokio", "tower-service", "tracing", @@ -1195,18 +1108,27 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "idna" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + [[package]] name = "image" -version = "0.23.14" +version = "0.24.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24ffcb7e7244a9bf19d35bf2883b9c080c4ced3c07a9895572178cdb8f13f6a1" +checksum = "bd8e4fb07cf672b1642304e731ef8a6a4c7891d67bb4fd4f5ce58cd6ed86803c" dependencies = [ "bytemuck", "byteorder", "color_quant", "gif", "jpeg-decoder", - "num-iter", "num-rational", "num-traits", "png", @@ -1214,20 +1136,20 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.9.0" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c6392766afd7964e2531940894cffe4bd8d7d17dbc3c1c4857040fd4b33bdb3" +checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" dependencies = [ "autocfg", - "hashbrown 0.12.1", + "hashbrown", "serde", ] [[package]] name = "indoc" -version = "1.0.6" +version = "1.0.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "05a0bd019339e5d968b37855180087b7b9d512c5046fbd244cf8c95687927d6e" +checksum = "adab1eaa3408fb7f0c777a73e7465fd5656136fc93b670eb6df3c88c2c1344e3" [[package]] name = "inlinable_string" @@ -1235,31 +1157,22 @@ version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8fae54786f62fb2918dcfae3d568594e50eb9b5c25bf04371af6fe7516452fb" -[[package]] -name = "instant" -version = "0.1.12" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c" -dependencies = [ - "cfg-if 1.0.0", -] - [[package]] name = "integer-encoding" -version = "1.1.7" +version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48dc51180a9b377fd75814d0cc02199c20f8e99433d6762f650d39cdbbd3b56f" +checksum = "8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" [[package]] name = "ipconfig" -version = "0.2.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7e2f18aece9709094573a9f24f483c4f65caa4298e2f7ae1b71cc65d853fad7" +checksum = "723519edce41262b05d4143ceb95050e4c614f483e78e9fd9e39a8275a84ad98" dependencies = [ - "socket2 0.3.19", + "socket2", "widestring", "winapi", - "winreg 0.6.2", + "winreg", ] [[package]] @@ -1270,39 +1183,39 @@ checksum = "879d54834c8c76457ef4293a689b2a8c59b076067ad77b15efafbb05f92a592b" [[package]] name = "itertools" -version = "0.10.3" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9a9d19fa1e79b6215ff29b9d6880b706147f16e9b1dbb1e4e5947b5b02bc5e3" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" dependencies = [ "either", ] [[package]] name = "itoa" -version = "1.0.2" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "112c678d4050afce233f4f2852bb2eb519230b3cf12f33585275537d7e41578d" +checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc" [[package]] name = "jobserver" -version = "0.1.24" +version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af25a77299a7f711a01975c35a6a424eb6862092cc2d6c72c4ed6cbc56dfc1fa" +checksum = "068b1ee6743e4d11fb9c6a1e6064b3693a1b600e7f5f5988047d98b3dc9fb90b" dependencies = [ "libc", ] [[package]] name = "jpeg-decoder" -version = "0.1.22" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "229d53d58899083193af11e15917b5640cd40b29ff475a1fe4ef725deb02d0f2" +checksum = "9478aa10f73e7528198d75109c8be5cd7d15fb530238040148d5f9a22d4c5b3b" [[package]] name = "js-sys" -version = "0.3.58" +version = "0.3.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3fac17f7123a73ca62df411b1bf727ccc805daa070338fda671c86dac1bdc27" +checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47" dependencies = [ "wasm-bindgen", ] @@ -1318,11 +1231,11 @@ dependencies = [ [[package]] name = "jsonwebtoken" -version = "7.2.0" +version = "8.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afabcc15e437a6484fc4f12d0fd63068fe457bf93f1c148d3d9649c60b103f32" +checksum = "1aa4b4af834c6cfd35d8763d359661b90f2e45d8f750a0849156c7f4671af09c" dependencies = [ - "base64 0.12.3", + "base64", "pem", "ring", "serde", @@ -1344,9 +1257,9 @@ checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" 
[[package]] name = "libc" -version = "0.2.126" +version = "0.2.134" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "349d5a591cd28b49e1d1037471617a32ddcda5731b99419008085f72d5a53836" +checksum = "329c933548736bc49fd575ee68c89e8be4d260064184389a5b77517cddd99ffb" [[package]] name = "libloading" @@ -1354,27 +1267,41 @@ version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "efbc0f03f9a775e9f6aed295c6a1ba2253c5757a9e03d55c6caa46a681abcddd" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "winapi", ] [[package]] name = "librocksdb-sys" -version = "6.20.3" +version = "0.8.0+7.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c309a9d2470844aceb9a4a098cf5286154d20596868b75a6b36357d2bb9ca25d" +checksum = "611804e4666a25136fcc5f8cf425ab4d26c7f74ea245ffe92ea23b85b6420b5d" dependencies = [ "bindgen", + "bzip2-sys", "cc", "glob", "libc", + "libz-sys", + "zstd-sys", ] [[package]] name = "libsqlite3-sys" -version = "0.22.2" +version = "0.25.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290b64917f8b0cb885d9de0f9959fe1f775d7fa12f1da2db9001c1c8ab60f89d" +checksum = "9f0455f2c1bc9a7caa792907026e469c1d91761fb0ea37cbb16427c77280cf35" +dependencies = [ + "cc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "libz-sys" +version = "1.1.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9702761c3935f8cc2f101793272e202c72b99da8f4224a19ddcf1279a6450bbf" dependencies = [ "cc", "pkg-config", @@ -1383,9 +1310,9 @@ dependencies = [ [[package]] name = "linked-hash-map" -version = "0.5.4" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "lmdb-rkv-sys" @@ -1400,9 +1327,9 @@ dependencies = [ [[package]] name = "lock_api" -version = "0.4.7" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "327fa5b6a6940e4699ec49a9beae1ea4845c6bab9314e4f84ac68742139d8c53" +checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" dependencies = [ "autocfg", "scopeguard", @@ -1414,7 +1341,7 @@ version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] @@ -1440,9 +1367,9 @@ checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" [[package]] name = "matchers" -version = "0.0.1" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" dependencies = [ "regex-automata", ] @@ -1488,27 +1415,18 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "791daaae1ed6889560f8c4359194f56648355540573244a5448a83ba1ecc7435" -dependencies = [ - "adler32", -] - -[[package]] -name = "miniz_oxide" -version = "0.5.3" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f5c75688da582b8ffc1f1799e9db273f32133c49e048f614d22ec3256773ccc" +checksum = 
"96590ba8f175222643a85693f33d26e9c8a015f599c216509b1a6894af675d34" dependencies = [ "adler", ] [[package]] name = "mio" -version = "0.8.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "713d550d9b44d89174e066b7a6217ae06234c10cb47819a88290d2b353c31799" +checksum = "57ee1c23c7c63b0c9250c339ffdc69255f110b298b901b9f6c82547b7b87caaf" dependencies = [ "libc", "log", @@ -1527,42 +1445,41 @@ dependencies = [ ] [[package]] -name = "num-bigint" -version = "0.2.6" +name = "nu-ansi-term" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" dependencies = [ - "autocfg", - "num-integer", - "num-traits", + "overload", + "winapi", ] [[package]] -name = "num-integer" -version = "0.1.45" +name = "num-bigint" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" +checksum = "f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" dependencies = [ "autocfg", + "num-integer", "num-traits", ] [[package]] -name = "num-iter" -version = "0.1.43" +name = "num-integer" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d03e6c028c5dc5cac6e2dec0efda81fc887605bb3d884578bb6d6bf7514e252" +checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" dependencies = [ "autocfg", - "num-integer", "num-traits", ] [[package]] name = "num-rational" -version = "0.3.2" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12ac428b1cb17fce6f731001d307d351ec70a6d202fc2e60f7d4c5e42d8f4f07" +checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0" dependencies = [ "autocfg", "num-integer", @@ -1588,11 +1505,20 @@ dependencies = [ "libc", ] +[[package]] +name = "num_threads" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" +dependencies = [ + "libc", +] + [[package]] name = "once_cell" -version = "1.12.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7709cef83f0c1f58f666e746a08b21e0085f7440fa6a29cc194d68aac97a4225" +checksum = "e82dad04139b71a90c080c8463fe0dc7902db5192d939bd0950f074d014339e1" [[package]] name = "opaque-debug" @@ -1608,31 +1534,24 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "opentelemetry" -version = "0.16.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1cf9b1c4e9a6c4de793c632496fa490bdc0e1eea73f0c91394f7b6990935d22" +checksum = "69d6c3d7288a106c0a363e4b0e8d308058d56902adefb16f4936f417ffef086e" dependencies = [ - "async-trait", - "crossbeam-channel", - "futures", - "js-sys", - "lazy_static", - "percent-encoding", - "pin-project", - "rand 0.8.5", - "thiserror", - "tokio", - "tokio-stream", + "opentelemetry_api", + "opentelemetry_sdk", ] [[package]] name = "opentelemetry-jaeger" -version = "0.15.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db22f492873ea037bc267b35a0e8e4fb846340058cb7c864efe3d0bf23684593" +checksum = "1e785d273968748578931e4dc3b4f5ec86b26e09d9e0d66b55adda7fce742f7a" dependencies = [ "async-trait", - 
"lazy_static", + "futures", + "futures-executor", + "once_cell", "opentelemetry", "opentelemetry-semantic-conventions", "thiserror", @@ -1642,13 +1561,48 @@ dependencies = [ [[package]] name = "opentelemetry-semantic-conventions" -version = "0.8.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffeac823339e8b0f27b961f4385057bf9f97f2863bc745bd015fd6091f2270e9" +checksum = "9b02e0230abb0ab6636d18e2ba8fa02903ea63772281340ccac18e0af3ec9eeb" dependencies = [ "opentelemetry", ] +[[package]] +name = "opentelemetry_api" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c24f96e21e7acc813c7a8394ee94978929db2bcc46cf6b5014fc612bf7760c22" +dependencies = [ + "futures-channel", + "futures-util", + "indexmap", + "js-sys", + "once_cell", + "pin-project-lite", + "thiserror", +] + +[[package]] +name = "opentelemetry_sdk" +version = "0.18.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ca41c4933371b61c2a2f214bf16931499af4ec90543604ec828f7a625c09113" +dependencies = [ + "async-trait", + "crossbeam-channel", + "futures-channel", + "futures-executor", + "futures-util", + "once_cell", + "opentelemetry_api", + "percent-encoding", + "rand 0.8.5", + "thiserror", + "tokio", + "tokio-stream", +] + [[package]] name = "ordered-float" version = "1.1.1" @@ -1660,9 +1614,15 @@ dependencies = [ [[package]] name = "os_str_bytes" -version = "6.1.0" +version = "6.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ff7415e9ae3fff1225851df9e0d9e4e5479f947619774677a63572e55e80eff" + +[[package]] +name = "overload" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21326818e99cfe6ce1e524c2a805c189a99b5ae555a35d19f9a284b427d86afa" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" [[package]] name = "page_size" @@ -1676,34 +1636,32 @@ dependencies = [ [[package]] name = "parking_lot" -version = "0.11.2" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ - "instant", "lock_api", "parking_lot_core", ] [[package]] name = "parking_lot_core" -version = "0.8.5" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" +checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929" dependencies = [ - "cfg-if 1.0.0", - "instant", + "cfg-if", "libc", "redox_syscall", "smallvec", - "winapi", + "windows-sys", ] [[package]] name = "paste" -version = "1.0.7" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c520e05135d6e763148b6426a837e239041653ba7becd2e538c076c738025fc" +checksum = "b1de2e551fb905ac83f73f7aedf2f0cb4a0da7e35efa24a202a936269f1f18e1" [[package]] name = "pear" @@ -1736,26 +1694,24 @@ checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" [[package]] name = "pem" -version = "0.8.3" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd56cbd21fea48d0c440b41cd69c589faacade08c992d9a54e471b79d0fd13eb" +checksum = "03c64931a1a212348ec4f3b4362585eca7159d0d09cbdf4a7f74f02173596fd4" dependencies = [ - "base64 0.13.0", - "once_cell", - "regex", + "base64", ] 
[[package]] name = "percent-encoding" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" [[package]] name = "persy" -version = "1.2.6" +version = "1.3.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5af61053f1daed3ff0265fad7f924e43ce07642a336c79304f8e5aec205460fb" +checksum = "5511189f4dbd737283b0dd2ff6715f2e35fd0d3e1ddf953ed6a772e439e1f73f" dependencies = [ "crc", "data-encoding", @@ -1769,18 +1725,18 @@ dependencies = [ [[package]] name = "pin-project" -version = "1.0.10" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58ad3879ad3baf4e44784bc6a718a8698867bb991f8ce24d1bcbe2cfb4c3a75e" +checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.10" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "744b6f092ba29c3650faf274db506afd39944f48420f6c86b17cfe0ee1cb36bb" +checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2", "quote", @@ -1818,14 +1774,14 @@ checksum = "1df8c4ec4b0627e53bdf214615ad287367e482558cf84b109250b37464dc03ae" [[package]] name = "png" -version = "0.16.8" +version = "0.17.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c3287920cb847dee3de33d301c463fba14dda99db24214ddf93f83d3021f4c6" +checksum = "8f0e7f4c94ec26ff209cee506314212639d6c91b80afb82984819fafce9df01c" dependencies = [ "bitflags", "crc32fast", - "deflate", - "miniz_oxide 0.3.7", + "flate2", + "miniz_oxide", ] [[package]] @@ -1836,10 +1792,11 @@ checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" [[package]] name = "proc-macro-crate" -version = "1.1.3" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e17d47ce914bf4de440332250b0edd23ce48c005f59fab39d3335866b114f11a" +checksum = "eda0fc3b0fb7c975631757e14d9049da17374063edb6ebbcbc54d880d4fe94e9" dependencies = [ + "once_cell", "thiserror", "toml", ] @@ -1870,9 +1827,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.39" +version = "1.0.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c54b25569025b7fc9651de43004ae593a75ad88543b17178aa5e1b9c4f15f56f" +checksum = "94e2ef8dbfc347b10c094890f778ee2e36ca9bb4262e86dc99cd217e35f3470b" dependencies = [ "unicode-ident", ] @@ -1898,9 +1855,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.19" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f53dc8cf16a769a6f677e09e7ff2cd4be1ea0f48754aac39520536962011de0d" +checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179" dependencies = [ "proc-macro2", ] @@ -1926,7 +1883,7 @@ checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha 0.3.1", - "rand_core 0.6.3", + "rand_core 0.6.4", ] [[package]] @@ -1946,7 +1903,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.3", + "rand_core 
0.6.4", ] [[package]] @@ -1960,9 +1917,9 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ "getrandom 0.2.7", ] @@ -1978,9 +1935,9 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.2.13" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62f25bc4c7e55e0b0b7a1d43fb893f4fa1361d0abe38b9ce4f323c2adfe6ef42" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ "bitflags", ] @@ -1998,9 +1955,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.5.6" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d83f127d94bdbcda4c8cc2e50f6f84f4b611f69c902699ca385a39c3a75f9ff1" +checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b" dependencies = [ "aho-corasick", "memchr", @@ -2018,16 +1975,16 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.26" +version = "0.6.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49b3de9ec5dc0a3417da371aab17d729997c15010e7fd24ff707773a33bddb64" +checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244" [[package]] name = "reqwest" version = "0.11.9" source = "git+https://github.com/timokoesters/reqwest?rev=57b7cf4feb921573dfafad7d34b9ac6e44ead0bd#57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" dependencies = [ - "base64 0.13.0", + "base64", "bytes", "encoding_rs", "futures-core", @@ -2057,7 +2014,7 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "winreg 0.7.0", + "winreg", ] [[package]] @@ -2087,9 +2044,9 @@ dependencies = [ [[package]] name = "rocksdb" -version = "0.17.0" +version = "0.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a62eca5cacf2c8261128631bed9f045598d40bfbe4b29f5163f0f802f8f44a7" +checksum = "7e9562ea1d70c0cc63a34a22d977753b50cca91cc6b6527750463bd5dd8697bc" dependencies = [ "libc", "librocksdb-sys", @@ -2143,7 +2100,7 @@ name = "ruma-common" version = "0.8.0" source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" dependencies = [ - "base64 0.13.0", + "base64", "bytes", "form_urlencoded", "http", @@ -2222,7 +2179,7 @@ name = "ruma-signatures" version = "0.10.0" source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" dependencies = [ - "base64 0.13.0", + "base64", "ed25519-dalek", "pkcs8", "rand 0.7.3", @@ -2249,29 +2206,28 @@ dependencies = [ [[package]] name = "rusqlite" -version = "0.25.4" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c4b1eaf239b47034fb450ee9cdedd7d0226571689d8823030c4b6c2cb407152" +checksum = "01e213bc3ecb39ac32e81e51ebe31fd888a940515173e3a18a35f8c6e896422a" dependencies = [ "bitflags", "fallible-iterator", "fallible-streaming-iterator", "hashlink", "libsqlite3-sys", - "memchr", "smallvec", ] [[package]] name = "rust-argon2" -version = "0.8.3" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b18820d944b33caa75a71378964ac46f58517c92b6ae5f762636247c09e78fb" +checksum = 
"b50162d19404029c1ceca6f6980fe40d45c8b369f6f44446fa14bb39573b5bb9" dependencies = [ - "base64 0.13.0", + "base64", "blake2b_simd", "constant_time_eq", - "crossbeam-utils 0.8.9", + "crossbeam-utils", ] [[package]] @@ -2299,7 +2255,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0167bac7a9f490495f3c33013e7722b53cb087ecbe082fb0c6387c96f634ea50" dependencies = [ "openssl-probe", - "rustls-pemfile 1.0.0", + "rustls-pemfile 1.0.1", "schannel", "security-framework", ] @@ -2310,23 +2266,23 @@ version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5eebeaeb360c87bfb72e84abdb3447159c0eaececf1bef2aecd65a8be949d1c9" dependencies = [ - "base64 0.13.0", + "base64", ] [[package]] name = "rustls-pemfile" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e7522c9de787ff061458fe9a829dc790a3f5b22dc571694fc5883f448b94d9a9" +checksum = "0864aeff53f8c05aa08d86e5ef839d3dfcf07aeba2db32f12db0ef716e87bd55" dependencies = [ - "base64 0.13.0", + "base64", ] [[package]] name = "ryu" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3f6f92acf49d1b98f7a81226834412ada05458b7364277387724a237f062695" +checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09" [[package]] name = "schannel" @@ -2356,9 +2312,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.6.1" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dc14f172faf8a0194a3aded622712b0de276821addc574fa54fc0a1167e10dc" +checksum = "2bc1bb97804af6631813c55739f771071e0f2ed33ee20b68c86ec505d906356c" dependencies = [ "bitflags", "core-foundation", @@ -2379,18 +2335,18 @@ dependencies = [ [[package]] name = "serde" -version = "1.0.137" +version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61ea8d54c77f8315140a05f4c7237403bf38b72704d031543aa1d16abbf517d1" +checksum = "728eb6351430bccb993660dfffc5a72f91ccc1295abaa8ce19b27ebe4f75568b" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.137" +version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f26faba0c3959972377d3b2d306ee9f71faee9714294e41bb777f83f88578be" +checksum = "81fa1584d3d1bcacd84c277a0dfe21f5b0f6accf4a23d04d4c6d61f1af522b4c" dependencies = [ "proc-macro2", "quote", @@ -2399,9 +2355,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.81" +version = "1.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b7ce2b32a1aed03c558dc61a5cd328f15aff2dbc17daad8fb8af04d2100e15c" +checksum = "e55a28e3aaef9d5ce0506d0a14dbba8054ddc7e499ef522dd8b26859ec9d4a44" dependencies = [ "itoa", "ryu", @@ -2422,38 +2378,37 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.8.24" +version = "0.9.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "707d15895415db6628332b737c838b88c598522e4dc70647e59b72312924aebc" +checksum = "8613d593412a0deb7bbd8de9d908efff5a0cb9ccd8f62c641e7b2ed2f57291d1" dependencies = [ "indexmap", + "itoa", "ryu", "serde", - "yaml-rust", + "unsafe-libyaml", ] [[package]] name = "sha-1" -version = "0.9.8" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99cd6713db3cf16b6c84e06321e049a9b9f699826e16096d23bbcc44d15d51a6" +checksum = 
"028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" dependencies = [ - "block-buffer 0.9.0", - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", - "digest 0.9.0", - "opaque-debug", + "digest 0.10.5", ] [[package]] -name = "sha-1" -version = "0.10.0" +name = "sha1" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" +checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", - "digest 0.10.3", + "digest 0.10.5", ] [[package]] @@ -2463,7 +2418,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" dependencies = [ "block-buffer 0.9.0", - "cfg-if 1.0.0", + "cfg-if", "cpufeatures", "digest 0.9.0", "opaque-debug", @@ -2495,66 +2450,42 @@ dependencies = [ [[package]] name = "signature" -version = "1.5.0" +version = "1.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f054c6c1a6e95179d6f23ed974060dcefb2d9388bb7256900badad682c499de4" +checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" [[package]] name = "simple_asn1" -version = "0.4.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "692ca13de57ce0613a363c8c2f1de925adebc81b04c923ac60c5488bb44abe4b" +checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" dependencies = [ - "chrono", "num-bigint", "num-traits", + "thiserror", + "time", ] [[package]] name = "slab" -version = "0.4.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb703cfe953bccee95685111adeedb76fabe4e97549a58d16f03ea7b9367bb32" - -[[package]] -name = "sled" -version = "0.34.7" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f96b4737c2ce5987354855aed3797279def4ebf734436c6aa4552cf8e169935" +checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" dependencies = [ - "crc32fast", - "crossbeam-epoch", - "crossbeam-utils 0.8.9", - "fs2", - "fxhash", - "libc", - "log", - "parking_lot", - "zstd", + "autocfg", ] [[package]] name = "smallvec" -version = "1.8.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2dd574626839106c320a323308629dcb1acfc96e32a8cba364ddc61ac23ee83" +checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] name = "socket2" -version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "winapi", -] - -[[package]] -name = "socket2" -version = "0.4.4" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66d72b759436ae32898a2af0a14218dbf55efde3feeb170eb623637db85ee1e0" +checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" dependencies = [ "libc", "winapi", @@ -2583,9 +2514,9 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.98" +version = "1.0.102" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c50aef8a904de4c23c788f104b7dddc7d6f79c647c7c8ce4cc8f73eb0ca773dd" +checksum = "3fcd952facd492f9be3ef0d0b7032a6e442ee9b361d4acc2b1d0c4aaa5f613a1" dependencies = [ 
"proc-macro2", "quote", @@ -2600,11 +2531,11 @@ checksum = "20518fe4a4c9acf048008599e464deb21beeae3d3578418951a189c235a7a9a8" [[package]] name = "synchronoise" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d717ed0efc9d39ab3b642a096bc369a3e02a38a51c41845d7fe31bdad1d6eaeb" +checksum = "3dbc01390fc626ce8d1cffe3376ded2b72a11bb70e1c75f404a210e4daa4def2" dependencies = [ - "crossbeam-queue 0.1.2", + "crossbeam-queue", ] [[package]] @@ -2619,26 +2550,20 @@ dependencies = [ "unicode-xid", ] -[[package]] -name = "textwrap" -version = "0.15.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1141d4d61095b28419e22cb0bbf02755f5e54e0526f97f1e3d1d160e60885fb" - [[package]] name = "thiserror" -version = "1.0.31" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd829fe32373d27f76265620b5309d0340cb8550f523c1dda251d6298069069a" +checksum = "10deb33631e3c9018b9baf9dcbbc4f737320d2b576bac10f6aefa048fa407e3e" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.31" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0396bc89e626244658bef819e22d0cc459e795a5ebe878e6ec336d1674a8d79a" +checksum = "982d17546b47146b28f7c22e3d08465f6b8903d0ea13c1660d9d84a6e7adcdbb" dependencies = [ "proc-macro2", "quote", @@ -2665,9 +2590,9 @@ dependencies = [ [[package]] name = "thrift" -version = "0.13.0" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c6d965454947cc7266d22716ebfd07b18d84ebaf35eec558586bbb2a8cb6b5b" +checksum = "09678c4cdbb4eed72e18b7c2af1329c69825ed16fcbac62d083fc3e2b0590ff0" dependencies = [ "byteorder", "integer-encoding", @@ -2678,9 +2603,9 @@ dependencies = [ [[package]] name = "tikv-jemalloc-ctl" -version = "0.4.2" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb833c46ecbf8b6daeccb347cefcabf9c1beb5c9b0f853e1cec45632d9963e69" +checksum = "e37706572f4b151dff7a0146e040804e9c26fe3a3118591112f05cf12a4216c1" dependencies = [ "libc", "paste", @@ -2689,9 +2614,9 @@ dependencies = [ [[package]] name = "tikv-jemalloc-sys" -version = "0.4.3+5.2.1-patched.2" +version = "0.5.2+5.3.0-patched" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1792ccb507d955b46af42c123ea8863668fae24d03721e40cad6a41773dbb49" +checksum = "ec45c14da997d0925c7835883e4d5c181f196fa142f8c19d7643d1e9af2592c3" dependencies = [ "cc", "fs_extra", @@ -2700,9 +2625,9 @@ dependencies = [ [[package]] name = "tikv-jemallocator" -version = "0.4.3" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a5b7bcecfafe4998587d636f9ae9d55eb9d0499877b88757767c346875067098" +checksum = "20612db8a13a6c06d57ec83953694185a367e16945f66565e8028d2c0bd76979" dependencies = [ "libc", "tikv-jemalloc-sys", @@ -2710,15 +2635,22 @@ dependencies = [ [[package]] name = "time" -version = "0.1.44" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6db9e6914ab8b1ae1c260a4ae7a49b6c5611b40328a735b21862567685e73255" +checksum = "d634a985c4d4238ec39cacaed2e7ae552fbd3c476b552c1deac3021b7d7eaf0c" dependencies = [ + "itoa", "libc", - "wasi 0.10.0+wasi-snapshot-preview1", - "winapi", + "num_threads", + "time-macros", ] +[[package]] +name = "time-macros" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"42657b1a6f4d817cda8e7a0ace261fe0cc946cf3a80314390b22cc61ae080792" + [[package]] name = "tinyvec" version = "1.6.0" @@ -2736,19 +2668,19 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.19.2" +version = "1.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c51a52ed6686dd62c320f9b89299e9dfb46f730c7a48e635c19f21d116cb1439" +checksum = "a9e03c497dc955702ba729190dc4aac6f2a0ce97f913e5b1b5912fc5039d9099" dependencies = [ + "autocfg", "bytes", "libc", "memchr", "mio", "num_cpus", - "once_cell", "pin-project-lite", "signal-hook-registry", - "socket2 0.4.4", + "socket2", "tokio-macros", "winapi", ] @@ -2789,9 +2721,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.9" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df54d54117d6fdc4e4fea40fe1e4e566b3505700e148a6827e59b34b0d2600d9" +checksum = "f6edf2d6bc038a43d31353570e27270603f4648d18f5ed10c0e179abe43255af" dependencies = [ "futures-core", "pin-project-lite", @@ -2800,9 +2732,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc463cd8deddc3770d20f9852143d50bf6094e640b485cb2e189a2099085ff45" +checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740" dependencies = [ "bytes", "futures-core", @@ -2874,11 +2806,11 @@ checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.35" +version = "0.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a400e31aa60b9d44a52a8ee0343b5b18566b03a8321e0d321f695cf56e940160" +checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "log", "pin-project-lite", "tracing-attributes", @@ -2887,9 +2819,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.21" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc6b8ad3567499f98a1db7a752b07a7c8c7c7c34c332ec00effb2b0027974b7c" +checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" dependencies = [ "proc-macro2", "quote", @@ -2898,9 +2830,9 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.27" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7709595b8878a4965ce5e87ebf880a7d39c9afc6837721b21a5a816a8117d921" +checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" dependencies = [ "once_cell", "valuable", @@ -2908,9 +2840,9 @@ dependencies = [ [[package]] name = "tracing-flame" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd520fe41c667b437952383f3a1ec14f1fa45d653f719a77eedd6e6a02d8fa54" +checksum = "0bae117ee14789185e129aaee5d93750abe67fdc5a9a62650452bfe4e122a3a9" dependencies = [ "lazy_static", "tracing", @@ -2928,80 +2860,66 @@ dependencies = [ "tracing-core", ] -[[package]] -name = "tracing-serde" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6b213177105856957181934e4920de57730fc69bf42c37ee5bb664d406d9e1" -dependencies = [ - "serde", - "tracing-core", -] - [[package]] name = "tracing-subscriber" -version = "0.2.25" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "0e0d2eaa99c3c2e41547cfa109e910a68ea03823cccad4a0525dcbc9b01e8c71" +checksum = "a6176eae26dd70d0c919749377897b54a9276bd7061339665dd68777926b5a70" dependencies = [ - "ansi_term", - "chrono", - "lazy_static", "matchers", + "nu-ansi-term", + "once_cell", "regex", - "serde", - "serde_json", "sharded-slab", "smallvec", "thread_local", "tracing", "tracing-core", "tracing-log", - "tracing-serde", ] [[package]] name = "trust-dns-proto" -version = "0.20.4" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca94d4e9feb6a181c690c4040d7a24ef34018d8313ac5044a61d21222ae24e31" +checksum = "4f7f83d1e4a0e4358ac54c5c3681e5d7da5efc5a7a632c90bb6d6669ddd9bc26" dependencies = [ "async-trait", - "cfg-if 1.0.0", + "cfg-if", "data-encoding", "enum-as-inner", "futures-channel", "futures-io", "futures-util", - "idna", + "idna 0.2.3", "ipnet", "lazy_static", - "log", "rand 0.8.5", "smallvec", "thiserror", "tinyvec", "tokio", + "tracing", "url", ] [[package]] name = "trust-dns-resolver" -version = "0.20.4" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ecae383baad9995efaa34ce8e57d12c3f305e545887472a492b838f4b5cfb77a" +checksum = "aff21aa4dcefb0a1afbfac26deb0adc93888c7d295fb63ab273ef276ba2b7cfe" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "futures-util", "ipconfig", "lazy_static", - "log", "lru-cache", "parking_lot", "resolv-conf", "smallvec", "thiserror", "tokio", + "tracing", "trust-dns-proto", ] @@ -3034,24 +2952,30 @@ checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" [[package]] name = "unicode-ident" -version = "1.0.1" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bd2fe26506023ed7b5e1e315add59d6f584c621d037f9368fea9cfb988f368c" +checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3" [[package]] name = "unicode-normalization" -version = "0.1.19" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" dependencies = [ "tinyvec", ] [[package]] name = "unicode-xid" -version = "0.2.3" +version = "0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" + +[[package]] +name = "unsafe-libyaml" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957e51f3646910546462e67d5f7599b9e4fb8acdd304b087a6494730f9eebf04" +checksum = "c1e5fa573d8ac5f1a856f8d7be41d390ee973daf97c806b2c1a465e4e1406e68" [[package]] name = "unsigned-varint" @@ -3067,13 +2991,12 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" -version = "2.2.2" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" +checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" dependencies = [ "form_urlencoded", - "idna", - "matches", + "idna 0.3.0", "percent-encoding", ] @@ -3120,12 +3043,6 @@ version = "0.9.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" -[[package]] -name = "wasi" -version = "0.10.0+wasi-snapshot-preview1" -source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a143597ca7c7793eff794def352d41792a93c481eb1042423ff7ff72ba2c31f" - [[package]] name = "wasi" version = "0.11.0+wasi-snapshot-preview1" @@ -3134,23 +3051,23 @@ checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.81" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c53b543413a17a202f4be280a7e5c62a1c69345f5de525ee64f8cfdbc954994" +checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.81" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5491a68ab4500fa6b4d726bd67408630c3dbe9c4fe7bda16d5c82a1fd8c7340a" +checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142" dependencies = [ "bumpalo", - "lazy_static", "log", + "once_cell", "proc-macro2", "quote", "syn", @@ -3159,11 +3076,11 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.31" +version = "0.4.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de9a9cec1733468a8c657e57fa2413d2ae2c0129b95e87c5b72b8ace4d13f31f" +checksum = "23639446165ca5a5de86ae1d8896b737ae80319560fbaa4c2887b7da6e7ebd7d" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "wasm-bindgen", "web-sys", @@ -3171,9 +3088,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.81" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c441e177922bc58f1e12c022624b6216378e5febc2f0533e41ba443d505b80aa" +checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3181,9 +3098,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.81" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d94ac45fcf608c1f45ef53e748d35660f168490c10b23704c7779ab8f5c3048" +checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c" dependencies = [ "proc-macro2", "quote", @@ -3194,15 +3111,15 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.81" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a89911bd99e5f3659ec4acf9c4d93b0a90fe4a2a11f15328472058edc5261be" +checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" [[package]] name = "web-sys" -version = "0.3.58" +version = "0.3.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fed94beee57daf8dd7d51f2b15dc2bcde92d7a72304cdf662a4371008b71b90" +checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f" dependencies = [ "js-sys", "wasm-bindgen", @@ -3220,21 +3137,21 @@ dependencies = [ [[package]] name = "weezl" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c97e489d8f836838d497091de568cf16b117486d529ec5579233521065bd5e4" +checksum = "9193164d4de03a926d909d3bc7c30543cecb35400c02114792c2cae20d5e2dbb" [[package]] name = "widestring" -version = "0.4.3" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" +checksum = 
"17882f045410753661207383517a6f62ec3dbeb6a4ed2acce01f0728238d1983" [[package]] name = "wildmatch" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6c48bd20df7e4ced539c12f570f937c6b4884928a87fee70a479d72f031d4e0" +checksum = "ee583bdc5ff1cf9db20e9db5bb3ff4c3089a8f6b8b31aff265c9aba85812db86" [[package]] name = "winapi" @@ -3301,15 +3218,6 @@ version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" -[[package]] -name = "winreg" -version = "0.6.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" -dependencies = [ - "winapi", -] - [[package]] name = "winreg" version = "0.7.0" @@ -3319,15 +3227,6 @@ dependencies = [ "winapi", ] -[[package]] -name = "yaml-rust" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" -dependencies = [ - "linked-hash-map", -] - [[package]] name = "yansi" version = "0.5.1" @@ -3364,30 +3263,11 @@ dependencies = [ "num-traits", ] -[[package]] -name = "zstd" -version = "0.9.2+zstd.1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2390ea1bf6c038c39674f22d95f0564725fc06034a47129179810b2fc58caa54" -dependencies = [ - "zstd-safe", -] - -[[package]] -name = "zstd-safe" -version = "4.1.3+zstd.1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e99d81b99fb3c2c2c794e3fe56c305c63d5173a16a46b5850b07c935ffc7db79" -dependencies = [ - "libc", - "zstd-sys", -] - [[package]] name = "zstd-sys" -version = "1.6.2+zstd.1.5.1" +version = "2.0.1+zstd.1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2daf2f248d9ea44454bfcb2516534e8b8ad2fc91bf818a1885495fc42bc8ac9f" +checksum = "9fd07cbbc53846d9145dbffdf6dd09a7a0aa52be46741825f5c97bdd4f73f12b" dependencies = [ "cc", "libc", diff --git a/Cargo.toml b/Cargo.toml index d5f3c9dd..83f03ce8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,7 +28,7 @@ ruma = { git = "https://github.com/ruma/ruma", rev = "d614ad1422d6c4b3437ebc318c # Async runtime and utilities tokio = { version = "1.11.0", features = ["fs", "macros", "signal", "sync"] } # Used for storing data permanently -sled = { version = "0.34.7", features = ["compression", "no_metrics"], optional = true } +#sled = { version = "0.34.7", features = ["compression", "no_metrics"], optional = true } #sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] } persy = { version = "1.0.0", optional = true, features = ["background_ops"] } @@ -40,62 +40,62 @@ directories = "4.0.0" # Used for ruma wrapper serde_json = { version = "1.0.68", features = ["raw_value"] } # Used for appservice registration files -serde_yaml = "0.8.21" +serde_yaml = "0.9.13" # Used for pdu definition serde = { version = "1.0.130", features = ["rc"] } # Used for secure identifiers rand = "0.8.4" # Used to hash passwords -rust-argon2 = "0.8.3" +rust-argon2 = "1.0.0" # Used to send requests reqwest = { default-features = false, features = ["rustls-tls-native-roots", "socks"], git = "https://github.com/timokoesters/reqwest", rev = "57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" } # Used for conduit::Error type thiserror = "1.0.29" # Used to generate thumbnails for images -image = { version = 
"0.23.14", default-features = false, features = ["jpeg", "png", "gif"] } +image = { version = "0.24.4", default-features = false, features = ["jpeg", "png", "gif"] } # Used to encode server public key base64 = "0.13.0" # Used when hashing the state ring = "0.16.20" # Used when querying the SRV record of other servers -trust-dns-resolver = "0.20.3" +trust-dns-resolver = "0.22.0" # Used to find matching events for appservices regex = "1.5.4" # jwt jsonwebtokens -jsonwebtoken = "7.2.0" +jsonwebtoken = "8.1.1" # Performance measurements tracing = { version = "0.1.27", features = [] } -tracing-subscriber = "0.2.22" -tracing-flame = "0.1.0" -opentelemetry = { version = "0.16.0", features = ["rt-tokio"] } -opentelemetry-jaeger = { version = "0.15.0", features = ["rt-tokio"] } +tracing-subscriber = { version = "0.3.16", features = ["env-filter"] } +tracing-flame = "0.2.0" +opentelemetry = { version = "0.18.0", features = ["rt-tokio"] } +opentelemetry-jaeger = { version = "0.17.0", features = ["rt-tokio"] } lru-cache = "0.1.2" -rusqlite = { version = "0.25.3", optional = true, features = ["bundled"] } -parking_lot = { version = "0.11.2", optional = true } +rusqlite = { version = "0.28.0", optional = true, features = ["bundled"] } +parking_lot = { version = "0.12.1", optional = true } crossbeam = { version = "0.8.1", optional = true } num_cpus = "1.13.0" threadpool = "1.8.1" heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true } -rocksdb = { version = "0.17.0", default-features = true, features = ["multi-threaded-cf", "zstd"], optional = true } +rocksdb = { version = "0.19.0", default-features = true, features = ["multi-threaded-cf", "zstd"], optional = true } thread_local = "1.1.3" # used for TURN server authentication -hmac = "0.11.0" -sha-1 = "0.9.8" +hmac = "0.12.1" +sha-1 = "0.10.0" # used for conduit's CLI and admin room command parsing -clap = { version = "3.2.5", default-features = false, features = ["std", "derive"] } +clap = { version = "4.0.11", default-features = false, features = ["std", "derive"] } futures-util = { version = "0.3.17", default-features = false } # Used for reading the configuration from conduit.toml & environment variables figment = { version = "0.10.6", features = ["env", "toml"] } -tikv-jemalloc-ctl = { version = "0.4.2", features = ["use_std"], optional = true } -tikv-jemallocator = { version = "0.4.1", features = ["unprefixed_malloc_on_supported_platforms"], optional = true } +tikv-jemalloc-ctl = { version = "0.5.0", features = ["use_std"], optional = true } +tikv-jemallocator = { version = "0.5.0", features = ["unprefixed_malloc_on_supported_platforms"], optional = true } lazy_static = "1.4.0" async-trait = "0.1.57" [features] -default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "jemalloc"] -backend_sled = ["sled"] +default = ["conduit_bin", "backend_sqlite", "jemalloc"] +#backend_sled = ["sled"] backend_persy = ["persy", "parking_lot"] backend_sqlite = ["sqlite"] backend_heed = ["heed", "crossbeam"] diff --git a/src/api/client_server/voip.rs b/src/api/client_server/voip.rs index dc9caaae..6b1ee400 100644 --- a/src/api/client_server/voip.rs +++ b/src/api/client_server/voip.rs @@ -1,5 +1,5 @@ use crate::{services, Result, Ruma}; -use hmac::{Hmac, Mac, NewMac}; +use hmac::{Hmac, Mac}; use ruma::{api::client::voip::get_turn_server_info, SecondsSinceUnixEpoch}; use sha1::Sha1; use std::time::{Duration, SystemTime}; diff --git a/src/main.rs b/src/main.rs index ce7e5785..0bba2aba 100644 
--- a/src/main.rs +++ b/src/main.rs @@ -86,7 +86,7 @@ async fn main() { if config.allow_jaeger { opentelemetry::global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new()); - let tracer = opentelemetry_jaeger::new_pipeline() + let tracer = opentelemetry_jaeger::new_agent_pipeline() .install_batch(opentelemetry::runtime::Tokio) .unwrap(); diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 054df095..f88fd023 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -40,7 +40,7 @@ pub struct Service { pub config: Config, keypair: Arc, dns_resolver: TokioAsyncResolver, - jwt_decoding_key: Option>, + jwt_decoding_key: Option, federation_client: reqwest::Client, default_client: reqwest::Client, pub stable_room_versions: Vec, @@ -105,7 +105,7 @@ impl Service { let jwt_decoding_key = config .jwt_secret .as_ref() - .map(|secret| jsonwebtoken::DecodingKey::from_secret(secret.as_bytes()).into_static()); + .map(|secret| jsonwebtoken::DecodingKey::from_secret(secret.as_bytes())); let default_client = reqwest_client_builder(&config)?.build()?; let name_override = Arc::clone(&tls_name_override); @@ -250,7 +250,7 @@ impl Service { &self.dns_resolver } - pub fn jwt_decoding_key(&self) -> Option<&jsonwebtoken::DecodingKey<'_>> { + pub fn jwt_decoding_key(&self) -> Option<&jsonwebtoken::DecodingKey> { self.jwt_decoding_key.as_ref() } diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs index 66841087..93937533 100644 --- a/src/service/media/mod.rs +++ b/src/service/media/mod.rs @@ -1,8 +1,10 @@ mod data; +use std::io::Cursor; + pub use data::Data; use crate::{services, Result}; -use image::{imageops::FilterType, GenericImageView}; +use image::imageops::FilterType; use tokio::{ fs::File, @@ -186,7 +188,10 @@ impl Service { }; let mut thumbnail_bytes = Vec::new(); - thumbnail.write_to(&mut thumbnail_bytes, image::ImageOutputFormat::Png)?; + thumbnail.write_to( + &mut Cursor::new(&mut thumbnail_bytes), + image::ImageOutputFormat::Png, + )?; // Save thumbnail in database so we don't have to generate it again next time let thumbnail_key = self.db.create_file_metadata( From 6b131202b9c2ec36043d73ffd2d787093e4b9fed Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 9 Oct 2022 17:25:06 +0200 Subject: [PATCH 401/445] Bump ruma --- Cargo.lock | 106 ++++++++------ Cargo.toml | 2 +- src/api/client_server/account.rs | 4 + src/api/client_server/directory.rs | 16 +++ src/api/client_server/keys.rs | 6 +- src/api/client_server/membership.rs | 21 ++- src/api/client_server/read_marker.rs | 133 +++++++++++------- src/api/client_server/room.rs | 8 +- src/api/client_server/session.rs | 2 + src/api/client_server/sync.rs | 19 ++- src/api/ruma_wrapper/axum.rs | 11 +- src/api/ruma_wrapper/mod.rs | 9 +- src/api/server_server.rs | 22 +-- src/config/mod.rs | 6 +- src/database/key_value/globals.rs | 7 +- src/database/key_value/key_backups.rs | 6 +- src/database/key_value/rooms/alias.rs | 6 +- src/database/key_value/rooms/directory.rs | 4 +- src/database/key_value/rooms/edus/presence.rs | 6 +- .../key_value/rooms/edus/read_receipt.rs | 4 +- src/database/key_value/rooms/edus/typing.rs | 4 +- src/database/key_value/rooms/metadata.rs | 4 +- src/database/key_value/rooms/outlier.rs | 2 +- src/database/key_value/rooms/state.rs | 4 +- src/database/key_value/rooms/state_cache.rs | 20 +-- src/database/key_value/rooms/timeline.rs | 6 +- src/database/key_value/rooms/user.rs | 10 +- src/database/key_value/uiaa.rs | 3 +- src/database/key_value/users.rs | 22 +-- 
src/database/mod.rs | 20 +-- src/service/admin/mod.rs | 7 +- src/service/globals/data.rs | 6 +- src/service/globals/mod.rs | 25 ++-- src/service/key_backups/data.rs | 4 +- src/service/key_backups/mod.rs | 4 +- src/service/pdu.rs | 20 +-- src/service/pusher/mod.rs | 8 +- src/service/rooms/alias/data.rs | 6 +- src/service/rooms/alias/mod.rs | 6 +- src/service/rooms/directory/data.rs | 4 +- src/service/rooms/directory/mod.rs | 4 +- src/service/rooms/edus/presence/data.rs | 4 +- src/service/rooms/edus/presence/mod.rs | 4 +- src/service/rooms/edus/read_receipt/data.rs | 4 +- src/service/rooms/edus/read_receipt/mod.rs | 4 +- src/service/rooms/edus/typing/data.rs | 4 +- src/service/rooms/event_handler/mod.rs | 17 ++- src/service/rooms/lazy_loading/mod.rs | 6 +- src/service/rooms/metadata/data.rs | 4 +- src/service/rooms/metadata/mod.rs | 4 +- src/service/rooms/outlier/data.rs | 2 +- src/service/rooms/outlier/mod.rs | 2 +- src/service/rooms/state/data.rs | 4 +- src/service/rooms/state/mod.rs | 4 +- src/service/rooms/state_cache/data.rs | 20 +-- src/service/rooms/state_cache/mod.rs | 20 +-- src/service/rooms/state_compressor/mod.rs | 6 +- src/service/rooms/timeline/data.rs | 6 +- src/service/rooms/timeline/mod.rs | 19 +-- src/service/rooms/user/data.rs | 6 +- src/service/rooms/user/mod.rs | 6 +- src/service/sending/mod.rs | 15 +- src/service/uiaa/data.rs | 2 +- src/service/uiaa/mod.rs | 3 +- src/service/users/data.rs | 17 +-- src/service/users/mod.rs | 17 +-- src/utils/error.rs | 4 +- src/utils/mod.rs | 2 +- 68 files changed, 446 insertions(+), 347 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 2583c524..29603eea 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -181,6 +181,12 @@ version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" +[[package]] +name = "base64ct" +version = "1.5.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ea2b2456fd614d856680dcd9fcc660a51a820fa09daef2e49772b56a193c8474" + [[package]] name = "bincode" version = "1.3.3" @@ -429,9 +435,9 @@ dependencies = [ [[package]] name = "const-oid" -version = "0.6.2" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d6f2aa4d0537bcc1c74df8755072bd31c1ef1a3a1b85a68e8404a8c353b7b8b" +checksum = "722e23542a15cea1f65d4a1419c4cfd7a26706c70871a13a04238ca3f40f1661" [[package]] name = "constant_time_eq" @@ -567,9 +573,9 @@ dependencies = [ [[package]] name = "curve25519-dalek" -version = "3.2.1" +version = "3.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90f9d052967f590a76e62eb387bd0bbb1b000182c3cefe5364db6b7211651bc0" +checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" dependencies = [ "byteorder", "digest 0.9.0", @@ -586,11 +592,12 @@ checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" [[package]] name = "der" -version = "0.4.5" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "79b71cca7d95d7681a4b3b9cdf63c8dbc3730d0584c2c74e31416d64a90493f4" +checksum = "13dd2ae565c0a381dde7fade45fce95984c568bdcb4700a4fdbe3175e0380b2f" dependencies = [ "const-oid", + "zeroize", ] [[package]] @@ -1145,12 +1152,6 @@ dependencies = [ "serde", ] -[[package]] -name = "indoc" -version = "1.0.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adab1eaa3408fb7f0c777a73e7465fd5656136fc93b670eb6df3c88c2c1344e3" - 
[[package]] name = "inlinable_string" version = "0.1.15" @@ -1229,6 +1230,15 @@ dependencies = [ "serde", ] +[[package]] +name = "js_option" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68421373957a1593a767013698dbf206e2b221eefe97a44d98d18672ff38423c" +dependencies = [ + "serde", +] + [[package]] name = "jsonwebtoken" version = "8.1.1" @@ -1757,13 +1767,12 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkcs8" -version = "0.7.6" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee3ef9b64d26bad0536099c816c6734379e45bbd5f14798def6809e5cc350447" +checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" dependencies = [ "der", "spki", - "zeroize", ] [[package]] @@ -2054,11 +2063,12 @@ dependencies = [ [[package]] name = "ruma" -version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" +version = "0.7.4" +source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" dependencies = [ "assign", "js_int", + "js_option", "ruma-appservice-api", "ruma-client-api", "ruma-common", @@ -2071,8 +2081,8 @@ dependencies = [ [[package]] name = "ruma-appservice-api" -version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" +version = "0.7.0" +source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" dependencies = [ "ruma-common", "serde", @@ -2081,8 +2091,8 @@ dependencies = [ [[package]] name = "ruma-client-api" -version = "0.13.0" -source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" +version = "0.15.1" +source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" dependencies = [ "assign", "bytes", @@ -2097,19 +2107,20 @@ dependencies = [ [[package]] name = "ruma-common" -version = "0.8.0" -source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" +version = "0.10.3" +source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" dependencies = [ "base64", "bytes", "form_urlencoded", "http", "indexmap", - "indoc", "itoa", "js_int", + "js_option", "percent-encoding", "rand 0.8.5", + "regex", "ruma-identifiers-validation", "ruma-macros", "serde", @@ -2123,8 +2134,8 @@ dependencies = [ [[package]] name = "ruma-federation-api" -version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" +version = "0.6.0" +source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" dependencies = [ "js_int", "ruma-common", @@ -2134,17 +2145,17 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" -version = "0.7.0" -source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" +version = "0.9.0" +source = 
"git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" dependencies = [ + "js_int", "thiserror", - "url", ] [[package]] name = "ruma-identity-service-api" -version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" +version = "0.6.0" +source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" dependencies = [ "js_int", "ruma-common", @@ -2153,20 +2164,23 @@ dependencies = [ [[package]] name = "ruma-macros" -version = "0.1.0" -source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" +version = "0.10.3" +source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" dependencies = [ + "once_cell", "proc-macro-crate", "proc-macro2", "quote", "ruma-identifiers-validation", + "serde", "syn", + "toml", ] [[package]] name = "ruma-push-gateway-api" -version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" +version = "0.6.0" +source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" dependencies = [ "js_int", "ruma-common", @@ -2176,8 +2190,8 @@ dependencies = [ [[package]] name = "ruma-signatures" -version = "0.10.0" -source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" +version = "0.12.0" +source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" dependencies = [ "base64", "ed25519-dalek", @@ -2187,13 +2201,12 @@ dependencies = [ "serde_json", "sha2", "thiserror", - "tracing", ] [[package]] name = "ruma-state-res" -version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=d614ad1422d6c4b3437ebc318ca8514ae338fd6d#d614ad1422d6c4b3437ebc318ca8514ae338fd6d" +version = "0.8.0" +source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" dependencies = [ "itertools", "js_int", @@ -2499,10 +2512,11 @@ checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] name = "spki" -version = "0.4.1" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c01a0c15da1b0b0e1494112e7af814a678fec9bd157881b49beac661e9b6f32" +checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" dependencies = [ + "base64ct", "der", ] @@ -3002,9 +3016,9 @@ dependencies = [ [[package]] name = "uuid" -version = "0.8.2" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc5cf98d8186244414c848017f0e2676b3fcb46807f6668a97dfe67359a3c4b7" +checksum = "feb41e78f93363bb2df8b0e86a2ca30eed7806ea16ea0c790d757cf93f79be83" dependencies = [ "getrandom 0.2.7", ] @@ -3235,9 +3249,9 @@ checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" [[package]] name = "zeroize" -version = "1.3.0" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4756f7db3f7b5574938c3eb1c117038b8e07f95ee6718c0efad4ac21508f1efd" +checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" dependencies = [ 
"zeroize_derive", ] diff --git a/Cargo.toml b/Cargo.toml index 83f03ce8..0428e746 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ tower-http = { version = "0.3.4", features = ["add-extension", "cors", "compress # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "d614ad1422d6c4b3437ebc318ca8514ae338fd6d", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "86b58cafb8abe29eecd0272d90b40bbb61a7919b", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index 58624a28..673bbb42 100644 --- a/src/api/client_server/account.rs +++ b/src/api/client_server/account.rs @@ -193,6 +193,8 @@ pub async fn register_route( access_token: None, user_id, device_id: None, + refresh_token: None, + expires_in: None, }); } @@ -238,6 +240,8 @@ pub async fn register_route( access_token: Some(token), user_id, device_id: Some(device_id), + refresh_token: None, + expires_in: None, }) } diff --git a/src/api/client_server/directory.rs b/src/api/client_server/directory.rs index 90f79a02..7c4aa50b 100644 --- a/src/api/client_server/directory.rs +++ b/src/api/client_server/directory.rs @@ -19,6 +19,7 @@ use ruma::{ room::{ avatar::RoomAvatarEventContent, canonical_alias::RoomCanonicalAliasEventContent, + create::RoomCreateEventContent, guest_access::{GuestAccess, RoomGuestAccessEventContent}, history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, join_rules::{JoinRule, RoomJoinRulesEventContent}, @@ -135,6 +136,7 @@ pub(crate) async fn get_public_rooms_filtered_helper( since, filter: Filter { generic_search_term: filter.generic_search_term.as_deref(), + room_types: filter.room_types.clone(), }, room_network: RoomNetwork::Matrix, }, @@ -287,6 +289,20 @@ pub(crate) async fn get_public_rooms_filtered_helper( .transpose()? .flatten() .ok_or_else(|| Error::bad_database("Missing room join rule event for room."))?, + room_type: services() + .rooms + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomCreate, "")? + .map(|s| { + serde_json::from_str::(s.content.get()).map_err( + |e| { + error!("Invalid room create event in database: {}", e); + Error::BadDatabase("Invalid room create event in database.") + }, + ) + }) + .transpose()? 
+ .and_then(|e| e.room_type), room_id, }; Ok(chunk) diff --git a/src/api/client_server/keys.rs b/src/api/client_server/keys.rs index be62cc22..33ff309f 100644 --- a/src/api/client_server/keys.rs +++ b/src/api/client_server/keys.rs @@ -14,7 +14,7 @@ use ruma::{ federation, }, serde::Raw, - DeviceId, DeviceKeyAlgorithm, UserId, + DeviceId, DeviceKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId, }; use serde_json::json; use std::collections::{BTreeMap, HashMap, HashSet}; @@ -253,7 +253,7 @@ pub async fn get_key_changes_route( pub(crate) async fn get_keys_helper bool>( sender_user: Option<&UserId>, - device_keys_input: &BTreeMap, Vec>>, + device_keys_input: &BTreeMap>, allowed_signatures: F, ) -> Result { let mut master_keys = BTreeMap::new(); @@ -396,7 +396,7 @@ fn add_unsigned_device_display_name( } pub(crate) async fn claim_keys_helper( - one_time_keys_input: &BTreeMap, BTreeMap, DeviceKeyAlgorithm>>, + one_time_keys_input: &BTreeMap>, ) -> Result { let mut one_time_keys = BTreeMap::new(); diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index a91d079a..4f791c71 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -10,12 +10,14 @@ use ruma::{ }, federation::{self, membership::create_invite}, }, + canonical_json::to_canonical_value, events::{ room::member::{MembershipState, RoomMemberEventContent}, RoomEventType, StateEventType, }, - serde::{to_canonical_value, Base64, CanonicalJsonObject, CanonicalJsonValue}, - EventId, RoomId, RoomVersionId, ServerName, UserId, + serde::Base64, + CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName, + RoomId, RoomVersionId, UserId, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ @@ -83,7 +85,7 @@ pub async fn join_room_by_id_or_alias_route( let sender_user = body.sender_user.as_deref().expect("user is authenticated"); let body = body.body; - let (servers, room_id) = match Box::::try_from(body.room_id_or_alias) { + let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias) { Ok(room_id) => { let mut servers = body.server_name.clone(); servers.extend( @@ -458,7 +460,7 @@ pub async fn joined_members_route( async fn join_room_by_id_helper( sender_user: Option<&UserId>, room_id: &RoomId, - servers: &[Box], + servers: &[OwnedServerName], _third_party_signed: Option<&IncomingThirdPartySigned>, ) -> Result { let sender_user = sender_user.expect("user is authenticated"); @@ -673,7 +675,12 @@ async fn join_room_by_id_helper( room_id, state .into_iter() - .map(|(k, id)| services().rooms.state_compressor.compress_state_event(k, &id)) + .map(|(k, id)| { + services() + .rooms + .state_compressor + .compress_state_event(k, &id) + }) .collect::>()?, )?; @@ -737,7 +744,7 @@ fn validate_and_add_event_id( pdu: &RawJsonValue, room_version: &RoomVersionId, pub_key_map: &RwLock>>, -) -> Result<(Box, CanonicalJsonObject)> { +) -> Result<(OwnedEventId, CanonicalJsonObject)> { let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); Error::BadServerResponse("Invalid PDU in server response") @@ -896,7 +903,7 @@ pub(crate) async fn invite_helper<'a>( warn!("Server {} changed invite event, that's not allowed in the spec: ours: {:?}, theirs: {:?}", user_id.server_name(), pdu_json, value); } - let origin: Box = serde_json::from_value( + let origin: OwnedServerName = serde_json::from_value( 
serde_json::to_value(value.get("origin").ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Event needs an origin field.", diff --git a/src/api/client_server/read_marker.rs b/src/api/client_server/read_marker.rs index fd0e090e..bdf467f9 100644 --- a/src/api/client_server/read_marker.rs +++ b/src/api/client_server/read_marker.rs @@ -1,8 +1,7 @@ use crate::{services, Error, Result, Ruma}; use ruma::{ api::client::{error::ErrorKind, read_marker::set_read_marker, receipt::create_receipt}, - events::RoomAccountDataEventType, - receipt::ReceiptType, + events::{receipt::ReceiptType, RoomAccountDataEventType}, MilliSecondsSinceUnixEpoch, }; use std::collections::BTreeMap; @@ -18,19 +17,28 @@ pub async fn set_read_marker_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let fully_read_event = ruma::events::fully_read::FullyReadEvent { - content: ruma::events::fully_read::FullyReadEventContent { - event_id: body.fully_read.clone(), - }, - }; - services().account_data.update( - Some(&body.room_id), - sender_user, - RoomAccountDataEventType::FullyRead, - &serde_json::to_value(fully_read_event).expect("to json value always works"), - )?; + if let Some(fully_read) = &body.fully_read { + let fully_read_event = ruma::events::fully_read::FullyReadEvent { + content: ruma::events::fully_read::FullyReadEventContent { + event_id: fully_read.clone(), + }, + }; + services().account_data.update( + Some(&body.room_id), + sender_user, + RoomAccountDataEventType::FullyRead, + &serde_json::to_value(fully_read_event).expect("to json value always works"), + )?; + } - if let Some(event) = &body.read_receipt { + if body.private_read_receipt.is_some() || body.read_receipt.is_some() { + services() + .rooms + .user + .reset_notification_counts(sender_user, &body.room_id)?; + } + + if let Some(event) = &body.private_read_receipt { services().rooms.edus.read_receipt.private_read_set( &body.room_id, sender_user, @@ -43,11 +51,9 @@ pub async fn set_read_marker_route( "Event does not exist.", ))?, )?; - services() - .rooms - .user - .reset_notification_counts(sender_user, &body.room_id)?; + } + if let Some(event) = &body.read_receipt { let mut user_receipts = BTreeMap::new(); user_receipts.insert( sender_user.clone(), @@ -83,44 +89,69 @@ pub async fn create_receipt_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - services().rooms.edus.read_receipt.private_read_set( - &body.room_id, - sender_user, + if matches!( + &body.receipt_type, + create_receipt::v3::ReceiptType::Read | create_receipt::v3::ReceiptType::ReadPrivate + ) { services() .rooms - .timeline - .get_pdu_count(&body.event_id)? 
- .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Event does not exist.", - ))?, - )?; - services() - .rooms - .user - .reset_notification_counts(sender_user, &body.room_id)?; + .user + .reset_notification_counts(sender_user, &body.room_id)?; + } - let mut user_receipts = BTreeMap::new(); - user_receipts.insert( - sender_user.clone(), - ruma::events::receipt::Receipt { - ts: Some(MilliSecondsSinceUnixEpoch::now()), - }, - ); - let mut receipts = BTreeMap::new(); - receipts.insert(ReceiptType::Read, user_receipts); + match body.receipt_type { + create_receipt::v3::ReceiptType::FullyRead => { + let fully_read_event = ruma::events::fully_read::FullyReadEvent { + content: ruma::events::fully_read::FullyReadEventContent { + event_id: body.event_id.clone(), + }, + }; + services().account_data.update( + Some(&body.room_id), + sender_user, + RoomAccountDataEventType::FullyRead, + &serde_json::to_value(fully_read_event).expect("to json value always works"), + )?; + } + create_receipt::v3::ReceiptType::Read => { + let mut user_receipts = BTreeMap::new(); + user_receipts.insert( + sender_user.clone(), + ruma::events::receipt::Receipt { + ts: Some(MilliSecondsSinceUnixEpoch::now()), + }, + ); + let mut receipts = BTreeMap::new(); + receipts.insert(ReceiptType::Read, user_receipts); - let mut receipt_content = BTreeMap::new(); - receipt_content.insert(body.event_id.to_owned(), receipts); + let mut receipt_content = BTreeMap::new(); + receipt_content.insert(body.event_id.to_owned(), receipts); - services().rooms.edus.read_receipt.readreceipt_update( - sender_user, - &body.room_id, - ruma::events::receipt::ReceiptEvent { - content: ruma::events::receipt::ReceiptEventContent(receipt_content), - room_id: body.room_id.clone(), - }, - )?; + services().rooms.edus.read_receipt.readreceipt_update( + sender_user, + &body.room_id, + ruma::events::receipt::ReceiptEvent { + content: ruma::events::receipt::ReceiptEventContent(receipt_content), + room_id: body.room_id.clone(), + }, + )?; + } + create_receipt::v3::ReceiptType::ReadPrivate => { + services().rooms.edus.read_receipt.private_read_set( + &body.room_id, + sender_user, + services() + .rooms + .timeline + .get_pdu_count(&body.event_id)? 
+ .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Event does not exist.", + ))?, + )?; + } + _ => return Err(Error::bad_database("Unsupported receipt type")), + } Ok(create_receipt::v3::Response {}) } diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs index ca191d6a..43b2e8e6 100644 --- a/src/api/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -1,6 +1,8 @@ use crate::{ api::client_server::invite_helper, service::pdu::PduBuilder, services, Error, Result, Ruma, }; +use ruma::serde::JsonObject; +use ruma::OwnedRoomAliasId; use ruma::{ api::client::{ error::ErrorKind, @@ -21,9 +23,7 @@ use ruma::{ }, RoomEventType, StateEventType, }, - int, - serde::{CanonicalJsonObject, JsonObject}, - RoomAliasId, RoomId, + int, CanonicalJsonObject, RoomAliasId, RoomId, }; use serde_json::{json, value::to_raw_value}; use std::{cmp::max, collections::BTreeMap, sync::Arc}; @@ -77,7 +77,7 @@ pub async fn create_room_route( )); } - let alias: Option> = + let alias: Option = body.room_alias_name .as_ref() .map_or(Ok(None), |localpart| { diff --git a/src/api/client_server/session.rs b/src/api/client_server/session.rs index 14f1404f..61825167 100644 --- a/src/api/client_server/session.rs +++ b/src/api/client_server/session.rs @@ -147,6 +147,8 @@ pub async fn login_route(body: Ruma) -> Result, - sender_device: Box, + sender_user: OwnedUserId, + sender_device: OwnedDeviceId, body: sync_events::v3::IncomingRequest, tx: Sender>>, ) { @@ -155,15 +155,14 @@ async fn sync_helper_wrapper( } async fn sync_helper( - sender_user: Box, - sender_device: Box, + sender_user: OwnedUserId, + sender_device: OwnedDeviceId, body: sync_events::v3::IncomingRequest, // bool = caching allowed ) -> Result<(sync_events::v3::Response, bool), Error> { use sync_events::v3::{ - DeviceLists, Ephemeral, GlobalAccountData, IncomingFilter, InviteState, InvitedRoom, - JoinedRoom, LeftRoom, Presence, RoomAccountData, RoomSummary, Rooms, State, Timeline, - ToDevice, UnreadNotificationsCount, + Ephemeral, GlobalAccountData, IncomingFilter, InviteState, InvitedRoom, JoinedRoom, + LeftRoom, Presence, RoomAccountData, RoomSummary, Rooms, State, Timeline, ToDevice, }; // TODO: match body.set_presence { @@ -444,7 +443,7 @@ async fn sync_helper( }; // This check is in case a bad user ID made it into the database - if let Ok(uid) = UserId::parse(state_key.as_ref()) { + if let Ok(uid) = UserId::parse(&state_key) { lazy_loaded.insert(uid); } state_events.push(pdu); diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index 2d986a5c..c71d36b9 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -17,8 +17,7 @@ use bytes::{BufMut, Bytes, BytesMut}; use http::StatusCode; use ruma::{ api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse}, - signatures::CanonicalJsonValue, - DeviceId, ServerName, UserId, + CanonicalJsonValue, DeviceId, OwnedDeviceId, OwnedServerName, ServerName, UserId, }; use serde::Deserialize; use tracing::{debug, error, warn}; @@ -81,7 +80,7 @@ where let (sender_user, sender_device, sender_servername, from_appservice) = if let Some((_id, registration)) = appservice_registration { match metadata.authentication { - AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { + AuthScheme::AccessToken => { let user_id = query_params.user_id.map_or_else( || { UserId::parse_with_server_name( @@ -112,7 +111,7 @@ where } } else { match metadata.authentication { - AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { + 
AuthScheme::AccessToken => { let token = match token { Some(token) => token, _ => { @@ -132,7 +131,7 @@ where } Some((user_id, device_id)) => ( Some(user_id), - Some(Box::::from(device_id)), + Some(OwnedDeviceId::from(device_id)), None, false, ), @@ -298,7 +297,7 @@ where } struct XMatrix { - origin: Box, + origin: OwnedServerName, key: String, // KeyName? sig: String, } diff --git a/src/api/ruma_wrapper/mod.rs b/src/api/ruma_wrapper/mod.rs index 15360e58..ac4c825a 100644 --- a/src/api/ruma_wrapper/mod.rs +++ b/src/api/ruma_wrapper/mod.rs @@ -1,6 +1,7 @@ use crate::Error; use ruma::{ - api::client::uiaa::UiaaResponse, signatures::CanonicalJsonValue, DeviceId, ServerName, UserId, + api::client::uiaa::UiaaResponse, CanonicalJsonValue, OwnedDeviceId, OwnedServerName, + OwnedUserId, }; use std::ops::Deref; @@ -10,9 +11,9 @@ mod axum; /// Extractor for Ruma request structs pub struct Ruma { pub body: T, - pub sender_user: Option>, - pub sender_device: Option>, - pub sender_servername: Option>, + pub sender_user: Option, + pub sender_device: Option, + pub sender_servername: Option, // This is None when body is not a valid string pub json_body: Option, pub from_appservice: bool, diff --git a/src/api/server_server.rs b/src/api/server_server.rs index d54e1306..a8ae2726 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -33,18 +33,17 @@ use ruma::{ }, directory::{IncomingFilter, IncomingRoomNetwork}, events::{ - receipt::{ReceiptEvent, ReceiptEventContent}, + receipt::{ReceiptEvent, ReceiptEventContent, ReceiptType}, room::{ join_rules::{JoinRule, RoomJoinRulesEventContent}, member::{MembershipState, RoomMemberEventContent}, }, RoomEventType, StateEventType, }, - receipt::ReceiptType, serde::{Base64, JsonObject, Raw}, - signatures::CanonicalJsonValue, to_device::DeviceIdOrAllDevices, - EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, ServerSigningKeyId, + CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, + OwnedServerName, OwnedServerSigningKeyId, OwnedUserId, RoomId, ServerName, ServerSigningKeyId, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ @@ -280,7 +279,7 @@ where .write() .unwrap() .insert( - Box::::from(destination), + OwnedServerName::from(destination), (actual_destination, host), ); } @@ -528,7 +527,7 @@ pub async fn get_server_keys_route() -> Result { return Err(Error::bad_config("Federation is disabled.")); } - let mut verify_keys: BTreeMap, VerifyKey> = BTreeMap::new(); + let mut verify_keys: BTreeMap = BTreeMap::new(); verify_keys.insert( format!("ed25519:{}", services().globals.keypair().version()) .try_into() @@ -669,7 +668,7 @@ pub async fn send_transaction_message_route( }; // 0. Check the server is in the room - let room_id = match value + let room_id: OwnedRoomId = match value .get("room_id") .and_then(|id| RoomId::parse(id.as_str()?).ok()) { @@ -1007,7 +1006,7 @@ pub async fn get_missing_events_route( continue; } queued_events.extend_from_slice( - &serde_json::from_value::>>( + &serde_json::from_value::>( serde_json::to_value(pdu.get("prev_events").cloned().ok_or_else(|| { Error::bad_database("Event in db has no prev_events field.") })?) 
@@ -1411,7 +1410,7 @@ async fn create_join_event( } }; - let origin: Box = serde_json::from_value( + let origin: OwnedServerName = serde_json::from_value( serde_json::to_value(value.get("origin").ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Event needs an origin field.", @@ -1474,6 +1473,7 @@ async fn create_join_event( .filter_map(|(_, id)| services().rooms.timeline.get_pdu_json(id).ok().flatten()) .map(PduEvent::convert_to_outgoing_federation_event) .collect(), + origin: services().globals.server_name().to_string(), }) } @@ -1564,10 +1564,10 @@ pub async fn create_invite_route( // Add event_id back signed_event.insert( "event_id".to_owned(), - CanonicalJsonValue::String(event_id.into()), + CanonicalJsonValue::String(event_id.to_string()), ); - let sender: Box<_> = serde_json::from_value( + let sender: OwnedUserId = serde_json::from_value( signed_event .get("sender") .ok_or(Error::BadRequest( diff --git a/src/config/mod.rs b/src/config/mod.rs index 29af8839..e0efa60f 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -4,7 +4,7 @@ use std::{ net::{IpAddr, Ipv4Addr}, }; -use ruma::{RoomVersionId, ServerName}; +use ruma::{OwnedServerName, RoomVersionId, ServerName}; use serde::{de::IgnoredAny, Deserialize}; use tracing::warn; @@ -20,7 +20,7 @@ pub struct Config { pub port: u16, pub tls: Option, - pub server_name: Box, + pub server_name: OwnedServerName, #[serde(default = "default_database_backend")] pub database_backend: String, pub database_path: String, @@ -58,7 +58,7 @@ pub struct Config { pub proxy: ProxyConfig, pub jwt_secret: Option, #[serde(default = "Vec::new")] - pub trusted_servers: Vec>, + pub trusted_servers: Vec, #[serde(default = "default_log")] pub log: String, #[serde(default)] diff --git a/src/database/key_value/globals.rs b/src/database/key_value/globals.rs index fafaf49e..75d00b4d 100644 --- a/src/database/key_value/globals.rs +++ b/src/database/key_value/globals.rs @@ -5,7 +5,8 @@ use futures_util::{stream::FuturesUnordered, StreamExt}; use ruma::{ api::federation::discovery::{ServerSigningKeys, VerifyKey}, signatures::Ed25519KeyPair, - DeviceId, MilliSecondsSinceUnixEpoch, ServerName, ServerSigningKeyId, UserId, + DeviceId, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, ServerName, ServerSigningKeyId, + UserId, }; use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; @@ -163,7 +164,7 @@ impl service::globals::Data for KeyValueDatabase { &self, origin: &ServerName, new_keys: ServerSigningKeys, - ) -> Result, VerifyKey>> { + ) -> Result> { // Not atomic, but this is not critical let signingkeys = self.server_signingkeys.get(origin.as_bytes())?; @@ -202,7 +203,7 @@ impl service::globals::Data for KeyValueDatabase { fn signing_keys_for( &self, origin: &ServerName, - ) -> Result, VerifyKey>> { + ) -> Result> { let signingkeys = self .server_signingkeys .get(origin.as_bytes())? 
diff --git a/src/database/key_value/key_backups.rs b/src/database/key_value/key_backups.rs index 0738f730..900b700b 100644 --- a/src/database/key_value/key_backups.rs +++ b/src/database/key_value/key_backups.rs @@ -6,7 +6,7 @@ use ruma::{ error::ErrorKind, }, serde::Raw, - RoomId, UserId, + OwnedRoomId, RoomId, UserId, }; use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; @@ -198,13 +198,13 @@ impl service::key_backups::Data for KeyValueDatabase { &self, user_id: &UserId, version: &str, - ) -> Result, RoomKeyBackup>> { + ) -> Result> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(version.as_bytes()); prefix.push(0xff); - let mut rooms = BTreeMap::, RoomKeyBackup>::new(); + let mut rooms = BTreeMap::::new(); for result in self .backupkeyid_backup diff --git a/src/database/key_value/rooms/alias.rs b/src/database/key_value/rooms/alias.rs index f3de89da..c0f6de89 100644 --- a/src/database/key_value/rooms/alias.rs +++ b/src/database/key_value/rooms/alias.rs @@ -1,4 +1,4 @@ -use ruma::{api::client::error::ErrorKind, RoomAliasId, RoomId}; +use ruma::{api::client::error::ErrorKind, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId}; use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; @@ -31,7 +31,7 @@ impl service::rooms::alias::Data for KeyValueDatabase { Ok(()) } - fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result>> { + fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result> { self.alias_roomid .get(alias.alias().as_bytes())? .map(|bytes| { @@ -46,7 +46,7 @@ impl service::rooms::alias::Data for KeyValueDatabase { fn local_aliases_for_room<'a>( &'a self, room_id: &RoomId, - ) -> Box>> + 'a> { + ) -> Box> + 'a> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/key_value/rooms/directory.rs b/src/database/key_value/rooms/directory.rs index 212ced91..e05dee82 100644 --- a/src/database/key_value/rooms/directory.rs +++ b/src/database/key_value/rooms/directory.rs @@ -1,4 +1,4 @@ -use ruma::RoomId; +use ruma::{OwnedRoomId, RoomId}; use crate::{database::KeyValueDatabase, service, utils, Error, Result}; @@ -15,7 +15,7 @@ impl service::rooms::directory::Data for KeyValueDatabase { Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) } - fn public_rooms<'a>(&'a self) -> Box>> + 'a> { + fn public_rooms<'a>(&'a self) -> Box> + 'a> { Box::new(self.publicroomids.iter().map(|(bytes, _)| { RoomId::parse( utils::string_from_bytes(&bytes).map_err(|_| { diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs index fdd51ce1..5259beff 100644 --- a/src/database/key_value/rooms/edus/presence.rs +++ b/src/database/key_value/rooms/edus/presence.rs @@ -1,6 +1,8 @@ use std::collections::HashMap; -use ruma::{events::presence::PresenceEvent, presence::PresenceState, RoomId, UInt, UserId}; +use ruma::{ + events::presence::PresenceEvent, presence::PresenceState, OwnedUserId, RoomId, UInt, UserId, +}; use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; @@ -76,7 +78,7 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { &self, room_id: &RoomId, since: u64, - ) -> Result, PresenceEvent>> { + ) -> Result> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/key_value/rooms/edus/read_receipt.rs b/src/database/key_value/rooms/edus/read_receipt.rs index a8349f6e..fa97ea34 100644 --- 
a/src/database/key_value/rooms/edus/read_receipt.rs +++ b/src/database/key_value/rooms/edus/read_receipt.rs @@ -1,7 +1,7 @@ use std::mem; use ruma::{ - events::receipt::ReceiptEvent, serde::Raw, signatures::CanonicalJsonObject, RoomId, UserId, + events::receipt::ReceiptEvent, serde::Raw, CanonicalJsonObject, OwnedUserId, RoomId, UserId, }; use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; @@ -55,7 +55,7 @@ impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { ) -> Box< dyn Iterator< Item = Result<( - Box, + OwnedUserId, u64, Raw, )>, diff --git a/src/database/key_value/rooms/edus/typing.rs b/src/database/key_value/rooms/edus/typing.rs index 7b211e7c..4e6c86b4 100644 --- a/src/database/key_value/rooms/edus/typing.rs +++ b/src/database/key_value/rooms/edus/typing.rs @@ -1,6 +1,6 @@ use std::collections::HashSet; -use ruma::{RoomId, UserId}; +use ruma::{OwnedUserId, RoomId, UserId}; use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; @@ -66,7 +66,7 @@ impl service::rooms::edus::typing::Data for KeyValueDatabase { .unwrap_or(0)) } - fn typings_all(&self, room_id: &RoomId) -> Result>> { + fn typings_all(&self, room_id: &RoomId) -> Result> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/key_value/rooms/metadata.rs b/src/database/key_value/rooms/metadata.rs index 0f61dbb4..57540c40 100644 --- a/src/database/key_value/rooms/metadata.rs +++ b/src/database/key_value/rooms/metadata.rs @@ -1,4 +1,4 @@ -use ruma::RoomId; +use ruma::{OwnedRoomId, RoomId}; use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; @@ -18,7 +18,7 @@ impl service::rooms::metadata::Data for KeyValueDatabase { .is_some()) } - fn iter_ids<'a>(&'a self) -> Box>> + 'a> { + fn iter_ids<'a>(&'a self) -> Box> + 'a> { Box::new(self.roomid_shortroomid.iter().map(|(bytes, _)| { RoomId::parse( utils::string_from_bytes(&bytes).map_err(|_| { diff --git a/src/database/key_value/rooms/outlier.rs b/src/database/key_value/rooms/outlier.rs index 2ecaadbb..7985ba81 100644 --- a/src/database/key_value/rooms/outlier.rs +++ b/src/database/key_value/rooms/outlier.rs @@ -1,4 +1,4 @@ -use ruma::{signatures::CanonicalJsonObject, EventId}; +use ruma::{CanonicalJsonObject, EventId}; use crate::{database::KeyValueDatabase, service, Error, PduEvent, Result}; diff --git a/src/database/key_value/rooms/state.rs b/src/database/key_value/rooms/state.rs index dbc1398c..f17d37bb 100644 --- a/src/database/key_value/rooms/state.rs +++ b/src/database/key_value/rooms/state.rs @@ -1,4 +1,4 @@ -use ruma::{EventId, RoomId}; +use ruma::{EventId, OwnedEventId, RoomId}; use std::collections::HashSet; use std::sync::Arc; @@ -52,7 +52,7 @@ impl service::rooms::state::Data for KeyValueDatabase { fn set_forward_extremities<'a>( &self, room_id: &RoomId, - event_ids: Vec>, + event_ids: Vec, _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()> { let mut prefix = room_id.as_bytes().to_vec(); diff --git a/src/database/key_value/rooms/state_cache.rs b/src/database/key_value/rooms/state_cache.rs index cbc05764..ff4594f0 100644 --- a/src/database/key_value/rooms/state_cache.rs +++ b/src/database/key_value/rooms/state_cache.rs @@ -4,7 +4,7 @@ use regex::Regex; use ruma::{ events::{AnyStrippedStateEvent, AnySyncStateEvent}, serde::Raw, - RoomId, ServerName, UserId, + OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, }; use crate::{database::KeyValueDatabase, 
service, services, utils, Error, Result}; @@ -163,7 +163,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { } #[tracing::instrument(skip(self, room_id))] - fn get_our_real_users(&self, room_id: &RoomId) -> Result>>> { + fn get_our_real_users(&self, room_id: &RoomId) -> Result>> { let maybe = self .our_real_users_cache .read() @@ -262,7 +262,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { fn room_servers<'a>( &'a self, room_id: &RoomId, - ) -> Box>> + 'a> { + ) -> Box> + 'a> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -295,7 +295,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { fn server_rooms<'a>( &'a self, server: &ServerName, - ) -> Box>> + 'a> { + ) -> Box> + 'a> { let mut prefix = server.as_bytes().to_vec(); prefix.push(0xff); @@ -317,7 +317,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { fn room_members<'a>( &'a self, room_id: &RoomId, - ) -> Box>> + 'a> { + ) -> Box> + 'a> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -363,7 +363,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { fn room_useroncejoined<'a>( &'a self, room_id: &RoomId, - ) -> Box>> + 'a> { + ) -> Box> + 'a> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -393,7 +393,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { fn room_members_invited<'a>( &'a self, room_id: &RoomId, - ) -> Box>> + 'a> { + ) -> Box> + 'a> { let mut prefix = room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -451,7 +451,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { fn rooms_joined<'a>( &'a self, user_id: &UserId, - ) -> Box>> + 'a> { + ) -> Box> + 'a> { Box::new( self.userroomid_joined .scan_prefix(user_id.as_bytes().to_vec()) @@ -476,7 +476,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { fn rooms_invited<'a>( &'a self, user_id: &UserId, - ) -> Box, Vec>)>> + 'a> { + ) -> Box>)>> + 'a> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); @@ -554,7 +554,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { fn rooms_left<'a>( &'a self, user_id: &UserId, - ) -> Box, Vec>)>> + 'a> { + ) -> Box>)>> + 'a> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs index 1660a9ec..0c6c2dde 100644 --- a/src/database/key_value/rooms/timeline.rs +++ b/src/database/key_value/rooms/timeline.rs @@ -1,7 +1,7 @@ use std::{collections::hash_map, mem::size_of, sync::Arc}; use ruma::{ - api::client::error::ErrorKind, signatures::CanonicalJsonObject, EventId, RoomId, UserId, + api::client::error::ErrorKind, CanonicalJsonObject, EventId, OwnedUserId, RoomId, UserId, }; use tracing::error; @@ -344,8 +344,8 @@ impl service::rooms::timeline::Data for KeyValueDatabase { fn increment_notification_counts( &self, room_id: &RoomId, - notifies: Vec>, - highlights: Vec>, + notifies: Vec, + highlights: Vec, ) -> Result<()> { let mut notifies_batch = Vec::new(); let mut highlights_batch = Vec::new(); diff --git a/src/database/key_value/rooms/user.rs b/src/database/key_value/rooms/user.rs index 9230e611..e678c878 100644 --- a/src/database/key_value/rooms/user.rs +++ b/src/database/key_value/rooms/user.rs @@ -1,4 +1,4 @@ -use ruma::{RoomId, UserId}; +use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId}; use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; @@ -85,8 +85,8 @@ impl service::rooms::user::Data for 
KeyValueDatabase { fn get_shared_rooms<'a>( &'a self, - users: Vec>, - ) -> Result>> + 'a>> { + users: Vec, + ) -> Result> + 'a>> { let iterators = users.into_iter().map(move |user_id| { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); @@ -110,7 +110,7 @@ impl service::rooms::user::Data for KeyValueDatabase { }); // We use the default compare function because keys are sorted correctly (not reversed) - Ok(Box::new(Box::new( + Ok(Box::new( utils::common_elements(iterators, Ord::cmp) .expect("users is not empty") .map(|bytes| { @@ -119,6 +119,6 @@ impl service::rooms::user::Data for KeyValueDatabase { })?) .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) }), - ))) + )) } } diff --git a/src/database/key_value/uiaa.rs b/src/database/key_value/uiaa.rs index 8a9f1762..5fd91b07 100644 --- a/src/database/key_value/uiaa.rs +++ b/src/database/key_value/uiaa.rs @@ -1,7 +1,6 @@ use ruma::{ api::client::{error::ErrorKind, uiaa::UiaaInfo}, - signatures::CanonicalJsonValue, - DeviceId, UserId, + CanonicalJsonValue, DeviceId, UserId, }; use crate::{database::KeyValueDatabase, service, Error, Result}; diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs index 8213c5d7..3bb8e616 100644 --- a/src/database/key_value/users.rs +++ b/src/database/key_value/users.rs @@ -5,8 +5,10 @@ use ruma::{ encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::{AnyToDeviceEvent, StateEventType}, serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, MxcUri, UInt, UserId, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, MxcUri, OwnedUserId, + UInt, UserId, }; +use ruma::{OwnedDeviceId, OwnedDeviceKeyId, OwnedMxcUri}; use tracing::warn; use crate::{ @@ -39,7 +41,7 @@ impl service::users::Data for KeyValueDatabase { } /// Find out which user an access token belongs to. - fn find_from_token(&self, token: &str) -> Result, String)>> { + fn find_from_token(&self, token: &str) -> Result> { self.token_userdeviceid .get(token.as_bytes())? .map_or(Ok(None), |bytes| { @@ -66,7 +68,7 @@ impl service::users::Data for KeyValueDatabase { } /// Returns an iterator over all users on this homeserver. - fn iter<'a>(&'a self) -> Box>> + 'a> { + fn iter<'a>(&'a self) -> Box> + 'a> { Box::new(self.userid_password.iter().map(|(bytes, _)| { UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("User ID in userid_password is invalid unicode.") @@ -141,7 +143,7 @@ impl service::users::Data for KeyValueDatabase { } /// Get the avatar_url of a user. - fn avatar_url(&self, user_id: &UserId) -> Result>> { + fn avatar_url(&self, user_id: &UserId) -> Result> { self.userid_avatarurl .get(user_id.as_bytes())? .map(|bytes| { @@ -154,7 +156,7 @@ impl service::users::Data for KeyValueDatabase { } /// Sets a new avatar_url or removes it if avatar_url is None. 
- fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option>) -> Result<()> { + fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option) -> Result<()> { if let Some(avatar_url) = avatar_url { self.userid_avatarurl .insert(user_id.as_bytes(), avatar_url.to_string().as_bytes())?; @@ -258,7 +260,7 @@ impl service::users::Data for KeyValueDatabase { fn all_device_ids<'a>( &'a self, user_id: &UserId, - ) -> Box>> + 'a> { + ) -> Box> + 'a> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); // All devices have metadata @@ -356,7 +358,7 @@ impl service::users::Data for KeyValueDatabase { user_id: &UserId, device_id: &DeviceId, key_algorithm: &DeviceKeyAlgorithm, - ) -> Result, Raw)>> { + ) -> Result)>> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(device_id.as_bytes()); @@ -407,7 +409,7 @@ impl service::users::Data for KeyValueDatabase { .scan_prefix(userdeviceid) .map(|(bytes, _)| { Ok::<_, Error>( - serde_json::from_slice::>( + serde_json::from_slice::( &*bytes.rsplit(|&b| b == 0xff).next().ok_or_else(|| { Error::bad_database("OneTimeKey ID in db is invalid.") })?, @@ -579,7 +581,7 @@ impl service::users::Data for KeyValueDatabase { .ok_or_else(|| Error::bad_database("key in keyid_key has no signatures field."))? .as_object_mut() .ok_or_else(|| Error::bad_database("key in keyid_key has invalid signatures field."))? - .entry(sender_id.to_owned()) + .entry(sender_id.to_string()) .or_insert_with(|| serde_json::Map::new().into()); signatures @@ -603,7 +605,7 @@ impl service::users::Data for KeyValueDatabase { user_or_room_id: &str, from: u64, to: Option, - ) -> Box>> + 'a> { + ) -> Box> + 'a> { let mut prefix = user_or_room_id.as_bytes().to_vec(); prefix.push(0xff); diff --git a/src/database/mod.rs b/src/database/mod.rs index 967ec885..f4ca44f1 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -6,13 +6,17 @@ use abstraction::KeyValueDatabaseEngine; use abstraction::KvTree; use directories::ProjectDirs; use lru_cache::LruCache; +use ruma::CanonicalJsonValue; +use ruma::OwnedDeviceId; +use ruma::OwnedEventId; +use ruma::OwnedRoomId; +use ruma::OwnedUserId; use ruma::{ events::{ push_rules::PushRulesEventContent, room::message::RoomMessageEventContent, GlobalAccountDataEvent, GlobalAccountDataEventType, StateEventType, }, push::Ruleset, - signatures::CanonicalJsonValue, DeviceId, EventId, RoomId, UserId, }; use std::{ @@ -58,7 +62,7 @@ pub struct KeyValueDatabase { //pub uiaa: uiaa::Uiaa, pub(super) userdevicesessionid_uiaainfo: Arc, // User-interactive authentication pub(super) userdevicesessionid_uiaarequest: - RwLock, Box, String), CanonicalJsonValue>>, + RwLock>, //pub edus: RoomEdus, pub(super) readreceiptid_readreceipt: Arc, // ReadReceiptId = RoomId + Count + UserId @@ -152,15 +156,15 @@ pub struct KeyValueDatabase { pub(super) senderkey_pusher: Arc, pub(super) cached_registrations: Arc>>, - pub(super) pdu_cache: Mutex, Arc>>, + pub(super) pdu_cache: Mutex>>, pub(super) shorteventid_cache: Mutex>>, pub(super) auth_chain_cache: Mutex, Arc>>>, - pub(super) eventidshort_cache: Mutex, u64>>, + pub(super) eventidshort_cache: Mutex>, pub(super) statekeyshort_cache: Mutex>, pub(super) shortstatekey_cache: Mutex>, - pub(super) our_real_users_cache: RwLock, Arc>>>>, - pub(super) appservice_in_room_cache: RwLock, HashMap>>, - pub(super) lasttimelinecount_cache: Mutex, u64>>, + pub(super) our_real_users_cache: RwLock>>>, + pub(super) appservice_in_room_cache: RwLock>>, + pub(super) lasttimelinecount_cache: Mutex>, } impl 
KeyValueDatabase { @@ -531,7 +535,7 @@ impl KeyValueDatabase { if services().globals.database_version()? < 7 { // Upgrade state store - let mut last_roomstates: HashMap, u64> = HashMap::new(); + let mut last_roomstates: HashMap = HashMap::new(); let mut current_sstatehash: Option = None; let mut current_room = None; let mut current_state = HashSet::new(); diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 218a4ea4..9e3f586a 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -23,7 +23,7 @@ use ruma::{ }, RoomEventType, }, - EventId, RoomAliasId, RoomId, RoomName, RoomVersionId, ServerName, UserId, + EventId, OwnedRoomAliasId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; use serde_json::value::to_raw_value; use tokio::sync::{mpsc, Mutex, MutexGuard}; @@ -977,8 +977,7 @@ impl Service { )?; // 5. Events implied by name and topic - let room_name = RoomName::parse(format!("{} Admin Room", services().globals.server_name())) - .expect("Room name is valid"); + let room_name = format!("{} Admin Room", services().globals.server_name()); services().rooms.timeline.build_and_append_pdu( PduBuilder { event_type: RoomEventType::RoomName, @@ -1010,7 +1009,7 @@ impl Service { )?; // 6. Room alias - let alias: Box = format!("#admins:{}", services().globals.server_name()) + let alias: OwnedRoomAliasId = format!("#admins:{}", services().globals.server_name()) .try_into() .expect("#admins:server_name is a valid alias name"); diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index 407ff1c4..f333254f 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -4,7 +4,7 @@ use async_trait::async_trait; use ruma::{ api::federation::discovery::{ServerSigningKeys, VerifyKey}, signatures::Ed25519KeyPair, - DeviceId, ServerName, ServerSigningKeyId, UserId, + DeviceId, OwnedServerSigningKeyId, ServerName, ServerSigningKeyId, UserId, }; use crate::Result; @@ -22,13 +22,13 @@ pub trait Data: Send + Sync { &self, origin: &ServerName, new_keys: ServerSigningKeys, - ) -> Result, VerifyKey>>; + ) -> Result>; /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server. 
fn signing_keys_for( &self, origin: &ServerName, - ) -> Result, VerifyKey>>; + ) -> Result>; fn database_version(&self) -> Result; fn bump_database_version(&self, new_version: u64) -> Result<()>; } diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index f88fd023..e7daff8c 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -1,5 +1,8 @@ mod data; pub use data::Data; +use ruma::{ + OwnedDeviceId, OwnedEventId, OwnedRoomId, OwnedServerName, OwnedServerSigningKeyId, OwnedUserId, +}; use crate::api::server_server::FedDest; @@ -24,7 +27,7 @@ use tokio::sync::{broadcast, watch::Receiver, Mutex as TokioMutex, Semaphore}; use tracing::error; use trust_dns_resolver::TokioAsyncResolver; -type WellKnownMap = HashMap, (FedDest, String)>; +type WellKnownMap = HashMap; type TlsNameMap = HashMap, u16)>; type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries type SyncHandle = ( @@ -45,14 +48,14 @@ pub struct Service { default_client: reqwest::Client, pub stable_room_versions: Vec, pub unstable_room_versions: Vec, - pub bad_event_ratelimiter: Arc, RateLimitState>>>, + pub bad_event_ratelimiter: Arc>>, pub bad_signature_ratelimiter: Arc, RateLimitState>>>, - pub servername_ratelimiter: Arc, Arc>>>, - pub sync_receivers: RwLock, Box), SyncHandle>>, - pub roomid_mutex_insert: RwLock, Arc>>>, - pub roomid_mutex_state: RwLock, Arc>>>, - pub roomid_mutex_federation: RwLock, Arc>>>, // this lock will be held longer - pub roomid_federationhandletime: RwLock, (Box, Instant)>>, + pub servername_ratelimiter: Arc>>>, + pub sync_receivers: RwLock>, + pub roomid_mutex_insert: RwLock>>>, + pub roomid_mutex_state: RwLock>>>, + pub roomid_mutex_federation: RwLock>>>, // this lock will be held longer + pub roomid_federationhandletime: RwLock>, pub stateres_mutex: Arc>, pub rotate: RotationHandler, } @@ -242,7 +245,7 @@ impl Service { self.config.default_room_version.clone() } - pub fn trusted_servers(&self) -> &[Box] { + pub fn trusted_servers(&self) -> &[OwnedServerName] { &self.config.trusted_servers } @@ -295,7 +298,7 @@ impl Service { &self, origin: &ServerName, new_keys: ServerSigningKeys, - ) -> Result, VerifyKey>> { + ) -> Result> { self.db.add_signing_key(origin, new_keys) } @@ -303,7 +306,7 @@ impl Service { pub fn signing_keys_for( &self, origin: &ServerName, - ) -> Result, VerifyKey>> { + ) -> Result> { self.db.signing_keys_for(origin) } diff --git a/src/service/key_backups/data.rs b/src/service/key_backups/data.rs index f711e5d9..bf640015 100644 --- a/src/service/key_backups/data.rs +++ b/src/service/key_backups/data.rs @@ -4,7 +4,7 @@ use crate::Result; use ruma::{ api::client::backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, serde::Raw, - RoomId, UserId, + OwnedRoomId, RoomId, UserId, }; pub trait Data: Send + Sync { @@ -47,7 +47,7 @@ pub trait Data: Send + Sync { &self, user_id: &UserId, version: &str, - ) -> Result, RoomKeyBackup>>; + ) -> Result>; fn get_room( &self, diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs index fef46130..5fc52ced 100644 --- a/src/service/key_backups/mod.rs +++ b/src/service/key_backups/mod.rs @@ -5,7 +5,7 @@ use crate::Result; use ruma::{ api::client::backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, serde::Raw, - RoomId, UserId, + OwnedRoomId, RoomId, UserId, }; use std::collections::BTreeMap; @@ -78,7 +78,7 @@ impl Service { &self, user_id: &UserId, version: &str, - ) -> Result, RoomKeyBackup>> { + ) -> Result> { self.db.get_all(user_id, version) } diff --git 
a/src/service/pdu.rs b/src/service/pdu.rs index 724b2b21..593a687b 100644 --- a/src/service/pdu.rs +++ b/src/service/pdu.rs @@ -1,11 +1,13 @@ use crate::{services, Error}; use ruma::{ events::{ - room::member::RoomMemberEventContent, AnyEphemeralRoomEvent, AnyRoomEvent, AnyStateEvent, - AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, RoomEventType, StateEvent, + room::member::RoomMemberEventContent, AnyEphemeralRoomEvent, AnyStateEvent, + AnyStrippedStateEvent, AnySyncStateEvent, AnySyncTimelineEvent, AnyTimelineEvent, + RoomEventType, StateEvent, }, - serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res, EventId, MilliSecondsSinceUnixEpoch, RoomId, UInt, UserId, + serde::Raw, + state_res, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, + OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UInt, UserId, }; use serde::{Deserialize, Serialize}; use serde_json::{ @@ -25,8 +27,8 @@ pub struct EventHash { #[derive(Clone, Deserialize, Serialize, Debug)] pub struct PduEvent { pub event_id: Arc, - pub room_id: Box, - pub sender: Box, + pub room_id: OwnedRoomId, + pub sender: OwnedUserId, pub origin_server_ts: UInt, #[serde(rename = "type")] pub kind: RoomEventType, @@ -102,7 +104,7 @@ impl PduEvent { } #[tracing::instrument(skip(self))] - pub fn to_sync_room_event(&self) -> Raw { + pub fn to_sync_room_event(&self) -> Raw { let mut json = json!({ "content": self.content, "type": self.kind, @@ -146,7 +148,7 @@ impl PduEvent { } #[tracing::instrument(skip(self))] - pub fn to_room_event(&self) -> Raw { + pub fn to_room_event(&self) -> Raw { let mut json = json!({ "content": self.content, "type": self.kind, @@ -332,7 +334,7 @@ impl Ord for PduEvent { /// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. pub(crate) fn gen_event_id_canonical_json( pdu: &RawJsonValue, -) -> crate::Result<(Box, CanonicalJsonObject)> { +) -> crate::Result<(OwnedEventId, CanonicalJsonObject)> { let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { warn!("Error parsing incoming event {:?}: {:?}", pdu, e); Error::BadServerResponse("Invalid PDU in server response") diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 385a2071..2d2fa1f9 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -1,5 +1,6 @@ mod data; pub use data::Data; +use ruma::events::AnySyncTimelineEvent; use crate::{services, Error, PduEvent, Result}; use bytes::BytesMut; @@ -15,7 +16,7 @@ use ruma::{ }, events::{ room::{name::RoomNameEventContent, power_levels::RoomPowerLevelsEventContent}, - AnySyncRoomEvent, RoomEventType, StateEventType, + RoomEventType, StateEventType, }, push::{Action, PushConditionRoomCtx, PushFormat, Ruleset, Tweak}, serde::Raw, @@ -195,12 +196,13 @@ impl Service { user: &UserId, ruleset: &'a Ruleset, power_levels: &RoomPowerLevelsEventContent, - pdu: &Raw, + pdu: &Raw, room_id: &RoomId, ) -> Result<&'a [Action]> { let ctx = PushConditionRoomCtx { room_id: room_id.to_owned(), member_count: 10_u32.into(), // TODO: get member count efficiently + user_id: user.to_owned(), user_display_name: services() .users .displayname(user)? 
@@ -242,7 +244,7 @@ impl Service { let mut data_minus_url = pusher.data.clone(); // The url must be stripped off according to spec data_minus_url.url = None; - device.data = data_minus_url; + device.data = data_minus_url.into(); // Tweaks are only added if the format is NOT event_id_only if !event_id_only { diff --git a/src/service/rooms/alias/data.rs b/src/service/rooms/alias/data.rs index 6299add7..629b1ee1 100644 --- a/src/service/rooms/alias/data.rs +++ b/src/service/rooms/alias/data.rs @@ -1,5 +1,5 @@ use crate::Result; -use ruma::{RoomAliasId, RoomId}; +use ruma::{OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId}; pub trait Data: Send + Sync { /// Creates or updates the alias to the given room id. @@ -9,11 +9,11 @@ pub trait Data: Send + Sync { fn remove_alias(&self, alias: &RoomAliasId) -> Result<()>; /// Looks up the roomid for the given alias. - fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result>>; + fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result>; /// Returns all local aliases that point to the given room fn local_aliases_for_room<'a>( &'a self, room_id: &RoomId, - ) -> Box>> + 'a>; + ) -> Box> + 'a>; } diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs index 6b52549a..d26030c0 100644 --- a/src/service/rooms/alias/mod.rs +++ b/src/service/rooms/alias/mod.rs @@ -3,7 +3,7 @@ mod data; pub use data::Data; use crate::Result; -use ruma::{RoomAliasId, RoomId}; +use ruma::{OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId}; pub struct Service { pub db: &'static dyn Data, @@ -21,7 +21,7 @@ impl Service { } #[tracing::instrument(skip(self))] - pub fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result>> { + pub fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result> { self.db.resolve_local_alias(alias) } @@ -29,7 +29,7 @@ impl Service { pub fn local_aliases_for_room<'a>( &'a self, room_id: &RoomId, - ) -> Box>> + 'a> { + ) -> Box> + 'a> { self.db.local_aliases_for_room(room_id) } } diff --git a/src/service/rooms/directory/data.rs b/src/service/rooms/directory/data.rs index 320c6db1..aca731ce 100644 --- a/src/service/rooms/directory/data.rs +++ b/src/service/rooms/directory/data.rs @@ -1,5 +1,5 @@ use crate::Result; -use ruma::RoomId; +use ruma::{OwnedRoomId, RoomId}; pub trait Data: Send + Sync { /// Adds the room to the public room directory @@ -12,5 +12,5 @@ pub trait Data: Send + Sync { fn is_public_room(&self, room_id: &RoomId) -> Result; /// Returns the unsorted public room directory - fn public_rooms<'a>(&'a self) -> Box>> + 'a>; + fn public_rooms<'a>(&'a self) -> Box> + 'a>; } diff --git a/src/service/rooms/directory/mod.rs b/src/service/rooms/directory/mod.rs index 0c1b2cd4..10f782bb 100644 --- a/src/service/rooms/directory/mod.rs +++ b/src/service/rooms/directory/mod.rs @@ -1,7 +1,7 @@ mod data; pub use data::Data; -use ruma::RoomId; +use ruma::{OwnedRoomId, RoomId}; use crate::Result; @@ -26,7 +26,7 @@ impl Service { } #[tracing::instrument(skip(self))] - pub fn public_rooms(&self) -> impl Iterator>> + '_ { + pub fn public_rooms(&self) -> impl Iterator> + '_ { self.db.public_rooms() } } diff --git a/src/service/rooms/edus/presence/data.rs b/src/service/rooms/edus/presence/data.rs index f3784040..53329e08 100644 --- a/src/service/rooms/edus/presence/data.rs +++ b/src/service/rooms/edus/presence/data.rs @@ -1,7 +1,7 @@ use std::collections::HashMap; use crate::Result; -use ruma::{events::presence::PresenceEvent, RoomId, UserId}; +use ruma::{events::presence::PresenceEvent, OwnedUserId, RoomId, UserId}; pub trait 
Data: Send + Sync { /// Adds a presence event which will be saved until a new event replaces it. @@ -34,5 +34,5 @@ pub trait Data: Send + Sync { &self, room_id: &RoomId, since: u64, - ) -> Result, PresenceEvent>>; + ) -> Result>; } diff --git a/src/service/rooms/edus/presence/mod.rs b/src/service/rooms/edus/presence/mod.rs index 36814309..860aea18 100644 --- a/src/service/rooms/edus/presence/mod.rs +++ b/src/service/rooms/edus/presence/mod.rs @@ -2,7 +2,7 @@ mod data; use std::collections::HashMap; pub use data::Data; -use ruma::{events::presence::PresenceEvent, RoomId, UserId}; +use ruma::{events::presence::PresenceEvent, OwnedUserId, RoomId, UserId}; use crate::Result; @@ -116,7 +116,7 @@ impl Service { &self, room_id: &RoomId, since: u64, - ) -> Result, PresenceEvent>> { + ) -> Result> { self.db.presence_since(room_id, since) } } diff --git a/src/service/rooms/edus/read_receipt/data.rs b/src/service/rooms/edus/read_receipt/data.rs index 800c035f..a183d196 100644 --- a/src/service/rooms/edus/read_receipt/data.rs +++ b/src/service/rooms/edus/read_receipt/data.rs @@ -1,5 +1,5 @@ use crate::Result; -use ruma::{events::receipt::ReceiptEvent, serde::Raw, RoomId, UserId}; +use ruma::{events::receipt::ReceiptEvent, serde::Raw, OwnedUserId, RoomId, UserId}; pub trait Data: Send + Sync { /// Replaces the previous read receipt. @@ -18,7 +18,7 @@ pub trait Data: Send + Sync { ) -> Box< dyn Iterator< Item = Result<( - Box, + OwnedUserId, u64, Raw, )>, diff --git a/src/service/rooms/edus/read_receipt/mod.rs b/src/service/rooms/edus/read_receipt/mod.rs index 1b3ddb12..c6035280 100644 --- a/src/service/rooms/edus/read_receipt/mod.rs +++ b/src/service/rooms/edus/read_receipt/mod.rs @@ -3,7 +3,7 @@ mod data; pub use data::Data; use crate::Result; -use ruma::{events::receipt::ReceiptEvent, serde::Raw, RoomId, UserId}; +use ruma::{events::receipt::ReceiptEvent, serde::Raw, OwnedUserId, RoomId, UserId}; pub struct Service { pub db: &'static dyn Data, @@ -28,7 +28,7 @@ impl Service { since: u64, ) -> impl Iterator< Item = Result<( - Box, + OwnedUserId, u64, Raw, )>, diff --git a/src/service/rooms/edus/typing/data.rs b/src/service/rooms/edus/typing/data.rs index 50b6d13e..c4ad8673 100644 --- a/src/service/rooms/edus/typing/data.rs +++ b/src/service/rooms/edus/typing/data.rs @@ -1,5 +1,5 @@ use crate::Result; -use ruma::{RoomId, UserId}; +use ruma::{OwnedUserId, RoomId, UserId}; use std::collections::HashSet; pub trait Data: Send + Sync { @@ -14,5 +14,5 @@ pub trait Data: Send + Sync { fn last_typing_update(&self, room_id: &RoomId) -> Result; /// Returns all user ids currently typing. 
- fn typings_all(&self, room_id: &RoomId) -> Result>>; + fn typings_all(&self, room_id: &RoomId) -> Result>; } diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index cfe0fbf4..2d831f7b 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -3,7 +3,7 @@ type AsyncRecursiveType<'a, T> = Pin + 'a + Send>>; use ruma::{ api::federation::discovery::{get_remote_server_keys, get_server_keys}, - signatures::CanonicalJsonObject, + CanonicalJsonObject, CanonicalJsonValue, OwnedServerName, OwnedServerSigningKeyId, RoomVersionId, }; use std::{ @@ -30,7 +30,6 @@ use ruma::{ }, int, serde::Base64, - signatures::CanonicalJsonValue, state_res::{self, RoomVersion, StateMap}, uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, ServerSigningKeyId, }; @@ -300,7 +299,7 @@ impl Service { Ok(ruma::signatures::Verified::Signatures) => { // Redact warn!("Calculated hash does not match: {}", event_id); - match ruma::signatures::redact(&value, room_version_id) { + match ruma::canonical_json::redact(&value, room_version_id) { Ok(obj) => obj, Err(_) => { return Err(Error::BadRequest( @@ -974,7 +973,11 @@ impl Service { .rooms .state_compressor .save_state(room_id, new_room_state)?; - services().rooms.state.force_state(room_id, sstatehash, new, removed, &state_lock).await?; + services() + .rooms + .state + .force_state(room_id, sstatehash, new, removed, &state_lock) + .await?; } } @@ -1322,7 +1325,7 @@ impl Service { fn get_server_keys_from_cache( &self, pdu: &RawJsonValue, - servers: &mut BTreeMap, BTreeMap, QueryCriteria>>, + servers: &mut BTreeMap>, room_version: &RoomVersionId, pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap>>, ) -> Result<()> { @@ -1414,8 +1417,8 @@ impl Service { pub_key_map: &RwLock>>, ) -> Result<()> { let mut servers: BTreeMap< - Box, - BTreeMap, QueryCriteria>, + OwnedServerName, + BTreeMap, > = BTreeMap::new(); { diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs index b30bb9c1..701a7340 100644 --- a/src/service/rooms/lazy_loading/mod.rs +++ b/src/service/rooms/lazy_loading/mod.rs @@ -5,7 +5,7 @@ use std::{ }; pub use data::Data; -use ruma::{DeviceId, RoomId, UserId}; +use ruma::{DeviceId, OwnedDeviceId, OwnedRoomId, OwnedUserId, RoomId, UserId}; use crate::Result; @@ -13,7 +13,7 @@ pub struct Service { pub db: &'static dyn Data, pub lazy_load_waiting: - Mutex, Box, Box, u64), HashSet>>>, + Mutex>>, } impl Service { @@ -35,7 +35,7 @@ impl Service { user_id: &UserId, device_id: &DeviceId, room_id: &RoomId, - lazy_load: HashSet>, + lazy_load: HashSet, count: u64, ) { self.lazy_load_waiting.lock().unwrap().insert( diff --git a/src/service/rooms/metadata/data.rs b/src/service/rooms/metadata/data.rs index df416dac..339db573 100644 --- a/src/service/rooms/metadata/data.rs +++ b/src/service/rooms/metadata/data.rs @@ -1,9 +1,9 @@ use crate::Result; -use ruma::RoomId; +use ruma::{OwnedRoomId, RoomId}; pub trait Data: Send + Sync { fn exists(&self, room_id: &RoomId) -> Result; - fn iter_ids<'a>(&'a self) -> Box>> + 'a>; + fn iter_ids<'a>(&'a self) -> Box> + 'a>; fn is_disabled(&self, room_id: &RoomId) -> Result; fn disable_room(&self, room_id: &RoomId, disabled: bool) -> Result<()>; } diff --git a/src/service/rooms/metadata/mod.rs b/src/service/rooms/metadata/mod.rs index c99ae4a2..d1884691 100644 --- a/src/service/rooms/metadata/mod.rs +++ b/src/service/rooms/metadata/mod.rs @@ -1,7 +1,7 @@ mod data; pub use data::Data; -use ruma::RoomId; +use ruma::{OwnedRoomId, 
RoomId}; use crate::Result; @@ -16,7 +16,7 @@ impl Service { self.db.exists(room_id) } - pub fn iter_ids<'a>(&'a self) -> Box>> + 'a> { + pub fn iter_ids<'a>(&'a self) -> Box> + 'a> { self.db.iter_ids() } diff --git a/src/service/rooms/outlier/data.rs b/src/service/rooms/outlier/data.rs index edc7c4fd..0ed521dd 100644 --- a/src/service/rooms/outlier/data.rs +++ b/src/service/rooms/outlier/data.rs @@ -1,4 +1,4 @@ -use ruma::{signatures::CanonicalJsonObject, EventId}; +use ruma::{CanonicalJsonObject, EventId}; use crate::{PduEvent, Result}; diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs index c84e975a..dae41e4b 100644 --- a/src/service/rooms/outlier/mod.rs +++ b/src/service/rooms/outlier/mod.rs @@ -1,7 +1,7 @@ mod data; pub use data::Data; -use ruma::{signatures::CanonicalJsonObject, EventId}; +use ruma::{CanonicalJsonObject, EventId}; use crate::{PduEvent, Result}; diff --git a/src/service/rooms/state/data.rs b/src/service/rooms/state/data.rs index 8e80b5e3..19a1e30a 100644 --- a/src/service/rooms/state/data.rs +++ b/src/service/rooms/state/data.rs @@ -1,5 +1,5 @@ use crate::Result; -use ruma::{EventId, RoomId}; +use ruma::{EventId, OwnedEventId, RoomId}; use std::collections::HashSet; use std::sync::Arc; use tokio::sync::MutexGuard; @@ -26,7 +26,7 @@ pub trait Data: Send + Sync { fn set_forward_extremities<'a>( &self, room_id: &RoomId, - event_ids: Vec>, + event_ids: Vec, _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()>; } diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 15fa79b8..2c49c35a 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -12,7 +12,7 @@ use ruma::{ }, serde::Raw, state_res::{self, StateMap}, - EventId, RoomId, RoomVersionId, UserId, + EventId, OwnedEventId, RoomId, RoomVersionId, UserId, }; use serde::Deserialize; use tokio::sync::MutexGuard; @@ -346,7 +346,7 @@ impl Service { pub fn set_forward_extremities<'a>( &self, room_id: &RoomId, - event_ids: Vec>, + event_ids: Vec, state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result<()> { self.db diff --git a/src/service/rooms/state_cache/data.rs b/src/service/rooms/state_cache/data.rs index a6b06a53..42de56d2 100644 --- a/src/service/rooms/state_cache/data.rs +++ b/src/service/rooms/state_cache/data.rs @@ -4,7 +4,7 @@ use crate::Result; use ruma::{ events::{AnyStrippedStateEvent, AnySyncStateEvent}, serde::Raw, - RoomId, ServerName, UserId, + OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, }; pub trait Data: Send + Sync { @@ -20,7 +20,7 @@ pub trait Data: Send + Sync { fn update_joined_count(&self, room_id: &RoomId) -> Result<()>; - fn get_our_real_users(&self, room_id: &RoomId) -> Result>>>; + fn get_our_real_users(&self, room_id: &RoomId) -> Result>>; fn appservice_in_room( &self, @@ -35,7 +35,7 @@ pub trait Data: Send + Sync { fn room_servers<'a>( &'a self, room_id: &RoomId, - ) -> Box>> + 'a>; + ) -> Box> + 'a>; fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result; @@ -43,13 +43,13 @@ pub trait Data: Send + Sync { fn server_rooms<'a>( &'a self, server: &ServerName, - ) -> Box>> + 'a>; + ) -> Box> + 'a>; /// Returns an iterator over all joined members of a room. 
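One pattern repeats in every Data trait in this patch: trait methods cannot return impl Iterator, so the key-value backend hands back a boxed trait object and the service layer re-exposes it with the nicer impl Iterator signature. A rough sketch of that shape, using hypothetical names that are not from this repository:

    use ruma::OwnedUserId;

    trait MembersData {
        // Object-safe: the concrete iterator type is erased behind a Box.
        fn members<'a>(&'a self) -> Box<dyn Iterator<Item = OwnedUserId> + 'a>;
    }

    struct MembersService {
        db: &'static dyn MembersData,
    }

    impl MembersService {
        // Callers of the service still get a plain `impl Iterator` back.
        fn members(&self) -> impl Iterator<Item = OwnedUserId> + '_ {
            self.db.members()
        }
    }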
fn room_members<'a>( &'a self, room_id: &RoomId, - ) -> Box>> + 'a>; + ) -> Box> + 'a>; fn room_joined_count(&self, room_id: &RoomId) -> Result>; @@ -59,13 +59,13 @@ pub trait Data: Send + Sync { fn room_useroncejoined<'a>( &'a self, room_id: &RoomId, - ) -> Box>> + 'a>; + ) -> Box> + 'a>; /// Returns an iterator over all invited members of a room. fn room_members_invited<'a>( &'a self, room_id: &RoomId, - ) -> Box>> + 'a>; + ) -> Box> + 'a>; fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result>; @@ -75,13 +75,13 @@ pub trait Data: Send + Sync { fn rooms_joined<'a>( &'a self, user_id: &UserId, - ) -> Box>> + 'a>; + ) -> Box> + 'a>; /// Returns an iterator over all rooms a user was invited to. fn rooms_invited<'a>( &'a self, user_id: &UserId, - ) -> Box, Vec>)>> + 'a>; + ) -> Box>)>> + 'a>; fn invite_state( &self, @@ -99,7 +99,7 @@ pub trait Data: Send + Sync { fn rooms_left<'a>( &'a self, user_id: &UserId, - ) -> Box, Vec>)>> + 'a>; + ) -> Box>)>> + 'a>; fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result; diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs index 9431d3a1..6c9bed35 100644 --- a/src/service/rooms/state_cache/mod.rs +++ b/src/service/rooms/state_cache/mod.rs @@ -12,7 +12,7 @@ use ruma::{ RoomAccountDataEventType, StateEventType, }, serde::Raw, - RoomId, ServerName, UserId, + OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, }; use crate::{services, Error, Result}; @@ -192,7 +192,7 @@ impl Service { } #[tracing::instrument(skip(self, room_id))] - pub fn get_our_real_users(&self, room_id: &RoomId) -> Result>>> { + pub fn get_our_real_users(&self, room_id: &RoomId) -> Result>> { self.db.get_our_real_users(room_id) } @@ -216,7 +216,7 @@ impl Service { pub fn room_servers<'a>( &'a self, room_id: &RoomId, - ) -> impl Iterator>> + 'a { + ) -> impl Iterator> + 'a { self.db.room_servers(room_id) } @@ -230,7 +230,7 @@ impl Service { pub fn server_rooms<'a>( &'a self, server: &ServerName, - ) -> impl Iterator>> + 'a { + ) -> impl Iterator> + 'a { self.db.server_rooms(server) } @@ -239,7 +239,7 @@ impl Service { pub fn room_members<'a>( &'a self, room_id: &RoomId, - ) -> impl Iterator>> + 'a { + ) -> impl Iterator> + 'a { self.db.room_members(room_id) } @@ -258,7 +258,7 @@ impl Service { pub fn room_useroncejoined<'a>( &'a self, room_id: &RoomId, - ) -> impl Iterator>> + 'a { + ) -> impl Iterator> + 'a { self.db.room_useroncejoined(room_id) } @@ -267,7 +267,7 @@ impl Service { pub fn room_members_invited<'a>( &'a self, room_id: &RoomId, - ) -> impl Iterator>> + 'a { + ) -> impl Iterator> + 'a { self.db.room_members_invited(room_id) } @@ -286,7 +286,7 @@ impl Service { pub fn rooms_joined<'a>( &'a self, user_id: &UserId, - ) -> impl Iterator>> + 'a { + ) -> impl Iterator> + 'a { self.db.rooms_joined(user_id) } @@ -295,7 +295,7 @@ impl Service { pub fn rooms_invited<'a>( &'a self, user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { + ) -> impl Iterator>)>> + 'a { self.db.rooms_invited(user_id) } @@ -322,7 +322,7 @@ impl Service { pub fn rooms_left<'a>( &'a self, user_id: &UserId, - ) -> impl Iterator, Vec>)>> + 'a { + ) -> impl Iterator>)>> + 'a { self.db.rooms_left(user_id) } diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs index bcd3b9a1..356f32c8 100644 --- a/src/service/rooms/state_compressor/mod.rs +++ b/src/service/rooms/state_compressor/mod.rs @@ -251,7 +251,11 @@ impl Service { &self, room_id: &RoomId, new_state_ids_compressed: HashSet, 
- ) -> Result<(u64, HashSet, HashSet)> { + ) -> Result<( + u64, + HashSet, + HashSet, + )> { let previous_shortstatehash = services().rooms.state.get_room_shortstatehash(room_id)?; let state_hash = utils::calculate_hash( diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs index 095731ca..9377af07 100644 --- a/src/service/rooms/timeline/data.rs +++ b/src/service/rooms/timeline/data.rs @@ -1,6 +1,6 @@ use std::sync::Arc; -use ruma::{signatures::CanonicalJsonObject, EventId, RoomId, UserId}; +use ruma::{CanonicalJsonObject, EventId, OwnedUserId, RoomId, UserId}; use crate::{PduEvent, Result}; @@ -81,7 +81,7 @@ pub trait Data: Send + Sync { fn increment_notification_counts( &self, room_id: &RoomId, - notifies: Vec>, - highlights: Vec>, + notifies: Vec, + highlights: Vec, ) -> Result<()>; } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 16f50d23..e96afce9 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -7,10 +7,15 @@ use std::sync::{Arc, Mutex}; pub use data::Data; use regex::Regex; +use ruma::canonical_json::to_canonical_value; use ruma::events::room::power_levels::RoomPowerLevelsEventContent; use ruma::push::Ruleset; -use ruma::signatures::CanonicalJsonValue; use ruma::state_res::RoomVersion; +use ruma::CanonicalJsonObject; +use ruma::CanonicalJsonValue; +use ruma::OwnedEventId; +use ruma::OwnedRoomId; +use ruma::OwnedServerName; use ruma::{ api::client::error::ErrorKind, events::{ @@ -19,8 +24,6 @@ use ruma::{ GlobalAccountDataEventType, RoomEventType, StateEventType, }, push::{Action, Tweak}, - serde::to_canonical_value, - signatures::CanonicalJsonObject, state_res, uint, EventId, RoomAliasId, RoomId, ServerName, UserId, }; use serde::Deserialize; @@ -38,7 +41,7 @@ use super::state_compressor::CompressedStateEvent; pub struct Service { pub db: &'static dyn Data, - pub lasttimelinecount_cache: Mutex, u64>>, + pub lasttimelinecount_cache: Mutex>, } impl Service { @@ -146,7 +149,7 @@ impl Service { &self, pdu: &PduEvent, mut pdu_json: CanonicalJsonObject, - leaves: Vec>, + leaves: Vec, state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result> { let shortroomid = services() @@ -702,7 +705,7 @@ impl Service { .state .set_room_state(room_id, statehashid, state_lock)?; - let mut servers: HashSet> = services() + let mut servers: HashSet = services() .rooms .state_cache .room_servers(room_id) @@ -716,7 +719,7 @@ impl Service { .as_ref() .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) { - servers.insert(Box::from(state_key_uid.server_name())); + servers.insert(state_key_uid.server_name().to_owned()); } } @@ -735,7 +738,7 @@ impl Service { &self, pdu: &PduEvent, pdu_json: CanonicalJsonObject, - new_room_leaves: Vec>, + new_room_leaves: Vec, state_ids_compressed: HashSet, soft_fail: bool, state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex diff --git a/src/service/rooms/user/data.rs b/src/service/rooms/user/data.rs index 7b7841fb..43c4c92a 100644 --- a/src/service/rooms/user/data.rs +++ b/src/service/rooms/user/data.rs @@ -1,5 +1,5 @@ use crate::Result; -use ruma::{RoomId, UserId}; +use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId}; pub trait Data: Send + Sync { fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; @@ -19,6 +19,6 @@ pub trait Data: Send + Sync { fn get_shared_rooms<'a>( &'a self, - users: Vec>, - ) -> Result>> + 'a>>; + users: 
Vec, + ) -> Result> + 'a>>; } diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs index 479e5568..a765cfd1 100644 --- a/src/service/rooms/user/mod.rs +++ b/src/service/rooms/user/mod.rs @@ -1,7 +1,7 @@ mod data; pub use data::Data; -use ruma::{RoomId, UserId}; +use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId}; use crate::Result; @@ -38,8 +38,8 @@ impl Service { pub fn get_shared_rooms<'a>( &'a self, - users: Vec>, - ) -> Result>> + 'a> { + users: Vec, + ) -> Result> + 'a> { self.db.get_shared_rooms(users) } } diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index 60fc6f46..697ca85c 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -30,10 +30,11 @@ use ruma::{ OutgoingRequest, }, device_id, - events::{push_rules::PushRulesEvent, AnySyncEphemeralRoomEvent, GlobalAccountDataEventType}, - push, - receipt::ReceiptType, - uint, MilliSecondsSinceUnixEpoch, ServerName, UInt, UserId, + events::{ + push_rules::PushRulesEvent, receipt::ReceiptType, AnySyncEphemeralRoomEvent, + GlobalAccountDataEventType, + }, + push, uint, MilliSecondsSinceUnixEpoch, OwnedServerName, OwnedUserId, ServerName, UInt, UserId, }; use tokio::{ select, @@ -44,8 +45,8 @@ use tracing::{error, warn}; #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub enum OutgoingKind { Appservice(String), - Push(Box, String), // user and pushkey - Normal(Box), + Push(OwnedUserId, String), // user and pushkey + Normal(OwnedServerName), } impl OutgoingKind { @@ -381,7 +382,7 @@ impl Service { } #[tracing::instrument(skip(self, servers, pdu_id))] - pub fn send_pdu>>( + pub fn send_pdu>( &self, servers: I, pdu_id: &[u8], diff --git a/src/service/uiaa/data.rs b/src/service/uiaa/data.rs index 3b7eb2b7..c64deb90 100644 --- a/src/service/uiaa/data.rs +++ b/src/service/uiaa/data.rs @@ -1,5 +1,5 @@ use crate::Result; -use ruma::{api::client::uiaa::UiaaInfo, signatures::CanonicalJsonValue, DeviceId, UserId}; +use ruma::{api::client::uiaa::UiaaInfo, CanonicalJsonValue, DeviceId, UserId}; pub trait Data: Send + Sync { fn set_uiaa_request( diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs index e827cc89..672290c3 100644 --- a/src/service/uiaa/mod.rs +++ b/src/service/uiaa/mod.rs @@ -7,8 +7,7 @@ use ruma::{ error::ErrorKind, uiaa::{AuthType, IncomingAuthData, IncomingPassword, IncomingUserIdentifier, UiaaInfo}, }, - signatures::CanonicalJsonValue, - DeviceId, UserId, + CanonicalJsonValue, DeviceId, UserId, }; use tracing::error; diff --git a/src/service/users/data.rs b/src/service/users/data.rs index 9537ed2a..bc1db33f 100644 --- a/src/service/users/data.rs +++ b/src/service/users/data.rs @@ -4,7 +4,8 @@ use ruma::{ encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::AnyToDeviceEvent, serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MxcUri, UInt, UserId, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, OwnedDeviceId, OwnedDeviceKeyId, OwnedMxcUri, + OwnedUserId, UInt, UserId, }; use std::collections::BTreeMap; @@ -19,10 +20,10 @@ pub trait Data: Send + Sync { fn count(&self) -> Result; /// Find out which user an access token belongs to. - fn find_from_token(&self, token: &str) -> Result, String)>>; + fn find_from_token(&self, token: &str) -> Result>; /// Returns an iterator over all users on this homeserver. - fn iter<'a>(&'a self) -> Box>> + 'a>; + fn iter<'a>(&'a self) -> Box> + 'a>; /// Returns a list of local users as list of usernames. 
/// @@ -42,10 +43,10 @@ pub trait Data: Send + Sync { fn set_displayname(&self, user_id: &UserId, displayname: Option) -> Result<()>; /// Get the avatar_url of a user. - fn avatar_url(&self, user_id: &UserId) -> Result>>; + fn avatar_url(&self, user_id: &UserId) -> Result>; /// Sets a new avatar_url or removes it if avatar_url is None. - fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option>) -> Result<()>; + fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option) -> Result<()>; /// Get the blurhash of a user. fn blurhash(&self, user_id: &UserId) -> Result>; @@ -69,7 +70,7 @@ pub trait Data: Send + Sync { fn all_device_ids<'a>( &'a self, user_id: &UserId, - ) -> Box>> + 'a>; + ) -> Box> + 'a>; /// Replaces the access token of one device. fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()>; @@ -89,7 +90,7 @@ pub trait Data: Send + Sync { user_id: &UserId, device_id: &DeviceId, key_algorithm: &DeviceKeyAlgorithm, - ) -> Result, Raw)>>; + ) -> Result)>>; fn count_one_time_keys( &self, @@ -125,7 +126,7 @@ pub trait Data: Send + Sync { user_or_room_id: &str, from: u64, to: Option, - ) -> Box>> + 'a>; + ) -> Box> + 'a>; fn mark_device_key_update(&self, user_id: &UserId) -> Result<()>; diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index 4bcb183b..ac66f03d 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -7,7 +7,8 @@ use ruma::{ encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::AnyToDeviceEvent, serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MxcUri, RoomAliasId, UInt, UserId, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MxcUri, OwnedDeviceId, OwnedDeviceKeyId, + OwnedMxcUri, OwnedUserId, RoomAliasId, UInt, UserId, }; use crate::{services, Error, Result}; @@ -56,12 +57,12 @@ impl Service { } /// Find out which user an access token belongs to. - pub fn find_from_token(&self, token: &str) -> Result, String)>> { + pub fn find_from_token(&self, token: &str) -> Result> { self.db.find_from_token(token) } /// Returns an iterator over all users on this homeserver. - pub fn iter(&self) -> impl Iterator>> + '_ { + pub fn iter(&self) -> impl Iterator> + '_ { self.db.iter() } @@ -93,12 +94,12 @@ impl Service { } /// Get the avatar_url of a user. - pub fn avatar_url(&self, user_id: &UserId) -> Result>> { + pub fn avatar_url(&self, user_id: &UserId) -> Result> { self.db.avatar_url(user_id) } /// Sets a new avatar_url or removes it if avatar_url is None. 
- pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option>) -> Result<()> { + pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option) -> Result<()> { self.db.set_avatar_url(user_id, avatar_url) } @@ -133,7 +134,7 @@ impl Service { pub fn all_device_ids<'a>( &'a self, user_id: &UserId, - ) -> impl Iterator>> + 'a { + ) -> impl Iterator> + 'a { self.db.all_device_ids(user_id) } @@ -162,7 +163,7 @@ impl Service { user_id: &UserId, device_id: &DeviceId, key_algorithm: &DeviceKeyAlgorithm, - ) -> Result, Raw)>> { + ) -> Result)>> { self.db.take_one_time_key(user_id, device_id, key_algorithm) } @@ -209,7 +210,7 @@ impl Service { user_or_room_id: &str, from: u64, to: Option, - ) -> impl Iterator>> + 'a { + ) -> impl Iterator> + 'a { self.db.keys_changed(user_or_room_id, from, to) } diff --git a/src/utils/error.rs b/src/utils/error.rs index 206a055f..bd3d73c0 100644 --- a/src/utils/error.rs +++ b/src/utils/error.rs @@ -6,7 +6,7 @@ use ruma::{ error::{Error as RumaError, ErrorKind}, uiaa::{UiaaInfo, UiaaResponse}, }, - ServerName, + OwnedServerName, ServerName, }; use thiserror::Error; use tracing::{error, warn}; @@ -55,7 +55,7 @@ pub enum Error { source: reqwest::Error, }, #[error("{0}")] - FederationError(Box, RumaError), + FederationError(OwnedServerName, RumaError), #[error("Could not do this io: {source}")] IoError { #[from] diff --git a/src/utils/mod.rs b/src/utils/mod.rs index 9202eebb..0b5b1ae4 100644 --- a/src/utils/mod.rs +++ b/src/utils/mod.rs @@ -4,7 +4,7 @@ use argon2::{Config, Variant}; use cmp::Ordering; use rand::prelude::*; use ring::digest; -use ruma::serde::{try_from_json_map, CanonicalJsonError, CanonicalJsonObject}; +use ruma::{canonical_json::try_from_json_map, CanonicalJsonError, CanonicalJsonObject}; use std::{ cmp, fmt, str::FromStr, From 076e9810ba721cfc74b3d9c677ebe08009a23481 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 9 Oct 2022 17:26:53 +0200 Subject: [PATCH 402/445] cargo fix --- src/api/client_server/keys.rs | 2 +- src/api/client_server/sync.rs | 2 +- src/api/ruma_wrapper/axum.rs | 2 +- src/api/server_server.rs | 2 +- src/config/mod.rs | 2 +- src/database/key_value/globals.rs | 3 +-- src/database/key_value/users.rs | 4 ++-- src/database/mod.rs | 2 +- src/service/globals/data.rs | 2 +- src/service/globals/mod.rs | 2 +- src/service/rooms/event_handler/mod.rs | 2 +- src/service/rooms/timeline/mod.rs | 2 +- src/service/users/mod.rs | 4 ++-- src/utils/error.rs | 2 +- 14 files changed, 16 insertions(+), 17 deletions(-) diff --git a/src/api/client_server/keys.rs b/src/api/client_server/keys.rs index 33ff309f..ef4e455d 100644 --- a/src/api/client_server/keys.rs +++ b/src/api/client_server/keys.rs @@ -14,7 +14,7 @@ use ruma::{ federation, }, serde::Raw, - DeviceId, DeviceKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId, + DeviceKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId, }; use serde_json::json; use std::collections::{BTreeMap, HashMap, HashSet}; diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 0479322b..f7907ce2 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -10,7 +10,7 @@ use ruma::{ RoomEventType, StateEventType, }, serde::Raw, - DeviceId, OwnedDeviceId, OwnedUserId, RoomId, UserId, + OwnedDeviceId, OwnedUserId, RoomId, UserId, }; use std::{ collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index c71d36b9..818cffcb 100644 --- a/src/api/ruma_wrapper/axum.rs +++ 
b/src/api/ruma_wrapper/axum.rs @@ -17,7 +17,7 @@ use bytes::{BufMut, Bytes, BytesMut}; use http::StatusCode; use ruma::{ api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse}, - CanonicalJsonValue, DeviceId, OwnedDeviceId, OwnedServerName, ServerName, UserId, + CanonicalJsonValue, OwnedDeviceId, OwnedServerName, UserId, }; use serde::Deserialize; use tracing::{debug, error, warn}; diff --git a/src/api/server_server.rs b/src/api/server_server.rs index a8ae2726..513a0769 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -43,7 +43,7 @@ use ruma::{ serde::{Base64, JsonObject, Raw}, to_device::DeviceIdOrAllDevices, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, - OwnedServerName, OwnedServerSigningKeyId, OwnedUserId, RoomId, ServerName, ServerSigningKeyId, + OwnedServerName, OwnedServerSigningKeyId, OwnedUserId, RoomId, ServerName, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ diff --git a/src/config/mod.rs b/src/config/mod.rs index e0efa60f..2c31b6b1 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -4,7 +4,7 @@ use std::{ net::{IpAddr, Ipv4Addr}, }; -use ruma::{OwnedServerName, RoomVersionId, ServerName}; +use ruma::{OwnedServerName, RoomVersionId}; use serde::{de::IgnoredAny, Deserialize}; use tracing::warn; diff --git a/src/database/key_value/globals.rs b/src/database/key_value/globals.rs index 75d00b4d..4332930f 100644 --- a/src/database/key_value/globals.rs +++ b/src/database/key_value/globals.rs @@ -5,8 +5,7 @@ use futures_util::{stream::FuturesUnordered, StreamExt}; use ruma::{ api::federation::discovery::{ServerSigningKeys, VerifyKey}, signatures::Ed25519KeyPair, - DeviceId, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, ServerName, ServerSigningKeyId, - UserId, + DeviceId, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, ServerName, UserId, }; use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs index 3bb8e616..f7ee07cf 100644 --- a/src/database/key_value/users.rs +++ b/src/database/key_value/users.rs @@ -5,8 +5,8 @@ use ruma::{ encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::{AnyToDeviceEvent, StateEventType}, serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, MxcUri, OwnedUserId, - UInt, UserId, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, OwnedUserId, UInt, + UserId, }; use ruma::{OwnedDeviceId, OwnedDeviceKeyId, OwnedMxcUri}; use tracing::warn; diff --git a/src/database/mod.rs b/src/database/mod.rs index f4ca44f1..689ab57e 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -17,7 +17,7 @@ use ruma::{ GlobalAccountDataEvent, GlobalAccountDataEventType, StateEventType, }, push::Ruleset, - DeviceId, EventId, RoomId, UserId, + EventId, RoomId, UserId, }; use std::{ collections::{BTreeMap, HashMap, HashSet}, diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs index f333254f..04371a0a 100644 --- a/src/service/globals/data.rs +++ b/src/service/globals/data.rs @@ -4,7 +4,7 @@ use async_trait::async_trait; use ruma::{ api::federation::discovery::{ServerSigningKeys, VerifyKey}, signatures::Ed25519KeyPair, - DeviceId, OwnedServerSigningKeyId, ServerName, ServerSigningKeyId, UserId, + DeviceId, OwnedServerSigningKeyId, ServerName, UserId, }; use crate::Result; diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 
e7daff8c..44192e01 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -12,7 +12,7 @@ use ruma::{ client::sync::sync_events, federation::discovery::{ServerSigningKeys, VerifyKey}, }, - DeviceId, EventId, RoomId, RoomVersionId, ServerName, ServerSigningKeyId, UserId, + DeviceId, RoomVersionId, ServerName, UserId, }; use std::{ collections::{BTreeMap, HashMap}, diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 2d831f7b..ae63d9a1 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -31,7 +31,7 @@ use ruma::{ int, serde::Base64, state_res::{self, RoomVersion, StateMap}, - uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, ServerSigningKeyId, + uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, }; use serde_json::value::RawValue as RawJsonValue; use tracing::{debug, error, info, trace, warn}; diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index e96afce9..dc859d8f 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -24,7 +24,7 @@ use ruma::{ GlobalAccountDataEventType, RoomEventType, StateEventType, }, push::{Action, Tweak}, - state_res, uint, EventId, RoomAliasId, RoomId, ServerName, UserId, + state_res, uint, EventId, RoomAliasId, RoomId, UserId, }; use serde::Deserialize; use serde_json::value::to_raw_value; diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs index ac66f03d..9dcfa8be 100644 --- a/src/service/users/mod.rs +++ b/src/service/users/mod.rs @@ -7,8 +7,8 @@ use ruma::{ encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::AnyToDeviceEvent, serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MxcUri, OwnedDeviceId, OwnedDeviceKeyId, - OwnedMxcUri, OwnedUserId, RoomAliasId, UInt, UserId, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, OwnedDeviceId, OwnedDeviceKeyId, OwnedMxcUri, + OwnedUserId, RoomAliasId, UInt, UserId, }; use crate::{services, Error, Result}; diff --git a/src/utils/error.rs b/src/utils/error.rs index bd3d73c0..8967acb7 100644 --- a/src/utils/error.rs +++ b/src/utils/error.rs @@ -6,7 +6,7 @@ use ruma::{ error::{Error as RumaError, ErrorKind}, uiaa::{UiaaInfo, UiaaResponse}, }, - OwnedServerName, ServerName, + OwnedServerName, }; use thiserror::Error; use tracing::{error, warn}; From 229444c9321ef32d09969eb690cab55de66a5d12 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 9 Oct 2022 17:37:55 +0200 Subject: [PATCH 403/445] Use ring-compat feature so out signing keys work again --- Cargo.lock | 10 ++++++++++ Cargo.toml | 2 +- 2 files changed, 11 insertions(+), 1 deletion(-) diff --git a/Cargo.lock b/Cargo.lock index 29603eea..35bcdeaf 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2200,6 +2200,7 @@ dependencies = [ "ruma-common", "serde_json", "sha2", + "subslice", "thiserror", ] @@ -2520,6 +2521,15 @@ dependencies = [ "der", ] +[[package]] +name = "subslice" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e0a8e4809a3bb02de01f1f7faf1ba01a83af9e8eabcd4d31dd6e413d14d56aae" +dependencies = [ + "memchr", +] + [[package]] name = "subtle" version = "2.4.1" diff --git a/Cargo.toml b/Cargo.toml index 0428e746..f1ef0bb1 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ tower-http = { version = "0.3.4", features = ["add-extension", "cors", "compress # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", 
"appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "86b58cafb8abe29eecd0272d90b40bbb61a7919b", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "86b58cafb8abe29eecd0272d90b40bbb61a7919b", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types", "ring-compat"] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } From ca82b2940d9241b99e694f3c8d597feb5bf1bbc0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sun, 9 Oct 2022 21:56:56 +0200 Subject: [PATCH 404/445] fix: sending does not work We were inserting one too many 0xff bytes --- Cargo.lock | 47 ++++------------------------- Cargo.toml | 4 +-- src/database/abstraction/rocksdb.rs | 3 ++ src/database/key_value/sending.rs | 4 ++- src/database/mod.rs | 4 +-- src/service/sending/mod.rs | 3 +- 6 files changed, 18 insertions(+), 47 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 35bcdeaf..941634e0 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -198,9 +198,9 @@ dependencies = [ [[package]] name = "bindgen" -version = "0.60.1" +version = "0.59.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "062dddbc1ba4aca46de6338e2bf87771414c335f7b2f2036e8f3e9befebf88e6" +checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" dependencies = [ "bitflags", "cexpr", @@ -295,17 +295,6 @@ version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" -[[package]] -name = "bzip2-sys" -version = "0.1.11+1.0.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "736a955f3fa7875102d57c82b8cac37ec45224a07fd32d58f9f7a186b6cd4cdc" -dependencies = [ - "cc", - "libc", - "pkg-config", -] - [[package]] name = "cc" version = "1.0.73" @@ -1283,17 +1272,14 @@ dependencies = [ [[package]] name = "librocksdb-sys" -version = "0.8.0+7.4.4" +version = "6.20.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "611804e4666a25136fcc5f8cf425ab4d26c7f74ea245ffe92ea23b85b6420b5d" +checksum = "c309a9d2470844aceb9a4a098cf5286154d20596868b75a6b36357d2bb9ca25d" dependencies = [ "bindgen", - "bzip2-sys", "cc", "glob", "libc", - "libz-sys", - "zstd-sys", ] [[package]] @@ -1307,17 +1293,6 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "libz-sys" -version = "1.1.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9702761c3935f8cc2f101793272e202c72b99da8f4224a19ddcf1279a6450bbf" -dependencies = [ - "cc", - "pkg-config", - "vcpkg", -] - [[package]] name = "linked-hash-map" version = "0.5.6" @@ -2053,9 +2028,9 @@ dependencies = [ [[package]] name = "rocksdb" -version = "0.19.0" +version = "0.17.0" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7e9562ea1d70c0cc63a34a22d977753b50cca91cc6b6527750463bd5dd8697bc" +checksum = "7a62eca5cacf2c8261128631bed9f045598d40bfbe4b29f5163f0f802f8f44a7" dependencies = [ "libc", "librocksdb-sys", @@ -3286,13 +3261,3 @@ checksum = "70b40401a28d86ce16a330b863b86fd7dbee4d7c940587ab09ab8c019f9e3fdf" dependencies = [ "num-traits", ] - -[[package]] -name = "zstd-sys" -version = "2.0.1+zstd.1.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fd07cbbc53846d9145dbffdf6dd09a7a0aa52be46741825f5c97bdd4f73f12b" -dependencies = [ - "cc", - "libc", -] diff --git a/Cargo.toml b/Cargo.toml index f1ef0bb1..031f2798 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -76,7 +76,7 @@ crossbeam = { version = "0.8.1", optional = true } num_cpus = "1.13.0" threadpool = "1.8.1" heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true } -rocksdb = { version = "0.19.0", default-features = true, features = ["multi-threaded-cf", "zstd"], optional = true } +rocksdb = { version = "0.17.0", default-features = true, features = ["multi-threaded-cf", "zstd"], optional = true } thread_local = "1.1.3" # used for TURN server authentication @@ -94,7 +94,7 @@ lazy_static = "1.4.0" async-trait = "0.1.57" [features] -default = ["conduit_bin", "backend_sqlite", "jemalloc"] +default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "jemalloc"] #backend_sled = ["sled"] backend_persy = ["persy", "parking_lot"] backend_sqlite = ["sqlite"] diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index 07277287..96027f6a 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -161,6 +161,7 @@ impl KvTree for RocksDbEngineTree<'_> { self.db .rocks .iterator_cf(&self.cf(), rocksdb::IteratorMode::Start) + //.map(|r| r.unwrap()) .map(|(k, v)| (Vec::from(k), Vec::from(v))), ) } @@ -184,6 +185,7 @@ impl KvTree for RocksDbEngineTree<'_> { }, ), ) + //.map(|r| r.unwrap()) .map(|(k, v)| (Vec::from(k), Vec::from(v))), ) } @@ -224,6 +226,7 @@ impl KvTree for RocksDbEngineTree<'_> { &self.cf(), rocksdb::IteratorMode::From(&prefix, rocksdb::Direction::Forward), ) + //.map(|r| r.unwrap()) .map(|(k, v)| (Vec::from(k), Vec::from(v))) .take_while(move |(k, _)| k.starts_with(&prefix)), ) diff --git a/src/database/key_value/sending.rs b/src/database/key_value/sending.rs index d84bd494..fddbd67d 100644 --- a/src/database/key_value/sending.rs +++ b/src/database/key_value/sending.rs @@ -66,7 +66,6 @@ impl service::sending::Data for KeyValueDatabase { let mut keys = Vec::new(); for (outgoing_kind, event) in requests { let mut key = outgoing_kind.get_prefix(); - key.push(0xff); key.extend_from_slice(if let SendingEventType::Pdu(value) = &event { &**value } else { @@ -139,6 +138,7 @@ fn parse_servercurrentevent( let event = parts .next() .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; + let server = utils::string_from_bytes(server).map_err(|_| { Error::bad_database("Invalid server bytes in server_currenttransaction") })?; @@ -169,6 +169,7 @@ fn parse_servercurrentevent( let event = parts .next() .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; + ( OutgoingKind::Push(user_id, pushkey_string), if value.is_empty() { @@ -185,6 +186,7 @@ fn parse_servercurrentevent( let event = parts .next() .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; + let 
server = utils::string_from_bytes(server).map_err(|_| { Error::bad_database("Invalid server bytes in server_currenttransaction") })?; diff --git a/src/database/mod.rs b/src/database/mod.rs index 689ab57e..9f893d6f 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -556,7 +556,7 @@ impl KeyValueDatabase { services() .rooms .state_compressor - .load_shortstatehash_info(dbg!(last_roomsstatehash)) + .load_shortstatehash_info(last_roomsstatehash) }, )?; @@ -579,7 +579,7 @@ impl KeyValueDatabase { }; services().rooms.state_compressor.save_state_from_diff( - dbg!(current_sstatehash), + current_sstatehash, statediffnew, statediffremoved, 2, // every state change is 2 event changes on average diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index 697ca85c..20c652f5 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -587,7 +587,7 @@ impl Service { .notification_count(&userid, &pdu.room_id) .map_err(|e| (kind.clone(), e))? .try_into() - .expect("notifiation count can't go that high"); + .expect("notification count can't go that high"); let permit = services().sending.maximum_requests.acquire().await; @@ -616,6 +616,7 @@ impl Service { .get_pdu_json_from_id(pdu_id) .map_err(|e| (OutgoingKind::Normal(server.clone()), e))? .ok_or_else(|| { + error!("event not found: {server} {pdu_id:?}"); ( OutgoingKind::Normal(server.clone()), Error::bad_database( From f430b874598f4262e5af5ecee8dd396e317a1e87 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 14:09:11 +0200 Subject: [PATCH 405/445] cargo clippy --- src/api/client_server/account.rs | 4 +- src/api/client_server/keys.rs | 4 +- src/api/client_server/membership.rs | 20 ++++---- src/api/client_server/room.rs | 19 ++++---- src/api/client_server/session.rs | 10 ++-- src/api/client_server/sync.rs | 8 ++-- src/api/client_server/to_device.rs | 2 +- src/api/client_server/user_directory.rs | 2 +- src/api/server_server.rs | 33 ++++++------- src/config/mod.rs | 2 +- src/database/abstraction/rocksdb.rs | 2 +- src/database/abstraction/sqlite.rs | 14 +++--- src/database/key_value/globals.rs | 6 +-- src/database/key_value/pusher.rs | 4 +- src/database/key_value/rooms/alias.rs | 2 +- src/database/key_value/rooms/edus/presence.rs | 2 +- src/database/key_value/rooms/edus/typing.rs | 2 +- src/database/key_value/rooms/search.rs | 2 +- src/database/key_value/rooms/timeline.rs | 5 +- src/database/key_value/rooms/user.rs | 2 +- src/database/key_value/sending.rs | 10 ++-- src/database/key_value/users.rs | 12 ++--- src/database/mod.rs | 11 ++--- src/lib.rs | 2 +- src/main.rs | 5 +- src/service/admin/mod.rs | 32 ++++++------- src/service/pusher/mod.rs | 3 +- src/service/rooms/event_handler/mod.rs | 12 ++--- src/service/rooms/state/data.rs | 3 +- src/service/rooms/state/mod.rs | 4 +- src/service/rooms/timeline/mod.rs | 48 +++++++++---------- src/service/sending/mod.rs | 18 +++---- 32 files changed, 139 insertions(+), 166 deletions(-) diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index 673bbb42..17b2920a 100644 --- a/src/api/client_server/account.rs +++ b/src/api/client_server/account.rs @@ -333,7 +333,7 @@ pub async fn whoami_route(body: Ruma) -> Result bool>( let mut get_over_federation = HashMap::new(); for (user_id, device_ids) in device_keys_input { - let user_id: &UserId = &**user_id; + let user_id: &UserId = user_id; if user_id.server_name() != services().globals.server_name() { get_over_federation diff --git a/src/api/client_server/membership.rs 
b/src/api/client_server/membership.rs index 4f791c71..b69a6d1f 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -62,15 +62,13 @@ pub async fn join_room_by_id_route( servers.push(body.room_id.server_name().to_owned()); - let ret = join_room_by_id_helper( + join_room_by_id_helper( body.sender_user.as_deref(), &body.room_id, &servers, body.third_party_signed.as_ref(), ) - .await; - - ret + .await } /// # `POST /_matrix/client/r0/join/{roomIdOrAlias}` @@ -171,7 +169,7 @@ pub async fn kick_user_route( .room_state_get( &body.room_id, &StateEventType::RoomMember, - &body.user_id.to_string(), + body.user_id.as_ref(), )? .ok_or(Error::BadRequest( ErrorKind::BadState, @@ -230,7 +228,7 @@ pub async fn ban_user_route( .room_state_get( &body.room_id, &StateEventType::RoomMember, - &body.user_id.to_string(), + body.user_id.as_ref(), )? .map_or( Ok(RoomMemberEventContent { @@ -297,7 +295,7 @@ pub async fn unban_user_route( .room_state_get( &body.room_id, &StateEventType::RoomMember, - &body.user_id.to_string(), + body.user_id.as_ref(), )? .ok_or(Error::BadRequest( ErrorKind::BadState, @@ -408,7 +406,7 @@ pub async fn get_member_events_route( .await? .iter() .filter(|(key, _)| key.0 == StateEventType::RoomMember) - .map(|(_, pdu)| pdu.to_member_event().into()) + .map(|(_, pdu)| pdu.to_member_event()) .collect(), }) } @@ -864,7 +862,7 @@ pub(crate) async fn invite_helper<'a>( "${}", ruma::signatures::reference_hash( &pdu_json, - &services().rooms.state.get_room_version(&room_id)? + &services().rooms.state.get_room_version(room_id)? ) .expect("ruma can calculate reference hashes") ); @@ -878,7 +876,7 @@ pub(crate) async fn invite_helper<'a>( create_invite::v2::Request { room_id, event_id: expected_event_id, - room_version: &services().rooms.state.get_room_version(&room_id)?, + room_version: &services().rooms.state.get_room_version(room_id)?, event: &PduEvent::convert_to_outgoing_federation_event(pdu_json.clone()), invite_room_state: &invite_room_state, }, @@ -938,7 +936,7 @@ pub(crate) async fn invite_helper<'a>( if !services() .rooms .state_cache - .is_joined(sender_user, &room_id)? + .is_joined(sender_user, room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs index 43b2e8e6..097f0e14 100644 --- a/src/api/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -1,8 +1,6 @@ use crate::{ api::client_server::invite_helper, service::pdu::PduBuilder, services, Error, Result, Ruma, }; -use ruma::serde::JsonObject; -use ruma::OwnedRoomAliasId; use ruma::{ api::client::{ error::ErrorKind, @@ -23,7 +21,9 @@ use ruma::{ }, RoomEventType, StateEventType, }, - int, CanonicalJsonObject, RoomAliasId, RoomId, + int, + serde::JsonObject, + CanonicalJsonObject, OwnedRoomAliasId, RoomAliasId, RoomId, }; use serde_json::{json, value::to_raw_value}; use std::{cmp::max, collections::BTreeMap, sync::Arc}; @@ -213,14 +213,11 @@ pub async fn create_room_route( // 3. Power levels // Figure out preset. 
We need it for preset specific events - let preset = body - .preset - .clone() - .unwrap_or_else(|| match &body.visibility { - room::Visibility::Private => RoomPreset::PrivateChat, - room::Visibility::Public => RoomPreset::PublicChat, - _ => RoomPreset::PrivateChat, // Room visibility should not be custom - }); + let preset = body.preset.clone().unwrap_or(match &body.visibility { + room::Visibility::Private => RoomPreset::PrivateChat, + room::Visibility::Public => RoomPreset::PublicChat, + _ => RoomPreset::PrivateChat, // Room visibility should not be custom + }); let mut users = BTreeMap::new(); users.insert(sender_user.clone(), int!(100)); diff --git a/src/api/client_server/session.rs b/src/api/client_server/session.rs index 61825167..f62ccbb6 100644 --- a/src/api/client_server/session.rs +++ b/src/api/client_server/session.rs @@ -53,11 +53,11 @@ pub async fn login_route(body: Ruma) -> Result u32 { } fn default_cleanup_second_interval() -> u32 { - 1 * 60 // every minute + 60 // every minute } fn default_max_request_size() -> u32 { diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs index 96027f6a..34d91d29 100644 --- a/src/database/abstraction/rocksdb.rs +++ b/src/database/abstraction/rocksdb.rs @@ -193,7 +193,7 @@ impl KvTree for RocksDbEngineTree<'_> { fn increment(&self, key: &[u8]) -> Result> { let lock = self.write_lock.write().unwrap(); - let old = self.db.rocks.get_cf(&self.cf(), &key)?; + let old = self.db.rocks.get_cf(&self.cf(), key)?; let new = utils::increment(old.as_deref()).unwrap(); self.db.rocks.put_cf(&self.cf(), key, &new)?; diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index 02d4dbd6..4961fd74 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -48,13 +48,13 @@ pub struct Engine { impl Engine { fn prepare_conn(path: &Path, cache_size_kb: u32) -> Result { - let conn = Connection::open(&path)?; + let conn = Connection::open(path)?; - conn.pragma_update(Some(Main), "page_size", &2048)?; - conn.pragma_update(Some(Main), "journal_mode", &"WAL")?; - conn.pragma_update(Some(Main), "synchronous", &"NORMAL")?; - conn.pragma_update(Some(Main), "cache_size", &(-i64::from(cache_size_kb)))?; - conn.pragma_update(Some(Main), "wal_autocheckpoint", &0)?; + conn.pragma_update(Some(Main), "page_size", 2048)?; + conn.pragma_update(Some(Main), "journal_mode", "WAL")?; + conn.pragma_update(Some(Main), "synchronous", "NORMAL")?; + conn.pragma_update(Some(Main), "cache_size", -i64::from(cache_size_kb))?; + conn.pragma_update(Some(Main), "wal_autocheckpoint", 0)?; Ok(conn) } @@ -75,7 +75,7 @@ impl Engine { pub fn flush_wal(self: &Arc) -> Result<()> { self.write_lock() - .pragma_update(Some(Main), "wal_checkpoint", &"RESTART")?; + .pragma_update(Some(Main), "wal_checkpoint", "RESTART")?; Ok(()) } } diff --git a/src/database/key_value/globals.rs b/src/database/key_value/globals.rs index 4332930f..7b7675ca 100644 --- a/src/database/key_value/globals.rs +++ b/src/database/key_value/globals.rs @@ -134,7 +134,7 @@ impl service::globals::Data for KeyValueDatabase { let mut parts = keypair_bytes.splitn(2, |&b| b == 0xff); - let keypair = utils::string_from_bytes( + utils::string_from_bytes( // 1. 
version parts .next() @@ -151,9 +151,7 @@ impl service::globals::Data for KeyValueDatabase { .and_then(|(version, key)| { Ed25519KeyPair::from_der(key, version) .map_err(|_| Error::bad_database("Private or public keys are invalid.")) - }); - - keypair + }) } fn remove_keypair(&self) -> Result<()> { self.global.remove(b"keypair") diff --git a/src/database/key_value/pusher.rs b/src/database/key_value/pusher.rs index 42d4030b..3dfceb6a 100644 --- a/src/database/key_value/pusher.rs +++ b/src/database/key_value/pusher.rs @@ -40,7 +40,7 @@ impl service::pusher::Data for KeyValueDatabase { self.senderkey_pusher .get(&senderkey)? .map(|push| { - serde_json::from_slice(&*push) + serde_json::from_slice(&push) .map_err(|_| Error::bad_database("Invalid Pusher in db.")) }) .transpose() @@ -53,7 +53,7 @@ impl service::pusher::Data for KeyValueDatabase { self.senderkey_pusher .scan_prefix(prefix) .map(|(_, push)| { - serde_json::from_slice(&*push) + serde_json::from_slice(&push) .map_err(|_| Error::bad_database("Invalid Pusher in db.")) }) .collect() diff --git a/src/database/key_value/rooms/alias.rs b/src/database/key_value/rooms/alias.rs index c0f6de89..6f230323 100644 --- a/src/database/key_value/rooms/alias.rs +++ b/src/database/key_value/rooms/alias.rs @@ -9,7 +9,7 @@ impl service::rooms::alias::Data for KeyValueDatabase { let mut aliasid = room_id.as_bytes().to_vec(); aliasid.push(0xff); aliasid.extend_from_slice(&services().globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; + self.aliasid_alias.insert(&aliasid, alias.as_bytes())?; Ok(()) } diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs index 5259beff..904b1c44 100644 --- a/src/database/key_value/rooms/edus/presence.rs +++ b/src/database/key_value/rooms/edus/presence.rs @@ -88,7 +88,7 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { for (key, value) in self .presenceid_presence - .iter_from(&*first_possible_edu, false) + .iter_from(&first_possible_edu, false) .take_while(|(key, _)| key.starts_with(&prefix)) { let user_id = UserId::parse( diff --git a/src/database/key_value/rooms/edus/typing.rs b/src/database/key_value/rooms/edus/typing.rs index 4e6c86b4..4a2f0f96 100644 --- a/src/database/key_value/rooms/edus/typing.rs +++ b/src/database/key_value/rooms/edus/typing.rs @@ -17,7 +17,7 @@ impl service::rooms::edus::typing::Data for KeyValueDatabase { room_typing_id.extend_from_slice(&count); self.typingid_userid - .insert(&room_typing_id, &*user_id.as_bytes())?; + .insert(&room_typing_id, user_id.as_bytes())?; self.roomid_lasttypingupdate .insert(room_id.as_bytes(), &count)?; diff --git a/src/database/key_value/rooms/search.rs b/src/database/key_value/rooms/search.rs index 788c2965..19ae57b4 100644 --- a/src/database/key_value/rooms/search.rs +++ b/src/database/key_value/rooms/search.rs @@ -15,7 +15,7 @@ impl service::rooms::search::Data for KeyValueDatabase { let mut key = shortroomid.to_be_bytes().to_vec(); key.extend_from_slice(word.as_bytes()); key.push(0xff); - key.extend_from_slice(&pdu_id); + key.extend_from_slice(pdu_id); (key, Vec::new()) }); diff --git a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs index 0c6c2dde..336317da 100644 --- a/src/database/key_value/rooms/timeline.rs +++ b/src/database/key_value/rooms/timeline.rs @@ -39,7 +39,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { { hash_map::Entry::Vacant(v) => { if let Some(last_count) = self - 
.pdus_until(&sender_user, &room_id, u64::MAX)? + .pdus_until(sender_user, room_id, u64::MAX)? .filter_map(|r| { // Filter out buggy events if r.is_err() { @@ -205,8 +205,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { .unwrap() .insert(pdu.room_id.clone(), count); - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; + self.eventid_pduid.insert(pdu.event_id.as_bytes(), pdu_id)?; self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; Ok(()) diff --git a/src/database/key_value/rooms/user.rs b/src/database/key_value/rooms/user.rs index e678c878..3d8d1c85 100644 --- a/src/database/key_value/rooms/user.rs +++ b/src/database/key_value/rooms/user.rs @@ -114,7 +114,7 @@ impl service::rooms::user::Data for KeyValueDatabase { utils::common_elements(iterators, Ord::cmp) .expect("users is not empty") .map(|bytes| { - RoomId::parse(utils::string_from_bytes(&*bytes).map_err(|_| { + RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("Invalid RoomId bytes in userroomid_joined") })?) .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) diff --git a/src/database/key_value/sending.rs b/src/database/key_value/sending.rs index fddbd67d..5424e8c5 100644 --- a/src/database/key_value/sending.rs +++ b/src/database/key_value/sending.rs @@ -38,7 +38,7 @@ impl service::sending::Data for KeyValueDatabase { fn delete_all_active_requests_for(&self, outgoing_kind: &OutgoingKind) -> Result<()> { let prefix = outgoing_kind.get_prefix(); - for (key, _) in self.servercurrentevent_data.scan_prefix(prefix.clone()) { + for (key, _) in self.servercurrentevent_data.scan_prefix(prefix) { self.servercurrentevent_data.remove(&key)?; } @@ -51,7 +51,7 @@ impl service::sending::Data for KeyValueDatabase { self.servercurrentevent_data.remove(&key).unwrap(); } - for (key, _) in self.servernameevent_data.scan_prefix(prefix.clone()) { + for (key, _) in self.servernameevent_data.scan_prefix(prefix) { self.servernameevent_data.remove(&key).unwrap(); } @@ -67,7 +67,7 @@ impl service::sending::Data for KeyValueDatabase { for (outgoing_kind, event) in requests { let mut key = outgoing_kind.get_prefix(); key.extend_from_slice(if let SendingEventType::Pdu(value) = &event { - &**value + value } else { &[] }); @@ -91,7 +91,7 @@ impl service::sending::Data for KeyValueDatabase { let prefix = outgoing_kind.get_prefix(); return Box::new( self.servernameevent_data - .scan_prefix(prefix.clone()) + .scan_prefix(prefix) .map(|(k, v)| parse_servercurrentevent(&k, v).map(|(_, ev)| (ev, k))), ); } @@ -155,7 +155,7 @@ fn parse_servercurrentevent( let mut parts = key[1..].splitn(3, |&b| b == 0xff); let user = parts.next().expect("splitn always returns one element"); - let user_string = utils::string_from_bytes(&user) + let user_string = utils::string_from_bytes(user) .map_err(|_| Error::bad_database("Invalid user string in servercurrentevent"))?; let user_id = UserId::parse(user_string) .map_err(|_| Error::bad_database("Invalid user id in servercurrentevent"))?; diff --git a/src/database/key_value/users.rs b/src/database/key_value/users.rs index f7ee07cf..cd5a5352 100644 --- a/src/database/key_value/users.rs +++ b/src/database/key_value/users.rs @@ -5,10 +5,9 @@ use ruma::{ encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, events::{AnyToDeviceEvent, StateEventType}, serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, OwnedUserId, UInt, - UserId, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, OwnedDeviceId, + 
OwnedDeviceKeyId, OwnedMxcUri, OwnedUserId, UInt, UserId, }; -use ruma::{OwnedDeviceId, OwnedDeviceKeyId, OwnedMxcUri}; use tracing::warn; use crate::{ @@ -380,13 +379,12 @@ impl service::users::Data for KeyValueDatabase { Ok(( serde_json::from_slice( - &*key - .rsplit(|&b| b == 0xff) + key.rsplit(|&b| b == 0xff) .next() .ok_or_else(|| Error::bad_database("OneTimeKeyId in db is invalid."))?, ) .map_err(|_| Error::bad_database("OneTimeKeyId in db is invalid."))?, - serde_json::from_slice(&*value) + serde_json::from_slice(&value) .map_err(|_| Error::bad_database("OneTimeKeys in db are invalid."))?, )) }) @@ -410,7 +408,7 @@ impl service::users::Data for KeyValueDatabase { .map(|(bytes, _)| { Ok::<_, Error>( serde_json::from_slice::( - &*bytes.rsplit(|&b| b == 0xff).next().ok_or_else(|| { + bytes.rsplit(|&b| b == 0xff).next().ok_or_else(|| { Error::bad_database("OneTimeKey ID in db is invalid.") })?, ) diff --git a/src/database/mod.rs b/src/database/mod.rs index 9f893d6f..15ee1373 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -2,22 +2,17 @@ pub mod abstraction; pub mod key_value; use crate::{services, utils, Config, Error, PduEvent, Result, Services, SERVICES}; -use abstraction::KeyValueDatabaseEngine; -use abstraction::KvTree; +use abstraction::{KeyValueDatabaseEngine, KvTree}; use directories::ProjectDirs; use lru_cache::LruCache; -use ruma::CanonicalJsonValue; -use ruma::OwnedDeviceId; -use ruma::OwnedEventId; -use ruma::OwnedRoomId; -use ruma::OwnedUserId; use ruma::{ events::{ push_rules::PushRulesEventContent, room::message::RoomMessageEventContent, GlobalAccountDataEvent, GlobalAccountDataEventType, StateEventType, }, push::Ruleset, - EventId, RoomId, UserId, + CanonicalJsonValue, EventId, OwnedDeviceId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, + UserId, }; use std::{ collections::{BTreeMap, HashMap, HashSet}, diff --git a/src/lib.rs b/src/lib.rs index 541b8c8d..3d7f7ae9 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -24,7 +24,7 @@ pub use utils::error::{Error, Result}; pub static SERVICES: RwLock> = RwLock::new(None); pub fn services<'a>() -> &'static Services { - &SERVICES + SERVICES .read() .unwrap() .expect("SERVICES should be initialized when this is called") diff --git a/src/main.rs b/src/main.rs index 0bba2aba..bdbeaa6a 100644 --- a/src/main.rs +++ b/src/main.rs @@ -444,7 +444,7 @@ impl_ruma_handler!(T1, T2, T3, T4, T5, T6, T7); impl_ruma_handler!(T1, T2, T3, T4, T5, T6, T7, T8); fn method_to_filter(method: Method) -> MethodFilter { - let method_filter = match method { + match method { Method::DELETE => MethodFilter::DELETE, Method::GET => MethodFilter::GET, Method::HEAD => MethodFilter::HEAD, @@ -454,6 +454,5 @@ fn method_to_filter(method: Method) -> MethodFilter { Method::PUT => MethodFilter::PUT, Method::TRACE => MethodFilter::TRACE, m => panic!("Unsupported HTTP method: {:?}", m), - }; - method_filter + } } diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 9e3f586a..b14ce2b1 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -179,7 +179,7 @@ impl Service { } pub fn start_handler(self: &Arc) { - let self2 = Arc::clone(&self); + let self2 = Arc::clone(self); tokio::spawn(async move { self2.handler().await; }); @@ -270,13 +270,11 @@ impl Service { let command_line = lines.next().expect("each string has at least one line"); let body: Vec<_> = lines.collect(); - let admin_command = match self.parse_admin_command(&command_line) { + let admin_command = match self.parse_admin_command(command_line) { Ok(command) => 
command, Err(error) => { let server_name = services().globals.server_name(); - let message = error - .to_string() - .replace("server.name", server_name.as_str()); + let message = error.replace("server.name", server_name.as_str()); let html_message = self.usage_to_html(&message, server_name); return RoomMessageEventContent::text_html(message, html_message); @@ -316,8 +314,8 @@ impl Service { // Backwards compatibility with `register_appservice`-style commands let command_with_dashes; - if argv.len() > 1 && argv[1].contains("_") { - command_with_dashes = argv[1].replace("_", "-"); + if argv.len() > 1 && argv[1].contains('_') { + command_with_dashes = argv[1].replace('_', "-"); argv[1] = &command_with_dashes; } @@ -631,7 +629,7 @@ impl Service { let displayname = format!("{} ⚡️", user_id.localpart()); services() .users - .set_displayname(&user_id, Some(displayname.clone()))?; + .set_displayname(&user_id, Some(displayname))?; // Initial account data services().account_data.update( @@ -771,7 +769,7 @@ impl Service { let text = text.replace("subcommand", "command"); // Escape option names (e.g. ``) since they look like HTML tags - let text = text.replace("<", "<").replace(">", ">"); + let text = text.replace('<', "<").replace('>', ">"); // Italicize the first line (command name and version text) let re = Regex::new("^(.*?)\n").expect("Regex compilation should not fail"); @@ -799,7 +797,7 @@ impl Service { while text_lines .get(line_index) - .map(|line| line.starts_with("#")) + .map(|line| line.starts_with('#')) .unwrap_or(false) { command_body += if text_lines[line_index].starts_with("# ") { @@ -830,12 +828,10 @@ impl Service { }; // Add HTML line-breaks - let text = text - .replace("\n\n\n", "\n\n") - .replace("\n", "
                \n") - .replace("[nobr]
                ", ""); - text + text.replace("\n\n\n", "\n\n") + .replace('\n', "
                \n") + .replace("[nobr]
                ", "") } /// Create the admin room. @@ -1110,7 +1106,7 @@ impl Service { state_key: Some(user_id.to_string()), redacts: None, }, - &user_id, + user_id, &room_id, &state_lock, )?; @@ -1142,8 +1138,8 @@ impl Service { PduBuilder { event_type: RoomEventType::RoomMessage, content: to_raw_value(&RoomMessageEventContent::text_html( - format!("## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nFor a list of available commands, send the following message in this room: `@conduit:{}: --help`\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`", services().globals.server_name()).to_owned(), - format!("

<h2>Thank you for trying out Conduit!</h2>\n<p>Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Website: https://conduit.rs<br>Git and Documentation: https://gitlab.com/famedly/conduit<br>Report issues: https://gitlab.com/famedly/conduit/-/issues</p>\n</blockquote>\n<p>For a list of available commands, send the following message in this room: <code>@conduit:{}: --help</code></p>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>Conduit room (Ask questions and get notified on updates):<br><code>/join #conduit:fachschaften.org</code></p>\n<p>Conduit lounge (Off-topic, only Conduit users are allowed to join)<br><code>/join #conduit-lounge:conduit.rs</code></p>
                \n", services().globals.server_name()).to_owned(), + format!("## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nFor a list of available commands, send the following message in this room: `@conduit:{}: --help`\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`", services().globals.server_name()), + format!("

<h2>Thank you for trying out Conduit!</h2>\n<p>Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Website: https://conduit.rs<br>Git and Documentation: https://gitlab.com/famedly/conduit<br>Report issues: https://gitlab.com/famedly/conduit/-/issues</p>\n</blockquote>\n<p>For a list of available commands, send the following message in this room: <code>@conduit:{}: --help</code></p>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>Conduit room (Ask questions and get notified on updates):<br><code>/join #conduit:fachschaften.org</code></p>\n<p>Conduit lounge (Off-topic, only Conduit users are allowed to join)<br><code>/join #conduit-lounge:conduit.rs</code></p>
                \n", services().globals.server_name()), )) .expect("event is valid, we just created it"), unsigned: None, diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 2d2fa1f9..8f8610c2 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -4,7 +4,6 @@ use ruma::events::AnySyncTimelineEvent; use crate::{services, Error, PduEvent, Result}; use bytes::BytesMut; -use ruma::api::IncomingResponse; use ruma::{ api::{ client::push::{get_pushers, set_pusher, PusherKind}, @@ -12,7 +11,7 @@ use ruma::{ self, v1::{Device, Notification, NotificationCounts, NotificationPriority}, }, - MatrixVersion, OutgoingRequest, SendAccessToken, + IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, }, events::{ room::{name::RoomNameEventContent, power_levels::RoomPowerLevelsEventContent}, diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index ae63d9a1..cd270c7c 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -284,7 +284,7 @@ impl Service { RoomVersion::new(room_version_id).expect("room version is supported"); let mut val = match ruma::signatures::verify_event( - &*pub_key_map.read().expect("RwLock is poisoned."), + &pub_key_map.read().expect("RwLock is poisoned."), &value, room_version_id, ) { @@ -1198,7 +1198,7 @@ impl Service { .fetch_and_handle_outliers( origin, &[prev_event_id.clone()], - &create_event, + create_event, room_id, pub_key_map, ) @@ -1224,7 +1224,7 @@ impl Service { amount += 1; for prev_prev in &pdu.prev_events { if !graph.contains_key(prev_prev) { - todo_outlier_stack.push(dbg!(prev_prev.clone())); + todo_outlier_stack.push(prev_prev.clone()); } } @@ -1248,7 +1248,7 @@ impl Service { } } - let sorted = state_res::lexicographical_topological_sort(dbg!(&graph), |event_id| { + let sorted = state_res::lexicographical_topological_sort(&graph, |event_id| { // This return value is the key used for sorting events, // events are then sorted by power level, time, // and lexically by event_id. 
@@ -1482,8 +1482,8 @@ impl Service { } let mut futures: FuturesUnordered<_> = servers - .into_iter() - .map(|(server, _)| async move { + .into_keys() + .map(|server| async move { ( services() .sending diff --git a/src/service/rooms/state/data.rs b/src/service/rooms/state/data.rs index 19a1e30a..f52ea72b 100644 --- a/src/service/rooms/state/data.rs +++ b/src/service/rooms/state/data.rs @@ -1,7 +1,6 @@ use crate::Result; use ruma::{EventId, OwnedEventId, RoomId}; -use std::collections::HashSet; -use std::sync::Arc; +use std::{collections::HashSet, sync::Arc}; use tokio::sync::MutexGuard; pub trait Data: Send + Sync { diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs index 2c49c35a..0e450322 100644 --- a/src/service/rooms/state/mod.rs +++ b/src/service/rooms/state/mod.rs @@ -93,7 +93,7 @@ impl Service { services().rooms.state_cache.update_joined_count(room_id)?; self.db - .set_room_state(room_id, shortstatehash, &state_lock)?; + .set_room_state(room_id, shortstatehash, state_lock)?; Ok(()) } @@ -331,7 +331,7 @@ impl Service { .transpose()?; let room_version = create_event_content .map(|create_event| create_event.room_version) - .ok_or_else(|| Error::BadDatabase("Invalid room version"))?; + .ok_or(Error::BadDatabase("Invalid room version"))?; Ok(room_version) } diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index dc859d8f..619dca28 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -2,29 +2,29 @@ mod data; use std::collections::HashMap; -use std::collections::HashSet; -use std::sync::{Arc, Mutex}; +use std::{ + collections::HashSet, + sync::{Arc, Mutex}, +}; pub use data::Data; use regex::Regex; -use ruma::canonical_json::to_canonical_value; -use ruma::events::room::power_levels::RoomPowerLevelsEventContent; -use ruma::push::Ruleset; -use ruma::state_res::RoomVersion; -use ruma::CanonicalJsonObject; -use ruma::CanonicalJsonValue; -use ruma::OwnedEventId; -use ruma::OwnedRoomId; -use ruma::OwnedServerName; use ruma::{ api::client::error::ErrorKind, + canonical_json::to_canonical_value, events::{ push_rules::PushRulesEvent, - room::{create::RoomCreateEventContent, member::MembershipState}, + room::{ + create::RoomCreateEventContent, member::MembershipState, + power_levels::RoomPowerLevelsEventContent, + }, GlobalAccountDataEventType, RoomEventType, StateEventType, }, - push::{Action, Tweak}, - state_res, uint, EventId, RoomAliasId, RoomId, UserId, + push::{Action, Ruleset, Tweak}, + state_res, + state_res::RoomVersion, + uint, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, + OwnedServerName, RoomAliasId, RoomId, UserId, }; use serde::Deserialize; use serde_json::value::to_raw_value; @@ -267,7 +267,7 @@ impl Service { .account_data .get( None, - &user, + user, GlobalAccountDataEventType::PushRules.to_string().into(), )? .map(|event| { @@ -276,13 +276,13 @@ impl Service { }) .transpose()? 
.map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(&user)); + .unwrap_or_else(|| Ruleset::server_default(user)); let mut highlight = false; let mut notify = false; for action in services().pusher.get_actions( - &user, + user, &rules_for_user, &power_levels, &sync_pdu, @@ -307,10 +307,8 @@ impl Service { highlights.push(user.clone()); } - for push_key in services().pusher.get_pushkeys(&user) { - services() - .sending - .send_push_pdu(&*pdu_id, &user, push_key?)?; + for push_key in services().pusher.get_pushkeys(user) { + services().sending.send_push_pdu(&pdu_id, user, push_key?)?; } } @@ -388,7 +386,7 @@ impl Service { && services().globals.emergency_password().is_none(); if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { - services().admin.process_message(body.to_string()); + services().admin.process_message(body); } } } @@ -583,8 +581,8 @@ impl Service { prev_events, depth, auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) + .values() + .map(|pdu| pdu.event_id.clone()) .collect(), redacts, unsigned: if unsigned.is_empty() { @@ -683,7 +681,7 @@ impl Service { state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex ) -> Result> { let (pdu, pdu_json) = - self.create_hash_and_sign_event(pdu_builder, sender, room_id, &state_lock)?; + self.create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock)?; // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index 20c652f5..adaf7c0c 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -110,7 +110,7 @@ impl Service { } pub fn start_handler(self: &Arc) { - let self2 = Arc::clone(&self); + let self2 = Arc::clone(self); tokio::spawn(async move { self2.handler().await.unwrap(); }); @@ -280,7 +280,7 @@ impl Service { device_list_changes.extend( services() .users - .keys_changed(&room_id.to_string(), since, None) + .keys_changed(room_id.as_ref(), since, None) .filter_map(|r| r.ok()) .filter(|user_id| user_id.server_name() == services().globals.server_name()), ); @@ -487,7 +487,7 @@ impl Service { let response = appservice_server::send_request( services() .appservice - .get_registration(&id) + .get_registration(id) .map_err(|e| (kind.clone(), e))? .ok_or_else(|| { ( @@ -562,7 +562,7 @@ impl Service { let pusher = match services() .pusher - .get_pusher(&userid, pushkey) + .get_pusher(userid, pushkey) .map_err(|e| (OutgoingKind::Push(userid.clone(), pushkey.clone()), e))? { Some(pusher) => pusher, @@ -573,18 +573,18 @@ impl Service { .account_data .get( None, - &userid, + userid, GlobalAccountDataEventType::PushRules.to_string().into(), ) .unwrap_or_default() .and_then(|event| serde_json::from_str::(event.get()).ok()) .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| push::Ruleset::server_default(&userid)); + .unwrap_or_else(|| push::Ruleset::server_default(userid)); let unread: UInt = services() .rooms .user - .notification_count(&userid, &pdu.room_id) + .notification_count(userid, &pdu.room_id) .map_err(|e| (kind.clone(), e))? 
.try_into() .expect("notification count can't go that high"); @@ -593,7 +593,7 @@ impl Service { let _response = services() .pusher - .send_push_notice(&userid, unread, &pusher, rules_for_user, &pdu) + .send_push_notice(userid, unread, &pusher, rules_for_user, &pdu) .await .map(|_response| kind.clone()) .map_err(|e| (kind.clone(), e)); @@ -638,7 +638,7 @@ impl Service { let permit = services().sending.maximum_requests.acquire().await; let response = server_server::send_request( - &*server, + server, send_transaction_message::v1::Request { origin: services().globals.server_name(), pdus: &pdu_jsons, From 71cffcd5379fbca8ad283c695da18adaa98412e2 Mon Sep 17 00:00:00 2001 From: Jonas Zohren Date: Wed, 22 Jun 2022 22:14:53 +0000 Subject: [PATCH 406/445] feat(ci): Split clippy into own fallible job For some reason, the clippy build does not work. This change allows the cargo:test job to still succeed and the pipeline to pass --- .gitlab-ci.yml | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 380332b1..eb7a96fd 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -250,17 +250,30 @@ docker:tags:dockerhub: test:cargo: extends: .test-shared-settings before_script: - - rustup component add clippy # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: - if [ -n "${SCCACHE_ENDPOINT}" ]; then export RUSTC_WRAPPER=/usr/local/cargo/bin/sccache; fi script: - rustc --version && cargo --version # Print version info for debugging - "cargo test --color always --workspace --verbose --locked --no-fail-fast -- -Z unstable-options --format json | gitlab-report -p test > $CI_PROJECT_DIR/report.xml" - - "cargo clippy --color always --verbose --message-format=json | gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json" artifacts: when: always reports: junit: report.xml + + +test:clippy: + extends: .test-shared-settings + allow_failure: true + before_script: + - rustup component add clippy + # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: + - if [ -n "${SCCACHE_ENDPOINT}" ]; then export RUSTC_WRAPPER=/usr/local/cargo/bin/sccache; fi + script: + - rustc --version && cargo --version # Print version info for debugging + - "cargo clippy --color always --verbose --message-format=json | gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json" + artifacts: + when: always + reports: codequality: gl-code-quality-report.json test:format: From df8703cc1304779449fde3a9bf9d1122e5345def Mon Sep 17 00:00:00 2001 From: Jim Date: Thu, 23 Jun 2022 06:58:34 +0000 Subject: [PATCH 407/445] Lightning bolt optional --- conduit-example.toml | 3 +++ src/api/client_server/account.rs | 9 +++++++-- src/config/mod.rs | 6 ++++++ src/service/admin/mod.rs | 8 +++++++- src/service/globals/mod.rs | 4 ++++ 5 files changed, 27 insertions(+), 3 deletions(-) diff --git a/conduit-example.toml b/conduit-example.toml index 362f7e7e..5eed0708 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -40,6 +40,9 @@ allow_registration = true allow_federation = true +# Enable the display name lightning bolt on registration. 
+enable_lightning_bolt = true + trusted_servers = ["matrix.org"] #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index 17b2920a..51343ae2 100644 --- a/src/api/client_server/account.rs +++ b/src/api/client_server/account.rs @@ -12,7 +12,6 @@ use ruma::{ events::{room::message::RoomMessageEventContent, GlobalAccountDataEventType}, push, UserId, }; - use tracing::{info, warn}; use register::RegistrationKind; @@ -169,7 +168,13 @@ pub async fn register_route( services().users.create(&user_id, password)?; // Default to pretty displayname - let displayname = format!("{} ⚡️", user_id.localpart()); + let mut displayname = user_id.localpart().to_owned(); + + // If enabled append lightning bolt to display name (default true) + if services().globals.enable_lightning_bolt() { + displayname.push_str(" ⚡️"); + } + services() .users .set_displayname(&user_id, Some(displayname.clone()))?; diff --git a/src/config/mod.rs b/src/config/mod.rs index b60b9cff..31d96b6a 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -26,6 +26,8 @@ pub struct Config { pub database_path: String, #[serde(default = "default_db_cache_capacity_mb")] pub db_cache_capacity_mb: f64, + #[serde(default = "true_fn")] + pub enable_lightning_bolt: bool, #[serde(default = "default_conduit_cache_capacity_modifier")] pub conduit_cache_capacity_modifier: f64, #[serde(default = "default_rocksdb_max_open_files")] @@ -135,6 +137,10 @@ impl fmt::Display for Config { &self.max_concurrent_requests.to_string(), ), ("Allow registration", &self.allow_registration.to_string()), + ( + "Enabled lightning bolt", + &self.enable_lightning_bolt.to_string(), + ), ("Allow encryption", &self.allow_encryption.to_string()), ("Allow federation", &self.allow_federation.to_string()), ("Allow room creation", &self.allow_room_creation.to_string()), diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index b14ce2b1..91103785 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -626,7 +626,13 @@ impl Service { services().users.create(&user_id, Some(password.as_str()))?; // Default to pretty displayname - let displayname = format!("{} ⚡️", user_id.localpart()); + let mut displayname = user_id.localpart().to_owned(); + + // If enabled append lightning bolt to display name (default true) + if services().globals.enable_lightning_bolt() { + displayname.push_str(" ⚡️"); + } + services() .users .set_displayname(&user_id, Some(displayname))?; diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 44192e01..4daddab0 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -245,6 +245,10 @@ impl Service { self.config.default_room_version.clone() } + pub fn enable_lightning_bolt(&self) -> bool { + self.config.enable_lightning_bolt + } + pub fn trusted_servers(&self) -> &[OwnedServerName] { &self.config.trusted_servers } From 7cf060ae5b31f5cf5b03bb31bb327067f37b5035 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 23 Jun 2022 09:04:19 +0200 Subject: [PATCH 408/445] Bump version to 0.4 --- Cargo.lock | 2 +- Cargo.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 941634e0..760b4d90 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -372,7 +372,7 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" [[package]] name = "conduit" -version = "0.3.0-next" +version = "0.4.0-next" 
dependencies = [ "async-trait", "axum", diff --git a/Cargo.toml b/Cargo.toml index 031f2798..e007e492 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,7 +6,7 @@ authors = ["timokoesters "] homepage = "https://conduit.rs" repository = "https://gitlab.com/famedly/conduit" readme = "README.md" -version = "0.3.0-next" +version = "0.4.0-next" rust-version = "1.63" edition = "2021" From 18ca2e4c2984eb265e4fe2ef12b87bf50a8fdd60 Mon Sep 17 00:00:00 2001 From: majso Date: Sat, 25 Jun 2022 21:59:49 +0000 Subject: [PATCH 409/445] Dockerfile: changing DB path to be same as we are using in CI --- Dockerfile | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/Dockerfile b/Dockerfile index 76d10ea9..8a76c470 100644 --- a/Dockerfile +++ b/Dockerfile @@ -36,9 +36,11 @@ FROM docker.io/debian:bullseye-slim AS runner # You still need to map the port when using the docker command or docker-compose. EXPOSE 6167 +ARG DEFAULT_DB_PATH=/var/lib/matrix-conduit + ENV CONDUIT_PORT=6167 \ CONDUIT_ADDRESS="0.0.0.0" \ - CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit \ + CONDUIT_DATABASE_PATH=${DEFAULT_DB_PATH} \ CONDUIT_CONFIG='' # └─> Set no config file to do all configuration with env vars @@ -51,9 +53,6 @@ RUN apt-get update && apt-get -y --no-install-recommends install \ wget \ && rm -rf /var/lib/apt/lists/* -# Created directory for the database and media files -RUN mkdir -p /srv/conduit/.local/share/conduit - # Test if Conduit is still alive, uses the same endpoint as Element COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh @@ -69,10 +68,12 @@ RUN set -x ; \ groupadd -r -g ${GROUP_ID} conduit ; \ useradd -l -r -M -d /srv/conduit -o -u ${USER_ID} -g conduit conduit && exit 0 ; exit 1 -# Change ownership of Conduit files to conduit user and group and make the healthcheck executable: +# Create database directory, change ownership of Conduit files to conduit user and group and make the healthcheck executable: RUN chown -cR conduit:conduit /srv/conduit && \ - chmod +x /srv/conduit/healthcheck.sh - + chmod +x /srv/conduit/healthcheck.sh && \ + mkdir -p ${DEFAULT_DB_PATH} && \ + chown -cR conduit:conduit ${DEFAULT_DB_PATH} + # Change user to conduit, no root permissions afterwards: USER conduit # Set container home directory From c15205fb4679b7058790c9691c3f767d4b2c3c3b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Sun, 14 Aug 2022 19:29:46 +0200 Subject: [PATCH 410/445] fix(client/keys): ignore non-signature keys in signature upload route --- src/api/client_server/keys.rs | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/src/api/client_server/keys.rs b/src/api/client_server/keys.rs index 9a21dd60..837e1662 100644 --- a/src/api/client_server/keys.rs +++ b/src/api/client_server/keys.rs @@ -148,11 +148,24 @@ pub async fn upload_signatures_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - for (user_id, signed_keys) in &body.signed_keys { - for (key_id, signed_key) in signed_keys { - let signed_key = serde_json::to_value(signed_key).unwrap(); + for (user_id, keys) in &body.signed_keys { + for (key_id, key) in keys { + let key = serde_json::to_value(key) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid key JSON"))?; + + let is_signature_key = match key.get("usage") { + Some(usage) => usage + .as_array() + .map(|usage| !usage.contains(&json!("master"))) + .unwrap_or(false), + None => true, + }; + + if 
!is_signature_key { + continue; + } - for signature in signed_key + for signature in key .get("signatures") .ok_or(Error::BadRequest( ErrorKind::InvalidParam, From 0ddc3c01ef4b1c62e2a9f58b53a137f15ca72b14 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Sun, 14 Aug 2022 19:33:13 +0200 Subject: [PATCH 411/445] style(client/keys): rename signature key to signed key --- src/api/client_server/keys.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/api/client_server/keys.rs b/src/api/client_server/keys.rs index 837e1662..2de785fd 100644 --- a/src/api/client_server/keys.rs +++ b/src/api/client_server/keys.rs @@ -153,7 +153,7 @@ pub async fn upload_signatures_route( let key = serde_json::to_value(key) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid key JSON"))?; - let is_signature_key = match key.get("usage") { + let is_signed_key = match key.get("usage") { Some(usage) => usage .as_array() .map(|usage| !usage.contains(&json!("master"))) @@ -161,7 +161,7 @@ pub async fn upload_signatures_route( None => true, }; - if !is_signature_key { + if !is_signed_key { continue; } From 2b7c19835b65e4dd3a6a32466a9f45b06bf1ced2 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 10 Oct 2022 15:00:44 +0200 Subject: [PATCH 412/445] Add room version 10 to experimental versions --- src/service/globals/mod.rs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 4daddab0..d4c9dad0 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -129,7 +129,12 @@ impl Service { RoomVersionId::V9, ]; // Experimental, partially supported room versions - let unstable_room_versions = vec![RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5]; + let unstable_room_versions = vec![ + RoomVersionId::V3, + RoomVersionId::V4, + RoomVersionId::V5, + RoomVersionId::V10, + ]; let mut s = Self { db, From c30cc6120b31e8d631bfe8c988d399c120c638c3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 11 Oct 2022 11:53:13 +0200 Subject: [PATCH 413/445] fix: send right errors on make/send join in restricted rooms --- src/api/server_server.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 37505983..c9f6a78e 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -1283,10 +1283,10 @@ pub async fn create_join_event_template_route( if let Some(join_rules_event_content) = join_rules_event_content { if matches!( join_rules_event_content.join_rule, - JoinRule::Restricted { .. } + JoinRule::Restricted { .. } | JoinRule::KnockRestricted { .. } ) { return Err(Error::BadRequest( - ErrorKind::Unknown, + ErrorKind::UnableToAuthorizeJoin, "Conduit does not support restricted rooms yet.", )); } @@ -1376,10 +1376,10 @@ async fn create_join_event( if let Some(join_rules_event_content) = join_rules_event_content { if matches!( join_rules_event_content.join_rule, - JoinRule::Restricted { .. } + JoinRule::Restricted { .. } | JoinRule::KnockRestricted { .. 
} ) { return Err(Error::BadRequest( - ErrorKind::Unknown, + ErrorKind::UnableToAuthorizeJoin, "Conduit does not support restricted rooms yet.", )); } From fb6bfa97530e09f3d2c69dfde3a65c4c633f937a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 11 Oct 2022 15:25:10 +0200 Subject: [PATCH 414/445] fix: missing field `origin` error with synapse servers --- Cargo.toml | 2 +- src/api/server_server.rs | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index e007e492..37b05294 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ tower-http = { version = "0.3.4", features = ["add-extension", "cors", "compress # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "86b58cafb8abe29eecd0272d90b40bbb61a7919b", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types", "ring-compat"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "86b58cafb8abe29eecd0272d90b40bbb61a7919b", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/api/server_server.rs b/src/api/server_server.rs index c9f6a78e..eabe8c85 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -1470,7 +1470,6 @@ async fn create_join_event( .filter_map(|(_, id)| services().rooms.timeline.get_pdu_json(id).ok().flatten()) .map(PduEvent::convert_to_outgoing_federation_event) .collect(), - origin: services().globals.server_name().to_string(), }) } From 31d180191262b2e130e5c7463cdd8d12ee7c6c98 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 11 Oct 2022 17:10:09 +0200 Subject: [PATCH 415/445] fix: workaround for missing avatars on element and rooms becoming historical --- src/api/client_server/message.rs | 13 +++++++++++++ src/api/client_server/sync.rs | 2 ++ 2 files changed, 15 insertions(+) diff --git a/src/api/client_server/message.rs b/src/api/client_server/message.rs index e086e4af..2b5bdf9d 100644 --- a/src/api/client_server/message.rs +++ b/src/api/client_server/message.rs @@ -170,6 +170,9 @@ pub async fn get_message_events_route( .collect(); for (_, event) in &events_after { + /* TODO: Remove this when these are resolved: + * https://github.com/vector-im/element-android/issues/3417 + * https://github.com/vector-im/element-web/issues/21034 if !services().rooms.lazy_loading.lazy_load_was_sent_before( sender_user, sender_device, @@ -178,6 +181,8 @@ pub async fn get_message_events_route( )? 
{ lazy_loaded.insert(event.sender.clone()); } + */ + lazy_loaded.insert(event.sender.clone()); } next_token = events_after.last().map(|(count, _)| count).copied(); @@ -210,6 +215,9 @@ pub async fn get_message_events_route( .collect(); for (_, event) in &events_before { + /* TODO: Remove this when these are resolved: + * https://github.com/vector-im/element-android/issues/3417 + * https://github.com/vector-im/element-web/issues/21034 if !services().rooms.lazy_loading.lazy_load_was_sent_before( sender_user, sender_device, @@ -218,6 +226,8 @@ pub async fn get_message_events_route( )? { lazy_loaded.insert(event.sender.clone()); } + */ + lazy_loaded.insert(event.sender.clone()); } next_token = events_before.last().map(|(count, _)| count).copied(); @@ -244,6 +254,8 @@ pub async fn get_message_events_route( } } + // TODO: enable again when we are sure clients can handle it + /* if let Some(next_token) = next_token { services().rooms.lazy_loading.lazy_load_mark_sent( sender_user, @@ -253,6 +265,7 @@ pub async fn get_message_events_route( next_token, ); } + */ Ok(resp) } diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index d876a926..483b32bf 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -433,6 +433,8 @@ async fn sync_helper( } else if !lazy_load_enabled || body.full_state || timeline_users.contains(&state_key) + // TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565 + || *sender_user == state_key { let pdu = match services().rooms.timeline.get_pdu(&id)? { Some(pdu) => pdu, From 68227c06c398ece491cd7f3b7bebe254dcdb43f6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 11 Oct 2022 17:10:56 +0200 Subject: [PATCH 416/445] fix: state for left rooms --- src/api/client_server/sync.rs | 86 ++++++++++++++++++++++++++++++++++- 1 file changed, 85 insertions(+), 1 deletion(-) diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 483b32bf..739b42f3 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -827,7 +827,9 @@ async fn sync_helper( .rooms_left(&sender_user) .collect(); for result in all_left_rooms { - let (room_id, left_state_events) = result?; + let (room_id, _) = result?; + + let mut left_state_events = Vec::new(); { // Get and drop the lock to wait for remaining operations to finish @@ -854,6 +856,88 @@ async fn sync_helper( continue; } + if !services().rooms.metadata.exists(&room_id)? { + // This is just a rejected invite, not a room we know + continue; + } + + let since_shortstatehash = services() + .rooms + .user + .get_token_shortstatehash(&room_id, since)?; + + let since_state_ids = match since_shortstatehash { + Some(s) => services().rooms.state_accessor.state_full_ids(s).await?, + None => BTreeMap::new(), + }; + + let left_event_id = match services().rooms.state_accessor.room_state_get_id( + &room_id, + &StateEventType::RoomMember, + sender_user.as_str(), + )? { + Some(e) => e, + None => { + error!("Left room but no left state event"); + continue; + } + }; + + let left_shortstatehash = match services() + .rooms + .state_accessor + .pdu_shortstatehash(&left_event_id)? 
+ { + Some(s) => s, + None => { + error!("Leave event has no state"); + continue; + } + }; + + let mut left_state_ids = services() + .rooms + .state_accessor + .state_full_ids(left_shortstatehash) + .await?; + + let leave_shortstatekey = services().rooms.short.get_or_create_shortstatekey( + &StateEventType::RoomMember, + &sender_user.as_str(), + )?; + + left_state_ids.insert(leave_shortstatekey, left_event_id); + + let mut i = 0; + for (key, id) in left_state_ids { + if body.full_state || since_state_ids.get(&key) != Some(&id) { + let (event_type, state_key) = + services().rooms.short.get_statekey_from_short(key)?; + + if !lazy_load_enabled + || event_type != StateEventType::RoomMember + || body.full_state + // TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565 + || *sender_user == state_key + { + let pdu = match services().rooms.timeline.get_pdu(&id)? { + Some(pdu) => pdu, + None => { + error!("Pdu in state not found: {}", id); + continue; + } + }; + + left_state_events.push(pdu.to_sync_state_event()); + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + } + } + } + left_rooms.insert( room_id.clone(), LeftRoom { From d1e5acd7b3270fd08601dc9ec2eaad2455a53b9f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 11 Oct 2022 17:59:49 +0200 Subject: [PATCH 417/445] fix: don't panic on missing events in state --- src/api/server_server.rs | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index eabe8c85..35c01f9f 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -55,7 +55,7 @@ use std::{ time::{Duration, Instant, SystemTime}, }; -use tracing::{info, warn}; +use tracing::{error, info, warn}; /// Wraps either an literal IP address plus port, or a hostname plus complement /// (colon-plus-port if it was specified). @@ -1149,16 +1149,18 @@ pub async fn get_room_state_route( Ok(get_room_state::v1::Response { auth_chain: auth_chain_ids - .map(|id| { - services() + .filter_map(|id| { + match services() .rooms .timeline - .get_pdu_json(&id) - .map(|maybe_json| { - PduEvent::convert_to_outgoing_federation_event(maybe_json.unwrap()) - }) + .get_pdu_json(&id).ok()? 
{ + Some(json) => Some(PduEvent::convert_to_outgoing_federation_event(json)), + None => { + error!("Could not find event json for {id} in db."); + None + } + } }) - .filter_map(|r| r.ok()) .collect(), pdus, }) From 8105c5cc60912afc487a6d045c67a7dada76322e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 11 Oct 2022 18:10:51 +0200 Subject: [PATCH 418/445] cargo fmt --- src/api/client_server/sync.rs | 8 ++++---- src/api/server_server.rs | 19 ++++++++----------- 2 files changed, 12 insertions(+), 15 deletions(-) diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 739b42f3..4b732692 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -901,10 +901,10 @@ async fn sync_helper( .state_full_ids(left_shortstatehash) .await?; - let leave_shortstatekey = services().rooms.short.get_or_create_shortstatekey( - &StateEventType::RoomMember, - &sender_user.as_str(), - )?; + let leave_shortstatekey = services() + .rooms + .short + .get_or_create_shortstatekey(&StateEventType::RoomMember, &sender_user.as_str())?; left_state_ids.insert(leave_shortstatekey, left_event_id); diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 35c01f9f..03128a7f 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -1149,18 +1149,15 @@ pub async fn get_room_state_route( Ok(get_room_state::v1::Response { auth_chain: auth_chain_ids - .filter_map(|id| { - match services() - .rooms - .timeline - .get_pdu_json(&id).ok()? { - Some(json) => Some(PduEvent::convert_to_outgoing_federation_event(json)), - None => { - error!("Could not find event json for {id} in db."); - None - } + .filter_map( + |id| match services().rooms.timeline.get_pdu_json(&id).ok()? { + Some(json) => Some(PduEvent::convert_to_outgoing_federation_event(json)), + None => { + error!("Could not find event json for {id} in db."); + None } - }) + }, + ) .collect(), pdus, }) From d3968c2fd1d901011e5aaf1dd14cecfed5af10bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 11 Oct 2022 21:51:20 +0200 Subject: [PATCH 419/445] fix: bump ruma again to fix state res problems --- Cargo.lock | 22 +++++++++++----------- Cargo.toml | 2 +- src/api/client_server/media.rs | 9 ++++++++- src/api/client_server/message.rs | 8 ++++---- src/api/client_server/read_marker.rs | 4 +++- src/api/client_server/sync.rs | 1 + 6 files changed, 28 insertions(+), 18 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 760b4d90..9e58bccc 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2039,7 +2039,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.7.4" -source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" +source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" dependencies = [ "assign", "js_int", @@ -2057,7 +2057,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.7.0" -source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" +source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" dependencies = [ "ruma-common", "serde", @@ -2067,7 +2067,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.15.1" -source = 
"git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" +source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" dependencies = [ "assign", "bytes", @@ -2083,7 +2083,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.10.3" -source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" +source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" dependencies = [ "base64", "bytes", @@ -2110,7 +2110,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" +source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" dependencies = [ "js_int", "ruma-common", @@ -2121,7 +2121,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" +source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" dependencies = [ "js_int", "thiserror", @@ -2130,7 +2130,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" +source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" dependencies = [ "js_int", "ruma-common", @@ -2140,7 +2140,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.10.3" -source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" +source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" dependencies = [ "once_cell", "proc-macro-crate", @@ -2155,7 +2155,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" +source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" dependencies = [ "js_int", "ruma-common", @@ -2166,7 +2166,7 @@ dependencies = [ [[package]] name = "ruma-signatures" version = "0.12.0" -source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" +source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" dependencies = [ "base64", "ed25519-dalek", @@ -2182,7 +2182,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.8.0" -source = "git+https://github.com/ruma/ruma?rev=86b58cafb8abe29eecd0272d90b40bbb61a7919b#86b58cafb8abe29eecd0272d90b40bbb61a7919b" +source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" dependencies = [ "itertools", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 
37b05294..0b3062da 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ tower-http = { version = "0.3.4", features = ["add-extension", "cors", "compress # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "86b58cafb8abe29eecd0272d90b40bbb61a7919b", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } +ruma = { git = "https://github.com/ruma/ruma", rev = "c2c45551335c443ede7fb9158284196899a0c696", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } diff --git a/src/api/client_server/media.rs b/src/api/client_server/media.rs index c1f5e1de..ae023c95 100644 --- a/src/api/client_server/media.rs +++ b/src/api/client_server/media.rs @@ -104,6 +104,7 @@ pub async fn get_content_route( file, content_type, content_disposition, + cross_origin_resource_policy: Some("cross-origin".to_owned()), }) } else if &*body.server_name != services().globals.server_name() && body.allow_remote { let remote_content_response = @@ -134,6 +135,7 @@ pub async fn get_content_as_filename_route( file, content_type, content_disposition: Some(format!("inline; filename={}", body.filename)), + cross_origin_resource_policy: Some("cross-origin".to_owned()), }) } else if &*body.server_name != services().globals.server_name() && body.allow_remote { let remote_content_response = @@ -143,6 +145,7 @@ pub async fn get_content_as_filename_route( content_disposition: Some(format!("inline: filename={}", body.filename)), content_type: remote_content_response.content_type, file: remote_content_response.file, + cross_origin_resource_policy: Some("cross-origin".to_owned()), }) } else { Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) @@ -174,7 +177,11 @@ pub async fn get_content_thumbnail_route( ) .await? 
{ - Ok(get_content_thumbnail::v3::Response { file, content_type }) + Ok(get_content_thumbnail::v3::Response { + file, + content_type, + cross_origin_resource_policy: Some("cross-origin".to_owned()), + }) } else if &*body.server_name != services().globals.server_name() && body.allow_remote { let get_thumbnail_response = services() .sending diff --git a/src/api/client_server/message.rs b/src/api/client_server/message.rs index 2b5bdf9d..b04c2626 100644 --- a/src/api/client_server/message.rs +++ b/src/api/client_server/message.rs @@ -127,8 +127,8 @@ pub async fn get_message_events_route( .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from` value."))?, None => match body.dir { - get_message_events::v3::Direction::Forward => 0, - get_message_events::v3::Direction::Backward => u64::MAX, + ruma::api::client::Direction::Forward => 0, + ruma::api::client::Direction::Backward => u64::MAX, }, }; @@ -151,7 +151,7 @@ pub async fn get_message_events_route( let mut lazy_loaded = HashSet::new(); match body.dir { - get_message_events::v3::Direction::Forward => { + ruma::api::client::Direction::Forward => { let events_after: Vec<_> = services() .rooms .timeline @@ -196,7 +196,7 @@ pub async fn get_message_events_route( resp.end = next_token.map(|count| count.to_string()); resp.chunk = events_after; } - get_message_events::v3::Direction::Backward => { + ruma::api::client::Direction::Backward => { let events_before: Vec<_> = services() .rooms .timeline diff --git a/src/api/client_server/read_marker.rs b/src/api/client_server/read_marker.rs index bdf467f9..48520fc9 100644 --- a/src/api/client_server/read_marker.rs +++ b/src/api/client_server/read_marker.rs @@ -1,7 +1,7 @@ use crate::{services, Error, Result, Ruma}; use ruma::{ api::client::{error::ErrorKind, read_marker::set_read_marker, receipt::create_receipt}, - events::{receipt::ReceiptType, RoomAccountDataEventType}, + events::{receipt::{ReceiptType, ReceiptThread}, RoomAccountDataEventType}, MilliSecondsSinceUnixEpoch, }; use std::collections::BTreeMap; @@ -59,6 +59,7 @@ pub async fn set_read_marker_route( sender_user.clone(), ruma::events::receipt::Receipt { ts: Some(MilliSecondsSinceUnixEpoch::now()), + thread: ReceiptThread::Unthreaded, }, ); @@ -119,6 +120,7 @@ pub async fn create_receipt_route( sender_user.clone(), ruma::events::receipt::Receipt { ts: Some(MilliSecondsSinceUnixEpoch::now()), + thread: ReceiptThread::Unthreaded, }, ); let mut receipts = BTreeMap::new(); diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 4b732692..828ae19c 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -778,6 +778,7 @@ async fn sync_helper( .collect(), }, ephemeral: Ephemeral { events: edus }, + unread_thread_notifications: BTreeMap::new(), }; if !joined_room.is_empty() { From 2b70d9604a2cb10830a6677c2380374836b4d990 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Tue, 11 Oct 2022 22:37:14 +0200 Subject: [PATCH 420/445] fix: element gets stuck in /initialSync --- src/main.rs | 14 +++++++++++++- src/service/rooms/event_handler/mod.rs | 6 +++++- src/utils/error.rs | 2 +- 3 files changed, 19 insertions(+), 3 deletions(-) diff --git a/src/main.rs b/src/main.rs index bdbeaa6a..08368415 100644 --- a/src/main.rs +++ b/src/main.rs @@ -342,6 +342,14 @@ fn routes() -> Router { .ruma_route(server_server::get_profile_information_route) .ruma_route(server_server::get_keys_route) .ruma_route(server_server::claim_keys_route) + .route( + 
"/_matrix/client/r0/rooms/:room_id/initialSync", + get(initial_sync), + ) + .route( + "/_matrix/client/v3/rooms/:room_id/initialSync", + get(initial_sync), + ) .fallback(not_found.into_service()) } @@ -375,7 +383,11 @@ async fn shutdown_signal(handle: ServerHandle) { } async fn not_found(_uri: Uri) -> impl IntoResponse { - Error::BadRequest(ErrorKind::NotFound, "Unknown or unimplemented route") + Error::BadRequest(ErrorKind::Unrecognized, "Unrecognized request") +} + +async fn initial_sync(_uri: Uri) -> impl IntoResponse { + Error::BadRequest(ErrorKind::GuestAccessForbidden, "Guest access not implemented") } trait RouterExt { diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index cd270c7c..477a9719 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -44,6 +44,7 @@ impl Service { /// When receiving an event one needs to: /// 0. Check the server is in the room /// 1. Skip the PDU if we already know about it + /// 1.1. Remove unsigned field /// 2. Check signatures, otherwise drop /// 3. Check content hash, redact if doesn't match /// 4. Fetch any missing auth events doing all checks listed here starting at 1. These are not @@ -260,10 +261,13 @@ impl Service { create_event: &'a PduEvent, event_id: &'a EventId, room_id: &'a RoomId, - value: BTreeMap, + mut value: BTreeMap, pub_key_map: &'a RwLock>>, ) -> AsyncRecursiveType<'a, Result<(Arc, BTreeMap)>> { Box::pin(async move { + // 1.1. Remove unsigned field + value.remove("unsigned"); + // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json // We go through all the signatures we see on the value and fetch the corresponding signing diff --git a/src/utils/error.rs b/src/utils/error.rs index 8967acb7..9c8617f9 100644 --- a/src/utils/error.rs +++ b/src/utils/error.rs @@ -117,7 +117,7 @@ impl Error { StatusCode::FORBIDDEN } Unauthorized | UnknownToken { .. } | MissingToken => StatusCode::UNAUTHORIZED, - NotFound => StatusCode::NOT_FOUND, + NotFound | Unrecognized => StatusCode::NOT_FOUND, LimitExceeded { .. 
} => StatusCode::TOO_MANY_REQUESTS, UserDeactivated => StatusCode::FORBIDDEN, TooLarge => StatusCode::PAYLOAD_TOO_LARGE, From 0290f1f3554ef8cca02218a396961d13dbbd44ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 12 Oct 2022 10:42:59 +0200 Subject: [PATCH 421/445] improvement: more efficient /claim --- src/api/client_server/keys.rs | 42 ++++++++++++++++++++++------------- 1 file changed, 26 insertions(+), 16 deletions(-) diff --git a/src/api/client_server/keys.rs b/src/api/client_server/keys.rs index 2de785fd..86cfaa49 100644 --- a/src/api/client_server/keys.rs +++ b/src/api/client_server/keys.rs @@ -440,25 +440,35 @@ pub(crate) async fn claim_keys_helper( let mut failures = BTreeMap::new(); - for (server, vec) in get_over_federation { + let mut futures: FuturesUnordered<_> = get_over_federation + .into_iter() + .map(|(server, vec)| async move { let mut one_time_keys_input_fed = BTreeMap::new(); - for (user_id, keys) in vec { - one_time_keys_input_fed.insert(user_id.clone(), keys.clone()); - } - // Ignore failures - if let Ok(keys) = services() - .sending - .send_federation_request( + for (user_id, keys) in vec { + one_time_keys_input_fed.insert(user_id.clone(), keys.clone()); + } + ( server, - federation::keys::claim_keys::v1::Request { - one_time_keys: one_time_keys_input_fed, - }, + services() + .sending + .send_federation_request( + server, + federation::keys::claim_keys::v1::Request { + one_time_keys: one_time_keys_input_fed, + }, + ) + .await, ) - .await - { - one_time_keys.extend(keys.one_time_keys); - } else { - failures.insert(server.to_string(), json!({})); + }).collect(); + + while let Some((server, response)) = futures.next().await { + match response { + Ok(keys) => { + one_time_keys.extend(keys.one_time_keys); + } + Err(_e) => { + failures.insert(server.to_string(), json!({})); + } } } From dd8f4681a2175e271abd8f88198cee6dcba59655 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 12 Oct 2022 10:57:54 +0200 Subject: [PATCH 422/445] fix: make join should not send event id --- src/api/server_server.rs | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 03128a7f..9b32b96b 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -1313,7 +1313,7 @@ pub async fn create_join_event_template_route( }) .expect("member event is valid value"); - let (_pdu, pdu_json) = services().rooms.timeline.create_hash_and_sign_event( + let (_pdu, mut pdu_json) = services().rooms.timeline.create_hash_and_sign_event( PduBuilder { event_type: RoomEventType::RoomMember, content, @@ -1328,6 +1328,8 @@ pub async fn create_join_event_template_route( drop(state_lock); + pdu_json.remove("event_id"); + Ok(prepare_join_event::v1::Response { room_version: Some(room_version_id), event: to_raw_value(&pdu_json).expect("CanonicalJson can be serialized to JSON"), From fdd64fc966df79064f7590f3750d2288666fa467 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Wed, 12 Oct 2022 17:17:16 +0200 Subject: [PATCH 423/445] fix: fluffychat login works again --- Cargo.lock | 22 +++++++++++----------- Cargo.toml | 2 +- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 9e58bccc..48ce6c04 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2039,7 +2039,7 @@ dependencies = [ [[package]] name = "ruma" version = "0.7.4" -source = 
"git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" +source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" dependencies = [ "assign", "js_int", @@ -2057,7 +2057,7 @@ dependencies = [ [[package]] name = "ruma-appservice-api" version = "0.7.0" -source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" +source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" dependencies = [ "ruma-common", "serde", @@ -2067,7 +2067,7 @@ dependencies = [ [[package]] name = "ruma-client-api" version = "0.15.1" -source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" +source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" dependencies = [ "assign", "bytes", @@ -2083,7 +2083,7 @@ dependencies = [ [[package]] name = "ruma-common" version = "0.10.3" -source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" +source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" dependencies = [ "base64", "bytes", @@ -2110,7 +2110,7 @@ dependencies = [ [[package]] name = "ruma-federation-api" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" +source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" dependencies = [ "js_int", "ruma-common", @@ -2121,7 +2121,7 @@ dependencies = [ [[package]] name = "ruma-identifiers-validation" version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" +source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" dependencies = [ "js_int", "thiserror", @@ -2130,7 +2130,7 @@ dependencies = [ [[package]] name = "ruma-identity-service-api" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" +source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" dependencies = [ "js_int", "ruma-common", @@ -2140,7 +2140,7 @@ dependencies = [ [[package]] name = "ruma-macros" version = "0.10.3" -source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" +source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" dependencies = [ "once_cell", "proc-macro-crate", @@ -2155,7 +2155,7 @@ dependencies = [ [[package]] name = "ruma-push-gateway-api" version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" +source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" dependencies = [ "js_int", "ruma-common", @@ -2166,7 +2166,7 @@ dependencies = [ [[package]] name = 
"ruma-signatures" version = "0.12.0" -source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" +source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" dependencies = [ "base64", "ed25519-dalek", @@ -2182,7 +2182,7 @@ dependencies = [ [[package]] name = "ruma-state-res" version = "0.8.0" -source = "git+https://github.com/ruma/ruma?rev=c2c45551335c443ede7fb9158284196899a0c696#c2c45551335c443ede7fb9158284196899a0c696" +source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" dependencies = [ "itertools", "js_int", diff --git a/Cargo.toml b/Cargo.toml index 0b3062da..cce6f9b8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ tower-http = { version = "0.3.4", features = ["add-extension", "cors", "compress # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "c2c45551335c443ede7fb9158284196899a0c696", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } +ruma = { git = "https://github.com/ruma/ruma", rev = "fba6f70c2df8294f96567f56464a46e3d237a8e9", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } From 3c20c1b72e8aef3253ad1c96e8943b9803bb0a3b Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Wed, 12 Oct 2022 14:39:58 -0700 Subject: [PATCH 424/445] fix `cargo test` --- src/api/server_server.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 9b32b96b..f84ca36d 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -65,7 +65,7 @@ use tracing::{error, info, warn}; /// /// # Examples: /// ```rust -/// # use conduit::server_server::FedDest; +/// # use conduit::api::server_server::FedDest; /// # fn main() -> Result<(), std::net::AddrParseError> { /// FedDest::Literal("198.51.100.3:8448".parse()?); /// FedDest::Literal("[2001:db8::4:5]:443".parse()?); From 4710f739c0b9b230a155ffb2cf0947d96a4246cc Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Wed, 12 Oct 2022 17:48:09 -0700 Subject: [PATCH 425/445] clap v4 turned more things into optional features So we need to re-enable some things. See their changelog[0] for details. 
[0]: https://github.com/clap-rs/clap/blob/master/CHANGELOG.md#migrating --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index cce6f9b8..e7e48c83 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -83,7 +83,7 @@ thread_local = "1.1.3" hmac = "0.12.1" sha-1 = "0.10.0" # used for conduit's CLI and admin room command parsing -clap = { version = "4.0.11", default-features = false, features = ["std", "derive"] } +clap = { version = "4.0.11", default-features = false, features = ["std", "derive", "help", "usage", "error-context"] } futures-util = { version = "0.3.17", default-features = false } # Used for reading the configuration from conduit.toml & environment variables figment = { version = "0.10.6", features = ["env", "toml"] } From fc852f8be64daa663d05dc64f5c09dd64e7a1609 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Wed, 12 Oct 2022 17:49:13 -0700 Subject: [PATCH 426/445] resolve `cargo check --features clap/deprecated` This has no functional effects. --- src/service/admin/mod.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 91103785..ad6d26b2 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -38,9 +38,9 @@ use crate::{ use super::pdu::PduBuilder; #[derive(Parser)] -#[clap(name = "@conduit:server.name:", version = env!("CARGO_PKG_VERSION"))] +#[command(name = "@conduit:server.name:", version = env!("CARGO_PKG_VERSION"))] enum AdminCommand { - #[clap(verbatim_doc_comment)] + #[command(verbatim_doc_comment)] /// Register an appservice using its registration YAML /// /// This command needs a YAML generated by an appservice (such as a bridge), @@ -80,12 +80,12 @@ enum AdminCommand { /// User will not be removed from all rooms by default. /// Use --leave-rooms to force the user to leave all rooms DeactivateUser { - #[clap(short, long)] + #[arg(short, long)] leave_rooms: bool, user_id: Box, }, - #[clap(verbatim_doc_comment)] + #[command(verbatim_doc_comment)] /// Deactivate a list of users /// /// Recommended to use in conjunction with list-local-users. @@ -100,10 +100,10 @@ enum AdminCommand { /// # User list here /// # ``` DeactivateAll { - #[clap(short, long)] + #[arg(short, long)] /// Remove users from their joined rooms leave_rooms: bool, - #[clap(short, long)] + #[arg(short, long)] /// Also deactivate admin accounts force: bool, }, @@ -114,7 +114,7 @@ enum AdminCommand { event_id: Box, }, - #[clap(verbatim_doc_comment)] + #[command(verbatim_doc_comment)] /// Parse and print a PDU from a JSON /// /// The PDU event is only checked for validity and is not added to the From 7ef9fe3454f4a025c88884ce59e3b0b58af0cf97 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Wed, 12 Oct 2022 17:50:04 -0700 Subject: [PATCH 427/445] add regression tests This way we don't regress on accident again in the future. 
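A rough standalone sketch of the clap v4 derive style that patches 425 through 427 migrate to. This is illustrative only, not code from this repository: it assumes clap 4 with the derive, help, usage, and error-context features enabled, and the DemoCommand name plus the simplified String field types are placeholders rather than Conduit's actual definitions.

use clap::Parser;

// Hypothetical demo command, loosely modeled on AdminCommand; names and
// simplified types here are placeholders, not Conduit's real definitions.
#[derive(Parser, Debug)]
#[command(name = "@conduit:server.name:", version)]
enum DemoCommand {
    /// Deactivate a user account
    DeactivateUser {
        #[arg(short, long)]
        leave_rooms: bool,
        user_id: String,
    },
}

fn main() {
    // In clap 4, `--help` surfaces as an Err whose Display output is the
    // rendered help text, so a regression test can assert on its sections.
    let err = DemoCommand::try_parse_from(["argv[0] doesn't matter", "--help"]).unwrap_err();
    assert!(err.to_string().contains("Usage:"));
    assert!(err.to_string().contains("Commands:"));
}

The tests added in the diff below automate exactly this kind of check for `-h`, `--help`, and the `help` subcommand.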
--- src/service/admin/mod.rs | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index ad6d26b2..942df1c3 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -37,6 +37,7 @@ use crate::{ use super::pdu::PduBuilder; +#[cfg_attr(test, derive(Debug))] #[derive(Parser)] #[command(name = "@conduit:server.name:", version = env!("CARGO_PKG_VERSION"))] enum AdminCommand { @@ -1160,3 +1161,34 @@ impl Service { Ok(()) } } + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn get_help_short() { + get_help_inner("-h"); + } + + #[test] + fn get_help_long() { + get_help_inner("--help"); + } + + #[test] + fn get_help_subcommand() { + get_help_inner("help"); + } + + fn get_help_inner(input: &str) { + let error = AdminCommand::try_parse_from(["argv[0] doesn't matter", input]) + .unwrap_err() + .to_string(); + + // Search for a handful of keywords that suggest the help printed properly + assert!(error.contains("Usage:")); + assert!(error.contains("Commands:")); + assert!(error.contains("Options:")); + } +} From 9a47069f45e6565d51d017b07fddbe7fbf7177cd Mon Sep 17 00:00:00 2001 From: AndSDev Date: Mon, 29 Aug 2022 07:15:55 +0000 Subject: [PATCH 428/445] fix(client/login): username in lowercase for login by token --- src/api/client_server/session.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/api/client_server/session.rs b/src/api/client_server/session.rs index f62ccbb6..7c8c1288 100644 --- a/src/api/client_server/session.rs +++ b/src/api/client_server/session.rs @@ -92,7 +92,7 @@ pub async fn login_route(body: Ruma) -> Result Date: Thu, 13 Oct 2022 10:14:52 +0200 Subject: [PATCH 429/445] fix: all the e2ee problems --- src/api/appservice_server.rs | 14 ++++++++++++-- src/api/client_server/to_device.rs | 5 +++-- src/api/ruma_wrapper/axum.rs | 2 +- src/api/server_server.rs | 8 ++++++-- src/database/key_value/sending.rs | 10 +++++----- src/service/pusher/mod.rs | 5 ++++- src/service/sending/mod.rs | 2 -- 7 files changed, 31 insertions(+), 15 deletions(-) diff --git a/src/api/appservice_server.rs b/src/api/appservice_server.rs index 6dca60be..339a0c22 100644 --- a/src/api/appservice_server.rs +++ b/src/api/appservice_server.rs @@ -45,11 +45,21 @@ where *reqwest_request.timeout_mut() = Some(Duration::from_secs(30)); let url = reqwest_request.url().clone(); - let mut response = services() + let mut response = match services() .globals .default_client() .execute(reqwest_request) - .await?; + .await + { + Ok(r) => r, + Err(e) => { + warn!( + "Could not send request to appservice {:?} at {}: {}", + registration.get("id"), destination, e + ); + return Err(e.into()); + } + }; // reqwest::Response -> http::Response conversion let status = response.status(); diff --git a/src/api/client_server/to_device.rs b/src/api/client_server/to_device.rs index f84d54f0..139b845d 100644 --- a/src/api/client_server/to_device.rs +++ b/src/api/client_server/to_device.rs @@ -35,6 +35,7 @@ pub async fn send_event_to_device_route( map.insert(target_device_id_maybe.clone(), event.clone()); let mut messages = BTreeMap::new(); messages.insert(target_user_id.clone(), map); + let count = services().globals.next_count()?; services().sending.send_reliable_edu( target_user_id.server_name(), @@ -42,12 +43,12 @@ pub async fn send_event_to_device_route( DirectDeviceContent { sender: sender_user.clone(), ev_type: ToDeviceEventType::from(&*body.event_type), - message_id: body.txn_id.to_owned(), + message_id: 
count.to_string().into(), messages, }, )) .expect("DirectToDevice EDU can be serialized"), - services().globals.next_count()?, + count, )?; continue; diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs index 818cffcb..d056f3f2 100644 --- a/src/api/ruma_wrapper/axum.rs +++ b/src/api/ruma_wrapper/axum.rs @@ -281,7 +281,7 @@ where debug!("{:?}", http_request); let body = T::try_from_http_request(http_request, &path_params).map_err(|e| { - warn!("{:?}", e); + warn!("{:?}\n{:?}", e, json_body); Error::BadRequest(ErrorKind::BadJson, "Failed to deserialize request.") })?; diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 9b32b96b..de0f8409 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -304,7 +304,10 @@ where )) } } - Err(e) => Err(e.into()), + Err(e) => { + warn!("Could not send request to {} at {}: {}", destination, actual_destination_str, e); + Err(e.into()) + }, } } @@ -831,7 +834,8 @@ pub async fn send_transaction_message_route( target_user_id, target_device_id, &ev_type.to_string(), - event.deserialize_as().map_err(|_| { + event.deserialize_as().map_err(|e| { + warn!("To-Device event is invalid: {event:?} {e}"); Error::BadRequest( ErrorKind::InvalidParam, "Event is invalid", diff --git a/src/database/key_value/sending.rs b/src/database/key_value/sending.rs index 5424e8c5..fcbe0f31 100644 --- a/src/database/key_value/sending.rs +++ b/src/database/key_value/sending.rs @@ -6,7 +6,7 @@ use crate::{ self, sending::{OutgoingKind, SendingEventType}, }, - utils, Error, Result, + utils, Error, Result, services, }; impl service::sending::Data for KeyValueDatabase { @@ -66,11 +66,11 @@ impl service::sending::Data for KeyValueDatabase { let mut keys = Vec::new(); for (outgoing_kind, event) in requests { let mut key = outgoing_kind.get_prefix(); - key.extend_from_slice(if let SendingEventType::Pdu(value) = &event { - value + if let SendingEventType::Pdu(value) = &event { + key.extend_from_slice(value) } else { - &[] - }); + key.extend_from_slice(&services().globals.next_count()?.to_be_bytes()) + } let value = if let SendingEventType::Edu(value) = &event { &**value } else { diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 8f8610c2..767687d8 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -128,7 +128,10 @@ impl Service { Error::BadServerResponse("Push gateway returned bad response.") }) } - Err(e) => Err(e.into()), + Err(e) => { + warn!("Could not send request to pusher {}: {}", destination, e); + Err(e.into()) + }, } } diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index adaf7c0c..afa12fc7 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -158,8 +158,6 @@ impl Service { // Find events that have been added since starting the last request let new_events = self.db.queued_requests(&outgoing_kind).filter_map(|r| r.ok()).take(30).collect::>(); - // TODO: find edus - if !new_events.is_empty() { // Insert pdus we found self.db.mark_as_active(&new_events)?; From 8c6e75a0cd2d7870a32320f36af0851d5cae1f58 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Thu, 13 Oct 2022 10:27:42 +0200 Subject: [PATCH 430/445] Mark unstable versions as unstable in /capabilities --- src/api/client_server/capabilities.rs | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/src/api/client_server/capabilities.rs b/src/api/client_server/capabilities.rs index 97529cf1..31d42d2f 100644 --- a/src/api/client_server/capabilities.rs +++ 
b/src/api/client_server/capabilities.rs @@ -11,14 +11,8 @@ pub async fn get_capabilities_route( _body: Ruma, ) -> Result { let mut available = BTreeMap::new(); - if services().globals.allow_unstable_room_versions() { - for room_version in &services().globals.unstable_room_versions { - available.insert(room_version.clone(), RoomVersionStability::Stable); - } - } else { - for room_version in &services().globals.unstable_room_versions { - available.insert(room_version.clone(), RoomVersionStability::Unstable); - } + for room_version in &services().globals.unstable_room_versions { + available.insert(room_version.clone(), RoomVersionStability::Unstable); } for room_version in &services().globals.stable_room_versions { available.insert(room_version.clone(), RoomVersionStability::Stable); From 9c922db14b53c9184f7a27ea2da75fe607efa580 Mon Sep 17 00:00:00 2001 From: exin Date: Sat, 25 Jun 2022 13:35:58 -0500 Subject: [PATCH 431/445] Lower default log level Update config-example.toml to accordingly Closes #281 --- conduit-example.toml | 2 +- src/config/mod.rs | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/conduit-example.toml b/conduit-example.toml index 5eed0708..0549030e 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -46,7 +46,7 @@ enable_lightning_bolt = true trusted_servers = ["matrix.org"] #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time -#log = "info,state_res=warn,rocket=off,_=off,sled=off" +#log = "warn,state_res=warn,rocket=off,_=off,sled=off" address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy #address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it. diff --git a/src/config/mod.rs b/src/config/mod.rs index 31d96b6a..645f440d 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -250,7 +250,7 @@ fn default_max_concurrent_requests() -> u16 { } fn default_log() -> String { - "info,state_res=warn,_=off,sled=off".to_owned() + "warn,state_res=warn,_=off,sled=off".to_owned() } fn default_turn_ttl() -> u64 { From 3a40bf8ae07285882477ca83e7da714ddf02c73d Mon Sep 17 00:00:00 2001 From: exin Date: Sat, 25 Jun 2022 14:30:20 -0500 Subject: [PATCH 432/445] Add error for invalid log config Log config falls back to "warn" --- src/main.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/main.rs b/src/main.rs index 08368415..bf8bc113 100644 --- a/src/main.rs +++ b/src/main.rs @@ -110,9 +110,13 @@ async fn main() { start.await; } else { let fmt_layer = tracing_subscriber::fmt::Layer::new(); - let filter_layer = EnvFilter::try_new(&config.log) - .or_else(|_| EnvFilter::try_new("info")) - .unwrap(); + let filter_layer = match EnvFilter::try_new(&config.log) { + Ok(s) => s, + Err(e) => { + eprintln!("It looks like your log config is invalid. The following error occurred: {}", e); + EnvFilter::try_new("warn").unwrap() + }, + }; let subscriber = registry.with(filter_layer).with(fmt_layer); tracing::subscriber::set_global_default(subscriber).unwrap(); From 3e6c66b899bfcbf9ba07f79ec406dc0c809c8216 Mon Sep 17 00:00:00 2001 From: exin Date: Sat, 25 Jun 2022 15:29:05 -0500 Subject: [PATCH 433/445] Fix formatting --- src/main.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/main.rs b/src/main.rs index bf8bc113..78a38ad8 100644 --- a/src/main.rs +++ b/src/main.rs @@ -115,7 +115,7 @@ async fn main() { Err(e) => { eprintln!("It looks like your log config is invalid. 
The following error occurred: {}", e); EnvFilter::try_new("warn").unwrap() - }, + } }; let subscriber = registry.with(filter_layer).with(fmt_layer); From 7451abe3ea7dc5fb9225b45a0672be7bb4194d9c Mon Sep 17 00:00:00 2001 From: exin Date: Sat, 25 Jun 2022 15:58:50 -0500 Subject: [PATCH 434/445] Lower default log level for docker and debian --- DEPLOY.md | 2 +- docker-compose.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/DEPLOY.md b/DEPLOY.md index f0990dcf..1c7d1af5 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -136,7 +136,7 @@ allow_federation = true trusted_servers = ["matrix.org"] #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time -#log = "info,state_res=warn,rocket=off,_=off,sled=off" +#log = "warn,state_res=warn,rocket=off,_=off,sled=off" address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy #address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it. diff --git a/docker-compose.yml b/docker-compose.yml index 0a9d8f4d..d9c32b51 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -31,7 +31,7 @@ services: CONDUIT_ALLOW_FEDERATION: 'true' CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' #CONDUIT_MAX_CONCURRENT_REQUESTS: 100 - #CONDUIT_LOG: info,rocket=off,_=off,sled=off + #CONDUIT_LOG: warn,rocket=off,_=off,sled=off CONDUIT_ADDRESS: 0.0.0.0 CONDUIT_CONFIG: '' # Ignore this # From 92f7f0c849edfa19ca1fd2e3f3702ca64d40ce89 Mon Sep 17 00:00:00 2001 From: exin Date: Sun, 26 Jun 2022 09:20:11 -0500 Subject: [PATCH 435/445] Lower log level commented config options --- debian/postinst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/debian/postinst b/debian/postinst index aab2480c..73e554b7 100644 --- a/debian/postinst +++ b/debian/postinst @@ -77,7 +77,7 @@ allow_federation = true trusted_servers = ["matrix.org"] #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time -#log = "info,state_res=warn,rocket=off,_=off,sled=off" +#log = "warn,state_res=warn,rocket=off,_=off,sled=off" EOF fi ;; From 98702da4e66404db4d987fdea42a758425f40707 Mon Sep 17 00:00:00 2001 From: exin Date: Sun, 26 Jun 2022 09:26:04 -0500 Subject: [PATCH 436/445] Lower default log level for docker --- docker/README.md | 2 +- docker/docker-compose.for-traefik.yml | 2 +- docker/docker-compose.with-traefik.yml | 2 +- tests/Complement.Dockerfile | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/docker/README.md b/docker/README.md index c980adcc..36717c4f 100644 --- a/docker/README.md +++ b/docker/README.md @@ -33,7 +33,7 @@ docker run -d -p 8448:6167 \ -e CONDUIT_MAX_REQUEST_SIZE="20_000_000" \ -e CONDUIT_TRUSTED_SERVERS="[\"matrix.org\"]" \ -e CONDUIT_MAX_CONCURRENT_REQUESTS="100" \ - -e CONDUIT_LOG="info,rocket=off,_=off,sled=off" \ + -e CONDUIT_LOG="warn,rocket=off,_=off,sled=off" \ --name conduit matrixconduit/matrix-conduit:latest ``` diff --git a/docker/docker-compose.for-traefik.yml b/docker/docker-compose.for-traefik.yml index ca560b89..474299f6 100644 --- a/docker/docker-compose.for-traefik.yml +++ b/docker/docker-compose.for-traefik.yml @@ -31,7 +31,7 @@ services: CONDUIT_ALLOW_FEDERATION: 'true' CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' #CONDUIT_MAX_CONCURRENT_REQUESTS: 100 - #CONDUIT_LOG: info,rocket=off,_=off,sled=off + #CONDUIT_LOG: warn,rocket=off,_=off,sled=off CONDUIT_ADDRESS: 0.0.0.0 CONDUIT_CONFIG: '' # Ignore this diff --git a/docker/docker-compose.with-traefik.yml 
b/docker/docker-compose.with-traefik.yml index 6d46827f..79ebef4b 100644 --- a/docker/docker-compose.with-traefik.yml +++ b/docker/docker-compose.with-traefik.yml @@ -33,7 +33,7 @@ services: # CONDUIT_PORT: 6167 # CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string '' # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging - # CONDUIT_LOG: info # default is: "info,_=off,sled=off" + # CONDUIT_LOG: info # default is: "warn,_=off,sled=off" # CONDUIT_ALLOW_JAEGER: 'false' # CONDUIT_ALLOW_ENCRYPTION: 'false' # CONDUIT_ALLOW_FEDERATION: 'false' diff --git a/tests/Complement.Dockerfile b/tests/Complement.Dockerfile index 22016e91..b9d0f8c9 100644 --- a/tests/Complement.Dockerfile +++ b/tests/Complement.Dockerfile @@ -33,7 +33,7 @@ RUN sed -i "s/port = 6167/port = 8008/g" conduit.toml RUN echo "allow_federation = true" >> conduit.toml RUN echo "allow_encryption = true" >> conduit.toml RUN echo "allow_registration = true" >> conduit.toml -RUN echo "log = \"info,_=off,sled=off\"" >> conduit.toml +RUN echo "log = \"warn,_=off,sled=off\"" >> conduit.toml RUN sed -i "s/address = \"127.0.0.1\"/address = \"0.0.0.0\"/g" conduit.toml # Enabled Caddy auto cert generation for complement provided CA. From bf7c4b4001ef2fdd08b1145169c6465f2f97eda3 Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Thu, 13 Oct 2022 08:06:49 -0700 Subject: [PATCH 437/445] update rust to avoid a cargo problem We were hitting [this bug][0] when trying to select a version for clap ^4. [0]: https://github.com/rust-lang/cargo/issues/10623 --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 8a76c470..a089f020 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # syntax=docker/dockerfile:1 -FROM docker.io/rust:1.58-bullseye AS builder +FROM docker.io/rust:1.60-bullseye AS builder WORKDIR /usr/src/conduit # Install required packages to build Conduit and it's dependencies From 286936db3213114ae565e93a4065601a60912d0f Mon Sep 17 00:00:00 2001 From: Charles Hall Date: Thu, 13 Oct 2022 08:26:41 -0700 Subject: [PATCH 438/445] msrv is 1.63 in Cargo.toml; use that --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index a089f020..3154ebb6 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,5 +1,5 @@ # syntax=docker/dockerfile:1 -FROM docker.io/rust:1.60-bullseye AS builder +FROM docker.io/rust:1.63-bullseye AS builder WORKDIR /usr/src/conduit # Install required packages to build Conduit and it's dependencies From 3a45628e1d0d8fe4b9b6227ba26e448879ce03f0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Timo=20K=C3=B6sters?= Date: Sat, 15 Oct 2022 00:28:43 +0200 Subject: [PATCH 439/445] fix: send unrecognized error on wrong http methods --- src/api/appservice_server.rs | 4 +++- src/api/client_server/keys.rs | 5 +++-- src/api/client_server/read_marker.rs | 5 ++++- src/api/server_server.rs | 7 +++++-- src/database/key_value/sending.rs | 2 +- src/main.rs | 25 +++++++++++++++++++++++-- src/service/pusher/mod.rs | 2 +- 7 files changed, 40 insertions(+), 10 deletions(-) diff --git a/src/api/appservice_server.rs b/src/api/appservice_server.rs index 339a0c22..dc319e2c 100644 --- a/src/api/appservice_server.rs +++ b/src/api/appservice_server.rs @@ -55,7 +55,9 @@ where Err(e) => { warn!( "Could not send request to appservice {:?} at {}: {}", - registration.get("id"), destination, e + registration.get("id"), + 
destination, + e ); return Err(e.into()); } diff --git a/src/api/client_server/keys.rs b/src/api/client_server/keys.rs index 86cfaa49..b649166a 100644 --- a/src/api/client_server/keys.rs +++ b/src/api/client_server/keys.rs @@ -443,7 +443,7 @@ pub(crate) async fn claim_keys_helper( let mut futures: FuturesUnordered<_> = get_over_federation .into_iter() .map(|(server, vec)| async move { - let mut one_time_keys_input_fed = BTreeMap::new(); + let mut one_time_keys_input_fed = BTreeMap::new(); for (user_id, keys) in vec { one_time_keys_input_fed.insert(user_id.clone(), keys.clone()); } @@ -459,7 +459,8 @@ pub(crate) async fn claim_keys_helper( ) .await, ) - }).collect(); + }) + .collect(); while let Some((server, response)) = futures.next().await { match response { diff --git a/src/api/client_server/read_marker.rs b/src/api/client_server/read_marker.rs index 48520fc9..d529c6a8 100644 --- a/src/api/client_server/read_marker.rs +++ b/src/api/client_server/read_marker.rs @@ -1,7 +1,10 @@ use crate::{services, Error, Result, Ruma}; use ruma::{ api::client::{error::ErrorKind, read_marker::set_read_marker, receipt::create_receipt}, - events::{receipt::{ReceiptType, ReceiptThread}, RoomAccountDataEventType}, + events::{ + receipt::{ReceiptThread, ReceiptType}, + RoomAccountDataEventType, + }, MilliSecondsSinceUnixEpoch, }; use std::collections::BTreeMap; diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 0064a86f..320e396b 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -305,9 +305,12 @@ where } } Err(e) => { - warn!("Could not send request to {} at {}: {}", destination, actual_destination_str, e); + warn!( + "Could not send request to {} at {}: {}", + destination, actual_destination_str, e + ); Err(e.into()) - }, + } } } diff --git a/src/database/key_value/sending.rs b/src/database/key_value/sending.rs index fcbe0f31..3fc3e042 100644 --- a/src/database/key_value/sending.rs +++ b/src/database/key_value/sending.rs @@ -6,7 +6,7 @@ use crate::{ self, sending::{OutgoingKind, SendingEventType}, }, - utils, Error, Result, services, + services, utils, Error, Result, }; impl service::sending::Data for KeyValueDatabase { diff --git a/src/main.rs b/src/main.rs index 78a38ad8..626de3ae 100644 --- a/src/main.rs +++ b/src/main.rs @@ -145,6 +145,7 @@ async fn run_server() -> io::Result<()> { }), ) .compression() + .layer(axum::middleware::from_fn(unrecognized_method)) .layer( CorsLayer::new() .allow_origin(cors::Any) @@ -187,6 +188,22 @@ async fn run_server() -> io::Result<()> { Ok(()) } +async fn unrecognized_method( + req: axum::http::Request, + next: axum::middleware::Next, +) -> std::result::Result { + let method = req.method().clone(); + let uri = req.uri().clone(); + let inner = next.run(req).await; + if inner.status() == axum::http::StatusCode::METHOD_NOT_ALLOWED { + warn!("Method not allowed: {method} {uri}"); + return Ok( + Error::BadRequest(ErrorKind::Unrecognized, "Unrecognized request").into_response(), + ); + } + Ok(inner) +} + fn routes() -> Router { Router::new() .ruma_route(client_server::get_supported_versions_route) @@ -386,12 +403,16 @@ async fn shutdown_signal(handle: ServerHandle) { handle.graceful_shutdown(Some(Duration::from_secs(30))); } -async fn not_found(_uri: Uri) -> impl IntoResponse { +async fn not_found(uri: Uri) -> impl IntoResponse { + warn!("Not found: {uri}"); Error::BadRequest(ErrorKind::Unrecognized, "Unrecognized request") } async fn initial_sync(_uri: Uri) -> impl IntoResponse { - Error::BadRequest(ErrorKind::GuestAccessForbidden, 
"Guest access not implemented") + Error::BadRequest( + ErrorKind::GuestAccessForbidden, + "Guest access not implemented", + ) } trait RouterExt { diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs index 767687d8..7fee276b 100644 --- a/src/service/pusher/mod.rs +++ b/src/service/pusher/mod.rs @@ -131,7 +131,7 @@ impl Service { Err(e) => { warn!("Could not send request to pusher {}: {}", destination, e); Err(e.into()) - }, + } } } From 3b0aa23fdfb0106add15df4f80e68e33b5ff5de7 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 13 Feb 2022 22:00:36 +0000 Subject: [PATCH 440/445] Rejoin room over federation if we are not participating in it; do not include invited users in participating servers calculation --- src/api/client_server/membership.rs | 8 ++++++-- src/database/key_value/rooms/state_cache.rs | 3 +-- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index b69a6d1f..ca339938 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -474,8 +474,12 @@ async fn join_room_by_id_helper( ); let state_lock = mutex_state.lock().await; - // Ask a remote server if we don't have this room - if !services().rooms.metadata.exists(room_id)? { + // Ask a remote server if we are not participating in this room + if !services() + .rooms + .state_cache + .server_in_room(services().globals.server_name(), room_id)? + { let mut make_join_response_and_server = Err(Error::BadServerResponse( "No server available to assist in joining.", )); diff --git a/src/database/key_value/rooms/state_cache.rs b/src/database/key_value/rooms/state_cache.rs index ff4594f0..d0ea0c2c 100644 --- a/src/database/key_value/rooms/state_cache.rs +++ b/src/database/key_value/rooms/state_cache.rs @@ -108,8 +108,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { joinedcount += 1; } - for invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(invited.server_name().to_owned()); + for _invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { invitedcount += 1; } From e9697f13d680abff7917264f76c96929f18d9e7d Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Fri, 9 Sep 2022 20:38:36 +0200 Subject: [PATCH 441/445] Handle initiating restricted joins over federation Allows Conduit users to join restricted rooms if we are not currently participating --- src/api/client_server/membership.rs | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index b69a6d1f..975d1a52 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -17,7 +17,7 @@ use ruma::{ }, serde::Base64, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName, - RoomId, RoomVersionId, UserId, + OwnedUserId, RoomId, RoomVersionId, UserId, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ @@ -519,6 +519,15 @@ async fn join_room_by_id_helper( Error::BadServerResponse("Invalid make_join event json received from server.") })?; + let join_authorized_via_users_server = join_event_stub + .get("content") + .map(|s| { + s.as_object()? + .get("join_authorised_via_users_server")? + .as_str() + }) + .and_then(|s| OwnedUserId::try_from(s.unwrap_or_default()).ok()); + // TODO: Is origin needed? 
join_event_stub.insert( "origin".to_owned(), @@ -542,7 +551,7 @@ async fn join_room_by_id_helper( third_party_invite: None, blurhash: services().users.blurhash(sender_user)?, reason: None, - join_authorized_via_users_server: None, + join_authorized_via_users_server, }) .expect("event is valid, we just created it"), ); From cc3e1f58cc2526aea55d974d0ec89a58a81fba14 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sat, 15 Oct 2022 10:42:14 +0200 Subject: [PATCH 442/445] Bump default room version to V9; per matrix spec recommendation --- src/config/mod.rs | 4 ++-- src/service/admin/mod.rs | 2 +- src/service/globals/mod.rs | 4 ++-- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/config/mod.rs b/src/config/mod.rs index 645f440d..3c3a764a 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -258,6 +258,6 @@ fn default_turn_ttl() -> u64 { } // I know, it's a great name -fn default_default_room_version() -> RoomVersionId { - RoomVersionId::V6 +pub fn default_default_room_version() -> RoomVersionId { + RoomVersionId::V9 } diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 942df1c3..5766b2f2 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -871,7 +871,7 @@ impl Service { let mut content = RoomCreateEventContent::new(conduit_user.clone()); content.federate = true; content.predecessor = None; - content.room_version = RoomVersionId::V6; + content.room_version = services().globals.default_room_version(); // 1. The room create event services().rooms.timeline.build_and_append_pdu( diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index d4c9dad0..b2fadc70 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -172,8 +172,8 @@ impl Service { .supported_room_versions() .contains(&s.config.default_room_version) { - error!("Room version in config isn't supported, falling back to Version 6"); - s.config.default_room_version = RoomVersionId::V6; + error!("Room version in config isn't supported, falling back to default version"); + s.config.default_room_version = crate::config::default_default_room_version(); }; Ok(s) From 1e1a144dfa98429ef9f02d16045796b73013830d Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sat, 15 Oct 2022 12:16:32 +0200 Subject: [PATCH 443/445] Move room version 10 out of experimental/unstable --- src/service/globals/mod.rs | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index b2fadc70..affc0516 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -127,14 +127,10 @@ impl Service { RoomVersionId::V7, RoomVersionId::V8, RoomVersionId::V9, - ]; - // Experimental, partially supported room versions - let unstable_room_versions = vec![ - RoomVersionId::V3, - RoomVersionId::V4, - RoomVersionId::V5, RoomVersionId::V10, ]; + // Experimental, partially supported room versions + let unstable_room_versions = vec![RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5]; let mut s = Self { db, From 2d0fdddd34cc922b6de23ac1f77fcce5975b8249 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sat, 15 Oct 2022 13:17:58 +0200 Subject: [PATCH 444/445] Do not return true for is_guest on whoami for appservice users --- src/api/client_server/account.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs index 51343ae2..ce4dadda 100644 --- a/src/api/client_server/account.rs +++ 
b/src/api/client_server/account.rs @@ -338,7 +338,7 @@ pub async fn whoami_route(body: Ruma) -> Result Date: Sat, 10 Sep 2022 18:14:29 +0200 Subject: [PATCH 445/445] Raise 404 when room doesn't exist Raise 404 "Room not found" when changing or accessing room visibility settings (`GET` and `PUT /_matrix/client/r0/directory/list/room/{roomId}`). See issue #290 --- src/api/client_server/directory.rs | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/src/api/client_server/directory.rs b/src/api/client_server/directory.rs index 7c4aa50b..781e9666 100644 --- a/src/api/client_server/directory.rs +++ b/src/api/client_server/directory.rs @@ -85,6 +85,14 @@ pub async fn set_room_visibility_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + if !db.rooms.exists(&body.room_id)? { + // Return 404 if the room doesn't exist + return Err(Error::BadRequest( + ErrorKind::NotFound, + "Room not found", + )); + } + match &body.visibility { room::Visibility::Public => { services().rooms.directory.set_public(&body.room_id)?; @@ -108,6 +116,15 @@ pub async fn set_room_visibility_route( pub async fn get_room_visibility_route( body: Ruma, ) -> Result { + + if !db.rooms.exists(&body.room_id)? { + // Return 404 if the room doesn't exist + return Err(Error::BadRequest( + ErrorKind::NotFound, + "Room not found", + )); + } + Ok(get_room_visibility::v3::Response { visibility: if services().rooms.directory.is_public_room(&body.room_id)? { room::Visibility::Public