Merge branch 'timo' into 'next'

More async

See merge request famedly/conduit!359
merge-requests/361/head
Timo Kösters 2 years ago
commit 6ef1e8c4f9
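
A recurring pattern in the changes below: long, synchronous database loops are turned into async loops that hand control back to the tokio scheduler every 100 iterations, so one large room cannot starve the rest of the server. A minimal, self-contained sketch of that pattern, assuming the tokio runtime Conduit already uses (the item type and the per-item work are placeholders, not Conduit code):

use std::collections::BTreeMap;

// Process a large collection without monopolizing the async executor:
// do a batch of synchronous work, then yield after every 100 items.
async fn collect_with_yields(items: Vec<(u64, String)>) -> BTreeMap<u64, String> {
    let mut result = BTreeMap::new();
    let mut i = 0;
    for (key, value) in items {
        result.insert(key, value); // stand-in for the real per-item work
        i += 1;
        if i % 100 == 0 {
            // Let other tasks on the runtime make progress.
            tokio::task::yield_now().await;
        }
    }
    result
}

#[tokio::main]
async fn main() {
    let items: Vec<(u64, String)> = (0..1_000u64).map(|n| (n, n.to_string())).collect();
    assert_eq!(collect_with_yields(items).await.len(), 1_000);
}

The same `i % 100 == 0` yield shows up below in `state_full_ids`, `state_full`, `get_auth_chain`, `fetch_and_handle_outliers` and the sync handler.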

@@ -30,7 +30,7 @@ tokio = { version = "1.11.0", features = ["fs", "macros", "signal", "sync"] }
 # Used for storing data permanently
 sled = { version = "0.34.6", features = ["compression", "no_metrics"], optional = true }
 #sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] }
-persy = { version = "1.2" , optional = true, features=["background_ops"] }
+persy = { version = "1.2" , optional = true, features = ["background_ops"] }
 # Used for the http request / response body type for Ruma endpoints used with reqwest
 bytes = "1.1.0"
@@ -64,7 +64,7 @@ regex = "1.5.4"
 # jwt jsonwebtokens
 jsonwebtoken = "7.2.0"
 # Performance measurements
-tracing = { version = "0.1.26", features = ["release_max_level_warn"] }
+tracing = { version = "0.1.26", features = [] }
 tracing-subscriber = "0.2.20"
 tracing-flame = "0.1.0"
 opentelemetry = { version = "0.16.0", features = ["rt-tokio"] }
@@ -76,7 +76,7 @@ crossbeam = { version = "0.8.1", optional = true }
 num_cpus = "1.13.0"
 threadpool = "1.8.1"
 heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true }
-rocksdb = { version = "0.17.0", default-features = false, features = ["multi-threaded-cf", "zstd"], optional = true }
+rocksdb = { version = "0.17.0", default-features = true, features = ["multi-threaded-cf", "zstd"], optional = true }
 thread_local = "1.1.3"
 # used for TURN server authentication

@@ -137,7 +137,7 @@ pub async fn get_context_route(
 .expect("All rooms have state"),
 };
-let state_ids = db.rooms.state_full_ids(shortstatehash)?;
+let state_ids = db.rooms.state_full_ids(shortstatehash).await?;
 let end_token = events_after
 .last()

@@ -29,7 +29,7 @@ use ruma::{
 };
 use serde_json::value::{to_raw_value, RawValue as RawJsonValue};
 use std::{
-collections::{hash_map::Entry, BTreeMap, HashMap, HashSet},
+collections::{hash_map::Entry, BTreeMap, HashMap},
 iter,
 sync::{Arc, RwLock},
 time::{Duration, Instant},
@@ -48,19 +48,20 @@ pub async fn join_room_by_id_route(
 ) -> Result<join_room_by_id::v3::Response> {
 let sender_user = body.sender_user.as_ref().expect("user is authenticated");
-let mut servers: HashSet<_> = db
-.rooms
-.invite_state(sender_user, &body.room_id)?
-.unwrap_or_default()
-.iter()
-.filter_map(|event| serde_json::from_str(event.json().get()).ok())
-.filter_map(|event: serde_json::Value| event.get("sender").cloned())
-.filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
-.filter_map(|sender| UserId::parse(sender).ok())
-.map(|user| user.server_name().to_owned())
-.collect();
+let mut servers = Vec::new(); // There is no body.server_name for /roomId/join
+servers.extend(
+db.rooms
+.invite_state(sender_user, &body.room_id)?
+.unwrap_or_default()
+.iter()
+.filter_map(|event| serde_json::from_str(event.json().get()).ok())
+.filter_map(|event: serde_json::Value| event.get("sender").cloned())
+.filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
+.filter_map(|sender| UserId::parse(sender).ok())
+.map(|user| user.server_name().to_owned()),
+);
-servers.insert(body.room_id.server_name().to_owned());
+servers.push(body.room_id.server_name().to_owned());
 let ret = join_room_by_id_helper(
 &db,
@@ -91,19 +92,20 @@ pub async fn join_room_by_id_or_alias_route(
 let (servers, room_id) = match Box::<RoomId>::try_from(body.room_id_or_alias) {
 Ok(room_id) => {
-let mut servers: HashSet<_> = db
-.rooms
-.invite_state(sender_user, &room_id)?
-.unwrap_or_default()
-.iter()
-.filter_map(|event| serde_json::from_str(event.json().get()).ok())
-.filter_map(|event: serde_json::Value| event.get("sender").cloned())
-.filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
-.filter_map(|sender| UserId::parse(sender).ok())
-.map(|user| user.server_name().to_owned())
-.collect();
+let mut servers = body.server_name.clone();
+servers.extend(
+db.rooms
+.invite_state(sender_user, &room_id)?
+.unwrap_or_default()
+.iter()
+.filter_map(|event| serde_json::from_str(event.json().get()).ok())
+.filter_map(|event: serde_json::Value| event.get("sender").cloned())
+.filter_map(|sender| sender.as_str().map(|s| s.to_owned()))
+.filter_map(|sender| UserId::parse(sender).ok())
+.map(|user| user.server_name().to_owned()),
+);
-servers.insert(room_id.server_name().to_owned());
+servers.push(room_id.server_name().to_owned());
 (servers, room_id)
 }
 Err(room_alias) => {
@@ -413,7 +415,8 @@ pub async fn get_member_events_route(
 Ok(get_member_events::v3::Response {
 chunk: db
 .rooms
-.room_state_full(&body.room_id)?
+.room_state_full(&body.room_id)
+.await?
 .iter()
 .filter(|(key, _)| key.0 == StateEventType::RoomMember)
 .map(|(_, pdu)| pdu.to_member_event().into())
@@ -462,7 +465,7 @@ async fn join_room_by_id_helper(
 db: &Database,
 sender_user: Option<&UserId>,
 room_id: &RoomId,
-servers: &HashSet<Box<ServerName>>,
+servers: &[Box<ServerName>],
 _third_party_signed: Option<&IncomingThirdPartySigned>,
 ) -> Result<join_room_by_id::v3::Response> {
 let sender_user = sender_user.expect("user is authenticated");
@@ -478,7 +481,7 @@ async fn join_room_by_id_helper(
 let state_lock = mutex_state.lock().await;
 // Ask a remote server if we don't have this room
-if !db.rooms.exists(room_id)? && room_id.server_name() != db.globals.server_name() {
+if !db.rooms.exists(room_id)? {
 let mut make_join_response_and_server = Err(Error::BadServerResponse(
 "No server available to assist in joining.",
 ));
@@ -1032,6 +1035,13 @@ pub(crate) async fn invite_helper<'a>(
 return Ok(());
 }
+if !db.rooms.is_joined(sender_user, &room_id)? {
+return Err(Error::BadRequest(
+ErrorKind::Forbidden,
+"You don't have permission to view this room.",
+));
+}
 let mutex_state = Arc::clone(
 db.globals
 .roomid_mutex_state

@@ -124,7 +124,8 @@ pub async fn get_state_events_route(
 Ok(get_state_events::v3::Response {
 room_state: db
 .rooms
-.room_state_full(&body.room_id)?
+.room_state_full(&body.room_id)
+.await?
 .values()
 .map(|pdu| pdu.to_state_event())
 .collect(),

@@ -230,18 +230,20 @@ async fn sync_helper(
 for room_id in all_joined_rooms {
 let room_id = room_id?;
-// Get and drop the lock to wait for remaining operations to finish
-// This will make sure the we have all events until next_batch
-let mutex_insert = Arc::clone(
-db.globals
-.roomid_mutex_insert
-.write()
-.unwrap()
-.entry(room_id.clone())
-.or_default(),
-);
-let insert_lock = mutex_insert.lock().unwrap();
-drop(insert_lock);
+{
+// Get and drop the lock to wait for remaining operations to finish
+// This will make sure the we have all events until next_batch
+let mutex_insert = Arc::clone(
+db.globals
+.roomid_mutex_insert
+.write()
+.unwrap()
+.entry(room_id.clone())
+.or_default(),
+);
+let insert_lock = mutex_insert.lock().unwrap();
+drop(insert_lock);
+}
 let timeline_pdus;
 let limited;
@@ -296,10 +298,12 @@ async fn sync_helper(
 // Database queries:
-let current_shortstatehash = db
-.rooms
-.current_shortstatehash(&room_id)?
-.expect("All rooms have state");
+let current_shortstatehash = if let Some(s) = db.rooms.current_shortstatehash(&room_id)? {
+s
+} else {
+error!("Room {} has no state", room_id);
+continue;
+};
 let since_shortstatehash = db.rooms.get_token_shortstatehash(&room_id, since)?;
@@ -377,11 +381,12 @@ async fn sync_helper(
 let (joined_member_count, invited_member_count, heroes) = calculate_counts()?;
-let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?;
+let current_state_ids = db.rooms.state_full_ids(current_shortstatehash).await?;
 let mut state_events = Vec::new();
 let mut lazy_loaded = HashSet::new();
+let mut i = 0;
 for (shortstatekey, id) in current_state_ids {
 let (event_type, state_key) = db.rooms.get_statekey_from_short(shortstatekey)?;
@@ -394,6 +399,11 @@ async fn sync_helper(
 }
 };
 state_events.push(pdu);
+i += 1;
+if i % 100 == 0 {
+tokio::task::yield_now().await;
+}
 } else if !lazy_load_enabled
 || body.full_state
 || timeline_users.contains(&state_key)
@@ -411,6 +421,11 @@ async fn sync_helper(
 lazy_loaded.insert(uid);
 }
 state_events.push(pdu);
+i += 1;
+if i % 100 == 0 {
+tokio::task::yield_now().await;
+}
 }
 }
@@ -462,8 +477,8 @@ async fn sync_helper(
 let mut lazy_loaded = HashSet::new();
 if since_shortstatehash != current_shortstatehash {
-let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?;
-let since_state_ids = db.rooms.state_full_ids(since_shortstatehash)?;
+let current_state_ids = db.rooms.state_full_ids(current_shortstatehash).await?;
+let since_state_ids = db.rooms.state_full_ids(since_shortstatehash).await?;
 for (key, id) in current_state_ids {
 if body.full_state || since_state_ids.get(&key) != Some(&id) {
@@ -490,6 +505,7 @@ async fn sync_helper(
 }
 state_events.push(pdu);
+tokio::task::yield_now().await;
 }
 }
 }
@@ -753,17 +769,19 @@ async fn sync_helper(
 for result in all_left_rooms {
 let (room_id, left_state_events) = result?;
-// Get and drop the lock to wait for remaining operations to finish
-let mutex_insert = Arc::clone(
-db.globals
-.roomid_mutex_insert
-.write()
-.unwrap()
-.entry(room_id.clone())
-.or_default(),
-);
-let insert_lock = mutex_insert.lock().unwrap();
-drop(insert_lock);
+{
+// Get and drop the lock to wait for remaining operations to finish
+let mutex_insert = Arc::clone(
+db.globals
+.roomid_mutex_insert
+.write()
+.unwrap()
+.entry(room_id.clone())
+.or_default(),
+);
+let insert_lock = mutex_insert.lock().unwrap();
+drop(insert_lock);
+}
 let left_count = db.rooms.get_left_count(&room_id, &sender_user)?;
@@ -793,17 +811,19 @@ async fn sync_helper(
 for result in all_invited_rooms {
 let (room_id, invite_state_events) = result?;
-// Get and drop the lock to wait for remaining operations to finish
-let mutex_insert = Arc::clone(
-db.globals
-.roomid_mutex_insert
-.write()
-.unwrap()
-.entry(room_id.clone())
-.or_default(),
-);
-let insert_lock = mutex_insert.lock().unwrap();
-drop(insert_lock);
+{
+// Get and drop the lock to wait for remaining operations to finish
+let mutex_insert = Arc::clone(
+db.globals
+.roomid_mutex_insert
+.write()
+.unwrap()
+.entry(room_id.clone())
+.or_default(),
+);
+let insert_lock = mutex_insert.lock().unwrap();
+drop(insert_lock);
+}
 let invite_count = db.rooms.get_invite_count(&room_id, &sender_user)?;
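
The sync changes above also wrap the short-lived `roomid_mutex_insert` acquisition in its own braced block, so the `std::sync` guard is gone before the function reaches its next `.await`. A rough sketch of why that scoping matters (the function name and simplified signature are illustrative, not the actual sync code):

use std::sync::{Arc, Mutex};

// Take and release a std::sync mutex inside a narrow scope. Acquiring the lock
// acts as a barrier: once we get it, earlier writers that held it are done.
// Dropping the guard before the next await keeps the future Send and avoids
// parking the executor thread while a lock is held.
async fn wait_for_pending_inserts(insert_mutex: Arc<Mutex<()>>) {
    {
        let _insert_lock = insert_mutex.lock().unwrap();
        // guard dropped at the end of this block
    }
    // Safe to await now; no guard is carried across the await point.
    tokio::task::yield_now().await;
}

#[tokio::main]
async fn main() {
    wait_for_pending_inserts(Arc::new(Mutex::new(()))).await;
}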

@@ -18,7 +18,12 @@ pub async fn get_supported_versions_route(
 _body: Ruma<get_supported_versions::IncomingRequest>,
 ) -> Result<get_supported_versions::Response> {
 let resp = get_supported_versions::Response {
-versions: vec!["r0.5.0".to_owned(), "r0.6.0".to_owned(), "v1.1".to_owned(), "v1.2".to_owned()],
+versions: vec![
+"r0.5.0".to_owned(),
+"r0.6.0".to_owned(),
+"v1.1".to_owned(),
+"v1.2".to_owned(),
+],
 unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]),
 };

@@ -213,6 +213,8 @@ impl Database {
 userroomid_leftstate: builder.open_tree("userroomid_leftstate")?,
 roomuserid_leftcount: builder.open_tree("roomuserid_leftcount")?,
+disabledroomids: builder.open_tree("disabledroomids")?,
 lazyloadedids: builder.open_tree("lazyloadedids")?,
 userroomid_notificationcount: builder.open_tree("userroomid_notificationcount")?,

@@ -116,7 +116,7 @@ impl Admin {
 send_message(content, guard, &state_lock);
 }
 AdminRoomEvent::ProcessMessage(room_message) => {
-let reply_message = process_admin_message(&*guard, room_message);
+let reply_message = process_admin_message(&*guard, room_message).await;
 send_message(reply_message, guard, &state_lock);
 }
@@ -143,7 +143,7 @@ impl Admin {
 }
 // Parse and process a message from the admin room
-fn process_admin_message(db: &Database, room_message: String) -> RoomMessageEventContent {
+async fn process_admin_message(db: &Database, room_message: String) -> RoomMessageEventContent {
 let mut lines = room_message.lines();
 let command_line = lines.next().expect("each string has at least one line");
 let body: Vec<_> = lines.collect();
@@ -161,7 +161,7 @@ fn process_admin_message(db: &Database, room_message: String) -> RoomMessageEven
 }
 };
-match process_admin_command(db, admin_command, body) {
+match process_admin_command(db, admin_command, body).await {
 Ok(reply_message) => reply_message,
 Err(error) => {
 let markdown_message = format!(
@@ -231,9 +231,15 @@ enum AdminCommand {
 /// List all the currently registered appservices
 ListAppservices,
+/// List all rooms the server knows about
+ListRooms,
 /// List users in the database
 ListLocalUsers,
+/// List all rooms we are currently handling an incoming pdu from
+IncomingFederation,
 /// Get the auth_chain of a PDU
 GetAuthChain {
 /// An event ID (the $ character followed by the base64 reference hash)
@@ -269,6 +275,7 @@ enum AdminCommand {
 /// Username of the user for whom the password should be reset
 username: String,
 },
 /// Create a new user
 CreateUser {
 /// Username of the new user
@@ -276,9 +283,14 @@ enum AdminCommand {
 /// Password of the new user, if unspecified one is generated
 password: Option<String>,
 },
+/// Disables incoming federation handling for a room.
+DisableRoom { room_id: Box<RoomId> },
+/// Enables incoming federation handling for a room again.
+EnableRoom { room_id: Box<RoomId> },
 }
-fn process_admin_command(
+async fn process_admin_command(
 db: &Database,
 command: AdminCommand,
 body: Vec<&str>,
@@ -336,6 +348,26 @@ fn process_admin_command(
 RoomMessageEventContent::text_plain("Failed to get appservices.")
 }
 }
+AdminCommand::ListRooms => {
+let room_ids = db.rooms.iter_ids();
+let output = format!(
+"Rooms:\n{}",
+room_ids
+.filter_map(|r| r.ok())
+.map(|id| id.to_string()
++ "\tMembers: "
++ &db
+.rooms
+.room_joined_count(&id)
+.ok()
+.flatten()
+.unwrap_or(0)
+.to_string())
+.collect::<Vec<_>>()
+.join("\n")
+);
+RoomMessageEventContent::text_plain(output)
+}
 AdminCommand::ListLocalUsers => match db.users.list_local_users() {
 Ok(users) => {
 let mut msg: String = format!("Found {} local user account(s):\n", users.len());
@@ -344,6 +376,22 @@ fn process_admin_command(
 }
 Err(e) => RoomMessageEventContent::text_plain(e.to_string()),
 },
+AdminCommand::IncomingFederation => {
+let map = db.globals.roomid_federationhandletime.read().unwrap();
+let mut msg: String = format!("Handling {} incoming pdus:\n", map.len());
+for (r, (e, i)) in map.iter() {
+let elapsed = i.elapsed();
+msg += &format!(
+"{} {}: {}m{}s\n",
+r,
+e,
+elapsed.as_secs() / 60,
+elapsed.as_secs() % 60
+);
+}
+RoomMessageEventContent::text_plain(&msg)
+}
 AdminCommand::GetAuthChain { event_id } => {
 let event_id = Arc::<EventId>::from(event_id);
 if let Some(event) = db.rooms.get_pdu_json(&event_id)? {
@@ -356,7 +404,9 @@ fn process_admin_command(
 Error::bad_database("Invalid room id field in event in database")
 })?;
 let start = Instant::now();
-let count = server_server::get_auth_chain(room_id, vec![event_id], db)?.count();
+let count = server_server::get_auth_chain(room_id, vec![event_id], db)
+.await?
+.count();
 let elapsed = start.elapsed();
 RoomMessageEventContent::text_plain(format!(
 "Loaded auth chain with length {} in {:?}",
@@ -545,6 +595,14 @@ fn process_admin_command(
 "Created user with user_id: {user_id} and password: {password}"
 ))
 }
+AdminCommand::DisableRoom { room_id } => {
+db.rooms.disabledroomids.insert(room_id.as_bytes(), &[])?;
+RoomMessageEventContent::text_plain("Room disabled.")
+}
+AdminCommand::EnableRoom { room_id } => {
+db.rooms.disabledroomids.remove(room_id.as_bytes())?;
+RoomMessageEventContent::text_plain("Room enabled.")
+}
 };
 Ok(reply_message_content)
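
The new `IncomingFederation` command above reports how long each in-flight PDU has been handled, using whole-minute/second arithmetic on `Duration::as_secs()`. A tiny standalone version of just that formatting (the helper name is made up for the example):

use std::time::Duration;

// Render an elapsed Duration as "XmYs", as the IncomingFederation output does.
fn format_minutes_seconds(elapsed: Duration) -> String {
    format!("{}m{}s", elapsed.as_secs() / 60, elapsed.as_secs() % 60)
}

fn main() {
    assert_eq!(format_minutes_seconds(Duration::from_secs(125)), "2m5s");
    assert_eq!(format_minutes_seconds(Duration::from_secs(3601)), "60m1s");
}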

@@ -52,6 +52,8 @@ pub struct Globals {
 pub roomid_mutex_insert: RwLock<HashMap<Box<RoomId>, Arc<Mutex<()>>>>,
 pub roomid_mutex_state: RwLock<HashMap<Box<RoomId>, Arc<TokioMutex<()>>>>,
 pub roomid_mutex_federation: RwLock<HashMap<Box<RoomId>, Arc<TokioMutex<()>>>>, // this lock will be held longer
+pub roomid_federationhandletime: RwLock<HashMap<Box<RoomId>, (Box<EventId>, Instant)>>,
+pub stateres_mutex: Arc<Mutex<()>>,
 pub rotate: RotationHandler,
 }
@@ -183,6 +185,8 @@ impl Globals {
 roomid_mutex_state: RwLock::new(HashMap::new()),
 roomid_mutex_insert: RwLock::new(HashMap::new()),
 roomid_mutex_federation: RwLock::new(HashMap::new()),
+roomid_federationhandletime: RwLock::new(HashMap::new()),
+stateres_mutex: Arc::new(Mutex::new(())),
 sync_receivers: RwLock::new(HashMap::new()),
 rotate: RotationHandler::new(),
 };
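
The two new `Globals` fields tie into the changes further down: `roomid_federationhandletime` records which event a room is currently processing and since when (this is what the `IncomingFederation` admin command reads), and `stateres_mutex` serializes state resolution. A simplified sketch of the bookkeeping, with plain `String` keys standing in for `Box<RoomId>` / `Box<EventId>`:

use std::collections::HashMap;
use std::sync::RwLock;
use std::time::Instant;

// Record which event a room is currently handling and for how long, so an
// admin command can list rooms with long-running incoming federation work.
fn track_handling(
    handle_times: &RwLock<HashMap<String, (String, Instant)>>,
    room_id: &str,
    event_id: &str,
) {
    handle_times
        .write()
        .unwrap()
        .insert(room_id.to_owned(), (event_id.to_owned(), Instant::now()));
}

fn finish_handling(handle_times: &RwLock<HashMap<String, (String, Instant)>>, room_id: &str) {
    handle_times.write().unwrap().remove(room_id);
}

fn main() {
    let handle_times = RwLock::new(HashMap::new());
    track_handling(&handle_times, "!room:example.org", "$event");
    for (room, (event, since)) in handle_times.read().unwrap().iter() {
        println!("{} {}: running for {:?}", room, event, since.elapsed());
    }
    finish_handling(&handle_times, "!room:example.org");
}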

@@ -76,6 +76,8 @@ pub struct Rooms {
 pub(super) userroomid_leftstate: Arc<dyn Tree>,
 pub(super) roomuserid_leftcount: Arc<dyn Tree>,
+pub(super) disabledroomids: Arc<dyn Tree>, // Rooms where incoming federation handling is disabled
 pub(super) lazyloadedids: Arc<dyn Tree>, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId
 pub(super) userroomid_notificationcount: Arc<dyn Tree>, // NotifyCount = u64
@@ -142,20 +144,28 @@ impl Rooms {
 /// Builds a StateMap by iterating over all keys that start
 /// with state_hash, this gives the full state for the given state_hash.
 #[tracing::instrument(skip(self))]
-pub fn state_full_ids(&self, shortstatehash: u64) -> Result<BTreeMap<u64, Arc<EventId>>> {
+pub async fn state_full_ids(&self, shortstatehash: u64) -> Result<BTreeMap<u64, Arc<EventId>>> {
 let full_state = self
 .load_shortstatehash_info(shortstatehash)?
 .pop()
 .expect("there is always one layer")
 .1;
-full_state
-.into_iter()
-.map(|compressed| self.parse_compressed_state_event(compressed))
-.collect()
+let mut result = BTreeMap::new();
+let mut i = 0;
+for compressed in full_state.into_iter() {
+let parsed = self.parse_compressed_state_event(compressed)?;
+result.insert(parsed.0, parsed.1);
+i += 1;
+if i % 100 == 0 {
+tokio::task::yield_now().await;
+}
+}
+Ok(result)
 }
 #[tracing::instrument(skip(self))]
-pub fn state_full(
+pub async fn state_full(
 &self,
 shortstatehash: u64,
 ) -> Result<HashMap<(StateEventType, String), Arc<PduEvent>>> {
@@ -164,14 +174,13 @@ impl Rooms {
 .pop()
 .expect("there is always one layer")
 .1;
-Ok(full_state
-.into_iter()
-.map(|compressed| self.parse_compressed_state_event(compressed))
-.filter_map(|r| r.ok())
-.map(|(_, eventid)| self.get_pdu(&eventid))
-.filter_map(|r| r.ok().flatten())
-.map(|pdu| {
-Ok::<_, Error>((
+let mut result = HashMap::new();
+let mut i = 0;
+for compressed in full_state {
+let (_, eventid) = self.parse_compressed_state_event(compressed)?;
+if let Some(pdu) = self.get_pdu(&eventid)? {
+result.insert(
 (
 pdu.kind.to_string().into(),
 pdu.state_key
@@ -180,10 +189,16 @@ impl Rooms {
 .clone(),
 ),
 pdu,
-))
-})
-.filter_map(|r| r.ok())
-.collect())
+);
+}
+i += 1;
+if i % 100 == 0 {
+tokio::task::yield_now().await;
+}
+}
+Ok(result)
 }
 /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`).
@@ -226,7 +241,6 @@ impl Rooms {
 }
 /// Returns the state hash for this pdu.
-#[tracing::instrument(skip(self))]
 pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result<Option<u64>> {
 self.eventid_shorteventid
 .get(event_id.as_bytes())?
@@ -529,7 +543,6 @@ impl Rooms {
 }
 }
-#[tracing::instrument(skip(self, globals))]
 pub fn compress_state_event(
 &self,
 shortstatekey: u64,
@@ -546,7 +559,6 @@ impl Rooms {
 }
 /// Returns shortstatekey, event id
-#[tracing::instrument(skip(self, compressed_event))]
 pub fn parse_compressed_state_event(
 &self,
 compressed_event: CompressedStateEvent,
@@ -705,7 +717,6 @@ impl Rooms {
 }
 /// Returns (shortstatehash, already_existed)
-#[tracing::instrument(skip(self, globals))]
 fn get_or_create_shortstatehash(
 &self,
 state_hash: &StateHashId,
@@ -726,7 +737,6 @@ impl Rooms {
 })
 }
-#[tracing::instrument(skip(self, globals))]
 pub fn get_or_create_shorteventid(
 &self,
 event_id: &EventId,
@@ -757,7 +767,6 @@ impl Rooms {
 Ok(short)
 }
-#[tracing::instrument(skip(self))]
 pub fn get_shortroomid(&self, room_id: &RoomId) -> Result<Option<u64>> {
 self.roomid_shortroomid
 .get(room_id.as_bytes())?
@@ -768,7 +777,6 @@ impl Rooms {
 .transpose()
 }
-#[tracing::instrument(skip(self))]
 pub fn get_shortstatekey(
 &self,
 event_type: &StateEventType,
@@ -806,7 +814,6 @@ impl Rooms {
 Ok(short)
 }
-#[tracing::instrument(skip(self, globals))]
 pub fn get_or_create_shortroomid(
 &self,
 room_id: &RoomId,
@@ -824,7 +831,6 @@ impl Rooms {
 })
 }
-#[tracing::instrument(skip(self, globals))]
 pub fn get_or_create_shortstatekey(
 &self,
 event_type: &StateEventType,
@@ -865,7 +871,6 @@ impl Rooms {
 Ok(short)
 }
-#[tracing::instrument(skip(self))]
 pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result<Arc<EventId>> {
 if let Some(id) = self
 .shorteventid_cache
@@ -894,7 +899,6 @@ impl Rooms {
 Ok(event_id)
 }
-#[tracing::instrument(skip(self))]
 pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> {
 if let Some(id) = self
 .shortstatekey_cache
@@ -938,12 +942,12 @@ impl Rooms {
 /// Returns the full room state.
 #[tracing::instrument(skip(self))]
-pub fn room_state_full(
+pub async fn room_state_full(
 &self,
 room_id: &RoomId,
 ) -> Result<HashMap<(StateEventType, String), Arc<PduEvent>>> {
 if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? {
-self.state_full(current_shortstatehash)
+self.state_full(current_shortstatehash).await
 } else {
 Ok(HashMap::new())
 }
@@ -980,14 +984,12 @@ impl Rooms {
 }
 /// Returns the `count` of this pdu's id.
-#[tracing::instrument(skip(self))]
 pub fn pdu_count(&self, pdu_id: &[u8]) -> Result<u64> {
 utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::<u64>()..])
 .map_err(|_| Error::bad_database("PDU has invalid count bytes."))
 }
 /// Returns the `count` of this pdu's id.
-#[tracing::instrument(skip(self))]
 pub fn get_pdu_count(&self, event_id: &EventId) -> Result<Option<u64>> {
 self.eventid_pduid
 .get(event_id.as_bytes())?
@@ -1016,7 +1018,6 @@ impl Rooms {
 }
 /// Returns the json of a pdu.
-#[tracing::instrument(skip(self))]
 pub fn get_pdu_json(&self, event_id: &EventId) -> Result<Option<CanonicalJsonObject>> {
 self.eventid_pduid
 .get(event_id.as_bytes())?
@@ -1035,7 +1036,6 @@ impl Rooms {
 }
 /// Returns the json of a pdu.
-#[tracing::instrument(skip(self))]
 pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result<Option<CanonicalJsonObject>> {
 self.eventid_outlierpdu
 .get(event_id.as_bytes())?
@@ -1046,7 +1046,6 @@ impl Rooms {
 }
 /// Returns the json of a pdu.
-#[tracing::instrument(skip(self))]
 pub fn get_non_outlier_pdu_json(
 &self,
 event_id: &EventId,
@@ -1066,7 +1065,6 @@ impl Rooms {
 }
 /// Returns the pdu's id.
-#[tracing::instrument(skip(self))]
 pub fn get_pdu_id(&self, event_id: &EventId) -> Result<Option<Vec<u8>>> {
 self.eventid_pduid.get(event_id.as_bytes())
 }
@@ -1074,7 +1072,6 @@ impl Rooms {
 /// Returns the pdu.
 ///
 /// Checks the `eventid_outlierpdu` Tree if not found in the timeline.
-#[tracing::instrument(skip(self))]
 pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result<Option<PduEvent>> {
 self.eventid_pduid
 .get(event_id.as_bytes())?
@@ -1093,7 +1090,6 @@ impl Rooms {
 /// Returns the pdu.
 ///
 /// Checks the `eventid_outlierpdu` Tree if not found in the timeline.
-#[tracing::instrument(skip(self))]
 pub fn get_pdu(&self, event_id: &EventId) -> Result<Option<Arc<PduEvent>>> {
 if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) {
 return Ok(Some(Arc::clone(p)));
@@ -1130,7 +1126,6 @@ impl Rooms {
 /// Returns the pdu.
 ///
 /// This does __NOT__ check the outliers `Tree`.
-#[tracing::instrument(skip(self))]
 pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result<Option<PduEvent>> {
 self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| {
 Ok(Some(
@@ -1141,7 +1136,6 @@ impl Rooms {
 }
 /// Returns the pdu as a `BTreeMap<String, CanonicalJsonValue>`.
-#[tracing::instrument(skip(self))]
 pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result<Option<CanonicalJsonObject>> {
 self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| {
 Ok(Some(
@@ -1230,7 +1224,6 @@ impl Rooms {
 }
 /// Returns the pdu from the outlier tree.
-#[tracing::instrument(skip(self))]
 pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result<Option<PduEvent>> {
 self.eventid_outlierpdu
 .get(event_id.as_bytes())?
@@ -2858,6 +2851,18 @@ impl Rooms {
 Ok(self.publicroomids.get(room_id.as_bytes())?.is_some())
 }
+#[tracing::instrument(skip(self))]
+pub fn iter_ids(&self) -> impl Iterator<Item = Result<Box<RoomId>>> + '_ {
+self.roomid_shortroomid.iter().map(|(bytes, _)| {
+RoomId::parse(
+utils::string_from_bytes(&bytes).map_err(|_| {
+Error::bad_database("Room ID in publicroomids is invalid unicode.")
+})?,
+)
+.map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid."))
+})
+}
 #[tracing::instrument(skip(self))]
 pub fn public_rooms(&self) -> impl Iterator<Item = Result<Box<RoomId>>> + '_ {
 self.publicroomids.iter().map(|(bytes, _)| {
@@ -3140,6 +3145,10 @@ impl Rooms {
 .transpose()
 }
+pub fn is_disabled(&self, room_id: &RoomId) -> Result<bool> {
+Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some())
+}
 /// Returns an iterator over all rooms this user joined.
 #[tracing::instrument(skip(self))]
 pub fn rooms_joined<'a>(

@@ -338,7 +338,7 @@ impl Credentials for XMatrix {
 "origin" => origin = Some(value.try_into().ok()?),
 "key" => key = Some(value.to_owned()),
 "sig" => sig = Some(value.to_owned()),
-_ => warn!(
+_ => debug!(
 "Unexpected field `{}` in X-Matrix Authorization header",
 name
 ),

@@ -691,7 +691,7 @@ pub async fn send_transaction_message_route(
 .roomid_mutex_federation
 .write()
 .unwrap()
-.entry(room_id.clone())
+.entry(room_id.to_owned())
 .or_default(),
 );
 let mutex_lock = mutex.lock().await;
@@ -768,7 +768,7 @@ pub async fn send_transaction_message_route(
 )?;
 } else {
 // TODO fetch missing events
-debug!("No known event ids in read receipt: {:?}", user_updates);
+info!("No known event ids in read receipt: {:?}", user_updates);
 }
 }
 }
@@ -926,6 +926,13 @@ pub(crate) async fn handle_incoming_pdu<'a>(
 }
 }
+match db.rooms.is_disabled(room_id) {
+Ok(false) => {}
+_ => {
+return Err("Federation of this room is currently disabled on this server.".to_owned());
+}
+}
 // 1. Skip the PDU if we already have it as a timeline event
 if let Ok(Some(pdu_id)) = db.rooms.get_pdu_id(event_id) {
 return Ok(Some(pdu_id.to_vec()));
@@ -1038,6 +1045,34 @@ pub(crate) async fn handle_incoming_pdu<'a>(
 let mut errors = 0;
 for prev_id in dbg!(sorted) {
+match db.rooms.is_disabled(room_id) {
+Ok(false) => {}
+_ => {
+return Err(
+"Federation of this room is currently disabled on this server.".to_owned(),
+);
+}
+}
+if let Some((time, tries)) = db
+.globals
+.bad_event_ratelimiter
+.read()
+.unwrap()
+.get(&*prev_id)
+{
+// Exponential backoff
+let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries);
+if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
+min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
+}
+if time.elapsed() < min_elapsed_duration {
+info!("Backing off from {}", prev_id);
+continue;
+}
+}
 if errors >= 5 {
 break;
 }
@@ -1047,7 +1082,11 @@ pub(crate) async fn handle_incoming_pdu<'a>(
 }
 let start_time = Instant::now();
-let event_id = pdu.event_id.clone();
+db.globals
+.roomid_federationhandletime
+.write()
+.unwrap()
+.insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time));
 if let Err(e) = upgrade_outlier_to_timeline_pdu(
 pdu,
 json,
@@ -1060,19 +1099,44 @@ pub(crate) async fn handle_incoming_pdu<'a>(
 .await
 {
 errors += 1;
-warn!("Prev event {} failed: {}", event_id, e);
+warn!("Prev event {} failed: {}", prev_id, e);
+match db
+.globals
+.bad_event_ratelimiter
+.write()
+.unwrap()
+.entry((*prev_id).to_owned())
+{
+hash_map::Entry::Vacant(e) => {
+e.insert((Instant::now(), 1));
+}
+hash_map::Entry::Occupied(mut e) => {
+*e.get_mut() = (Instant::now(), e.get().1 + 1)
+}
+}
 }
 let elapsed = start_time.elapsed();
+db.globals
+.roomid_federationhandletime
+.write()
+.unwrap()
+.remove(&room_id.to_owned());
 warn!(
 "Handling prev event {} took {}m{}s",
-event_id,
+prev_id,
 elapsed.as_secs() / 60,
 elapsed.as_secs() % 60
 );
 }
 }
-upgrade_outlier_to_timeline_pdu(
+let start_time = Instant::now();
+db.globals
+.roomid_federationhandletime
+.write()
+.unwrap()
+.insert(room_id.to_owned(), (event_id.to_owned(), start_time));
+let r = upgrade_outlier_to_timeline_pdu(
 incoming_pdu,
 val,
 &create_event,
@@ -1081,10 +1145,17 @@ pub(crate) async fn handle_incoming_pdu<'a>(
 room_id,
 pub_key_map,
 )
-.await
+.await;
+db.globals
+.roomid_federationhandletime
+.write()
+.unwrap()
+.remove(&room_id.to_owned());
+r
 }
-#[tracing::instrument(skip_all)]
+#[tracing::instrument(skip(create_event, value, db, pub_key_map))]
 fn handle_outlier_pdu<'a>(
 origin: &'a ServerName,
 create_event: &'a PduEvent,
@@ -1166,7 +1237,7 @@ fn handle_outlier_pdu<'a>(
 .await;
 // 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events
-debug!(
+info!(
 "Auth check for {} based on auth events",
 incoming_pdu.event_id
 );
@@ -1221,19 +1292,19 @@ fn handle_outlier_pdu<'a>(
 return Err("Event has failed auth check with auth events.".to_owned());
 }
-debug!("Validation successful.");
+info!("Validation successful.");
 // 7. Persist the event as an outlier.
 db.rooms
 .add_pdu_outlier(&incoming_pdu.event_id, &val)
 .map_err(|_| "Failed to add pdu as outlier.".to_owned())?;
-debug!("Added pdu as outlier.");
+info!("Added pdu as outlier.");
 Ok((Arc::new(incoming_pdu), val))
 })
 }
-#[tracing::instrument(skip_all)]
+#[tracing::instrument(skip(incoming_pdu, val, create_event, db, pub_key_map))]
 async fn upgrade_outlier_to_timeline_pdu(
 incoming_pdu: Arc<PduEvent>,
 val: BTreeMap<String, CanonicalJsonValue>,
@@ -1255,6 +1326,8 @@ async fn upgrade_outlier_to_timeline_pdu(
 return Err("Event has been soft failed".into());
 }
+info!("Upgrading {} to timeline pdu", incoming_pdu.event_id);
 let create_event_content: RoomCreateEventContent =
 serde_json::from_str(create_event.content.get()).map_err(|e| {
 warn!("Invalid create event: {}", e);
@@ -1270,7 +1343,7 @@ async fn upgrade_outlier_to_timeline_pdu(
 // TODO: if we know the prev_events of the incoming event we can avoid the request and build
 // the state from a known point and resolve if > 1 prev_event
-debug!("Requesting state at event.");
+info!("Requesting state at event");
 let mut state_at_incoming_event = None;
 if incoming_pdu.prev_events.len() == 1 {
@@ -1280,11 +1353,14 @@ async fn upgrade_outlier_to_timeline_pdu(
 .pdu_shortstatehash(prev_event)
 .map_err(|_| "Failed talking to db".to_owned())?;
-let state =
-prev_event_sstatehash.map(|shortstatehash| db.rooms.state_full_ids(shortstatehash));
+let state = if let Some(shortstatehash) = prev_event_sstatehash {
+Some(db.rooms.state_full_ids(shortstatehash).await)
+} else {
+None
+};
 if let Some(Ok(mut state)) = state {
-warn!("Using cached state");
+info!("Using cached state");
 let prev_pdu =
 db.rooms.get_pdu(prev_event).ok().flatten().ok_or_else(|| {
 "Could not find prev event, but we know the state.".to_owned()
@@ -1307,7 +1383,7 @@ async fn upgrade_outlier_to_timeline_pdu(
 state_at_incoming_event = Some(state);
 }
 } else {
-warn!("Calculating state at event using state res");
+info!("Calculating state at event using state res");
 let mut extremity_sstatehashes = HashMap::new();
 let mut okay = true;
@@ -1337,6 +1413,7 @@ async fn upgrade_outlier_to_timeline_pdu(
 let mut leaf_state: BTreeMap<_, _> = db
 .rooms
 .state_full_ids(sstatehash)
+.await
 .map_err(|_| "Failed to ask db for room state.".to_owned())?;
 if let Some(state_key) = &prev_event.state_key {
@@ -1368,6 +1445,7 @@ async fn upgrade_outlier_to_timeline_pdu(
 auth_chain_sets.push(
 get_auth_chain(room_id, starting_events, db)
+.await
 .map_err(|_| "Failed to load auth chain.".to_owned())?
 .collect(),
 );
@@ -1375,18 +1453,18 @@ async fn upgrade_outlier_to_timeline_pdu(
 fork_states.push(state);
 }
-state_at_incoming_event = match state_res::resolve(
-room_version_id,
-&fork_states,
-auth_chain_sets,
-|id| {
-let res = db.rooms.get_pdu(id);
-if let Err(e) = &res {
-error!("LOOK AT ME Failed to fetch event: {}", e);
-}
-res.ok().flatten()
-},
-) {
+let lock = db.globals.stateres_mutex.lock();
+let result = state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| {
+let res = db.rooms.get_pdu(id);
+if let Err(e) = &res {
+error!("LOOK AT ME Failed to fetch event: {}", e);
+}
+res.ok().flatten()
+});
+drop(lock);
+state_at_incoming_event = match result {
 Ok(new_state) => Some(
 new_state
 .into_iter()
@@ -1407,12 +1485,12 @@ async fn upgrade_outlier_to_timeline_pdu(
 warn!("State resolution on prev events failed, either an event could not be found or deserialization: {}", e);
 None
 }
-};
+}
 }
 }
 if state_at_incoming_event.is_none() {
-warn!("Calling /state_ids");
+info!("Calling /state_ids");
 // Call /state_ids to find out what the state at this pdu is. We trust the server's
 // response to some extend, but we still do a lot of checks on the events
 match db
@@ -1428,7 +1506,7 @@ async fn upgrade_outlier_to_timeline_pdu(
 .await
 {
 Ok(res) => {
-warn!("Fetching state events at event.");
+info!("Fetching state events at event.");
 let state_vec = fetch_and_handle_outliers(
 db,
 origin,
@@ -1494,6 +1572,7 @@ async fn upgrade_outlier_to_timeline_pdu(
 let state_at_incoming_event =
 state_at_incoming_event.expect("we always set this to some above");
+info!("Starting auth check");
 // 11. Check the auth of the event passes based on the state of the event
 let check_result = state_res::event_auth::auth_check(
 &room_version,
@@ -1513,7 +1592,7 @@ async fn upgrade_outlier_to_timeline_pdu(
 if !check_result {
 return Err("Event has failed auth check with state at the event.".into());
 }
-debug!("Auth check succeeded.");
+info!("Auth check succeeded");
 // We start looking at current room state now, so lets lock the room
@@ -1529,6 +1608,7 @@ async fn upgrade_outlier_to_timeline_pdu(
 // Now we calculate the set of extremities this room has after the incoming event has been
 // applied. We start with the previous extremities (aka leaves)
+info!("Calculating extremities");
 let mut extremities = db
 .rooms
 .get_pdu_leaves(room_id)
@@ -1544,16 +1624,18 @@ async fn upgrade_outlier_to_timeline_pdu(
 // Only keep those extremities were not referenced yet
 extremities.retain(|id| !matches!(db.rooms.is_event_referenced(room_id, id), Ok(true)));
-let current_sstatehash = db
-.rooms
-.current_shortstatehash(room_id)
-.map_err(|_| "Failed to load current state hash.".to_owned())?
-.expect("every room has state");
-let current_state_ids = db
-.rooms
-.state_full_ids(current_sstatehash)
-.map_err(|_| "Failed to load room state.")?;
+info!("Compressing state at event");
+let state_ids_compressed = state_at_incoming_event
+.iter()
+.map(|(shortstatekey, id)| {
+db.rooms
+.compress_state_event(*shortstatekey, id, &db.globals)
+.map_err(|_| "Failed to compress_state_event".to_owned())
+})
+.collect::<Result<_, _>>()?;
+// 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it
+info!("Starting soft fail auth check");
 let auth_events = db
 .rooms
@@ -1566,18 +1648,6 @@ async fn upgrade_outlier_to_timeline_pdu(
 )
 .map_err(|_| "Failed to get_auth_events.".to_owned())?;
-let state_ids_compressed = state_at_incoming_event
-.iter()
-.map(|(shortstatekey, id)| {
-db.rooms
-.compress_state_event(*shortstatekey, id, &db.globals)
-.map_err(|_| "Failed to compress_state_event".to_owned())
-})
-.collect::<Result<_, _>>()?;
-// 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it
-debug!("starting soft fail auth check");
 let soft_fail = !state_res::event_auth::auth_check(
 &room_version,
 &incoming_pdu,
@@ -1610,8 +1680,23 @@ async fn upgrade_outlier_to_timeline_pdu(
 }
 if incoming_pdu.state_key.is_some() {
+info!("Loading current room state ids");
+let current_sstatehash = db
+.rooms
+.current_shortstatehash(room_id)
+.map_err(|_| "Failed to load current state hash.".to_owned())?
+.expect("every room has state");
+let current_state_ids = db
+.rooms
+.state_full_ids(current_sstatehash)
+.await
+.map_err(|_| "Failed to load room state.")?;
+info!("Preparing for stateres to derive new room state");
 let mut extremity_sstatehashes = HashMap::new();
+info!("Loading extremities");
 for id in dbg!(&extremities) {
 match db
 .rooms
@@ -1671,6 +1756,7 @@ async fn upgrade_outlier_to_timeline_pdu(
 let new_room_state = if fork_states.is_empty() {
 return Err("State is empty.".to_owned());
 } else if fork_states.iter().skip(1).all(|f| &fork_states[0] == f) {
+info!("State resolution trivial");
 // There was only one state, so it has to be the room's current state (because that is
 // always included)
 fork_states[0]
@@ -1682,6 +1768,7 @@ async fn upgrade_outlier_to_timeline_pdu(
 })
 .collect::<Result<_, _>>()?
 } else {
+info!("Loading auth chains");
 // We do need to force an update to this room's state
 update_state = true;
@@ -1693,11 +1780,14 @@ async fn upgrade_outlier_to_timeline_pdu(
 state.iter().map(|(_, id)| id.clone()).collect(),
 db,
 )
+.await
 .map_err(|_| "Failed to load auth chain.".to_owned())?
 .collect(),
 );
 }
+info!("Loading fork states");
 let fork_states: Vec<_> = fork_states
 .into_iter()
 .map(|map| {
@@ -1715,6 +1805,9 @@ async fn upgrade_outlier_to_timeline_pdu(
 })
 .collect();
+info!("Resolving state");
+let lock = db.globals.stateres_mutex.lock();
 let state = match state_res::resolve(
 room_version_id,
 &fork_states,
@@ -1733,6 +1826,10 @@ async fn upgrade_outlier_to_timeline_pdu(
 }
 };
+drop(lock);
+info!("State resolution done. Compressing state");
 state
 .into_iter()
 .map(|((event_type, state_key), event_id)| {
@@ -1753,13 +1850,14 @@ async fn upgrade_outlier_to_timeline_pdu(
 // Set the new room state to the resolved state
 if update_state {
+info!("Forcing new room state");
 db.rooms
 .force_state(room_id, new_room_state, db)
 .map_err(|_| "Failed to set new room state.".to_owned())?;
 }
-debug!("Updated resolved state");
 }
+info!("Appending pdu to timeline");
 extremities.insert(incoming_pdu.event_id.clone());
 // Now that the event has passed all auth it is added into the timeline.
@ -1844,17 +1942,23 @@ pub(crate) fn fetch_and_handle_outliers<'a>(
let mut todo_auth_events = vec![Arc::clone(id)]; let mut todo_auth_events = vec![Arc::clone(id)];
let mut events_in_reverse_order = Vec::new(); let mut events_in_reverse_order = Vec::new();
let mut events_all = HashSet::new(); let mut events_all = HashSet::new();
let mut i = 0;
while let Some(next_id) = todo_auth_events.pop() { while let Some(next_id) = todo_auth_events.pop() {
if events_all.contains(&next_id) { if events_all.contains(&next_id) {
continue; continue;
} }
i += 1;
if i % 100 == 0 {
tokio::task::yield_now().await;
}
if let Ok(Some(_)) = db.rooms.get_pdu(&next_id) { if let Ok(Some(_)) = db.rooms.get_pdu(&next_id) {
trace!("Found {} in db", id); trace!("Found {} in db", id);
continue; continue;
} }
warn!("Fetching {} over federation.", next_id); info!("Fetching {} over federation.", next_id);
match db match db
.sending .sending
.send_federation_request( .send_federation_request(
@ -1865,7 +1969,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>(
.await .await
{ {
Ok(res) => { Ok(res) => {
warn!("Got {} over federation", next_id); info!("Got {} over federation", next_id);
let (calculated_event_id, value) = let (calculated_event_id, value) =
match crate::pdu::gen_event_id_canonical_json(&res.pdu, &db) { match crate::pdu::gen_event_id_canonical_json(&res.pdu, &db) {
Ok(t) => t, Ok(t) => t,
@ -2187,7 +2291,7 @@ fn append_incoming_pdu<'a>(
} }
#[tracing::instrument(skip(starting_events, db))] #[tracing::instrument(skip(starting_events, db))]
pub(crate) fn get_auth_chain<'a>( pub(crate) async fn get_auth_chain<'a>(
room_id: &RoomId, room_id: &RoomId,
starting_events: Vec<Arc<EventId>>, starting_events: Vec<Arc<EventId>>,
db: &'a Database, db: &'a Database,
@ -2196,10 +2300,15 @@ pub(crate) fn get_auth_chain<'a>(
let mut buckets = vec![BTreeSet::new(); NUM_BUCKETS]; let mut buckets = vec![BTreeSet::new(); NUM_BUCKETS];
let mut i = 0;
for id in starting_events { for id in starting_events {
let short = db.rooms.get_or_create_shorteventid(&id, &db.globals)?; let short = db.rooms.get_or_create_shorteventid(&id, &db.globals)?;
let bucket_id = (short % NUM_BUCKETS as u64) as usize; let bucket_id = (short % NUM_BUCKETS as u64) as usize;
buckets[bucket_id].insert((short, id.clone())); buckets[bucket_id].insert((short, id.clone()));
i += 1;
if i % 100 == 0 {
tokio::task::yield_now().await;
}
} }
let mut full_auth_chain = HashSet::new(); let mut full_auth_chain = HashSet::new();
@ -2222,6 +2331,7 @@ pub(crate) fn get_auth_chain<'a>(
let mut chunk_cache = HashSet::new(); let mut chunk_cache = HashSet::new();
let mut hits2 = 0; let mut hits2 = 0;
let mut misses2 = 0; let mut misses2 = 0;
let mut i = 0;
for (sevent_id, event_id) in chunk { for (sevent_id, event_id) in chunk {
if let Some(cached) = db.rooms.get_auth_chain_from_cache(&[sevent_id])? { if let Some(cached) = db.rooms.get_auth_chain_from_cache(&[sevent_id])? {
hits2 += 1; hits2 += 1;
@ -2237,6 +2347,11 @@ pub(crate) fn get_auth_chain<'a>(
auth_chain.len() auth_chain.len()
); );
chunk_cache.extend(auth_chain.iter()); chunk_cache.extend(auth_chain.iter());
i += 1;
if i % 100 == 0 {
tokio::task::yield_now().await;
}
}; };
} }
println!( println!(
@@ -2457,7 +2572,7 @@ pub async fn get_event_authorization_route(
 let room_id = <&RoomId>::try_from(room_id_str)
 .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?;
-let auth_chain_ids = get_auth_chain(room_id, vec![Arc::from(&*body.event_id)], &db)?;
+let auth_chain_ids = get_auth_chain(room_id, vec![Arc::from(&*body.event_id)], &db).await?;
 Ok(get_event_authorization::v1::Response {
 auth_chain: auth_chain_ids
@@ -2502,7 +2617,8 @@ pub async fn get_room_state_route(
 let pdus = db
 .rooms
-.state_full_ids(shortstatehash)?
+.state_full_ids(shortstatehash)
+.await?
 .into_iter()
 .map(|(_, id)| {
 PduEvent::convert_to_outgoing_federation_event(
@@ -2511,7 +2627,8 @@ pub async fn get_room_state_route(
 })
 .collect();
-let auth_chain_ids = get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db)?;
+let auth_chain_ids =
+get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db).await?;
 Ok(get_room_state::v1::Response {
 auth_chain: auth_chain_ids
@@ -2561,12 +2678,14 @@ pub async fn get_room_state_ids_route(
 let pdu_ids = db
 .rooms
-.state_full_ids(shortstatehash)?
+.state_full_ids(shortstatehash)
+.await?
 .into_iter()
 .map(|(_, id)| (*id).to_owned())
 .collect();
-let auth_chain_ids = get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db)?;
+let auth_chain_ids =
+get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)], &db).await?;
 Ok(get_room_state_ids::v1::Response {
 auth_chain_ids: auth_chain_ids.map(|id| (*id).to_owned()).collect(),
@@ -2872,12 +2991,13 @@ async fn create_join_event(
 ))?;
 drop(mutex_lock);
-let state_ids = db.rooms.state_full_ids(shortstatehash)?;
+let state_ids = db.rooms.state_full_ids(shortstatehash).await?;
 let auth_chain_ids = get_auth_chain(
 room_id,
 state_ids.iter().map(|(_, id)| id.clone()).collect(),
 db,
-)?;
+)
+.await?;
 let servers = db
 .rooms
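
The prev-event loop in `handle_incoming_pdu` above now skips events that failed recently, based on the try count stored in `bad_event_ratelimiter`. The waiting time grows quadratically with the number of failures and is capped at one day; a standalone version of just that computation (the helper name is invented for illustration):

use std::time::Duration;

// Delay grows quadratically with the number of failures and is capped at one
// day, matching the "5 * 60 seconds * tries * tries" rule used in the diff.
fn backoff_delay(tries: u32) -> Duration {
    let mut min_elapsed_duration = Duration::from_secs(5 * 60) * tries * tries;
    if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) {
        min_elapsed_duration = Duration::from_secs(60 * 60 * 24);
    }
    min_elapsed_duration
}

fn main() {
    assert_eq!(backoff_delay(1), Duration::from_secs(300)); // 5 minutes
    assert_eq!(backoff_delay(4), Duration::from_secs(4_800)); // 80 minutes
    assert_eq!(backoff_delay(20), Duration::from_secs(86_400)); // capped at 1 day
}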
