improvement: better end-to-end encryption (E2EE) over federation, faster incoming event handling

merge-requests/163/head
Timo Kösters 3 years ago
parent 72dd95f500
commit 81e056417c
No known key found for this signature in database
GPG Key ID: 24DA7517711A2BA4

39
Cargo.lock generated

@ -2045,7 +2045,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma" name = "ruma"
version = "0.3.0" version = "0.3.0"
source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
dependencies = [ dependencies = [
"assign", "assign",
"js_int", "js_int",
@ -2066,7 +2066,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-api" name = "ruma-api"
version = "0.18.3" version = "0.18.3"
source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
dependencies = [ dependencies = [
"bytes", "bytes",
"http", "http",
@ -2082,7 +2082,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-api-macros" name = "ruma-api-macros"
version = "0.18.3" version = "0.18.3"
source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
dependencies = [ dependencies = [
"proc-macro-crate", "proc-macro-crate",
"proc-macro2", "proc-macro2",
@ -2093,7 +2093,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-appservice-api" name = "ruma-appservice-api"
version = "0.4.0" version = "0.4.0"
source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
dependencies = [ dependencies = [
"ruma-api", "ruma-api",
"ruma-common", "ruma-common",
@ -2107,7 +2107,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-client-api" name = "ruma-client-api"
version = "0.12.2" version = "0.12.2"
source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
dependencies = [ dependencies = [
"assign", "assign",
"bytes", "bytes",
@ -2127,7 +2127,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-common" name = "ruma-common"
version = "0.6.0" version = "0.6.0"
source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
dependencies = [ dependencies = [
"indexmap", "indexmap",
"js_int", "js_int",
@ -2142,7 +2142,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-events" name = "ruma-events"
version = "0.24.4" version = "0.24.4"
source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
dependencies = [ dependencies = [
"indoc", "indoc",
"js_int", "js_int",
@ -2158,7 +2158,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-events-macros" name = "ruma-events-macros"
version = "0.24.4" version = "0.24.4"
source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
dependencies = [ dependencies = [
"proc-macro-crate", "proc-macro-crate",
"proc-macro2", "proc-macro2",
@ -2169,7 +2169,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-federation-api" name = "ruma-federation-api"
version = "0.3.0" version = "0.3.0"
source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
dependencies = [ dependencies = [
"js_int", "js_int",
"ruma-api", "ruma-api",
@ -2184,7 +2184,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-identifiers" name = "ruma-identifiers"
version = "0.20.0" version = "0.20.0"
source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
dependencies = [ dependencies = [
"paste", "paste",
"rand 0.8.4", "rand 0.8.4",
@ -2198,7 +2198,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-identifiers-macros" name = "ruma-identifiers-macros"
version = "0.20.0" version = "0.20.0"
source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
dependencies = [ dependencies = [
"quote", "quote",
"ruma-identifiers-validation", "ruma-identifiers-validation",
@ -2208,12 +2208,15 @@ dependencies = [
[[package]] [[package]]
name = "ruma-identifiers-validation" name = "ruma-identifiers-validation"
version = "0.5.0" version = "0.5.0"
source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
dependencies = [
"thiserror",
]
[[package]] [[package]]
name = "ruma-identity-service-api" name = "ruma-identity-service-api"
version = "0.3.0" version = "0.3.0"
source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
dependencies = [ dependencies = [
"js_int", "js_int",
"ruma-api", "ruma-api",
@ -2226,7 +2229,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-push-gateway-api" name = "ruma-push-gateway-api"
version = "0.3.0" version = "0.3.0"
source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
dependencies = [ dependencies = [
"js_int", "js_int",
"ruma-api", "ruma-api",
@ -2241,7 +2244,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-serde" name = "ruma-serde"
version = "0.5.0" version = "0.5.0"
source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
dependencies = [ dependencies = [
"bytes", "bytes",
"form_urlencoded", "form_urlencoded",
@ -2255,7 +2258,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-serde-macros" name = "ruma-serde-macros"
version = "0.5.0" version = "0.5.0"
source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
dependencies = [ dependencies = [
"proc-macro-crate", "proc-macro-crate",
"proc-macro2", "proc-macro2",
@ -2266,7 +2269,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-signatures" name = "ruma-signatures"
version = "0.9.0" version = "0.9.0"
source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
dependencies = [ dependencies = [
"base64 0.13.0", "base64 0.13.0",
"ed25519-dalek", "ed25519-dalek",
@ -2283,7 +2286,7 @@ dependencies = [
[[package]] [[package]]
name = "ruma-state-res" name = "ruma-state-res"
version = "0.3.0" version = "0.3.0"
source = "git+https://github.com/ruma/ruma?rev=f5ab038e22421ed338396ece977b6b2844772ced#f5ab038e22421ed338396ece977b6b2844772ced" source = "git+https://github.com/DevinR528/ruma?rev=2215049b60a1c3358f5a52215adf1e7bb88619a1#2215049b60a1c3358f5a52215adf1e7bb88619a1"
dependencies = [ dependencies = [
"itertools 0.10.1", "itertools 0.10.1",
"js_int", "js_int",

@ -18,8 +18,8 @@ edition = "2018"
rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle requests rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle requests
# Used for matrix spec type definitions and helpers # Used for matrix spec type definitions and helpers
ruma = { git = "https://github.com/ruma/ruma", rev = "f5ab038e22421ed338396ece977b6b2844772ced", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { git = "https://github.com/ruma/ruma", rev = "f5ab038e22421ed338396ece977b6b2844772ced", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
#ruma = { git = "https://github.com/timokoesters/ruma", rev = "995ccea20f5f6d4a8fb22041749ed4de22fa1b6a", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } ruma = { git = "https://github.com/DevinR528/ruma", rev = "2215049b60a1c3358f5a52215adf1e7bb88619a1", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
#ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] }
# Used for long polling and federation sender, should be the same as rocket::tokio # Used for long polling and federation sender, should be the same as rocket::tokio

@ -640,23 +640,40 @@ async fn join_room_by_id_helper(
db.rooms.add_pdu_outlier(&event_id, &value)?; db.rooms.add_pdu_outlier(&event_id, &value)?;
if let Some(state_key) = &pdu.state_key { if let Some(state_key) = &pdu.state_key {
state.insert((pdu.kind.clone(), state_key.clone()), pdu.event_id.clone()); let shortstatekey =
db.rooms
.get_or_create_shortstatekey(&pdu.kind, state_key, &db.globals)?;
state.insert(shortstatekey, pdu.event_id.clone());
} }
} }
state.insert( let incoming_shortstatekey = db.rooms.get_or_create_shortstatekey(
( &pdu.kind,
pdu.kind.clone(), pdu.state_key
pdu.state_key.clone().expect("join event has state key"), .as_ref()
), .expect("Pdu is a membership state event"),
pdu.event_id.clone(), &db.globals,
); )?;
state.insert(incoming_shortstatekey, pdu.event_id.clone());
if state.get(&(EventType::RoomCreate, "".to_owned())).is_none() { let create_shortstatekey = db
.rooms
.get_shortstatekey(&EventType::RoomCreate, "")?
.expect("Room exists");
if state.get(&create_shortstatekey).is_none() {
return Err(Error::BadServerResponse("State contained no create event.")); return Err(Error::BadServerResponse("State contained no create event."));
} }
db.rooms.force_state(room_id, state, &db)?; db.rooms.force_state(
room_id,
state
.into_iter()
.map(|(k, id)| db.rooms.compress_state_event(k, &id, &db.globals))
.collect::<Result<HashSet<_>>>()?,
&db,
)?;
for result in futures::future::join_all( for result in futures::future::join_all(
send_join_response send_join_response
@ -913,8 +930,8 @@ pub async fn invite_helper<'a>(
&room_version, &room_version,
&Arc::new(pdu.clone()), &Arc::new(pdu.clone()),
create_prev_event, create_prev_event,
&auth_events,
None, // TODO: third_party_invite None, // TODO: third_party_invite
|k, s| auth_events.get(&(k.clone(), s.to_owned())).map(Arc::clone),
) )
.map_err(|e| { .map_err(|e| {
error!("{:?}", e); error!("{:?}", e);

@ -348,7 +348,7 @@ async fn sync_helper(
let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?; let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?;
let state_events = current_state_ids let state_events = current_state_ids
.iter() .iter()
.map(|id| db.rooms.get_pdu(id)) .map(|(_, id)| db.rooms.get_pdu(id))
.filter_map(|r| r.ok().flatten()) .filter_map(|r| r.ok().flatten())
.collect::<Vec<_>>(); .collect::<Vec<_>>();
@ -393,18 +393,19 @@ async fn sync_helper(
let state_events = if joined_since_last_sync { let state_events = if joined_since_last_sync {
current_state_ids current_state_ids
.iter() .iter()
.map(|id| db.rooms.get_pdu(id)) .map(|(_, id)| db.rooms.get_pdu(id))
.filter_map(|r| r.ok().flatten()) .filter_map(|r| r.ok().flatten())
.collect::<Vec<_>>() .collect::<Vec<_>>()
} else { } else {
current_state_ids current_state_ids
.difference(&since_state_ids) .iter()
.filter(|id| { .filter(|(key, id)| since_state_ids.get(key) != Some(id))
.filter(|(_, id)| {
!timeline_pdus !timeline_pdus
.iter() .iter()
.any(|(_, timeline_pdu)| timeline_pdu.event_id == **id) .any(|(_, timeline_pdu)| timeline_pdu.event_id == **id)
}) })
.map(|id| db.rooms.get_pdu(id)) .map(|(_, id)| db.rooms.get_pdu(id))
.filter_map(|r| r.ok().flatten()) .filter_map(|r| r.ok().flatten())
.collect() .collect()
}; };

@ -262,8 +262,8 @@ impl Database {
userroomid_highlightcount: builder.open_tree("userroomid_highlightcount")?, userroomid_highlightcount: builder.open_tree("userroomid_highlightcount")?,
statekey_shortstatekey: builder.open_tree("statekey_shortstatekey")?, statekey_shortstatekey: builder.open_tree("statekey_shortstatekey")?,
shortstatekey_statekey: builder.open_tree("shortstatekey_statekey")?,
shortroomid_roomid: builder.open_tree("shortroomid_roomid")?,
roomid_shortroomid: builder.open_tree("roomid_shortroomid")?, roomid_shortroomid: builder.open_tree("roomid_shortroomid")?,
shortstatehash_statediff: builder.open_tree("shortstatehash_statediff")?, shortstatehash_statediff: builder.open_tree("shortstatehash_statediff")?,
@ -279,8 +279,9 @@ impl Database {
auth_chain_cache: Mutex::new(LruCache::new(100_000)), auth_chain_cache: Mutex::new(LruCache::new(100_000)),
shorteventid_cache: Mutex::new(LruCache::new(1_000_000)), shorteventid_cache: Mutex::new(LruCache::new(1_000_000)),
eventidshort_cache: Mutex::new(LruCache::new(1_000_000)), eventidshort_cache: Mutex::new(LruCache::new(1_000_000)),
shortstatekey_cache: Mutex::new(LruCache::new(1_000_000)),
statekeyshort_cache: Mutex::new(LruCache::new(1_000_000)), statekeyshort_cache: Mutex::new(LruCache::new(1_000_000)),
stateinfo_cache: Mutex::new(LruCache::new(50)), stateinfo_cache: Mutex::new(LruCache::new(1000)),
}, },
account_data: account_data::AccountData { account_data: account_data::AccountData {
roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?,
@ -579,7 +580,6 @@ impl Database {
for (room_id, _) in db.rooms.roomid_shortstatehash.iter() { for (room_id, _) in db.rooms.roomid_shortstatehash.iter() {
let shortroomid = db.globals.next_count()?.to_be_bytes(); let shortroomid = db.globals.next_count()?.to_be_bytes();
db.rooms.roomid_shortroomid.insert(&room_id, &shortroomid)?; db.rooms.roomid_shortroomid.insert(&room_id, &shortroomid)?;
db.rooms.shortroomid_roomid.insert(&shortroomid, &room_id)?;
println!("Migration: 8"); println!("Migration: 8");
} }
// Update pduids db layout // Update pduids db layout
@ -700,6 +700,19 @@ impl Database {
println!("Migration: 8 -> 9 finished"); println!("Migration: 8 -> 9 finished");
} }
if db.globals.database_version()? < 10 {
// Add other direction for shortstatekeys
for (statekey, shortstatekey) in db.rooms.statekey_shortstatekey.iter() {
db.rooms
.shortstatekey_statekey
.insert(&shortstatekey, &statekey)?;
}
db.globals.bump_database_version(10)?;
println!("Migration: 9 -> 10 finished");
}
} }
let guard = db.read().await; let guard = db.read().await;

@ -23,13 +23,13 @@ use ruma::{
uint, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, uint, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId,
}; };
use std::{ use std::{
collections::{BTreeMap, BTreeSet, HashMap, HashSet}, collections::{BTreeMap, HashMap, HashSet},
convert::{TryFrom, TryInto}, convert::{TryFrom, TryInto},
mem::size_of, mem::size_of,
sync::{Arc, Mutex}, sync::{Arc, Mutex},
}; };
use tokio::sync::MutexGuard; use tokio::sync::MutexGuard;
use tracing::{debug, error, warn}; use tracing::{error, warn};
use super::{abstraction::Tree, admin::AdminCommand, pusher}; use super::{abstraction::Tree, admin::AdminCommand, pusher};
@ -73,8 +73,8 @@ pub struct Rooms {
pub(super) shorteventid_shortstatehash: Arc<dyn Tree>, pub(super) shorteventid_shortstatehash: Arc<dyn Tree>,
/// StateKey = EventType + StateKey, ShortStateKey = Count /// StateKey = EventType + StateKey, ShortStateKey = Count
pub(super) statekey_shortstatekey: Arc<dyn Tree>, pub(super) statekey_shortstatekey: Arc<dyn Tree>,
pub(super) shortstatekey_statekey: Arc<dyn Tree>,
pub(super) shortroomid_roomid: Arc<dyn Tree>,
pub(super) roomid_shortroomid: Arc<dyn Tree>, pub(super) roomid_shortroomid: Arc<dyn Tree>,
pub(super) shorteventid_eventid: Arc<dyn Tree>, pub(super) shorteventid_eventid: Arc<dyn Tree>,
@ -95,6 +95,7 @@ pub struct Rooms {
pub(super) shorteventid_cache: Mutex<LruCache<u64, EventId>>, pub(super) shorteventid_cache: Mutex<LruCache<u64, EventId>>,
pub(super) eventidshort_cache: Mutex<LruCache<EventId, u64>>, pub(super) eventidshort_cache: Mutex<LruCache<EventId, u64>>,
pub(super) statekeyshort_cache: Mutex<LruCache<(EventType, String), u64>>, pub(super) statekeyshort_cache: Mutex<LruCache<(EventType, String), u64>>,
pub(super) shortstatekey_cache: Mutex<LruCache<u64, (EventType, String)>>,
pub(super) stateinfo_cache: Mutex< pub(super) stateinfo_cache: Mutex<
LruCache< LruCache<
u64, u64,
@ -112,7 +113,7 @@ impl Rooms {
/// Builds a StateMap by iterating over all keys that start /// Builds a StateMap by iterating over all keys that start
/// with state_hash, this gives the full state for the given state_hash. /// with state_hash, this gives the full state for the given state_hash.
#[tracing::instrument(skip(self))] #[tracing::instrument(skip(self))]
pub fn state_full_ids(&self, shortstatehash: u64) -> Result<BTreeSet<EventId>> { pub fn state_full_ids(&self, shortstatehash: u64) -> Result<BTreeMap<u64, EventId>> {
let full_state = self let full_state = self
.load_shortstatehash_info(shortstatehash)? .load_shortstatehash_info(shortstatehash)?
.pop() .pop()
@ -138,7 +139,7 @@ impl Rooms {
.into_iter() .into_iter()
.map(|compressed| self.parse_compressed_state_event(compressed)) .map(|compressed| self.parse_compressed_state_event(compressed))
.filter_map(|r| r.ok()) .filter_map(|r| r.ok())
.map(|eventid| self.get_pdu(&eventid)) .map(|(_, eventid)| self.get_pdu(&eventid))
.filter_map(|r| r.ok().flatten()) .filter_map(|r| r.ok().flatten())
.map(|pdu| { .map(|pdu| {
Ok::<_, Error>(( Ok::<_, Error>((
@ -176,7 +177,11 @@ impl Rooms {
Ok(full_state Ok(full_state
.into_iter() .into_iter()
.find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes()))
.and_then(|compressed| self.parse_compressed_state_event(compressed).ok())) .and_then(|compressed| {
self.parse_compressed_state_event(compressed)
.ok()
.map(|(_, id)| id)
}))
} }
/// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`).
@ -232,6 +237,13 @@ impl Rooms {
state_key: Option<&str>, state_key: Option<&str>,
content: &serde_json::Value, content: &serde_json::Value,
) -> Result<StateMap<Arc<PduEvent>>> { ) -> Result<StateMap<Arc<PduEvent>>> {
let shortstatehash =
if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? {
current_shortstatehash
} else {
return Ok(HashMap::new());
};
let auth_events = state_res::auth_types_for_event( let auth_events = state_res::auth_types_for_event(
kind, kind,
sender, sender,
@ -239,19 +251,30 @@ impl Rooms {
content.clone(), content.clone(),
); );
let mut events = StateMap::new(); let mut sauthevents = auth_events
for (event_type, state_key) in auth_events { .into_iter()
if let Some(pdu) = self.room_state_get(room_id, &event_type, &state_key)? { .filter_map(|(event_type, state_key)| {
events.insert((event_type, state_key), pdu); self.get_shortstatekey(&event_type, &state_key)
} else { .ok()
// This is okay because when creating a new room some events were not created yet .flatten()
debug!( .map(|s| (s, (event_type, state_key)))
"{:?}: Could not find {} {:?} in state", })
content, event_type, state_key .collect::<HashMap<_, _>>();
);
} let full_state = self
} .load_shortstatehash_info(shortstatehash)?
Ok(events) .pop()
.expect("there is always one layer")
.1;
Ok(full_state
.into_iter()
.filter_map(|compressed| self.parse_compressed_state_event(compressed).ok())
.filter_map(|(shortstatekey, event_id)| {
sauthevents.remove(&shortstatekey).map(|k| (k, event_id))
})
.filter_map(|(k, event_id)| self.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu)))
.collect())
} }
/// Generate a new StateHash. /// Generate a new StateHash.
@ -306,32 +329,19 @@ impl Rooms {
/// Force the creation of a new StateHash and insert it into the db. /// Force the creation of a new StateHash and insert it into the db.
/// ///
/// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot.
#[tracing::instrument(skip(self, new_state, db))] #[tracing::instrument(skip(self, new_state_ids_compressed, db))]
pub fn force_state( pub fn force_state(
&self, &self,
room_id: &RoomId, room_id: &RoomId,
new_state: HashMap<(EventType, String), EventId>, new_state_ids_compressed: HashSet<CompressedStateEvent>,
db: &Database, db: &Database,
) -> Result<()> { ) -> Result<()> {
let previous_shortstatehash = self.current_shortstatehash(&room_id)?; let previous_shortstatehash = self.current_shortstatehash(&room_id)?;
let new_state_ids_compressed = new_state
.iter()
.filter_map(|((event_type, state_key), event_id)| {
let shortstatekey = self
.get_or_create_shortstatekey(event_type, state_key, &db.globals)
.ok()?;
Some(
self.compress_state_event(shortstatekey, event_id, &db.globals)
.ok()?,
)
})
.collect::<HashSet<_>>();
let state_hash = self.calculate_hash( let state_hash = self.calculate_hash(
&new_state &new_state_ids_compressed
.values() .iter()
.map(|event_id| event_id.as_bytes()) .map(|bytes| &bytes[..])
.collect::<Vec<_>>(), .collect::<Vec<_>>(),
); );
@ -373,10 +383,11 @@ impl Rooms {
)?; )?;
}; };
for event_id in statediffnew for event_id in statediffnew.into_iter().filter_map(|new| {
.into_iter() self.parse_compressed_state_event(new)
.filter_map(|new| self.parse_compressed_state_event(new).ok()) .ok()
{ .map(|(_, id)| id)
}) {
if let Some(pdu) = self.get_pdu_json(&event_id)? { if let Some(pdu) = self.get_pdu_json(&event_id)? {
if pdu.get("type").and_then(|val| val.as_str()) == Some("m.room.member") { if pdu.get("type").and_then(|val| val.as_str()) == Some("m.room.member") {
if let Ok(pdu) = serde_json::from_value::<PduEvent>( if let Ok(pdu) = serde_json::from_value::<PduEvent>(
@ -504,15 +515,20 @@ impl Rooms {
Ok(v.try_into().expect("we checked the size above")) Ok(v.try_into().expect("we checked the size above"))
} }
/// Returns shortstatekey, event id
#[tracing::instrument(skip(self, compressed_event))] #[tracing::instrument(skip(self, compressed_event))]
pub fn parse_compressed_state_event( pub fn parse_compressed_state_event(
&self, &self,
compressed_event: CompressedStateEvent, compressed_event: CompressedStateEvent,
) -> Result<EventId> { ) -> Result<(u64, EventId)> {
self.get_eventid_from_short( Ok((
utils::u64_from_bytes(&compressed_event[size_of::<u64>()..]) utils::u64_from_bytes(&compressed_event[0..size_of::<u64>()])
.expect("bytes have right length"), .expect("bytes have right length"),
) self.get_eventid_from_short(
utils::u64_from_bytes(&compressed_event[size_of::<u64>()..])
.expect("bytes have right length"),
)?,
))
} }
/// Creates a new shortstatehash that often is just a diff to an already existing /// Creates a new shortstatehash that often is just a diff to an already existing
@ -805,6 +821,8 @@ impl Rooms {
let shortstatekey = globals.next_count()?; let shortstatekey = globals.next_count()?;
self.statekey_shortstatekey self.statekey_shortstatekey
.insert(&statekey, &shortstatekey.to_be_bytes())?; .insert(&statekey, &shortstatekey.to_be_bytes())?;
self.shortstatekey_statekey
.insert(&shortstatekey.to_be_bytes(), &statekey)?;
shortstatekey shortstatekey
} }
}; };
@ -833,11 +851,10 @@ impl Rooms {
.get(&shorteventid.to_be_bytes())? .get(&shorteventid.to_be_bytes())?
.ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?;
let event_id = let event_id = EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| {
EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("EventID in shorteventid_eventid is invalid unicode.")
Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") })?)
})?) .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?;
.map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid."))?;
self.shorteventid_cache self.shorteventid_cache
.lock() .lock()
@ -847,6 +864,48 @@ impl Rooms {
Ok(event_id) Ok(event_id)
} }
#[tracing::instrument(skip(self))]
pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(EventType, String)> {
if let Some(id) = self
.shortstatekey_cache
.lock()
.unwrap()
.get_mut(&shortstatekey)
{
return Ok(id.clone());
}
let bytes = self
.shortstatekey_statekey
.get(&shortstatekey.to_be_bytes())?
.ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?;
let mut parts = bytes.splitn(2, |&b| b == 0xff);
let eventtype_bytes = parts.next().expect("split always returns one entry");
let statekey_bytes = parts
.next()
.ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?;
let event_type =
EventType::try_from(utils::string_from_bytes(&eventtype_bytes).map_err(|_| {
Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.")
})?)
.map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?;
let state_key = utils::string_from_bytes(&statekey_bytes).map_err(|_| {
Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.")
})?;
let result = (event_type, state_key);
self.shortstatekey_cache
.lock()
.unwrap()
.insert(shortstatekey, result.clone());
Ok(result)
}
/// Returns the full room state. /// Returns the full room state.
#[tracing::instrument(skip(self))] #[tracing::instrument(skip(self))]
pub fn room_state_full( pub fn room_state_full(
@ -1106,6 +1165,17 @@ impl Rooms {
.collect() .collect()
} }
#[tracing::instrument(skip(self, room_id, event_ids))]
pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> {
for prev in event_ids {
let mut key = room_id.as_bytes().to_vec();
key.extend_from_slice(prev.as_bytes());
self.referencedevents.insert(&key, &[])?;
}
Ok(())
}
/// Replace the leaves of a room. /// Replace the leaves of a room.
/// ///
/// The provided `event_ids` become the new leaves, this allows a room to have multiple /// The provided `event_ids` become the new leaves, this allows a room to have multiple
@ -1202,12 +1272,7 @@ impl Rooms {
} }
// We must keep track of all events that have been referenced. // We must keep track of all events that have been referenced.
for prev in &pdu.prev_events { self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?;
let mut key = pdu.room_id().as_bytes().to_vec();
key.extend_from_slice(prev.as_bytes());
self.referencedevents.insert(&key, &[])?;
}
self.replace_pdu_leaves(&pdu.room_id, leaves)?; self.replace_pdu_leaves(&pdu.room_id, leaves)?;
let mutex_insert = Arc::clone( let mutex_insert = Arc::clone(
@ -1565,35 +1630,22 @@ impl Rooms {
/// ///
/// This adds all current state events (not including the incoming event) /// This adds all current state events (not including the incoming event)
/// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`.
#[tracing::instrument(skip(self, state, globals))] #[tracing::instrument(skip(self, state_ids_compressed, globals))]
pub fn set_event_state( pub fn set_event_state(
&self, &self,
event_id: &EventId, event_id: &EventId,
room_id: &RoomId, room_id: &RoomId,
state: &StateMap<Arc<PduEvent>>, state_ids_compressed: HashSet<CompressedStateEvent>,
globals: &super::globals::Globals, globals: &super::globals::Globals,
) -> Result<()> { ) -> Result<()> {
let shorteventid = self.get_or_create_shorteventid(&event_id, globals)?; let shorteventid = self.get_or_create_shorteventid(&event_id, globals)?;
let previous_shortstatehash = self.current_shortstatehash(&room_id)?; let previous_shortstatehash = self.current_shortstatehash(&room_id)?;
let state_ids_compressed = state
.iter()
.filter_map(|((event_type, state_key), pdu)| {
let shortstatekey = self
.get_or_create_shortstatekey(event_type, state_key, globals)
.ok()?;
Some(
self.compress_state_event(shortstatekey, &pdu.event_id, globals)
.ok()?,
)
})
.collect::<HashSet<_>>();
let state_hash = self.calculate_hash( let state_hash = self.calculate_hash(
&state &state_ids_compressed
.values() .iter()
.map(|pdu| pdu.event_id.as_bytes()) .map(|s| &s[..])
.collect::<Vec<_>>(), .collect::<Vec<_>>(),
); );
@ -1857,8 +1909,8 @@ impl Rooms {
&room_version, &room_version,
&Arc::new(pdu.clone()), &Arc::new(pdu.clone()),
create_prev_event, create_prev_event,
&auth_events,
None, // TODO: third_party_invite None, // TODO: third_party_invite
|k, s| auth_events.get(&(k.clone(), s.to_owned())).map(Arc::clone),
) )
.map_err(|e| { .map_err(|e| {
error!("{:?}", e); error!("{:?}", e);

@ -1,5 +1,5 @@
use std::{ use std::{
collections::{BTreeMap, HashMap}, collections::{BTreeMap, HashMap, HashSet},
convert::{TryFrom, TryInto}, convert::{TryFrom, TryInto},
fmt::Debug, fmt::Debug,
sync::Arc, sync::Arc,
@ -20,14 +20,17 @@ use ruma::{
appservice, appservice,
federation::{ federation::{
self, self,
transactions::edu::{Edu, ReceiptContent, ReceiptData, ReceiptMap}, transactions::edu::{
DeviceListUpdateContent, Edu, ReceiptContent, ReceiptData, ReceiptMap,
},
}, },
OutgoingRequest, OutgoingRequest,
}, },
device_id,
events::{push_rules, AnySyncEphemeralRoomEvent, EventType}, events::{push_rules, AnySyncEphemeralRoomEvent, EventType},
push, push,
receipt::ReceiptType, receipt::ReceiptType,
MilliSecondsSinceUnixEpoch, ServerName, UInt, UserId, uint, MilliSecondsSinceUnixEpoch, ServerName, UInt, UserId,
}; };
use tokio::{ use tokio::{
select, select,
@ -317,8 +320,19 @@ impl Sending {
})?; })?;
let mut events = Vec::new(); let mut events = Vec::new();
let mut max_edu_count = since; let mut max_edu_count = since;
let mut device_list_changes = HashSet::new();
'outer: for room_id in db.rooms.server_rooms(server) { 'outer: for room_id in db.rooms.server_rooms(server) {
let room_id = room_id?; let room_id = room_id?;
// Look for device list updates in this room
device_list_changes.extend(
db.users
.keys_changed(&room_id.to_string(), since, None)
.filter_map(|r| r.ok())
.filter(|user_id| user_id.server_name() == db.globals.server_name()),
);
// Look for read receipts in this room
for r in db.rooms.edus.readreceipts_since(&room_id, since) { for r in db.rooms.edus.readreceipts_since(&room_id, since) {
let (user_id, count, read_receipt) = r?; let (user_id, count, read_receipt) = r?;
@ -378,6 +392,22 @@ impl Sending {
} }
} }
for user_id in device_list_changes {
// Empty prev id forces synapse to resync: https://github.com/matrix-org/synapse/blob/98aec1cc9da2bd6b8e34ffb282c85abf9b8b42ca/synapse/handlers/device.py#L767
// Because synapse resyncs, we can just insert dummy data
let edu = Edu::DeviceListUpdate(DeviceListUpdateContent {
user_id,
device_id: device_id!("dummy"),
device_display_name: "Dummy".to_owned(),
stream_id: uint!(1),
prev_id: Vec::new(),
deleted: None,
keys: None,
});
events.push(serde_json::to_vec(&edu).expect("json can be serialized"));
}
Ok((events, max_edu_count)) Ok((events, max_edu_count))
} }

@ -673,7 +673,7 @@ impl Users {
} }
#[tracing::instrument(skip(self, user_id, rooms, globals))] #[tracing::instrument(skip(self, user_id, rooms, globals))]
fn mark_device_key_update( pub fn mark_device_key_update(
&self, &self,
user_id: &UserId, user_id: &UserId,
rooms: &super::rooms::Rooms, rooms: &super::rooms::Rooms,

@ -1,6 +1,6 @@
use crate::{ use crate::{
client_server::{self, claim_keys_helper, get_keys_helper}, client_server::{self, claim_keys_helper, get_keys_helper},
database::DatabaseGuard, database::{rooms::CompressedStateEvent, DatabaseGuard},
utils, ConduitResult, Database, Error, PduEvent, Result, Ruma, utils, ConduitResult, Database, Error, PduEvent, Result, Ruma,
}; };
use get_profile_information::v1::ProfileField; use get_profile_information::v1::ProfileField;
@ -27,7 +27,7 @@ use ruma::{
}, },
query::{get_profile_information, get_room_information}, query::{get_profile_information, get_room_information},
transactions::{ transactions::{
edu::{DirectDeviceContent, Edu}, edu::{DeviceListUpdateContent, DirectDeviceContent, Edu},
send_transaction_message, send_transaction_message,
}, },
}, },
@ -51,7 +51,7 @@ use ruma::{
ServerSigningKeyId, UserId, ServerSigningKeyId, UserId,
}; };
use std::{ use std::{
collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, collections::{btree_map, hash_map, BTreeMap, HashMap, HashSet},
convert::{TryFrom, TryInto}, convert::{TryFrom, TryInto},
fmt::Debug, fmt::Debug,
future::Future, future::Future,
@ -747,8 +747,9 @@ pub async fn send_transaction_message_route(
.typing_remove(&typing.user_id, &typing.room_id, &db.globals)?; .typing_remove(&typing.user_id, &typing.room_id, &db.globals)?;
} }
} }
Edu::DeviceListUpdate(_) => { Edu::DeviceListUpdate(DeviceListUpdateContent { user_id, .. }) => {
// TODO: Instead of worrying about stream ids we can just fetch all devices again db.users
.mark_device_key_update(&user_id, &db.rooms, &db.globals)?;
} }
Edu::DirectToDevice(DirectDeviceContent { Edu::DirectToDevice(DirectDeviceContent {
sender, sender,
@ -1079,7 +1080,7 @@ fn handle_outlier_pdu<'a>(
// 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events // 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events
// 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events"
// EDIT: Step 5 is not applied anymore because it failed too often // EDIT: Step 5 is not applied anymore because it failed too often
debug!("Fetching auth events for {}", incoming_pdu.event_id); warn!("Fetching auth events for {}", incoming_pdu.event_id);
fetch_and_handle_outliers( fetch_and_handle_outliers(
db, db,
origin, origin,
@ -1114,10 +1115,10 @@ fn handle_outlier_pdu<'a>(
.clone() .clone()
.expect("all auth events have state keys"), .expect("all auth events have state keys"),
)) { )) {
Entry::Vacant(v) => { hash_map::Entry::Vacant(v) => {
v.insert(auth_event.clone()); v.insert(auth_event.clone());
} }
Entry::Occupied(_) => { hash_map::Entry::Occupied(_) => {
return Err( return Err(
"Auth event's type and state_key combination exists multiple times." "Auth event's type and state_key combination exists multiple times."
.to_owned(), .to_owned(),
@ -1153,8 +1154,8 @@ fn handle_outlier_pdu<'a>(
&room_version, &room_version,
&incoming_pdu, &incoming_pdu,
previous_create.clone(), previous_create.clone(),
&auth_events,
None, // TODO: third party invite None, // TODO: third party invite
|k, s| auth_events.get(&(k.clone(), s.to_owned())).map(Arc::clone),
) )
.map_err(|_e| "Auth check failed".to_string())? .map_err(|_e| "Auth check failed".to_string())?
{ {
@ -1205,38 +1206,21 @@ async fn upgrade_outlier_to_timeline_pdu(
let state = let state =
prev_event_sstatehash.map(|shortstatehash| db.rooms.state_full_ids(shortstatehash)); prev_event_sstatehash.map(|shortstatehash| db.rooms.state_full_ids(shortstatehash));
if let Some(Ok(state)) = state { if let Some(Ok(mut state)) = state {
warn!("Using cached state"); warn!("Using cached state");
let mut state = fetch_and_handle_outliers(
db,
origin,
&state.into_iter().collect::<Vec<_>>(),
&create_event,
&room_id,
pub_key_map,
)
.await
.into_iter()
.map(|(pdu, _)| {
(
(
pdu.kind.clone(),
pdu.state_key
.clone()
.expect("events from state_full_ids are state events"),
),
pdu,
)
})
.collect::<HashMap<_, _>>();
let prev_pdu = let prev_pdu =
db.rooms.get_pdu(prev_event).ok().flatten().ok_or_else(|| { db.rooms.get_pdu(prev_event).ok().flatten().ok_or_else(|| {
"Could not find prev event, but we know the state.".to_owned() "Could not find prev event, but we know the state.".to_owned()
})?; })?;
if let Some(state_key) = &prev_pdu.state_key { if let Some(state_key) = &prev_pdu.state_key {
state.insert((prev_pdu.kind.clone(), state_key.clone()), prev_pdu); let shortstatekey = db
.rooms
.get_or_create_shortstatekey(&prev_pdu.kind, state_key, &db.globals)
.map_err(|_| "Failed to create shortstatekey.".to_owned())?;
state.insert(shortstatekey, prev_event.clone());
// Now it's the state after the pdu
} }
state_at_incoming_event = Some(state); state_at_incoming_event = Some(state);
@ -1261,7 +1245,7 @@ async fn upgrade_outlier_to_timeline_pdu(
.await .await
{ {
Ok(res) => { Ok(res) => {
debug!("Fetching state events at event."); warn!("Fetching state events at event.");
let state_vec = fetch_and_handle_outliers( let state_vec = fetch_and_handle_outliers(
&db, &db,
origin, origin,
@ -1272,18 +1256,23 @@ async fn upgrade_outlier_to_timeline_pdu(
) )
.await; .await;
let mut state = HashMap::new(); let mut state = BTreeMap::new();
for (pdu, _) in state_vec { for (pdu, _) in state_vec {
match state.entry(( let state_key = pdu
pdu.kind.clone(), .state_key
pdu.state_key .clone()
.clone() .ok_or_else(|| "Found non-state pdu in state events.".to_owned())?;
.ok_or_else(|| "Found non-state pdu in state events.".to_owned())?,
)) { let shortstatekey = db
Entry::Vacant(v) => { .rooms
v.insert(pdu); .get_or_create_shortstatekey(&pdu.kind, &state_key, &db.globals)
.map_err(|_| "Failed to create shortstatekey.".to_owned())?;
match state.entry(shortstatekey) {
btree_map::Entry::Vacant(v) => {
v.insert(pdu.event_id.clone());
} }
Entry::Occupied(_) => return Err( btree_map::Entry::Occupied(_) => return Err(
"State event's type and state_key combination exists multiple times." "State event's type and state_key combination exists multiple times."
.to_owned(), .to_owned(),
), ),
@ -1291,28 +1280,20 @@ async fn upgrade_outlier_to_timeline_pdu(
} }
// The original create event must still be in the state // The original create event must still be in the state
if state let create_shortstatekey = db
.get(&(EventType::RoomCreate, "".to_owned())) .rooms
.map(|a| a.as_ref()) .get_shortstatekey(&EventType::RoomCreate, "")
!= Some(&create_event) .map_err(|_| "Failed to talk to db.")?
{ .expect("Room exists");
if state.get(&create_shortstatekey) != Some(&create_event.event_id) {
return Err("Incoming event refers to wrong create event.".to_owned()); return Err("Incoming event refers to wrong create event.".to_owned());
} }
debug!("Fetching auth chain events at event.");
fetch_and_handle_outliers(
&db,
origin,
&res.auth_chain_ids,
&create_event,
&room_id,
pub_key_map,
)
.await;
state_at_incoming_event = Some(state); state_at_incoming_event = Some(state);
} }
Err(_) => { Err(e) => {
warn!("Fetching state for event failed: {}", e);
return Err("Fetching state for event failed".into()); return Err("Fetching state for event failed".into());
} }
}; };
@ -1350,8 +1331,15 @@ async fn upgrade_outlier_to_timeline_pdu(
&room_version, &room_version,
&incoming_pdu, &incoming_pdu,
previous_create.clone(), previous_create.clone(),
&state_at_incoming_event,
None, // TODO: third party invite None, // TODO: third party invite
|k, s| {
db.rooms
.get_shortstatekey(&k, &s)
.ok()
.flatten()
.and_then(|shortstatekey| state_at_incoming_event.get(&shortstatekey))
.and_then(|event_id| db.rooms.get_pdu(&event_id).ok().flatten())
},
) )
.map_err(|_e| "Auth check failed.".to_owned())? .map_err(|_e| "Auth check failed.".to_owned())?
{ {
@ -1388,28 +1376,28 @@ async fn upgrade_outlier_to_timeline_pdu(
// Only keep those extremities were not referenced yet // Only keep those extremities were not referenced yet
extremities.retain(|id| !matches!(db.rooms.is_event_referenced(&room_id, id), Ok(true))); extremities.retain(|id| !matches!(db.rooms.is_event_referenced(&room_id, id), Ok(true)));
let current_statehash = db let current_sstatehash = db
.rooms .rooms
.current_shortstatehash(&room_id) .current_shortstatehash(&room_id)
.map_err(|_| "Failed to load current state hash.".to_owned())? .map_err(|_| "Failed to load current state hash.".to_owned())?
.expect("every room has state"); .expect("every room has state");
let current_state = db let current_state_ids = db
.rooms .rooms
.state_full(current_statehash) .state_full_ids(current_sstatehash)
.map_err(|_| "Failed to load room state.")?; .map_err(|_| "Failed to load room state.")?;
if incoming_pdu.state_key.is_some() { if incoming_pdu.state_key.is_some() {
let mut extremity_statehashes = Vec::new(); let mut extremity_sstatehashes = HashMap::new();
for id in &extremities { for id in dbg!(&extremities) {
match db match db
.rooms .rooms
.get_pdu(&id) .get_pdu(&id)
.map_err(|_| "Failed to ask db for pdu.".to_owned())? .map_err(|_| "Failed to ask db for pdu.".to_owned())?
{ {
Some(leaf_pdu) => { Some(leaf_pdu) => {
extremity_statehashes.push(( extremity_sstatehashes.insert(
db.rooms db.rooms
.pdu_shortstatehash(&leaf_pdu.event_id) .pdu_shortstatehash(&leaf_pdu.event_id)
.map_err(|_| "Failed to ask db for pdu state hash.".to_owned())? .map_err(|_| "Failed to ask db for pdu state hash.".to_owned())?
@ -1420,8 +1408,8 @@ async fn upgrade_outlier_to_timeline_pdu(
); );
"Found pdu with no statehash in db.".to_owned() "Found pdu with no statehash in db.".to_owned()
})?, })?,
Some(leaf_pdu), leaf_pdu,
)); );
} }
_ => { _ => {
error!("Missing state snapshot for {:?}", id); error!("Missing state snapshot for {:?}", id);
@ -1430,27 +1418,30 @@ async fn upgrade_outlier_to_timeline_pdu(
} }
} }
let mut fork_states = Vec::new();
// 12. Ensure that the state is derived from the previous current state (i.e. we calculated // 12. Ensure that the state is derived from the previous current state (i.e. we calculated
// by doing state res where one of the inputs was a previously trusted set of state, // by doing state res where one of the inputs was a previously trusted set of state,
// don't just trust a set of state we got from a remote). // don't just trust a set of state we got from a remote).
// We do this by adding the current state to the list of fork states // We do this by adding the current state to the list of fork states
extremity_sstatehashes.remove(&current_sstatehash);
fork_states.push(current_state_ids);
dbg!(&extremity_sstatehashes);
extremity_statehashes.push((current_statehash.clone(), None)); for (sstatehash, leaf_pdu) in extremity_sstatehashes {
let mut fork_states = Vec::new();
for (statehash, leaf_pdu) in extremity_statehashes {
let mut leaf_state = db let mut leaf_state = db
.rooms .rooms
.state_full(statehash) .state_full_ids(sstatehash)
.map_err(|_| "Failed to ask db for room state.".to_owned())?; .map_err(|_| "Failed to ask db for room state.".to_owned())?;
if let Some(leaf_pdu) = leaf_pdu { if let Some(state_key) = &leaf_pdu.state_key {
if let Some(state_key) = &leaf_pdu.state_key { let shortstatekey = db
// Now it's the state after .rooms
let key = (leaf_pdu.kind.clone(), state_key.clone()); .get_or_create_shortstatekey(&leaf_pdu.kind, state_key, &db.globals)
leaf_state.insert(key, leaf_pdu); .map_err(|_| "Failed to create shortstatekey.".to_owned())?;
} leaf_state.insert(shortstatekey, leaf_pdu.event_id.clone());
// Now it's the state after the pdu
} }
fork_states.push(leaf_state); fork_states.push(leaf_state);
@ -1459,10 +1450,12 @@ async fn upgrade_outlier_to_timeline_pdu(
// We also add state after incoming event to the fork states // We also add state after incoming event to the fork states
let mut state_after = state_at_incoming_event.clone(); let mut state_after = state_at_incoming_event.clone();
if let Some(state_key) = &incoming_pdu.state_key { if let Some(state_key) = &incoming_pdu.state_key {
state_after.insert( let shortstatekey = db
(incoming_pdu.kind.clone(), state_key.clone()), .rooms
incoming_pdu.clone(), .get_or_create_shortstatekey(&incoming_pdu.kind, state_key, &db.globals)
); .map_err(|_| "Failed to create shortstatekey.".to_owned())?;
state_after.insert(shortstatekey, incoming_pdu.event_id.clone());
} }
fork_states.push(state_after.clone()); fork_states.push(state_after.clone());
@ -1475,8 +1468,12 @@ async fn upgrade_outlier_to_timeline_pdu(
// always included) // always included)
fork_states[0] fork_states[0]
.iter() .iter()
.map(|(k, pdu)| (k.clone(), pdu.event_id.clone())) .map(|(k, id)| {
.collect() db.rooms
.compress_state_event(*k, &id, &db.globals)
.map_err(|_| "Failed to compress_state_event.".to_owned())
})
.collect::<StdResult<_, String>>()?
} else { } else {
// We do need to force an update to this room's state // We do need to force an update to this room's state
update_state = true; update_state = true;
@ -1485,10 +1482,11 @@ async fn upgrade_outlier_to_timeline_pdu(
.into_iter() .into_iter()
.map(|map| { .map(|map| {
map.into_iter() map.into_iter()
.map(|(k, v)| (k, v.event_id.clone())) .map(|(k, id)| (db.rooms.get_statekey_from_short(k).map(|k| (k, id))))
.collect::<StateMap<_>>() .collect::<Result<StateMap<_>>>()
}) })
.collect::<Vec<_>>(); .collect::<Result<Vec<_>>>()
.map_err(|_| "Failed to get_statekey_from_short.".to_owned())?;
let mut auth_chain_sets = Vec::new(); let mut auth_chain_sets = Vec::new();
for state in fork_states { for state in fork_states {
@ -1519,6 +1517,17 @@ async fn upgrade_outlier_to_timeline_pdu(
}; };
state state
.into_iter()
.map(|((event_type, state_key), event_id)| {
let shortstatekey = db
.rooms
.get_or_create_shortstatekey(&event_type, &state_key, &db.globals)
.map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?;
db.rooms
.compress_state_event(shortstatekey, &event_id, &db.globals)
.map_err(|_| "Failed to compress state event".to_owned())
})
.collect::<StdResult<_, String>>()?
}; };
// Set the new room state to the resolved state // Set the new room state to the resolved state
@ -1534,38 +1543,55 @@ async fn upgrade_outlier_to_timeline_pdu(
debug!("starting soft fail auth check"); debug!("starting soft fail auth check");
// 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it
let auth_events = db
.rooms
.get_auth_events(
&room_id,
&incoming_pdu.kind,
&incoming_pdu.sender,
incoming_pdu.state_key.as_deref(),
&incoming_pdu.content,
)
.map_err(|_| "Failed to get_auth_events.".to_owned())?;
let soft_fail = !state_res::event_auth::auth_check( let soft_fail = !state_res::event_auth::auth_check(
&room_version, &room_version,
&incoming_pdu, &incoming_pdu,
previous_create, previous_create,
&current_state,
None, None,
|k, s| auth_events.get(&(k.clone(), s.to_owned())).map(Arc::clone),
) )
.map_err(|_e| "Auth check failed.".to_owned())?; .map_err(|_e| "Auth check failed.".to_owned())?;
let mut pdu_id = None; // Now that the event has passed all auth it is added into the timeline.
if !soft_fail { // We use the `state_at_event` instead of `state_after` so we accurately
// Now that the event has passed all auth it is added into the timeline. // represent the state for this event.
// We use the `state_at_event` instead of `state_after` so we accurately
// represent the state for this event. let state_ids_compressed = state_at_incoming_event
pdu_id = Some( .iter()
append_incoming_pdu( .map(|(shortstatekey, id)| {
&db, db.rooms
&incoming_pdu, .compress_state_event(*shortstatekey, &id, &db.globals)
val, .map_err(|_| "Failed to compress_state_event".to_owned())
extremities, })
&state_at_incoming_event, .collect::<StdResult<_, String>>()?;
&state_lock,
) let pdu_id = append_incoming_pdu(
.map_err(|_| "Failed to add pdu to db.".to_owned())?, &db,
); &incoming_pdu,
debug!("Appended incoming pdu."); val,
} else { extremities,
warn!("Event was soft failed: {:?}", incoming_pdu); state_ids_compressed,
} soft_fail,
&state_lock,
)
.map_err(|_| "Failed to add pdu to db.".to_owned())?;
debug!("Appended incoming pdu.");
if soft_fail { if soft_fail {
// Soft fail, we leave the event as an outlier but don't add it to the timeline // Soft fail, we keep the event as an outlier but don't add it to the timeline
warn!("Event was soft failed: {:?}", incoming_pdu);
return Err("Event has been soft failed".into()); return Err("Event has been soft failed".into());
} }
@ -1594,15 +1620,14 @@ pub(crate) fn fetch_and_handle_outliers<'a>(
) -> AsyncRecursiveType<'a, Vec<(Arc<PduEvent>, Option<BTreeMap<String, CanonicalJsonValue>>)>> { ) -> AsyncRecursiveType<'a, Vec<(Arc<PduEvent>, Option<BTreeMap<String, CanonicalJsonValue>>)>> {
Box::pin(async move { Box::pin(async move {
let back_off = |id| match db.globals.bad_event_ratelimiter.write().unwrap().entry(id) { let back_off = |id| match db.globals.bad_event_ratelimiter.write().unwrap().entry(id) {
Entry::Vacant(e) => { hash_map::Entry::Vacant(e) => {
e.insert((Instant::now(), 1)); e.insert((Instant::now(), 1));
} }
Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
}; };
let mut pdus = vec![]; let mut pdus = vec![];
for id in events { for id in events {
info!("loading {}", id);
if let Some((time, tries)) = db.globals.bad_event_ratelimiter.read().unwrap().get(&id) { if let Some((time, tries)) = db.globals.bad_event_ratelimiter.read().unwrap().get(&id) {
// Exponential backoff // Exponential backoff
let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries);
@ -1627,7 +1652,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>(
} }
Ok(None) => { Ok(None) => {
// c. Ask origin server over federation // c. Ask origin server over federation
info!("Fetching {} over federation.", id); warn!("Fetching {} over federation.", id);
match db match db
.sending .sending
.send_federation_request( .send_federation_request(
@ -1638,7 +1663,7 @@ pub(crate) fn fetch_and_handle_outliers<'a>(
.await .await
{ {
Ok(res) => { Ok(res) => {
info!("Got {} over federation", id); warn!("Got {} over federation", id);
let (event_id, value) = let (event_id, value) =
match crate::pdu::gen_event_id_canonical_json(&res.pdu) { match crate::pdu::gen_event_id_canonical_json(&res.pdu) {
Ok(t) => t, Ok(t) => t,
@ -1727,10 +1752,10 @@ pub(crate) async fn fetch_signing_keys(
.unwrap() .unwrap()
.entry(id) .entry(id)
{ {
Entry::Vacant(e) => { hash_map::Entry::Vacant(e) => {
e.insert((Instant::now(), 1)); e.insert((Instant::now(), 1));
} }
Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1),
}; };
if let Some((time, tries)) = db if let Some((time, tries)) = db
@ -1847,19 +1872,34 @@ pub(crate) async fn fetch_signing_keys(
/// Append the incoming event setting the state snapshot to the state from the /// Append the incoming event setting the state snapshot to the state from the
/// server that sent the event. /// server that sent the event.
#[tracing::instrument(skip(db, pdu, pdu_json, new_room_leaves, state, _mutex_lock))] #[tracing::instrument(skip(db, pdu, pdu_json, new_room_leaves, state_ids_compressed, _mutex_lock))]
fn append_incoming_pdu( fn append_incoming_pdu(
db: &Database, db: &Database,
pdu: &PduEvent, pdu: &PduEvent,
pdu_json: CanonicalJsonObject, pdu_json: CanonicalJsonObject,
new_room_leaves: HashSet<EventId>, new_room_leaves: HashSet<EventId>,
state: &StateMap<Arc<PduEvent>>, state_ids_compressed: HashSet<CompressedStateEvent>,
soft_fail: bool,
_mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex
) -> Result<Vec<u8>> { ) -> Result<Option<Vec<u8>>> {
// We append to state before appending the pdu, so we don't have a moment in time with the // We append to state before appending the pdu, so we don't have a moment in time with the
// pdu without it's state. This is okay because append_pdu can't fail. // pdu without it's state. This is okay because append_pdu can't fail.
db.rooms db.rooms.set_event_state(
.set_event_state(&pdu.event_id, &pdu.room_id, state, &db.globals)?; &pdu.event_id,
&pdu.room_id,
state_ids_compressed,
&db.globals,
)?;
if soft_fail {
db.rooms
.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?;
db.rooms.replace_pdu_leaves(
&pdu.room_id,
&new_room_leaves.into_iter().collect::<Vec<_>>(),
)?;
return Ok(None);
}
let pdu_id = db.rooms.append_pdu( let pdu_id = db.rooms.append_pdu(
pdu, pdu,
@ -1926,7 +1966,7 @@ fn append_incoming_pdu(
} }
} }
Ok(pdu_id) Ok(Some(pdu_id))
} }
#[tracing::instrument(skip(starting_events, db))] #[tracing::instrument(skip(starting_events, db))]
@ -2120,7 +2160,7 @@ pub fn get_room_state_route(
.rooms .rooms
.state_full_ids(shortstatehash)? .state_full_ids(shortstatehash)?
.into_iter() .into_iter()
.map(|id| { .map(|(_, id)| {
PduEvent::convert_to_outgoing_federation_event( PduEvent::convert_to_outgoing_federation_event(
db.rooms.get_pdu_json(&id).unwrap().unwrap(), db.rooms.get_pdu_json(&id).unwrap().unwrap(),
) )
@ -2168,6 +2208,7 @@ pub fn get_room_state_ids_route(
.rooms .rooms
.state_full_ids(shortstatehash)? .state_full_ids(shortstatehash)?
.into_iter() .into_iter()
.map(|(_, id)| id)
.collect(); .collect();
let auth_chain_ids = get_auth_chain(vec![body.event_id.clone()], &db)?; let auth_chain_ids = get_auth_chain(vec![body.event_id.clone()], &db)?;
@ -2314,8 +2355,8 @@ pub fn create_join_event_template_route(
&room_version, &room_version,
&Arc::new(pdu.clone()), &Arc::new(pdu.clone()),
create_prev_event, create_prev_event,
&auth_events,
None, // TODO: third_party_invite None, // TODO: third_party_invite
|k, s| auth_events.get(&(k.clone(), s.to_owned())).map(Arc::clone),
) )
.map_err(|e| { .map_err(|e| {
error!("{:?}", e); error!("{:?}", e);
@ -2418,7 +2459,7 @@ async fn create_join_event(
drop(mutex_lock); drop(mutex_lock);
let state_ids = db.rooms.state_full_ids(shortstatehash)?; let state_ids = db.rooms.state_full_ids(shortstatehash)?;
let auth_chain_ids = get_auth_chain(state_ids.iter().cloned().collect(), &db)?; let auth_chain_ids = get_auth_chain(state_ids.iter().map(|(_, id)| id.clone()).collect(), &db)?;
for server in db for server in db
.rooms .rooms
@ -2438,7 +2479,7 @@ async fn create_join_event(
.collect(), .collect(),
state: state_ids state: state_ids
.iter() .iter()
.filter_map(|id| db.rooms.get_pdu_json(&id).ok().flatten()) .filter_map(|(_, id)| db.rooms.get_pdu_json(&id).ok().flatten())
.map(PduEvent::convert_to_outgoing_federation_event) .map(PduEvent::convert_to_outgoing_federation_event)
.collect(), .collect(),
}) })
@ -2455,10 +2496,7 @@ pub async fn create_join_event_v1_route(
) -> ConduitResult<create_join_event::v1::Response> { ) -> ConduitResult<create_join_event::v1::Response> {
let room_state = create_join_event(&db, &body.room_id, &body.pdu).await?; let room_state = create_join_event(&db, &body.room_id, &body.pdu).await?;
Ok(create_join_event::v1::Response { Ok(create_join_event::v1::Response { room_state }.into())
room_state: room_state,
}
.into())
} }
#[cfg_attr( #[cfg_attr(
@ -2472,10 +2510,7 @@ pub async fn create_join_event_v2_route(
) -> ConduitResult<create_join_event::v2::Response> { ) -> ConduitResult<create_join_event::v2::Response> {
let room_state = create_join_event(&db, &body.room_id, &body.pdu).await?; let room_state = create_join_event(&db, &body.room_id, &body.pdu).await?;
Ok(create_join_event::v2::Response { Ok(create_join_event::v2::Response { room_state }.into())
room_state: room_state,
}
.into())
} }
#[cfg_attr( #[cfg_attr(

Loading…
Cancel
Save