From a8dc757edb65a75de38f1f9e287743686979da7a Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Fri, 9 Sep 2022 19:17:29 +0200 Subject: [PATCH 01/53] feat: Add max prev events config option, allowing adjusting limit for prev_events fetching --- src/config/mod.rs | 6 ++++++ src/service/globals/mod.rs | 4 ++++ src/service/rooms/event_handler/mod.rs | 2 +- 3 files changed, 11 insertions(+), 1 deletion(-) diff --git a/src/config/mod.rs b/src/config/mod.rs index 6b862bb6..d8ba1840 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -40,6 +40,8 @@ pub struct Config { pub max_request_size: u32, #[serde(default = "default_max_concurrent_requests")] pub max_concurrent_requests: u16, + #[serde(default = "default_max_fetch_prev_events")] + pub max_fetch_prev_events: u16, #[serde(default = "false_fn")] pub allow_registration: bool, #[serde(default = "true_fn")] @@ -249,6 +251,10 @@ fn default_max_concurrent_requests() -> u16 { 100 } +fn default_max_fetch_prev_events() -> u16 { + 100_u16 +} + fn default_log() -> String { "warn,state_res=warn,_=off,sled=off".to_owned() } diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index affc0516..979a922a 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -222,6 +222,10 @@ impl Service { self.config.max_request_size } + pub fn max_fetch_prev_events(&self) -> u16 { + self.config.max_fetch_prev_events + } + pub fn allow_registration(&self) -> bool { self.config.allow_registration } diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 3b41e86f..18ba3b89 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -1209,7 +1209,7 @@ impl Service { .await .pop() { - if amount > 100 { + if amount > services().globals.max_fetch_prev_events() { // Max limit reached warn!("Max prev event limit reached!"); graph.insert(prev_event_id.clone(), HashSet::new()); From d51f8a6c550bd0102b1d6d6de1de90f67f1a46fd Mon Sep 17 00:00:00 2001 From: Andrei Vasiliu Date: Sat, 15 Jan 2022 19:13:17 +0200 Subject: [PATCH 02/53] feat: Add federation backfill and event visibility Co-authored-by: Nyaaori <+@nyaaori.cat> --- src/api/server_server.rs | 106 ++++++++++++++++-- .../key_value/rooms/state_accessor.rs | 37 +++++- src/main.rs | 1 + src/service/rooms/state_accessor/data.rs | 5 +- src/service/rooms/state_accessor/mod.rs | 12 +- 5 files changed, 150 insertions(+), 11 deletions(-) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index b7f88078..63a3a57e 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -12,6 +12,7 @@ use ruma::{ client::error::{Error as RumaError, ErrorKind}, federation::{ authorization::get_event_authorization, + backfill::get_backfill, device::get_devices::{self, v1::UserDevice}, directory::{get_public_rooms, get_public_rooms_filtered}, discovery::{get_server_keys, get_server_version, ServerSigningKeys, VerifyKey}, @@ -43,11 +44,11 @@ use ruma::{ serde::{Base64, JsonObject, Raw}, to_device::DeviceIdOrAllDevices, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, - OwnedServerName, OwnedServerSigningKeyId, OwnedUserId, RoomId, ServerName, + OwnedServerName, OwnedServerSigningKeyId, OwnedUserId, RoomId, ServerName, UInt, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ - collections::BTreeMap, + collections::{BTreeMap, HashSet}, fmt::Debug, mem, net::{IpAddr, SocketAddr}, @@ -952,6 +953,53 @@ pub async fn get_event_route( }) } +/// # 
`GET /_matrix/federation/v1/backfill/<room_id>`
+///
+/// Retrieves events from before the sender joined the room, if the room's
+/// history visibility allows.
+pub async fn get_backfill_route(
+    body: Ruma<get_backfill::v1::Request>,
+) -> Result<get_backfill::v1::Response> {
+    if !services().globals.allow_federation() {
+        return Err(Error::bad_config("Federation is disabled."));
+    }
+
+    let sender_servername = body
+        .sender_servername
+        .as_ref()
+        .expect("server is authenticated");
+
+    info!("Got backfill request from: {}", sender_servername);
+
+    if !services()
+        .rooms
+        .state_cache
+        .server_in_room(sender_servername, &body.room_id)?
+    {
+        return Err(Error::BadRequest(
+            ErrorKind::Forbidden,
+            "Server is not in room.",
+        ));
+    }
+
+    let origin = services().globals.server_name().to_owned();
+    let earliest_events = &[];
+
+    let events = get_missing_events(
+        sender_servername,
+        &body.room_id,
+        earliest_events,
+        &body.v,
+        body.limit,
+    )?;
+
+    Ok(get_backfill::v1::Response {
+        origin,
+        origin_server_ts: MilliSecondsSinceUnixEpoch::now(),
+        pdus: events,
+    })
+}
+
 /// # `POST /_matrix/federation/v1/get_missing_events/{roomId}`
 ///
 /// Retrieves events that the sender is missing.
@@ -983,11 +1031,43 @@ pub async fn get_missing_events_route(
         .event_handler
         .acl_check(sender_servername, &body.room_id)?;
 
-    let mut queued_events = body.latest_events.clone();
+    let events = get_missing_events(
+        sender_servername,
+        &body.room_id,
+        &body.earliest_events,
+        &body.latest_events,
+        body.limit,
+    )?;
+
+    Ok(get_missing_events::v1::Response { events })
+}
+
+// Recursively fetch events starting from `latest_events`, going backwards
+// through each event's `prev_events` until reaching the `earliest_events`.
+//
+// Used by the federation /backfill and /get_missing_events routes.
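+//
+// A rough worked example, assuming a linear chain E1 <- E2 <- E3 <- E4
+// (arrows point at prev_events): latest_events = [E4], earliest_events = [E2]
+// and limit = 10 return [E4, E3]. The walk stops at E2 because it is in
+// `stop_at_events`, and an event already sent is skipped if it reappears
+// through some other event's prev_events.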
+fn get_missing_events(
+    sender_servername: &ServerName,
+    room_id: &RoomId,
+    earliest_events: &[OwnedEventId],
+    latest_events: &Vec<OwnedEventId>,
+    limit: UInt,
+) -> Result<Vec<Box<RawJsonValue>>> {
+    let limit = u64::from(limit) as usize;
+
+    let mut queued_events = latest_events.clone();
     let mut events = Vec::new();
+
+    let mut stop_at_events = HashSet::with_capacity(limit);
+    stop_at_events.extend(earliest_events.iter().cloned());
 
     let mut i = 0;
-    while i < queued_events.len() && events.len() < u64::from(body.limit) as usize {
+    while i < queued_events.len() && events.len() < limit {
+        if stop_at_events.contains(&queued_events[i]) {
+            i += 1;
+            continue;
+        }
+
         if let Some(pdu) = services().rooms.timeline.get_pdu_json(&queued_events[i])? {
             let room_id_str = pdu
                 .get("room_id")
                 .and_then(|val| val.as_str())
                 .ok_or_else(|| Error::bad_database("Invalid event in database"))?;
 
             let event_room_id = <&RoomId>::try_from(room_id_str)
                 .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?;
 
-            if event_room_id != body.room_id {
+            if event_room_id != room_id {
                 warn!(
                     "Evil event detected: Event {} found while searching in room {}",
-                    queued_events[i], body.room_id
+                    queued_events[i], room_id
                 );
                 return Err(Error::BadRequest(
                     ErrorKind::InvalidParam,
                     "Evil event detected",
                 ));
             }
 
-            if body.earliest_events.contains(&queued_events[i]) {
+            let event_is_visible = services()
+                .rooms
+                .state_accessor
+                .server_can_see_event(sender_servername, &queued_events[i])?;
+
+            if !event_is_visible {
                 i += 1;
                 continue;
             }
+
+            // Don't send this event again if it comes through some other
+            // event's prev_events.
+            stop_at_events.insert(queued_events[i].clone());
+
             queued_events.extend_from_slice(
                 &serde_json::from_value::<Vec<OwnedEventId>>(
                     serde_json::to_value(pdu.get("prev_events").cloned().ok_or_else(|| {
                         Error::bad_database("Event in db has no prev_events field.")
                     })?)
                     .expect("canonical json is valid json value"),
                 )
                 .map_err(|_| Error::bad_database("Invalid prev_events content in pdu in db."))?,
             );
             events.push(serde_json::to_string(&pdu).expect("JSON is canonical"));
         }
         i += 1;
     }
 
-    Ok(get_missing_events::v1::Response { events })
+    Ok(events)
 }
 
 /// # `GET /_matrix/federation/v1/event_auth/{roomId}/{eventId}`
diff --git a/src/database/key_value/rooms/state_accessor.rs b/src/database/key_value/rooms/state_accessor.rs
index 70e59acb..8bc94982 100644
--- a/src/database/key_value/rooms/state_accessor.rs
+++ b/src/database/key_value/rooms/state_accessor.rs
@@ -5,7 +5,13 @@ use std::{
 
 use crate::{database::KeyValueDatabase, service, services, utils, Error, PduEvent, Result};
 use async_trait::async_trait;
-use ruma::{events::StateEventType, EventId, RoomId};
+use ruma::{
+    events::{
+        room::history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent},
+        StateEventType,
+    },
+    EventId, RoomId, ServerName,
+};
 
 #[async_trait]
 impl service::rooms::state_accessor::Data for KeyValueDatabase {
@@ -141,6 +147,35 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
         })
     }
 
+    /// Whether a server is allowed to see an event through federation, based on
+    /// the room's history_visibility at that event's state.
+    ///
+    /// Note: Joined/Invited history visibility not yet implemented.
+    #[tracing::instrument(skip(self))]
+    fn server_can_see_event(&self, _server_name: &ServerName, event_id: &EventId) -> Result<bool> {
+        let shortstatehash = match self.pdu_shortstatehash(event_id) {
+            Ok(Some(shortstatehash)) => shortstatehash,
+            _ => return Ok(false),
+        };
+
+        let history_visibility = self
+            .state_get(shortstatehash, &StateEventType::RoomHistoryVisibility, "")?
+            .map(|event| serde_json::from_str(event.content.get()))
+            .transpose()
+            .map_err(|_| Error::bad_database("Invalid room history visibility event in database."))?
+            .map(|content: RoomHistoryVisibilityEventContent| content.history_visibility);
+
+        Ok(match history_visibility {
+            Some(HistoryVisibility::WorldReadable) => true,
+            Some(HistoryVisibility::Shared) => true,
+            // TODO: Check if any of the server's users were invited
+            // at this point in time.
+            Some(HistoryVisibility::Joined) => false,
+            Some(HistoryVisibility::Invited) => false,
+            _ => false,
+        })
+    }
+
     /// Returns the full room state.
     async fn room_state_full(
         &self,
diff --git a/src/main.rs b/src/main.rs
index d2183a39..9b79a51b 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -368,6 +368,7 @@ fn routes() -> Router {
         .ruma_route(server_server::send_transaction_message_route)
         .ruma_route(server_server::get_event_route)
         .ruma_route(server_server::get_missing_events_route)
+        .ruma_route(server_server::get_backfill_route)
         .ruma_route(server_server::get_event_authorization_route)
         .ruma_route(server_server::get_room_state_route)
         .ruma_route(server_server::get_room_state_ids_route)
diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs
index 340b19c3..169c8be8 100644
--- a/src/service/rooms/state_accessor/data.rs
+++ b/src/service/rooms/state_accessor/data.rs
@@ -4,7 +4,7 @@ use std::{
 };
 
 use async_trait::async_trait;
-use ruma::{events::StateEventType, EventId, RoomId};
+use ruma::{events::StateEventType, EventId, RoomId, ServerName};
 
 use crate::{PduEvent, Result};
 
@@ -38,6 +38,9 @@ pub trait Data: Send + Sync {
     /// Returns the state hash for this pdu.
     fn pdu_shortstatehash(&self, event_id: &EventId) -> Result<Option<u64>>;
 
+    /// Returns true if a server has permission to see an event
+    fn server_can_see_event(&self, sever_name: &ServerName, event_id: &EventId) -> Result<bool>;
+
     /// Returns the full room state.
     async fn room_state_full(
         &self,
diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs
index 1a9c4a9e..89135da6 100644
--- a/src/service/rooms/state_accessor/mod.rs
+++ b/src/service/rooms/state_accessor/mod.rs
@@ -5,7 +5,7 @@ use std::{
 };
 
 pub use data::Data;
-use ruma::{events::StateEventType, EventId, RoomId};
+use ruma::{events::StateEventType, EventId, RoomId, ServerName};
 
 use crate::{PduEvent, Result};
 
@@ -54,6 +54,16 @@ impl Service {
         self.db.pdu_shortstatehash(event_id)
     }
 
+    /// Returns true if a server has permission to see an event
+    #[tracing::instrument(skip(self))]
+    pub fn server_can_see_event<'a>(
+        &'a self,
+        sever_name: &ServerName,
+        event_id: &EventId,
+    ) -> Result<bool> {
+        self.db.server_can_see_event(sever_name, event_id)
+    }
+
     /// Returns the full room state.
     #[tracing::instrument(skip(self))]
     pub async fn room_state_full(
From 362026857706e4a00858062f148cad70da7fefcd Mon Sep 17 00:00:00 2001
From: Andrei Vasiliu
Date: Sat, 22 Jan 2022 18:26:28 +0200
Subject: [PATCH 03/53] feat: Implement backfill joined/invited checks for
 private history

Co-authored-by: Nyaaori <+@nyaaori.cat>
---
 src/api/server_server.rs                      |  13 ++-
 .../key_value/rooms/state_accessor.rs         |  81 ++++++++-----
 src/service/mod.rs                            |   7 +-
 src/service/rooms/state_accessor/data.rs      |  17 ++-
 src/service/rooms/state_accessor/mod.rs       | 109 ++++++++++++++++--
 5 files changed, 178 insertions(+), 49 deletions(-)

diff --git a/src/api/server_server.rs b/src/api/server_server.rs
index 63a3a57e..b0b33b86 100644
--- a/src/api/server_server.rs
+++ b/src/api/server_server.rs
@@ -1050,12 +1050,12 @@ fn get_missing_events(
     sender_servername: &ServerName,
     room_id: &RoomId,
     earliest_events: &[OwnedEventId],
-    latest_events: &Vec<OwnedEventId>,
+    latest_events: &[OwnedEventId],
     limit: UInt,
 ) -> Result<Vec<Box<RawJsonValue>>> {
     let limit = u64::from(limit) as usize;
 
-    let mut queued_events = latest_events.clone();
+    let mut queued_events = latest_events.to_owned();
     let mut events = Vec::new();
 
     let mut stop_at_events = HashSet::with_capacity(limit);
@@ -1088,10 +1088,11 @@ fn get_missing_events(
                 ));
             }
 
-            let event_is_visible = services()
-                .rooms
-                .state_accessor
-                .server_can_see_event(sender_servername, &queued_events[i])?;
+            let event_is_visible = services().rooms.state_accessor.server_can_see_event(
+                sender_servername,
+                room_id,
+                &queued_events[i],
+            )?;
 
             if !event_is_visible {
                 i += 1;
diff --git a/src/database/key_value/rooms/state_accessor.rs b/src/database/key_value/rooms/state_accessor.rs
index 8bc94982..ffdd8acc 100644
--- a/src/database/key_value/rooms/state_accessor.rs
+++ b/src/database/key_value/rooms/state_accessor.rs
@@ -6,11 +6,8 @@ use std::{
 use crate::{database::KeyValueDatabase, service, services, utils, Error, PduEvent, Result};
 use async_trait::async_trait;
 use ruma::{
-    events::{
-        room::history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent},
-        StateEventType,
-    },
-    EventId, RoomId, ServerName,
+    events::{room::member::MembershipState, StateEventType},
+    EventId, RoomId, UserId,
 };
 
 #[async_trait]
 impl service::rooms::state_accessor::Data for KeyValueDatabase {
@@ -126,6 +123,21 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase {
         })
     }
 
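+    /// Looks up a state event and returns only its deserialized `content`
+    /// object, so callers can pick out single fields (`membership`,
+    /// `history_visibility`) without committing to a fully typed event
+    /// deserialization.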
+    fn state_get_content(
+        &self,
+        shortstatehash: u64,
+        event_type: &StateEventType,
+        state_key: &str,
+    ) -> Result<Option<serde_json::Value>> {
+        let content = self
+            .state_get(shortstatehash, event_type, state_key)?
+            .map(|event| serde_json::from_str(event.content.get()))
+            .transpose()
+            .map_err(|_| Error::bad_database("Invalid event in database"))?;
+
+        Ok(content)
+    }
+
     /// Returns the state hash for this pdu.
     fn pdu_shortstatehash(&self, event_id: &EventId) -> Result<Option<u64>> {
         self.eventid_shorteventid
             .get(event_id.as_bytes())?
             .map_or(Ok(None), |shorteventid| {
                 self.shorteventid_shortstatehash
                     .get(&shorteventid)?
                     .map(|bytes| {
                         utils::u64_from_bytes(&bytes).map_err(|_| {
                             Error::bad_database(
                                 "Invalid shortstatehash bytes in shorteventid_shortstatehash",
                             )
                         })
                     })
                     .transpose()
             })
     }
 
+    /// The user was a joined member at this state (potentially in the past)
+    fn user_was_joined(&self, shortstatehash: u64, user_id: &UserId) -> Result<bool> {
+        Ok(self
+            .state_get_content(
+                shortstatehash,
+                &StateEventType::RoomMember,
+                user_id.as_str(),
+            )?
+            .map(|content| match content.get("membership") {
+                Some(membership) => MembershipState::from(membership.as_str().unwrap_or("")),
+                None => MembershipState::Leave,
+            } == MembershipState::Join)
+            .unwrap_or(false))
+    }
+
+    /// The user was an invited or joined room member at this state (potentially
+    /// in the past)
+    fn user_was_invited(&self, shortstatehash: u64, user_id: &UserId) -> Result<bool> {
+        Ok(self
+            .state_get_content(
+                shortstatehash,
+                &StateEventType::RoomMember,
+                user_id.as_str(),
+            )?
+            .map(|content| {
+                let membership = match content.get("membership") {
+                    Some(membership) => MembershipState::from(membership.as_str().unwrap_or("")),
+                    None => MembershipState::Leave,
+                };
+                let joined = membership == MembershipState::Join;
+                let invited = membership == MembershipState::Invite;
+                invited || joined
+            })
+            .unwrap_or(false))
+    }
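+
+    // Both membership helpers above read the raw JSON instead of a typed
+    // event, so one malformed m.room.member event cannot fail the whole
+    // visibility check: a missing `membership` key counts as `Leave`, and a
+    // non-string value becomes a custom membership that matches neither Join
+    // nor Invite.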
 
     /// Returns the full room state.
diff --git a/src/service/mod.rs b/src/service/mod.rs
index 385dcc69..07d80a15 100644
--- a/src/service/mod.rs
+++ b/src/service/mod.rs
@@ -77,7 +77,12 @@ impl Services {
             search: rooms::search::Service { db },
             short: rooms::short::Service { db },
             state: rooms::state::Service { db },
-            state_accessor: rooms::state_accessor::Service { db },
+            state_accessor: rooms::state_accessor::Service {
+                db,
+                server_visibility_cache: Mutex::new(LruCache::new(
+                    (100.0 * config.conduit_cache_capacity_modifier) as usize,
+                )),
+            },
             state_cache: rooms::state_cache::Service { db },
             state_compressor: rooms::state_compressor::Service {
                 db,
diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs
index 169c8be8..4cff0736 100644
--- a/src/service/rooms/state_accessor/data.rs
+++ b/src/service/rooms/state_accessor/data.rs
@@ -4,7 +4,7 @@ use std::{
 };
 
 use async_trait::async_trait;
-use ruma::{events::StateEventType, EventId, RoomId, ServerName};
+use ruma::{events::StateEventType, EventId, RoomId, UserId};
 
 use crate::{PduEvent, Result};
 
@@ -35,11 +35,22 @@ pub trait Data: Send + Sync {
         state_key: &str,
     ) -> Result<Option<Arc<PduEvent>>>;
 
+    fn state_get_content(
+        &self,
+        shortstatehash: u64,
+        event_type: &StateEventType,
+        state_key: &str,
+    ) -> Result<Option<serde_json::Value>>;
+
     /// Returns the state hash for this pdu.
     fn pdu_shortstatehash(&self, event_id: &EventId) -> Result<Option<u64>>;
 
-    /// Returns true if a server has permission to see an event
-    fn server_can_see_event(&self, sever_name: &ServerName, event_id: &EventId) -> Result<bool>;
+    /// The user was a joined member at this state (potentially in the past)
+    fn user_was_joined(&self, shortstatehash: u64, user_id: &UserId) -> Result<bool>;
+
+    /// The user was an invited or joined room member at this state (potentially
+    /// in the past)
+    fn user_was_invited(&self, shortstatehash: u64, user_id: &UserId) -> Result<bool>;
 
     /// Returns the full room state.
     async fn room_state_full(
diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs
index 89135da6..ce8bb2b9 100644
--- a/src/service/rooms/state_accessor/mod.rs
+++ b/src/service/rooms/state_accessor/mod.rs
@@ -1,16 +1,21 @@
 mod data;
 use std::{
     collections::{BTreeMap, HashMap},
-    sync::Arc,
+    sync::{Arc, Mutex},
 };
 
 pub use data::Data;
-use ruma::{events::StateEventType, EventId, RoomId, ServerName};
+use lru_cache::LruCache;
+use ruma::{
+    events::{room::history_visibility::HistoryVisibility, StateEventType},
+    EventId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId,
+};
 
-use crate::{PduEvent, Result};
+use crate::{services, PduEvent, Result};
 
 pub struct Service {
     pub db: &'static dyn Data,
+    pub server_visibility_cache: Mutex<LruCache<(OwnedServerName, u64), bool>>,
 }
 
 impl Service {
@@ -49,19 +54,107 @@ impl Service {
         self.db.state_get(shortstatehash, event_type, state_key)
     }
 
+    pub fn state_get_content(
+        &self,
+        shortstatehash: u64,
+        event_type: &StateEventType,
+        state_key: &str,
+    ) -> Result<Option<serde_json::Value>> {
+        self.db
+            .state_get_content(shortstatehash, event_type, state_key)
+    }
+
     /// Returns the state hash for this pdu.
     pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result<Option<u64>> {
         self.db.pdu_shortstatehash(event_id)
     }
 
-    /// Returns true if a server has permission to see an event
+    /// Whether a server is allowed to see an event through federation, based on
+    /// the room's history_visibility at that event's state.
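+    ///
+    /// The answer is cached per `(server name, shortstatehash)` pair in the
+    /// `server_visibility_cache` LRU, so repeated backfill requests against
+    /// the same room state skip the member walk below.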
     #[tracing::instrument(skip(self))]
-    pub fn server_can_see_event<'a>(
-        &'a self,
-        sever_name: &ServerName,
+    pub fn server_can_see_event(
+        &self,
+        server_name: &ServerName,
+        room_id: &RoomId,
         event_id: &EventId,
     ) -> Result<bool> {
-        self.db.server_can_see_event(sever_name, event_id)
+        let shortstatehash = match self.pdu_shortstatehash(event_id) {
+            Ok(Some(shortstatehash)) => shortstatehash,
+            _ => return Ok(false),
+        };
+
+        if let Some(visibility) = self
+            .server_visibility_cache
+            .lock()
+            .unwrap()
+            .get_mut(&(server_name.to_owned(), shortstatehash))
+        {
+            return Ok(*visibility);
+        }
+
+        let current_server_members: Vec<OwnedUserId> = services()
+            .rooms
+            .state_cache
+            .room_members(room_id)
+            .filter(|member| {
+                member
+                    .as_ref()
+                    .map(|member| member.server_name() == server_name)
+                    .unwrap_or(true)
+            })
+            .collect::<Result<_>>()?;
+
+        let history_visibility = self
+            .state_get_content(shortstatehash, &StateEventType::RoomHistoryVisibility, "")?
+            .map(|content| match content.get("history_visibility") {
+                Some(visibility) => HistoryVisibility::from(visibility.as_str().unwrap_or("")),
+                None => HistoryVisibility::Invited,
+            });
+
+        let visibility = match history_visibility {
+            Some(HistoryVisibility::Joined) => {
+                // Look at all members in the room from this server; one of them
+                // triggered a backfill. Was one of them a member in the past,
+                // at this event?
+                let mut visible = false;
+                for member in current_server_members {
+                    if self.user_was_joined(shortstatehash, &member)? {
+                        visible = true;
+                        break;
+                    }
+                }
+                visible
+            }
+            Some(HistoryVisibility::Invited) => {
+                let mut visible = false;
+                for member in current_server_members {
+                    if self.user_was_invited(shortstatehash, &member)? {
+                        visible = true;
+                        break;
+                    }
+                }
+                visible
+            }
+            _ => false,
+        };
+
+        self.server_visibility_cache
+            .lock()
+            .unwrap()
+            .insert((server_name.to_owned(), shortstatehash), visibility);
+
+        Ok(visibility)
+    }
+
+    /// The user was a joined member at this state (potentially in the past)
+    pub fn user_was_joined(&self, shortstatehash: u64, user_id: &UserId) -> Result<bool> {
+        self.db.user_was_joined(shortstatehash, user_id)
+    }
+
+    /// The user was an invited or joined room member at this state (potentially
+    /// in the past)
+    pub fn user_was_invited(&self, shortstatehash: u64, user_id: &UserId) -> Result<bool> {
+        self.db.user_was_invited(shortstatehash, user_id)
     }
 
     /// Returns the full room state.
From 3518ee048d746c4a68689d5294774d3370462257 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sat, 3 Sep 2022 14:16:32 +0200 Subject: [PATCH 04/53] fix: Add backfill ACL check --- src/api/server_server.rs | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index b0b33b86..8dee3974 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -982,6 +982,11 @@ pub async fn get_backfill_route( )); } + services() + .rooms + .event_handler + .acl_check(sender_servername, &body.room_id)?; + let origin = services().globals.server_name().to_owned(); let earliest_events = &[]; From d47e1761ecb7db8e570fb68d72a942e41001850b Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sat, 26 Nov 2022 13:03:19 +0100 Subject: [PATCH 05/53] fix: Proper S2S Backfill visibility handling --- src/service/rooms/state_accessor/mod.rs | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index ce8bb2b9..3b1f7060 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -112,30 +112,34 @@ impl Service { }); let visibility = match history_visibility { - Some(HistoryVisibility::Joined) => { - // Look at all members in the room from this server; one of them - // triggered a backfill. Was one of them a member in the past, - // at this event? + Some(HistoryVisibility::WorldReadable) => { + // Allow if event was sent while world readable + true + } + Some(HistoryVisibility::Invited) => { let mut visible = false; + // Allow if any member on requesting server was invited or joined, else deny for member in current_server_members { - if self.user_was_joined(shortstatehash, &member)? { + if self.user_was_invited(shortstatehash, &member)? + || self.user_was_joined(shortstatehash, &member)? + { visible = true; break; } } visible } - Some(HistoryVisibility::Invited) => { + _ => { + // Allow if any member on requested server was joined, else deny let mut visible = false; for member in current_server_members { - if self.user_was_invited(shortstatehash, &member)? { + if self.user_was_joined(shortstatehash, &member)? { visible = true; break; } } visible } - _ => false, }; self.server_visibility_cache From 5ae551b101b72ccd561910f5785d90c46e15cc77 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sat, 26 Nov 2022 13:33:32 +0100 Subject: [PATCH 06/53] fix: Default to Shared history visibility for s2s permissions, per spec --- src/service/rooms/state_accessor/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 3b1f7060..b53488d0 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -108,7 +108,7 @@ impl Service { .state_get_content(shortstatehash, &StateEventType::RoomHistoryVisibility, "")? 
.map(|content| match content.get("history_visibility") { Some(visibility) => HistoryVisibility::from(visibility.as_str().unwrap_or("")), - None => HistoryVisibility::Invited, + None => HistoryVisibility::Shared, }); let visibility = match history_visibility { From 04e5927e46b1123609e15ec0e576c3d032338768 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Mon, 31 Oct 2022 10:31:11 +0100 Subject: [PATCH 07/53] feat: Implement private read receipts, partial notification clearing --- src/api/client_server/read_marker.rs | 81 ++++++++----- src/api/client_server/sync.rs | 45 ++++++++ src/api/client_server/unversioned.rs | 5 +- .../key_value/rooms/edus/read_receipt.rs | 19 ++- src/database/key_value/rooms/user.rs | 12 +- src/service/rooms/edus/read_receipt/data.rs | 5 +- src/service/rooms/edus/read_receipt/mod.rs | 17 ++- src/service/rooms/timeline/mod.rs | 19 ++- src/service/rooms/user/data.rs | 8 +- src/service/rooms/user/mod.rs | 108 +++++++++++++++++- 10 files changed, 261 insertions(+), 58 deletions(-) diff --git a/src/api/client_server/read_marker.rs b/src/api/client_server/read_marker.rs index d529c6a8..68bcea34 100644 --- a/src/api/client_server/read_marker.rs +++ b/src/api/client_server/read_marker.rs @@ -34,29 +34,33 @@ pub async fn set_read_marker_route( )?; } - if body.private_read_receipt.is_some() || body.read_receipt.is_some() { - services() + if let Some(event) = &body.private_read_receipt { + let _pdu = services() .rooms - .user - .reset_notification_counts(sender_user, &body.room_id)?; - } + .timeline + .get_pdu(event)? + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Event does not exist.", + ))?; - if let Some(event) = &body.private_read_receipt { services().rooms.edus.read_receipt.private_read_set( &body.room_id, sender_user, - services() - .rooms - .timeline - .get_pdu_count(event)? - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Event does not exist.", - ))?, + services().rooms.short.get_or_create_shorteventid(event)?, )?; } if let Some(event) = &body.read_receipt { + let _pdu = services() + .rooms + .timeline + .get_pdu(event)? + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Event does not exist.", + ))?; + let mut user_receipts = BTreeMap::new(); user_receipts.insert( sender_user.clone(), @@ -80,6 +84,12 @@ pub async fn set_read_marker_route( room_id: body.room_id.clone(), }, )?; + + services().rooms.edus.read_receipt.private_read_set( + &body.room_id, + sender_user, + services().rooms.short.get_or_create_shorteventid(event)?, + )?; } Ok(set_read_marker::v3::Response {}) @@ -93,16 +103,6 @@ pub async fn create_receipt_route( ) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if matches!( - &body.receipt_type, - create_receipt::v3::ReceiptType::Read | create_receipt::v3::ReceiptType::ReadPrivate - ) { - services() - .rooms - .user - .reset_notification_counts(sender_user, &body.room_id)?; - } - match body.receipt_type { create_receipt::v3::ReceiptType::FullyRead => { let fully_read_event = ruma::events::fully_read::FullyReadEvent { @@ -118,6 +118,16 @@ pub async fn create_receipt_route( )?; } create_receipt::v3::ReceiptType::Read => { + let _pdu = + services() + .rooms + .timeline + .get_pdu(&body.event_id)? 
+ .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Event does not exist.", + ))?; + let mut user_receipts = BTreeMap::new(); user_receipts.insert( sender_user.clone(), @@ -140,19 +150,34 @@ pub async fn create_receipt_route( room_id: body.room_id.clone(), }, )?; - } - create_receipt::v3::ReceiptType::ReadPrivate => { + services().rooms.edus.read_receipt.private_read_set( &body.room_id, sender_user, + services() + .rooms + .short + .get_or_create_shorteventid(&body.event_id)?, + )?; + } + create_receipt::v3::ReceiptType::ReadPrivate => { + let _pdu = services() .rooms .timeline - .get_pdu_count(&body.event_id)? + .get_pdu(&body.event_id)? .ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Event does not exist.", - ))?, + ))?; + + services().rooms.edus.read_receipt.private_read_set( + &body.room_id, + sender_user, + services() + .rooms + .short + .get_or_create_shorteventid(&body.event_id)?, )?; } _ => return Err(Error::bad_database("Unsupported receipt type")), diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 94e4f5bb..f63cf575 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -6,6 +6,7 @@ use ruma::{ uiaa::UiaaResponse, }, events::{ + receipt::{ReceiptThread, ReceiptType}, room::member::{MembershipState, RoomMemberEventContent}, RoomEventType, StateEventType, }, @@ -731,6 +732,50 @@ async fn sync_helper( .map(|(_, _, v)| v) .collect(); + if services() + .rooms + .edus + .read_receipt + .last_privateread_update(&sender_user, &room_id) + .unwrap_or(0) + > since + { + if let Ok(event_id) = services().rooms.short.get_eventid_from_short( + services() + .rooms + .edus + .read_receipt + .private_read_get(&room_id, &sender_user) + .expect("User did not have a valid private read receipt?") + .expect("User had a last read private receipt update but no receipt?"), + ) { + let mut user_receipts = BTreeMap::new(); + user_receipts.insert( + sender_user.clone(), + ruma::events::receipt::Receipt { + ts: None, + thread: ReceiptThread::Unthreaded, + }, + ); + + let mut receipts = BTreeMap::new(); + receipts.insert(ReceiptType::ReadPrivate, user_receipts); + + let mut receipt_content = BTreeMap::new(); + receipt_content.insert((*event_id).to_owned(), receipts); + + edus.push( + serde_json::from_str( + &serde_json::to_string(&ruma::events::SyncEphemeralRoomEvent { + content: ruma::events::receipt::ReceiptEventContent(receipt_content), + }) + .expect("Did not get valid JSON?"), + ) + .expect("JSON was somehow invalid despite just being created"), + ); + } + }; + if services().rooms.edus.typing.last_typing_update(&room_id)? 
> since { edus.push( serde_json::from_str( diff --git a/src/api/client_server/unversioned.rs b/src/api/client_server/unversioned.rs index 8a5c3d25..499c9e02 100644 --- a/src/api/client_server/unversioned.rs +++ b/src/api/client_server/unversioned.rs @@ -24,7 +24,10 @@ pub async fn get_supported_versions_route( "v1.1".to_owned(), "v1.2".to_owned(), ], - unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]), + unstable_features: BTreeMap::from_iter([ + ("org.matrix.e2e_cross_signing".to_owned(), true), + ("org.matrix.msc2285.stable".to_owned(), true), + ]), }; Ok(resp) diff --git a/src/database/key_value/rooms/edus/read_receipt.rs b/src/database/key_value/rooms/edus/read_receipt.rs index fa97ea34..4722cdc0 100644 --- a/src/database/key_value/rooms/edus/read_receipt.rs +++ b/src/database/key_value/rooms/edus/read_receipt.rs @@ -105,16 +105,25 @@ impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { ) } - fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()> { + fn private_read_set( + &self, + room_id: &RoomId, + user_id: &UserId, + shorteventid: u64, + ) -> Result<()> { let mut key = room_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(user_id.as_bytes()); - self.roomuserid_privateread - .insert(&key, &count.to_be_bytes())?; + if self.private_read_get(room_id, user_id)?.unwrap_or(0) < shorteventid { + self.roomuserid_privateread + .insert(&key, &shorteventid.to_be_bytes())?; - self.roomuserid_lastprivatereadupdate - .insert(&key, &services().globals.next_count()?.to_be_bytes()) + self.roomuserid_lastprivatereadupdate + .insert(&key, &services().globals.next_count()?.to_be_bytes()) + } else { + Ok(()) + } } fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { diff --git a/src/database/key_value/rooms/user.rs b/src/database/key_value/rooms/user.rs index 4c435720..63a13d36 100644 --- a/src/database/key_value/rooms/user.rs +++ b/src/database/key_value/rooms/user.rs @@ -3,7 +3,13 @@ use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId}; use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; impl service::rooms::user::Data for KeyValueDatabase { - fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { + fn update_notification_counts( + &self, + user_id: &UserId, + room_id: &RoomId, + notification_count: u64, + highlight_count: u64, + ) -> Result<()> { let mut userroom_id = user_id.as_bytes().to_vec(); userroom_id.push(0xff); userroom_id.extend_from_slice(room_id.as_bytes()); @@ -12,9 +18,9 @@ impl service::rooms::user::Data for KeyValueDatabase { roomuser_id.extend_from_slice(user_id.as_bytes()); self.userroomid_notificationcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; + .insert(&userroom_id, ¬ification_count.to_be_bytes())?; self.userroomid_highlightcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; + .insert(&userroom_id, &highlight_count.to_be_bytes())?; self.roomuserid_lastnotificationread.insert( &roomuser_id, diff --git a/src/service/rooms/edus/read_receipt/data.rs b/src/service/rooms/edus/read_receipt/data.rs index a183d196..7ebd3589 100644 --- a/src/service/rooms/edus/read_receipt/data.rs +++ b/src/service/rooms/edus/read_receipt/data.rs @@ -25,8 +25,9 @@ pub trait Data: Send + Sync { > + 'a, >; - /// Sets a private read marker at `count`. - fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()>; + /// Sets a private read marker at `shorteventid`. 
+ fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, shorteventid: u64) + -> Result<()>; /// Returns the private read marker. fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result>; diff --git a/src/service/rooms/edus/read_receipt/mod.rs b/src/service/rooms/edus/read_receipt/mod.rs index c6035280..a18a0dae 100644 --- a/src/service/rooms/edus/read_receipt/mod.rs +++ b/src/service/rooms/edus/read_receipt/mod.rs @@ -2,7 +2,7 @@ mod data; pub use data::Data; -use crate::Result; +use crate::{services, Result}; use ruma::{events::receipt::ReceiptEvent, serde::Raw, OwnedUserId, RoomId, UserId}; pub struct Service { @@ -36,10 +36,19 @@ impl Service { self.db.readreceipts_since(room_id, since) } - /// Sets a private read marker at `count`. + /// Sets a private read marker at `shorteventid`. #[tracing::instrument(skip(self))] - pub fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()> { - self.db.private_read_set(room_id, user_id, count) + pub fn private_read_set( + &self, + room_id: &RoomId, + user_id: &UserId, + shorteventid: u64, + ) -> Result<()> { + self.db.private_read_set(room_id, user_id, shorteventid)?; + services() + .rooms + .user + .update_notification_counts(user_id, room_id) } /// Returns the private read marker. diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs index 619dca28..349cd60a 100644 --- a/src/service/rooms/timeline/mod.rs +++ b/src/service/rooms/timeline/mod.rs @@ -213,18 +213,17 @@ impl Service { ); let insert_lock = mutex_insert.lock().unwrap(); - let count1 = services().globals.next_count()?; + let _count1 = services().globals.next_count()?; // Mark as read first so the sending client doesn't get a notification even if appending // fails - services() - .rooms - .edus - .read_receipt - .private_read_set(&pdu.room_id, &pdu.sender, count1)?; - services() - .rooms - .user - .reset_notification_counts(&pdu.sender, &pdu.room_id)?; + services().rooms.edus.read_receipt.private_read_set( + &pdu.room_id, + &pdu.sender, + services() + .rooms + .short + .get_or_create_shorteventid(&pdu.event_id)?, + )?; let count2 = services().globals.next_count()?; let mut pdu_id = shortroomid.to_be_bytes().to_vec(); diff --git a/src/service/rooms/user/data.rs b/src/service/rooms/user/data.rs index 4b8a4eca..90fc18bf 100644 --- a/src/service/rooms/user/data.rs +++ b/src/service/rooms/user/data.rs @@ -2,7 +2,13 @@ use crate::Result; use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId}; pub trait Data: Send + Sync { - fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; + fn update_notification_counts( + &self, + user_id: &UserId, + room_id: &RoomId, + notification_count: u64, + highlight_count: u64, + ) -> Result<()>; fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result; diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs index 672e502d..c6c7867b 100644 --- a/src/service/rooms/user/mod.rs +++ b/src/service/rooms/user/mod.rs @@ -1,17 +1,117 @@ mod data; pub use data::Data; -use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId}; +use ruma::{ + events::{ + push_rules::PushRulesEvent, room::power_levels::RoomPowerLevelsEventContent, + GlobalAccountDataEventType, StateEventType, + }, + push::{Action, Ruleset, Tweak}, + OwnedRoomId, OwnedUserId, RoomId, UserId, +}; -use crate::Result; +use crate::{services, Error, Result}; pub struct Service { pub db: &'static dyn Data, } impl Service { - pub fn reset_notification_counts(&self, 
user_id: &UserId, room_id: &RoomId) -> Result<()> { - self.db.reset_notification_counts(user_id, room_id) + pub fn update_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { + let power_levels: RoomPowerLevelsEventContent = services() + .rooms + .state_accessor + .room_state_get(room_id, &StateEventType::RoomPowerLevels, "")? + .map(|ev| { + serde_json::from_str(ev.content.get()) + .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) + }) + .transpose()? + .unwrap_or_default(); + + let read_event = services() + .rooms + .edus + .read_receipt + .private_read_get(room_id, user_id) + .unwrap_or(None) + .unwrap_or(0u64); + let mut notification_count = 0u64; + let mut highlight_count = 0u64; + + services() + .rooms + .timeline + .pdus_since(user_id, room_id, read_event)? + .filter_map(|pdu| pdu.ok()) + .map(|(_, pdu)| pdu) + .filter(|pdu| { + // Don't include user's own messages in notification counts + user_id != &pdu.sender + && services() + .rooms + .short + .get_or_create_shorteventid(&pdu.event_id) + .unwrap_or(0) + != read_event + }) + .filter_map(|pdu| { + let rules_for_user = services() + .account_data + .get( + None, + user_id, + GlobalAccountDataEventType::PushRules.to_string().into(), + ) + .ok()? + .map(|event| { + serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid push rules event in db.")) + }) + .transpose() + .ok()? + .map(|ev: PushRulesEvent| ev.content.global) + .unwrap_or_else(|| Ruleset::server_default(user_id)); + + let mut highlight = false; + let mut notify = false; + + for action in services() + .pusher + .get_actions( + user_id, + &rules_for_user, + &power_levels, + &pdu.to_sync_room_event(), + &pdu.room_id, + ) + .ok()? + { + match action { + Action::DontNotify => notify = false, + // TODO: Implement proper support for coalesce + Action::Notify | Action::Coalesce => notify = true, + Action::SetTweak(Tweak::Highlight(true)) => { + highlight = true; + } + _ => {} + }; + } + + if notify { + notification_count += 1; + }; + + if highlight { + highlight_count += 1; + }; + + Some(()) + }) + .for_each(|_| {}); + + self.db + .update_notification_counts(user_id, room_id, notification_count, highlight_count) } pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { From 96c2cb946996d28ce033af82fb75a46da783ebed Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sat, 26 Nov 2022 14:53:57 +0100 Subject: [PATCH 08/53] feat: Add config option for disabling sending public read receipts Treats requests like private receipts --- src/api/client_server/read_marker.rs | 90 ++++++++++++++-------------- src/config/mod.rs | 2 + src/service/globals/mod.rs | 4 ++ 3 files changed, 52 insertions(+), 44 deletions(-) diff --git a/src/api/client_server/read_marker.rs b/src/api/client_server/read_marker.rs index 68bcea34..ea15637b 100644 --- a/src/api/client_server/read_marker.rs +++ b/src/api/client_server/read_marker.rs @@ -61,30 +61,31 @@ pub async fn set_read_marker_route( "Event does not exist.", ))?; - let mut user_receipts = BTreeMap::new(); - user_receipts.insert( - sender_user.clone(), - ruma::events::receipt::Receipt { - ts: Some(MilliSecondsSinceUnixEpoch::now()), - thread: ReceiptThread::Unthreaded, - }, - ); - - let mut receipts = BTreeMap::new(); - receipts.insert(ReceiptType::Read, user_receipts); + if services().globals.allow_public_read_receipts() { + let mut user_receipts = BTreeMap::new(); + user_receipts.insert( + sender_user.clone(), + ruma::events::receipt::Receipt { + ts: 
Some(MilliSecondsSinceUnixEpoch::now()), + thread: ReceiptThread::Unthreaded, + }, + ); - let mut receipt_content = BTreeMap::new(); - receipt_content.insert(event.to_owned(), receipts); + let mut receipts = BTreeMap::new(); + receipts.insert(ReceiptType::Read, user_receipts); - services().rooms.edus.read_receipt.readreceipt_update( - sender_user, - &body.room_id, - ruma::events::receipt::ReceiptEvent { - content: ruma::events::receipt::ReceiptEventContent(receipt_content), - room_id: body.room_id.clone(), - }, - )?; + let mut receipt_content = BTreeMap::new(); + receipt_content.insert(event.to_owned(), receipts); + services().rooms.edus.read_receipt.readreceipt_update( + sender_user, + &body.room_id, + ruma::events::receipt::ReceiptEvent { + content: ruma::events::receipt::ReceiptEventContent(receipt_content), + room_id: body.room_id.clone(), + }, + )?; + }; services().rooms.edus.read_receipt.private_read_set( &body.room_id, sender_user, @@ -128,29 +129,30 @@ pub async fn create_receipt_route( "Event does not exist.", ))?; - let mut user_receipts = BTreeMap::new(); - user_receipts.insert( - sender_user.clone(), - ruma::events::receipt::Receipt { - ts: Some(MilliSecondsSinceUnixEpoch::now()), - thread: ReceiptThread::Unthreaded, - }, - ); - let mut receipts = BTreeMap::new(); - receipts.insert(ReceiptType::Read, user_receipts); - - let mut receipt_content = BTreeMap::new(); - receipt_content.insert(body.event_id.to_owned(), receipts); - - services().rooms.edus.read_receipt.readreceipt_update( - sender_user, - &body.room_id, - ruma::events::receipt::ReceiptEvent { - content: ruma::events::receipt::ReceiptEventContent(receipt_content), - room_id: body.room_id.clone(), - }, - )?; - + if services().globals.allow_public_read_receipts() { + let mut user_receipts = BTreeMap::new(); + user_receipts.insert( + sender_user.clone(), + ruma::events::receipt::Receipt { + ts: Some(MilliSecondsSinceUnixEpoch::now()), + thread: ReceiptThread::Unthreaded, + }, + ); + let mut receipts = BTreeMap::new(); + receipts.insert(ReceiptType::Read, user_receipts); + + let mut receipt_content = BTreeMap::new(); + receipt_content.insert(body.event_id.to_owned(), receipts); + + services().rooms.edus.read_receipt.readreceipt_update( + sender_user, + &body.room_id, + ruma::events::receipt::ReceiptEvent { + content: ruma::events::receipt::ReceiptEventContent(receipt_content), + room_id: body.room_id.clone(), + }, + )?; + }; services().rooms.edus.read_receipt.private_read_set( &body.room_id, sender_user, diff --git a/src/config/mod.rs b/src/config/mod.rs index 6b862bb6..10018451 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -47,6 +47,8 @@ pub struct Config { #[serde(default = "false_fn")] pub allow_federation: bool, #[serde(default = "true_fn")] + pub allow_public_read_receipts: bool, + #[serde(default = "true_fn")] pub allow_room_creation: bool, #[serde(default = "true_fn")] pub allow_unstable_room_versions: bool, diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index affc0516..b9087626 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -234,6 +234,10 @@ impl Service { self.config.allow_federation } + pub fn allow_public_read_receipts(&self) -> bool { + self.config.allow_public_read_receipts + } + pub fn allow_room_creation(&self) -> bool { self.config.allow_room_creation } From c366e0a5ce1881517ea145e3eba05a7bc833cc3e Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sat, 26 Nov 2022 15:01:12 +0100 Subject: [PATCH 09/53] feat: Add config option for 
receiving read receipts Adds an option for ignoring incoming read receipts over federation --- src/api/server_server.rs | 72 ++++++++++++++++++++------------------ src/config/mod.rs | 2 ++ src/service/globals/mod.rs | 4 +++ 3 files changed, 43 insertions(+), 35 deletions(-) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index b7f88078..e39a6d2d 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -748,43 +748,45 @@ pub async fn send_transaction_message_route( match edu { Edu::Presence(_) => {} Edu::Receipt(receipt) => { - for (room_id, room_updates) in receipt.receipts { - for (user_id, user_updates) in room_updates.read { - if let Some((event_id, _)) = user_updates - .event_ids - .iter() - .filter_map(|id| { + if services().globals.allow_receiving_read_receipts() { + for (room_id, room_updates) in receipt.receipts { + for (user_id, user_updates) in room_updates.read { + if let Some((event_id, _)) = user_updates + .event_ids + .iter() + .filter_map(|id| { + services() + .rooms + .timeline + .get_pdu_count(id) + .ok() + .flatten() + .map(|r| (id, r)) + }) + .max_by_key(|(_, count)| *count) + { + let mut user_receipts = BTreeMap::new(); + user_receipts.insert(user_id.clone(), user_updates.data); + + let mut receipts = BTreeMap::new(); + receipts.insert(ReceiptType::Read, user_receipts); + + let mut receipt_content = BTreeMap::new(); + receipt_content.insert(event_id.to_owned(), receipts); + + let event = ReceiptEvent { + content: ReceiptEventContent(receipt_content), + room_id: room_id.clone(), + }; services() .rooms - .timeline - .get_pdu_count(id) - .ok() - .flatten() - .map(|r| (id, r)) - }) - .max_by_key(|(_, count)| *count) - { - let mut user_receipts = BTreeMap::new(); - user_receipts.insert(user_id.clone(), user_updates.data); - - let mut receipts = BTreeMap::new(); - receipts.insert(ReceiptType::Read, user_receipts); - - let mut receipt_content = BTreeMap::new(); - receipt_content.insert(event_id.to_owned(), receipts); - - let event = ReceiptEvent { - content: ReceiptEventContent(receipt_content), - room_id: room_id.clone(), - }; - services() - .rooms - .edus - .read_receipt - .readreceipt_update(&user_id, &room_id, event)?; - } else { - // TODO fetch missing events - info!("No known event ids in read receipt: {:?}", user_updates); + .edus + .read_receipt + .readreceipt_update(&user_id, &room_id, event)?; + } else { + // TODO fetch missing events + info!("No known event ids in read receipt: {:?}", user_updates); + } } } } diff --git a/src/config/mod.rs b/src/config/mod.rs index 10018451..64744428 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -49,6 +49,8 @@ pub struct Config { #[serde(default = "true_fn")] pub allow_public_read_receipts: bool, #[serde(default = "true_fn")] + pub allow_receiving_read_receipts: bool, + #[serde(default = "true_fn")] pub allow_room_creation: bool, #[serde(default = "true_fn")] pub allow_unstable_room_versions: bool, diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index b9087626..d1f95ded 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -238,6 +238,10 @@ impl Service { self.config.allow_public_read_receipts } + pub fn allow_receiving_read_receipts(&self) -> bool { + self.config.allow_receiving_read_receipts + } + pub fn allow_room_creation(&self) -> bool { self.config.allow_room_creation } From cd5a83d4e2f59eac2fbad42cfa2b3c290cc173d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Thu, 17 Nov 2022 19:18:28 +0100 Subject: [PATCH 10/53] 
feat(presence): start presence timeout implementation

---
 src/database/key_value/rooms/edus/presence.rs | 37 +++++++++++++------
 src/service/mod.rs                            |  2 +-
 src/service/rooms/edus/presence/data.rs       |  4 ++
 src/service/rooms/edus/presence/mod.rs        | 31 +++++++++++++++-
 4 files changed, 61 insertions(+), 13 deletions(-)

diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs
index 904b1c44..a72f1136 100644
--- a/src/database/key_value/rooms/edus/presence.rs
+++ b/src/database/key_value/rooms/edus/presence.rs
@@ -1,8 +1,10 @@
-use std::collections::HashMap;
+use futures_util::{stream::FuturesUnordered, StreamExt};
+use std::{collections::HashMap, time::Duration};
 
 use ruma::{
     events::presence::PresenceEvent, presence::PresenceState, OwnedUserId, RoomId, UInt, UserId,
 };
+use tokio::{sync::mpsc, time::sleep};
 
 use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};
 
@@ -109,24 +111,37 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase {
         Ok(hashmap)
     }
 
-    /*
-    fn presence_maintain(&self, db: Arc<RwLock<Database>>) {
-        // TODO @M0dEx: move this to a timed tasks module
+    fn presence_maintain(
+        &self,
+        mut timer_receiver: mpsc::UnboundedReceiver<Box<UserId>>,
+    ) -> Result<()> {
+        let mut timers = FuturesUnordered::new();
+
         tokio::spawn(async move {
             loop {
-                select! {
-                    Some(user_id) = self.presence_timers.next() {
-                        // TODO @M0dEx: would it be better to acquire the lock outside the loop?
-                        let guard = db.read().await;
+                tokio::select! {
+                    Some(_user_id) = timers.next() => {
+                        // TODO: Handle presence timeouts
+                    }
+                    Some(user_id) = timer_receiver.recv() => {
+                        // Idle timeout
+                        timers.push(create_presence_timer(Duration::from_secs(60), user_id.clone()));
 
-                        // TODO @M0dEx: add self.presence_timers
-                        // TODO @M0dEx: maintain presence
+                        // Offline timeout
+                        timers.push(create_presence_timer(Duration::from_secs(60*15) , user_id));
                     }
                 }
             }
         });
+
+        Ok(())
     }
-    */
+}
+
+async fn create_presence_timer(duration: Duration, user_id: Box<UserId>) -> Box<UserId> {
+    sleep(duration).await;
+
+    user_id
+}
 
 fn parse_presence_event(bytes: &[u8]) -> Result<PresenceEvent> {
diff --git a/src/service/mod.rs b/src/service/mod.rs
index 385dcc69..6858ce1e 100644
--- a/src/service/mod.rs
+++ b/src/service/mod.rs
@@ -62,7 +62,7 @@ impl Services {
             auth_chain: rooms::auth_chain::Service { db },
             directory: rooms::directory::Service { db },
             edus: rooms::edus::Service {
-                presence: rooms::edus::presence::Service { db },
+                presence: rooms::edus::presence::Service::build(db)?,
                 read_receipt: rooms::edus::read_receipt::Service { db },
                 typing: rooms::edus::typing::Service { db },
             },
diff --git a/src/service/rooms/edus/presence/data.rs b/src/service/rooms/edus/presence/data.rs
index 53329e08..9c016705 100644
--- a/src/service/rooms/edus/presence/data.rs
+++ b/src/service/rooms/edus/presence/data.rs
@@ -2,6 +2,7 @@ use std::collections::HashMap;
 
 use crate::Result;
 use ruma::{events::presence::PresenceEvent, OwnedUserId, RoomId, UserId};
+use tokio::sync::mpsc;
 
 pub trait Data: Send + Sync {
     /// Adds a presence event which will be saved until a new event replaces it.
@@ -35,4 +36,7 @@ pub trait Data: Send + Sync {
         room_id: &RoomId,
         since: u64,
     ) -> Result<HashMap<OwnedUserId, PresenceEvent>>;
+
+    fn presence_maintain(&self, timer_receiver: mpsc::UnboundedReceiver<Box<UserId>>)
+        -> Result<()>;
 }
diff --git a/src/service/rooms/edus/presence/mod.rs b/src/service/rooms/edus/presence/mod.rs
index 860aea18..23194dd1 100644
--- a/src/service/rooms/edus/presence/mod.rs
+++ b/src/service/rooms/edus/presence/mod.rs
@@ -3,14 +3,30 @@ use std::collections::HashMap;
 
 pub use data::Data;
 use ruma::{events::presence::PresenceEvent, OwnedUserId, RoomId, UserId};
+use tokio::sync::mpsc;
 
-use crate::Result;
+use crate::{Error, Result};
 
 pub struct Service {
     pub db: &'static dyn Data,
+
+    // Presence timers
+    timer_sender: mpsc::UnboundedSender<Box<UserId>>,
 }
 
 impl Service {
+    pub fn build(db: &'static dyn Data) -> Result<Self> {
+        let (sender, receiver) = mpsc::unbounded_channel();
+        let service = Self {
+            db,
+            timer_sender: sender,
+        };
+
+        service.presence_maintain(receiver)?;
+
+        Ok(service)
+    }
+
     /// Adds a presence event which will be saved until a new event replaces it.
     ///
     /// Note: This method takes a RoomId because presence updates are always bound to rooms to
@@ -21,11 +37,17 @@ impl Service {
         room_id: &RoomId,
         presence: PresenceEvent,
     ) -> Result<()> {
+        self.timer_sender
+            .send(user_id.into())
+            .map_err(|_| Error::bad_database("Sender errored out"))?;
         self.db.update_presence(user_id, room_id, presence)
     }
 
     /// Resets the presence timeout, so the user will stay in their current presence state.
     pub fn ping_presence(&self, user_id: &UserId) -> Result<()> {
+        self.timer_sender
+            .send(user_id.into())
+            .map_err(|_| Error::bad_database("Sender errored out"))?;
         self.db.ping_presence(user_id)
     }
 
@@ -42,6 +64,13 @@ impl Service {
         self.db.get_presence_event(room_id, user_id, last_update)
    }
 
+    pub fn presence_maintain(
+        &self,
+        timer_receiver: mpsc::UnboundedReceiver<Box<UserId>>,
+    ) -> Result<()> {
+        self.db.presence_maintain(timer_receiver)
+    }
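+
+    // Flow sketch: every update_presence()/ping_presence() call above pushes
+    // the user into `timer_sender`; the database task spawned by
+    // presence_maintain() then re-arms that user's idle (60s) and offline
+    // (15min) timers, and a fired timer is where the actual presence
+    // downgrade is meant to hook in (still a TODO in the database layer).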
+
     /* TODO
     /// Sets all users to offline who have been quiet for too long.
     fn _presence_maintain(
From deeffde6793eaa52bcb4e5aef1147a404f53c20d Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jakub=20Kub=C3=ADk?=
Date: Thu, 17 Nov 2022 22:48:57 +0100
Subject: [PATCH 11/53] feat(presence): restructure database trees for
 presence

---
 src/database/key_value/rooms/edus/presence.rs | 83 +++++++++++--------
 src/database/mod.rs                           |  8 +-
 src/service/rooms/edus/presence/data.rs       |  2 +-
 3 files changed, 52 insertions(+), 41 deletions(-)

diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs
index a72f1136..453e2dc0 100644
--- a/src/database/key_value/rooms/edus/presence.rs
+++ b/src/database/key_value/rooms/edus/presence.rs
@@ -6,7 +6,29 @@ use ruma::{
 };
 use tokio::{sync::mpsc, time::sleep};
 
-use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};
+use crate::{
+    database::KeyValueDatabase, service, services, utils, utils::u64_from_bytes, Error, Result,
+};
+use crate::utils::millis_since_unix_epoch;
+
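+// Stored-value layout sketch for PresenceUpdate, assuming both fields stay
+// u64: 16 bytes total, big-endian count followed by big-endian timestamp,
+//
+//   [ count: u64 BE | timestamp: u64 BE ]
+//
+// from_be_bytes() recovers the two halves with split_at(len / 2).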
.map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) + PresenceUpdate::from_be_bytes(bytes)?.timestamp }) .transpose() } @@ -62,17 +80,12 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { &self, room_id: &RoomId, user_id: &UserId, - count: u64, + presence_timestamp: u64 ) -> Result> { - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count.to_be_bytes()); - presence_id.push(0xff); - presence_id.extend_from_slice(user_id.as_bytes()); - - self.presenceid_presence - .get(&presence_id)? - .map(|value| parse_presence_event(&value)) + let mut roomuser_id = [room_id.as_bytes(), 0xff, user_id.as_bytes()].concat(); + self.roomuserid_presenceevent + .get(&roomuser_id)? + .map(|value| parse_presence_event(&value, presence_timestamp)) .transpose() } @@ -144,13 +157,11 @@ async fn create_presence_timer(duration: Duration, user_id: Box) -> Box< user_id } -fn parse_presence_event(bytes: &[u8]) -> Result { +fn parse_presence_event(bytes: &[u8], presence_timestamp: u64) -> Result { let mut presence: PresenceEvent = serde_json::from_slice(bytes) .map_err(|_| Error::bad_database("Invalid presence event in db."))?; - let current_timestamp: UInt = utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"); + let current_timestamp: UInt = millis_since_unix_epoch().try_into()?; if presence.content.presence == PresenceState::Online { // Don't set last_active_ago when the user is online @@ -160,7 +171,7 @@ fn parse_presence_event(bytes: &[u8]) -> Result { presence.content.last_active_ago = presence .content .last_active_ago - .map(|timestamp| current_timestamp - timestamp); + .map(|timestamp| current_timestamp - presence_timestamp); } Ok(presence) diff --git a/src/database/mod.rs b/src/database/mod.rs index 3746efef..0797a136 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -65,8 +65,8 @@ pub struct KeyValueDatabase { pub(super) roomuserid_lastprivatereadupdate: Arc, // LastPrivateReadUpdate = Count pub(super) typingid_userid: Arc, // TypingId = RoomId + TimeoutTime + Count pub(super) roomid_lasttypingupdate: Arc, // LastRoomTypingUpdate = Count - pub(super) presenceid_presence: Arc, // PresenceId = RoomId + Count + UserId - pub(super) userid_lastpresenceupdate: Arc, // LastPresenceUpdate = Count + pub(super) userid_presenceupdate: Arc, // PresenceUpdate = Count + Timestamp + pub(super) roomuserid_presenceevent: Arc, // PresenceEvent //pub rooms: rooms::Rooms, pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count @@ -288,8 +288,8 @@ impl KeyValueDatabase { .open_tree("roomuserid_lastprivatereadupdate")?, typingid_userid: builder.open_tree("typingid_userid")?, roomid_lasttypingupdate: builder.open_tree("roomid_lasttypingupdate")?, - presenceid_presence: builder.open_tree("presenceid_presence")?, - userid_lastpresenceupdate: builder.open_tree("userid_lastpresenceupdate")?, + userid_presenceupdate: builder.open_tree("userid_presenceupdate")?, + roomuserid_presenceevent: builder.open_tree("roomuserid_presenceevent")?, pduid_pdu: builder.open_tree("pduid_pdu")?, eventid_pduid: builder.open_tree("eventid_pduid")?, roomid_pduleaves: builder.open_tree("roomid_pduleaves")?, diff --git a/src/service/rooms/edus/presence/data.rs b/src/service/rooms/edus/presence/data.rs index 9c016705..216313fe 100644 --- a/src/service/rooms/edus/presence/data.rs +++ b/src/service/rooms/edus/presence/data.rs @@ -27,7 +27,7 @@ pub trait Data: Send + 
Sync { &self, room_id: &RoomId, user_id: &UserId, - count: u64, + presence_timestamp: u64, ) -> Result>; /// Returns the most recent presence updates that happened after the event with id `since`. From f956e727e4dc5ad0aff0a3e6a69025931d3ec8da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Thu, 17 Nov 2022 23:26:56 +0100 Subject: [PATCH 12/53] feat(presence): refactor presence_since --- src/database/key_value/rooms/edus/presence.rs | 94 +++++++++++-------- 1 file changed, 54 insertions(+), 40 deletions(-) diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs index 453e2dc0..763572ec 100644 --- a/src/database/key_value/rooms/edus/presence.rs +++ b/src/database/key_value/rooms/edus/presence.rs @@ -93,35 +93,31 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { &self, room_id: &RoomId, since: u64, - ) -> Result> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - let mut hashmap = HashMap::new(); - - for (key, value) in self - .presenceid_presence - .iter_from(&first_possible_edu, false) - .take_while(|(key, _)| key.starts_with(&prefix)) - { - let user_id = UserId::parse( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("Invalid UserId bytes in presenceid_presence."))?, - ) - .map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?; - - let presence = parse_presence_event(&value)?; - - hashmap.insert(user_id, presence); - } - - Ok(hashmap) + ) -> Result>> { + let services = &services(); + let mut user_timestamp: HashMap = self.userid_presenceupdate + .iter() + .map(|(user_id_bytes, update_bytes)| (UserId::parse(utils::string_from_bytes(user_id_bytes)), PresenceUpdate::from_be_bytes(update_bytes)?)) + .filter_map(|(user_id, presence_update)| { + if presence_update.count <= since || !services.rooms.state_cache.is_joined(user_id, room_id)? { + return None + } + + Some((user_id, presence_update.timestamp)) + }) + .collect(); + + Ok( + self.roomuserid_presenceevent + .iter() + .filter_map(|user_id_bytes, presence_bytes| (UserId::parse(utils::string_from_bytes(user_id_bytes)), presence_bytes)) + .filter_map(|user_id, presence_bytes| { + let timestamp = user_timestamp.get(user_id)?; + + Some((user_id, parse_presence_event(presence_bytes, *timestamp)?)) + }) + .into_iter() + ) } fn presence_maintain( @@ -161,18 +157,36 @@ fn parse_presence_event(bytes: &[u8], presence_timestamp: u64) -> Result PresenceState { + let globals = &services().globals; - if presence.content.presence == PresenceState::Online { - // Don't set last_active_ago when the user is online - presence.content.last_active_ago = None; + return if last_active_ago < globals.presence_idle_timeout() { + PresenceState::Online + } else if last_active_ago < globals.presence_offline_timeout() { + PresenceState::Unavailable } else { - // Convert from timestamp to duration - presence.content.last_active_ago = presence - .content - .last_active_ago - .map(|timestamp| current_timestamp - presence_timestamp); - } + PresenceState::Offline + }; +} - Ok(presence) +/// Translates the timestamp representing last_active_ago to a diff from now. 
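// A worked example of the translation below, with hypothetical values in
// milliseconds: if the stored last_active_ts is 90_000 ms in the past, then
// last_active_ago = now - last_active_ts = 90_000; with an idle timeout of
// 60_000 and an offline timeout of 900_000 that age falls in the
// PresenceState::Unavailable band, and last_active_ago is reported as-is.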
+fn translate_active_ago( + presence_event: &mut PresenceEvent, + last_active_ts: u64, +) { + let last_active_ago = millis_since_unix_epoch().saturating_sub(last_active_ts); + + presence_event.content.presence = determine_presence_state(last_active_ago); + + presence_event.content.last_active_ago = match presence_event.content.presence { + PresenceState::Online => None, + _ => Some(UInt::new_saturating(last_active_ago)), + } } From e7348621bd9ba1d7a5b623437343bdac899338b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Fri, 18 Nov 2022 17:18:20 +0100 Subject: [PATCH 13/53] feat(presence): implement most features for PoC --- src/api/client_server/presence.rs | 2 +- src/api/server_server.rs | 29 +++- src/config/mod.rs | 13 ++ src/database/key_value/rooms/edus/presence.rs | 164 +++++++++++++----- src/database/mod.rs | 5 +- src/service/globals/mod.rs | 8 + src/service/rooms/edus/presence/data.rs | 10 +- src/service/rooms/edus/presence/mod.rs | 81 +-------- 8 files changed, 183 insertions(+), 129 deletions(-) diff --git a/src/api/client_server/presence.rs b/src/api/client_server/presence.rs index dfac3dbd..7afda962 100644 --- a/src/api/client_server/presence.rs +++ b/src/api/client_server/presence.rs @@ -60,7 +60,7 @@ pub async fn get_presence_route( .rooms .edus .presence - .get_last_presence_event(sender_user, &room_id)? + .get_presence_event(sender_user, &room_id)? { presence_event = Some(presence); break; diff --git a/src/api/server_server.rs b/src/api/server_server.rs index b7f88078..9154b3ef 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -33,6 +33,7 @@ use ruma::{ }, directory::{IncomingFilter, IncomingRoomNetwork}, events::{ + presence::{PresenceEvent, PresenceEventContent}, receipt::{ReceiptEvent, ReceiptEventContent, ReceiptType}, room::{ join_rules::{JoinRule, RoomJoinRulesEventContent}, @@ -746,7 +747,33 @@ pub async fn send_transaction_message_route( .filter_map(|edu| serde_json::from_str::(edu.json().get()).ok()) { match edu { - Edu::Presence(_) => {} + Edu::Presence(presence) => { + for presence_update in presence.push { + let user_id = presence_update.user_id; + for room_id in services() + .rooms + .state_cache + .rooms_joined(&user_id) + .filter_map(|room_id| room_id.ok()) + { + services().rooms.edus.presence.update_presence( + &user_id, + &room_id, + PresenceEvent { + content: PresenceEventContent { + avatar_url: services().users.avatar_url(&user_id)?, + currently_active: Some(presence_update.currently_active), + displayname: services().users.displayname(&user_id)?, + last_active_ago: Some(presence_update.last_active_ago), + presence: presence_update.presence.clone(), + status_msg: presence_update.status_msg.clone(), + }, + sender: user_id.clone(), + }, + )?; + } + } + } Edu::Receipt(receipt) => { for (room_id, room_updates) in receipt.receipts { for (user_id, user_updates) in room_updates.read { diff --git a/src/config/mod.rs b/src/config/mod.rs index 6b862bb6..78724ab2 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -76,6 +76,11 @@ pub struct Config { pub emergency_password: Option, + #[serde(default = "default_presence_idle_timeout")] + pub presence_idle_timeout: u64, + #[serde(default = "default_presence_offline_timeout")] + pub presence_offline_timeout: u64, + #[serde(flatten)] pub catchall: BTreeMap, } @@ -257,6 +262,14 @@ fn default_turn_ttl() -> u64 { 60 * 60 * 24 } +fn default_presence_idle_timeout() -> u64 { + 1 * 60 as u64 +} + +fn default_presence_offline_timeout() -> u64 { + 15 * 60 as u64 +} + // I know, it's 
a great name pub fn default_default_room_version() -> RoomVersionId { RoomVersionId::V9 diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs index 763572ec..dae6f759 100644 --- a/src/database/key_value/rooms/edus/presence.rs +++ b/src/database/key_value/rooms/edus/presence.rs @@ -1,5 +1,7 @@ use futures_util::{stream::FuturesUnordered, StreamExt}; +use ruma::user_id; use std::{collections::HashMap, time::Duration}; +use tracing::error; use ruma::{ events::presence::PresenceEvent, presence::PresenceState, OwnedUserId, RoomId, UInt, UserId, @@ -7,9 +9,11 @@ use ruma::{ use tokio::{sync::mpsc, time::sleep}; use crate::{ - database::KeyValueDatabase, service, services, utils, utils::u64_from_bytes, Error, Result, + database::KeyValueDatabase, + service, services, utils, + utils::{millis_since_unix_epoch, u64_from_bytes}, + Error, Result, }; -use crate::utils::millis_since_unix_epoch; pub struct PresenceUpdate { count: u64, @@ -17,15 +21,15 @@ pub struct PresenceUpdate { } impl PresenceUpdate { - fn to_be_bytes(&self) -> &[u8] { - &*([self.count.to_be_bytes(), self.timestamp.to_be_bytes()].concat()) + fn to_be_bytes(&self) -> Vec { + [self.count.to_be_bytes(), self.timestamp.to_be_bytes()].concat() } fn from_be_bytes(bytes: &[u8]) -> Result { let (count_bytes, timestamp_bytes) = bytes.split_at(bytes.len() / 2); Ok(Self { - count: u64_from_bytes(count_bytes)?, - timestamp: u64_from_bytes(timestamp_bytes)?, + count: u64_from_bytes(count_bytes).expect("count bytes from DB are valid"), + timestamp: u64_from_bytes(timestamp_bytes).expect("timestamp bytes from DB are valid"), }) } } @@ -37,19 +41,23 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { room_id: &RoomId, presence: PresenceEvent, ) -> Result<()> { - let mut roomuser_id = [room_id.as_bytes(), 0xff, user_id.as_bytes()].concat(); + let roomuser_id = [room_id.as_bytes(), &[0xff], user_id.as_bytes()].concat(); self.roomuserid_presenceevent.insert( &roomuser_id, - &serde_json::to_vec(&presence)?, + &serde_json::to_vec(&presence).expect("presence event from DB is valid"), )?; self.userid_presenceupdate.insert( user_id.as_bytes(), - PresenceUpdate { + &*PresenceUpdate { count: services().globals.next_count()?, - timestamp: millis_since_unix_epoch(), - }.to_be_bytes(), + timestamp: match presence.content.last_active_ago { + Some(active_ago) => millis_since_unix_epoch().saturating_sub(active_ago.into()), + None => millis_since_unix_epoch(), + }, + } + .to_be_bytes(), )?; Ok(()) @@ -58,10 +66,11 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { fn ping_presence(&self, user_id: &UserId) -> Result<()> { self.userid_presenceupdate.insert( user_id.as_bytes(), - PresenceUpdate { + &*PresenceUpdate { count: services().globals.current_count()?, timestamp: millis_since_unix_epoch(), - }.to_be_bytes() + } + .to_be_bytes(), )?; Ok(()) @@ -70,9 +79,7 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { fn last_presence_update(&self, user_id: &UserId) -> Result> { self.userid_presenceupdate .get(user_id.as_bytes())? 
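            // Count semantics visible above: update_presence stores next_count(),
            // so presence_since treats the event as new data, while ping_presence
            // stores current_count(), refreshing the timestamp without making the
            // user show up as a fresh update in every sync.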
- .map(|bytes| { - PresenceUpdate::from_be_bytes(bytes)?.timestamp - }) + .map(|bytes| PresenceUpdate::from_be_bytes(&bytes).map(|update| update.timestamp)) .transpose() } @@ -80,57 +87,131 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { &self, room_id: &RoomId, user_id: &UserId, - presence_timestamp: u64 + presence_timestamp: u64, ) -> Result> { - let mut roomuser_id = [room_id.as_bytes(), 0xff, user_id.as_bytes()].concat(); + let roomuser_id = [room_id.as_bytes(), &[0xff], user_id.as_bytes()].concat(); self.roomuserid_presenceevent .get(&roomuser_id)? .map(|value| parse_presence_event(&value, presence_timestamp)) .transpose() } - fn presence_since( - &self, + fn presence_since<'a>( + &'a self, room_id: &RoomId, since: u64, - ) -> Result>> { + ) -> Result + 'a>> { let services = &services(); - let mut user_timestamp: HashMap = self.userid_presenceupdate + let user_timestamp: HashMap = self + .userid_presenceupdate .iter() - .map(|(user_id_bytes, update_bytes)| (UserId::parse(utils::string_from_bytes(user_id_bytes)), PresenceUpdate::from_be_bytes(update_bytes)?)) + .filter_map(|(user_id_bytes, update_bytes)| { + Some(( + OwnedUserId::from( + UserId::parse(utils::string_from_bytes(&user_id_bytes).ok()?).ok()?, + ), + PresenceUpdate::from_be_bytes(&update_bytes).ok()?, + )) + }) .filter_map(|(user_id, presence_update)| { - if presence_update.count <= since || !services.rooms.state_cache.is_joined(user_id, room_id)? { - return None + if presence_update.count <= since + || !services + .rooms + .state_cache + .is_joined(&user_id, room_id) + .ok()? + { + return None; } Some((user_id, presence_update.timestamp)) }) .collect(); - Ok( + Ok(Box::new( self.roomuserid_presenceevent .iter() - .filter_map(|user_id_bytes, presence_bytes| (UserId::parse(utils::string_from_bytes(user_id_bytes)), presence_bytes)) - .filter_map(|user_id, presence_bytes| { - let timestamp = user_timestamp.get(user_id)?; - - Some((user_id, parse_presence_event(presence_bytes, *timestamp)?)) + .filter_map(|(user_id_bytes, presence_bytes)| { + Some(( + OwnedUserId::from( + UserId::parse(utils::string_from_bytes(&user_id_bytes).ok()?).ok()?, + ), + presence_bytes, + )) }) - .into_iter() - ) + .filter_map( + move |(user_id, presence_bytes)| -> Option<(OwnedUserId, PresenceEvent)> { + let timestamp = user_timestamp.get(&user_id)?; + + Some(( + user_id, + parse_presence_event(&presence_bytes, *timestamp).ok()?, + )) + }, + ), + )) } fn presence_maintain( &self, - mut timer_receiver: mpsc::UnboundedReceiver>, + mut timer_receiver: mpsc::UnboundedReceiver, ) -> Result<()> { let mut timers = FuturesUnordered::new(); + // TODO: Get rid of this hack + timers.push(create_presence_timer( + Duration::from_secs(60), + user_id!("@test:test.com").to_owned(), + )); + tokio::spawn(async move { loop { tokio::select! 
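                // The loop below multiplexes two sources: `timers` is a
                // FuturesUnordered set of sleep futures that each resolve to a
                // user_id once that user's idle/offline deadline passes, while
                // `timer_receiver` delivers user_ids whose recent activity
                // should arm new deadlines.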
{ - Some(_user_id) = timers.next() => { - // TODO: Handle presence timeouts + Some(user_id) = timers.next() => { + let presence_timestamp = match services().rooms.edus.presence.last_presence_update(&user_id) { + Ok(timestamp) => match timestamp { + Some(timestamp) => timestamp, + None => continue, + }, + Err(e) => { + error!("{e}"); + continue; + } + }; + + let presence_state = determine_presence_state(presence_timestamp); + + // Continue if there is no change in state + if presence_state != PresenceState::Offline { + continue; + } + + for room_id in services() + .rooms + .state_cache + .rooms_joined(&user_id) + .filter_map(|room_id| room_id.ok()) { + let presence_event = match services().rooms.edus.presence.get_presence_event(&user_id, &room_id) { + Ok(event) => match event { + Some(event) => event, + None => continue, + }, + Err(e) => { + error!("{e}"); + continue; + } + }; + + match services().rooms.edus.presence.update_presence(&user_id, &room_id, presence_event) { + Ok(()) => (), + Err(e) => { + error!("{e}"); + continue; + } + } + + // TODO: Send event over federation + } } Some(user_id) = timer_receiver.recv() => { // Idle timeout @@ -147,7 +228,7 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { } } -async fn create_presence_timer(duration: Duration, user_id: Box) -> Box { +async fn create_presence_timer(duration: Duration, user_id: OwnedUserId) -> OwnedUserId { sleep(duration).await; user_id @@ -162,9 +243,7 @@ fn parse_presence_event(bytes: &[u8], presence_timestamp: u64) -> Result PresenceState { +fn determine_presence_state(last_active_ago: u64) -> PresenceState { let globals = &services().globals; return if last_active_ago < globals.presence_idle_timeout() { @@ -177,10 +256,7 @@ fn determine_presence_state( } /// Translates the timestamp representing last_active_ago to a diff from now. 
-fn translate_active_ago( - presence_event: &mut PresenceEvent, - last_active_ts: u64, -) { +fn translate_active_ago(presence_event: &mut PresenceEvent, last_active_ts: u64) { let last_active_ago = millis_since_unix_epoch().saturating_sub(last_active_ts); presence_event.content.presence = determine_presence_state(last_active_ago); diff --git a/src/database/mod.rs b/src/database/mod.rs index 0797a136..7baa512a 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -65,7 +65,7 @@ pub struct KeyValueDatabase { pub(super) roomuserid_lastprivatereadupdate: Arc, // LastPrivateReadUpdate = Count pub(super) typingid_userid: Arc, // TypingId = RoomId + TimeoutTime + Count pub(super) roomid_lasttypingupdate: Arc, // LastRoomTypingUpdate = Count - pub(super) userid_presenceupdate: Arc, // PresenceUpdate = Count + Timestamp + pub(super) userid_presenceupdate: Arc, // PresenceUpdate = Count + Timestamp pub(super) roomuserid_presenceevent: Arc, // PresenceEvent //pub rooms: rooms::Rooms, @@ -825,9 +825,6 @@ impl KeyValueDatabase { ); } - // This data is probably outdated - db.presenceid_presence.clear()?; - services().admin.start_handler(); // Set emergency access for the conduit user diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index affc0516..94e3fb97 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -286,6 +286,14 @@ impl Service { &self.config.emergency_password } + pub fn presence_idle_timeout(&self) -> u64 { + self.config.presence_idle_timeout + } + + pub fn presence_offline_timeout(&self) -> u64 { + self.config.presence_offline_timeout + } + pub fn supported_room_versions(&self) -> Vec { let mut room_versions: Vec = vec![]; room_versions.extend(self.stable_room_versions.clone()); diff --git a/src/service/rooms/edus/presence/data.rs b/src/service/rooms/edus/presence/data.rs index 216313fe..5dc4c3cb 100644 --- a/src/service/rooms/edus/presence/data.rs +++ b/src/service/rooms/edus/presence/data.rs @@ -1,5 +1,3 @@ -use std::collections::HashMap; - use crate::Result; use ruma::{events::presence::PresenceEvent, OwnedUserId, RoomId, UserId}; use tokio::sync::mpsc; @@ -31,12 +29,12 @@ pub trait Data: Send + Sync { ) -> Result>; /// Returns the most recent presence updates that happened after the event with id `since`. 
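// A usage sketch for the iterator-based form introduced below (hypothetical
// caller; the real call sites are the sync and federation handlers):
//
//     for (user_id, event) in db.presence_since(&room_id, since)? {
//         // event.content.last_active_ago is already translated relative
//         // to the timestamp stored for user_id
//     }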
- fn presence_since( - &self, + fn presence_since<'a>( + &'a self, room_id: &RoomId, since: u64, - ) -> Result>; + ) -> Result + 'a>>; - fn presence_maintain(&self, timer_receiver: mpsc::UnboundedReceiver>) + fn presence_maintain(&self, timer_receiver: mpsc::UnboundedReceiver) -> Result<()>; } diff --git a/src/service/rooms/edus/presence/mod.rs b/src/service/rooms/edus/presence/mod.rs index 23194dd1..faac5c76 100644 --- a/src/service/rooms/edus/presence/mod.rs +++ b/src/service/rooms/edus/presence/mod.rs @@ -1,5 +1,4 @@ mod data; -use std::collections::HashMap; pub use data::Data; use ruma::{events::presence::PresenceEvent, OwnedUserId, RoomId, UserId}; @@ -11,7 +10,7 @@ pub struct Service { pub db: &'static dyn Data, // Presence timers - timer_sender: mpsc::UnboundedSender>, + timer_sender: mpsc::UnboundedSender, } impl Service { @@ -51,7 +50,11 @@ impl Service { self.db.ping_presence(user_id) } - pub fn get_last_presence_event( + pub fn last_presence_update(&self, user_id: &UserId) -> Result> { + self.db.last_presence_update(user_id) + } + + pub fn get_presence_event( &self, user_id: &UserId, room_id: &RoomId, @@ -66,86 +69,18 @@ impl Service { pub fn presence_maintain( &self, - timer_receiver: mpsc::UnboundedReceiver>, + timer_receiver: mpsc::UnboundedReceiver, ) -> Result<()> { self.db.presence_maintain(timer_receiver) } - /* TODO - /// Sets all users to offline who have been quiet for too long. - fn _presence_maintain( - &self, - rooms: &super::Rooms, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let current_timestamp = utils::millis_since_unix_epoch(); - - for (user_id_bytes, last_timestamp) in self - .userid_lastpresenceupdate - .iter() - .filter_map(|(k, bytes)| { - Some(( - k, - utils::u64_from_bytes(&bytes) - .map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) - .ok()?, - )) - }) - .take_while(|(_, timestamp)| current_timestamp.saturating_sub(*timestamp) > 5 * 60_000) - // 5 Minutes - { - // Send new presence events to set the user offline - let count = globals.next_count()?.to_be_bytes(); - let user_id: Box<_> = utils::string_from_bytes(&user_id_bytes) - .map_err(|_| { - Error::bad_database("Invalid UserId bytes in userid_lastpresenceupdate.") - })? - .try_into() - .map_err(|_| Error::bad_database("Invalid UserId in userid_lastpresenceupdate."))?; - for room_id in rooms.rooms_joined(&user_id).filter_map(|r| r.ok()) { - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count); - presence_id.push(0xff); - presence_id.extend_from_slice(&user_id_bytes); - - self.presenceid_presence.insert( - &presence_id, - &serde_json::to_vec(&PresenceEvent { - content: PresenceEventContent { - avatar_url: None, - currently_active: None, - displayname: None, - last_active_ago: Some( - last_timestamp.try_into().expect("time is valid"), - ), - presence: PresenceState::Offline, - status_msg: None, - }, - sender: user_id.to_owned(), - }) - .expect("PresenceEvent can be serialized"), - )?; - } - - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - } - - Ok(()) - }*/ - /// Returns the most recent presence updates that happened after the event with id `since`. 
#[tracing::instrument(skip(self, since, room_id))] pub fn presence_since( &self, room_id: &RoomId, since: u64, - ) -> Result> { + ) -> Result>> { self.db.presence_since(room_id, since) } } From b6541d207db03bc475901461f9680019bcac29c5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Fri, 18 Nov 2022 17:21:17 +0100 Subject: [PATCH 14/53] fix(presence): fix default timeout values --- src/config/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/config/mod.rs b/src/config/mod.rs index 78724ab2..0058dfae 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -263,11 +263,11 @@ fn default_turn_ttl() -> u64 { } fn default_presence_idle_timeout() -> u64 { - 1 * 60 as u64 + 1 * 60 * 1000 as u64 } fn default_presence_offline_timeout() -> u64 { - 15 * 60 as u64 + 15 * 60 * 1000 as u64 } // I know, it's a great name From b18b228c7cc95da17fc60bef0ca8283ca88fe6d4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Fri, 18 Nov 2022 22:32:50 +0100 Subject: [PATCH 15/53] fix(presence): fix issues found when testing --- src/api/client_server/presence.rs | 19 ++- src/api/client_server/profile.rs | 2 + src/api/client_server/sync.rs | 11 +- src/api/server_server.rs | 1 + src/database/key_value/rooms/edus/presence.rs | 155 +++++++++++------- src/database/mod.rs | 3 + src/service/rooms/edus/presence/data.rs | 4 +- src/service/rooms/edus/presence/mod.rs | 58 ++++--- 8 files changed, 159 insertions(+), 94 deletions(-) diff --git a/src/api/client_server/presence.rs b/src/api/client_server/presence.rs index 7afda962..f363933e 100644 --- a/src/api/client_server/presence.rs +++ b/src/api/client_server/presence.rs @@ -1,5 +1,5 @@ -use crate::{services, utils, Result, Ruma}; -use ruma::api::client::presence::{get_presence, set_presence}; +use crate::{services, Result, Ruma}; +use ruma::{api::client::presence::{get_presence, set_presence}, uint, presence::PresenceState}; use std::time::Duration; /// # `PUT /_matrix/client/r0/presence/{userId}/status` @@ -21,16 +21,13 @@ pub async fn set_presence_route( avatar_url: services().users.avatar_url(sender_user)?, currently_active: None, displayname: services().users.displayname(sender_user)?, - last_active_ago: Some( - utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - ), + last_active_ago: Some(uint!(0)), presence: body.presence.clone(), status_msg: body.status_msg.clone(), }, sender: sender_user.clone(), }, + true )?; } @@ -69,7 +66,6 @@ pub async fn get_presence_route( if let Some(presence) = presence_event { Ok(get_presence::v3::Response { - // TODO: Should ruma just use the presenceeventcontent type here? 
status_msg: presence.content.status_msg, currently_active: presence.content.currently_active, last_active_ago: presence @@ -79,6 +75,11 @@ pub async fn get_presence_route( presence: presence.content.presence, }) } else { - todo!(); + Ok(get_presence::v3::Response { + status_msg: None, + currently_active: None, + last_active_ago: None, + presence: PresenceState::Offline, + }) } } diff --git a/src/api/client_server/profile.rs b/src/api/client_server/profile.rs index 5ace1777..0e667290 100644 --- a/src/api/client_server/profile.rs +++ b/src/api/client_server/profile.rs @@ -109,6 +109,7 @@ pub async fn set_displayname_route( }, sender: sender_user.clone(), }, + true )?; } @@ -244,6 +245,7 @@ pub async fn set_avatar_url_route( }, sender: sender_user.clone(), }, + true )?; } diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 94e4f5bb..e3c250c1 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -166,7 +166,16 @@ async fn sync_helper( }; // TODO: match body.set_presence { - services().rooms.edus.presence.ping_presence(&sender_user)?; + services() + .rooms + .edus + .presence + .ping_presence( + &sender_user, + false, + true, + true + )?; // Setup watchers, so if there's no response, we can wait for them let watcher = services().globals.watch(&sender_user, &sender_device); diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 9154b3ef..564843a6 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -770,6 +770,7 @@ pub async fn send_transaction_message_route( }, sender: user_id.clone(), }, + true )?; } } diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs index dae6f759..159edca6 100644 --- a/src/database/key_value/rooms/edus/presence.rs +++ b/src/database/key_value/rooms/edus/presence.rs @@ -1,7 +1,7 @@ use futures_util::{stream::FuturesUnordered, StreamExt}; use ruma::user_id; -use std::{collections::HashMap, time::Duration}; -use tracing::error; +use std::{collections::{HashMap, hash_map::Entry}, time::Duration, mem}; +use tracing::{error, info}; use ruma::{ events::presence::PresenceEvent, presence::PresenceState, OwnedUserId, RoomId, UInt, UserId, @@ -17,19 +17,22 @@ use crate::{ pub struct PresenceUpdate { count: u64, - timestamp: u64, + prev_timestamp: u64, + curr_timestamp: u64, } impl PresenceUpdate { fn to_be_bytes(&self) -> Vec { - [self.count.to_be_bytes(), self.timestamp.to_be_bytes()].concat() + [self.count.to_be_bytes(), self.prev_timestamp.to_be_bytes(), self.curr_timestamp.to_be_bytes()].concat() } fn from_be_bytes(bytes: &[u8]) -> Result { - let (count_bytes, timestamp_bytes) = bytes.split_at(bytes.len() / 2); + let (count_bytes, timestamps_bytes) = bytes.split_at(mem::size_of::()); + let (prev_timestamp_bytes, curr_timestamp_bytes) = timestamps_bytes.split_at(mem::size_of::()); Ok(Self { count: u64_from_bytes(count_bytes).expect("count bytes from DB are valid"), - timestamp: u64_from_bytes(timestamp_bytes).expect("timestamp bytes from DB are valid"), + prev_timestamp: u64_from_bytes(prev_timestamp_bytes).expect("timestamp bytes from DB are valid"), + curr_timestamp: u64_from_bytes(curr_timestamp_bytes).expect("timestamp bytes from DB are valid"), }) } } @@ -48,14 +51,17 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { &serde_json::to_vec(&presence).expect("presence event from DB is valid"), )?; + let timestamp = match presence.content.last_active_ago { + Some(active_ago) => 
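                            // Back-dating by the reported last_active_ago (below)
                            // anchors the stored timestamp to the user's actual
                            // last activity, so state derivation sees the age the
                            // origin reported rather than the local arrival time.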
millis_since_unix_epoch().saturating_sub(active_ago.into()), + None => millis_since_unix_epoch(), + }; + self.userid_presenceupdate.insert( user_id.as_bytes(), &*PresenceUpdate { count: services().globals.next_count()?, - timestamp: match presence.content.last_active_ago { - Some(active_ago) => millis_since_unix_epoch().saturating_sub(active_ago.into()), - None => millis_since_unix_epoch(), - }, + prev_timestamp: timestamp, + curr_timestamp: timestamp, } .to_be_bytes(), )?; @@ -63,23 +69,41 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { Ok(()) } - fn ping_presence(&self, user_id: &UserId) -> Result<()> { - self.userid_presenceupdate.insert( - user_id.as_bytes(), - &*PresenceUpdate { + fn ping_presence(&self, user_id: &UserId, update_count: bool, update_timestamp: bool) -> Result<()> { + let now = millis_since_unix_epoch(); + + let presence = self.userid_presenceupdate + .get(user_id.as_bytes())? + .map(|presence_bytes| PresenceUpdate::from_be_bytes(&presence_bytes)) + .transpose()?; + + let new_presence = match presence { + Some(presence) => { + PresenceUpdate { + count: if update_count { services().globals.next_count()? } else { presence.count }, + prev_timestamp: if update_timestamp { presence.curr_timestamp } else { presence.prev_timestamp }, + curr_timestamp: if update_timestamp { now } else { presence.curr_timestamp } + } + }, + None => PresenceUpdate { count: services().globals.current_count()?, - timestamp: millis_since_unix_epoch(), + prev_timestamp: now, + curr_timestamp: now, } - .to_be_bytes(), + }; + + self.userid_presenceupdate.insert( + user_id.as_bytes(), + &*new_presence.to_be_bytes(), )?; Ok(()) } - fn last_presence_update(&self, user_id: &UserId) -> Result> { + fn last_presence_update(&self, user_id: &UserId) -> Result> { self.userid_presenceupdate .get(user_id.as_bytes())? 
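            // How the two flags above are used at this patch's call sites: the
            // sync handler pings with (update_count: false, update_timestamp: true)
            // so activity refreshes timestamps without re-announcing the user,
            // while the maintenance task pings with (true, false) so a state
            // transition bumps the count and presence_since picks the user up again.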
- .map(|bytes| PresenceUpdate::from_be_bytes(&bytes).map(|update| update.timestamp)) + .map(|bytes| PresenceUpdate::from_be_bytes(&bytes).map(|update| (update.prev_timestamp, update.curr_timestamp))) .transpose() } @@ -101,21 +125,22 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { room_id: &RoomId, since: u64, ) -> Result + 'a>> { - let services = &services(); let user_timestamp: HashMap = self .userid_presenceupdate .iter() .filter_map(|(user_id_bytes, update_bytes)| { Some(( - OwnedUserId::from( - UserId::parse(utils::string_from_bytes(&user_id_bytes).ok()?).ok()?, - ), - PresenceUpdate::from_be_bytes(&update_bytes).ok()?, + UserId::parse( + utils::string_from_bytes(&user_id_bytes) + .expect("UserID bytes are a valid string") + ).expect("UserID bytes from database are a valid UserID"), + PresenceUpdate::from_be_bytes(&update_bytes) + .expect("PresenceUpdate bytes from database are a valid PresenceUpdate"), )) }) .filter_map(|(user_id, presence_update)| { if presence_update.count <= since - || !services + || !services() .rooms .state_cache .is_joined(&user_id, room_id) @@ -124,18 +149,20 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { return None; } - Some((user_id, presence_update.timestamp)) + Some((user_id, presence_update.curr_timestamp)) }) .collect(); Ok(Box::new( self.roomuserid_presenceevent - .iter() - .filter_map(|(user_id_bytes, presence_bytes)| { + .scan_prefix(room_id.as_bytes().to_vec()) + .filter_map(|(roomuserid_bytes, presence_bytes)| { + let user_id_bytes = roomuserid_bytes.split(|byte| *byte == 0xff as u8).last()?; Some(( - OwnedUserId::from( - UserId::parse(utils::string_from_bytes(&user_id_bytes).ok()?).ok()?, - ), + UserId::parse( + utils::string_from_bytes(&user_id_bytes) + .expect("UserID bytes are a valid string") + ).expect("UserID bytes from database are a valid UserID").to_owned(), presence_bytes, )) }) @@ -145,7 +172,8 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { Some(( user_id, - parse_presence_event(&presence_bytes, *timestamp).ok()?, + parse_presence_event(&presence_bytes, *timestamp) + .expect("PresenceEvent bytes from database are a valid PresenceEvent"), )) }, ), @@ -157,6 +185,7 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { mut timer_receiver: mpsc::UnboundedReceiver, ) -> Result<()> { let mut timers = FuturesUnordered::new(); + let mut timers_timestamp: HashMap = HashMap::new(); // TODO: Get rid of this hack timers.push(create_presence_timer( @@ -168,10 +197,11 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { loop { tokio::select! 
{ Some(user_id) = timers.next() => { - let presence_timestamp = match services().rooms.edus.presence.last_presence_update(&user_id) { - Ok(timestamp) => match timestamp { - Some(timestamp) => timestamp, - None => continue, + info!("Processing timer for user '{}' ({})", user_id.clone(), timers.len()); + let (prev_timestamp, curr_timestamp) = match services().rooms.edus.presence.last_presence_update(&user_id) { + Ok(timestamp_tuple) => match timestamp_tuple { + Some(timestamp_tuple) => timestamp_tuple, + None => continue, }, Err(e) => { error!("{e}"); @@ -179,46 +209,49 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { } }; - let presence_state = determine_presence_state(presence_timestamp); + let prev_presence_state = determine_presence_state(prev_timestamp); + let curr_presence_state = determine_presence_state(curr_timestamp); // Continue if there is no change in state - if presence_state != PresenceState::Offline { + if prev_presence_state == curr_presence_state { continue; } - for room_id in services() - .rooms - .state_cache - .rooms_joined(&user_id) - .filter_map(|room_id| room_id.ok()) { - let presence_event = match services().rooms.edus.presence.get_presence_event(&user_id, &room_id) { - Ok(event) => match event { - Some(event) => event, - None => continue, - }, - Err(e) => { - error!("{e}"); - continue; - } - }; + match services().rooms.edus.presence.ping_presence(&user_id, true, false, false) { + Ok(_) => (), + Err(e) => error!("{e}") + } - match services().rooms.edus.presence.update_presence(&user_id, &room_id, presence_event) { - Ok(()) => (), - Err(e) => { - error!("{e}"); - continue; + // TODO: Notify federation sender + } + Some(user_id) = timer_receiver.recv() => { + let now = millis_since_unix_epoch(); + let should_send = match timers_timestamp.entry(user_id.to_owned()) { + Entry::Occupied(mut entry) => { + if now - entry.get() > 15 * 1000 { + entry.insert(now); + true + } else { + false } + }, + Entry::Vacant(entry) => { + entry.insert(now); + true } + }; - // TODO: Send event over federation + if !should_send { + continue; } - } - Some(user_id) = timer_receiver.recv() => { + // Idle timeout timers.push(create_presence_timer(Duration::from_secs(60), user_id.clone())); // Offline timeout - timers.push(create_presence_timer(Duration::from_secs(60*15) , user_id)); + timers.push(create_presence_timer(Duration::from_secs(60*15) , user_id.clone())); + + info!("Added timers for user '{}' ({})", user_id, timers.len()); } } } diff --git a/src/database/mod.rs b/src/database/mod.rs index 7baa512a..563076e1 100644 --- a/src/database/mod.rs +++ b/src/database/mod.rs @@ -825,6 +825,9 @@ impl KeyValueDatabase { ); } + // Flush old presence data + db.userid_presenceupdate.clear()?; + services().admin.start_handler(); // Set emergency access for the conduit user diff --git a/src/service/rooms/edus/presence/data.rs b/src/service/rooms/edus/presence/data.rs index 5dc4c3cb..d90eaece 100644 --- a/src/service/rooms/edus/presence/data.rs +++ b/src/service/rooms/edus/presence/data.rs @@ -15,10 +15,10 @@ pub trait Data: Send + Sync { ) -> Result<()>; /// Resets the presence timeout, so the user will stay in their current presence state. - fn ping_presence(&self, user_id: &UserId) -> Result<()>; + fn ping_presence(&self, user_id: &UserId, update_count: bool, update_timestamp: bool) -> Result<()>; /// Returns the timestamp of the last presence update of this user in millis since the unix epoch. 
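// The pair returned below is (prev_timestamp, curr_timestamp): keeping the
// previous ping next to the current one lets the maintenance task derive a
// presence state from each and act only when the state actually changes.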
-    fn last_presence_update(&self, user_id: &UserId) -> Result<Option<u64>>;
+    fn last_presence_update(&self, user_id: &UserId) -> Result<Option<(u64, u64)>>;
 
     /// Returns the presence event with correct last_active_ago.
     fn get_presence_event(
diff --git a/src/service/rooms/edus/presence/mod.rs b/src/service/rooms/edus/presence/mod.rs
index faac5c76..427c4fd1 100644
--- a/src/service/rooms/edus/presence/mod.rs
+++ b/src/service/rooms/edus/presence/mod.rs
@@ -14,18 +14,28 @@ pub struct Service {
 }
 
 impl Service {
+    /// Builds the service and initializes the presence_maintain task
     pub fn build(db: &'static dyn Data) -> Result<Self> {
         let (sender, receiver) = mpsc::unbounded_channel();
         let service = Self {
             db,
             timer_sender: sender,
         };
-
+
         service.presence_maintain(receiver)?;
 
         Ok(service)
     }
 
+    /// Resets the presence timeout, so the user will stay in their current presence state.
+    pub fn ping_presence(&self, user_id: &UserId, update_count: bool, update_timestamp: bool, spawn_timer: bool) -> Result<()> {
+        if spawn_timer {
+            self.spawn_timer(user_id)?;
+        }
+
+        self.db.ping_presence(user_id, update_count, update_timestamp)
+    }
+
     /// Adds a presence event which will be saved until a new event replaces it.
     ///
     /// Note: This method takes a RoomId because presence updates are always bound to rooms to
@@ -35,45 +45,34 @@ impl Service {
         user_id: &UserId,
         room_id: &RoomId,
         presence: PresenceEvent,
+        spawn_timer: bool
     ) -> Result<()> {
-        self.timer_sender
-            .send(user_id.into())
-            .map_err(|_| Error::bad_database("Sender errored out"))?;
-        self.db.update_presence(user_id, room_id, presence)
-    }
+        if spawn_timer {
+            self.spawn_timer(user_id)?;
+        }
 
-    /// Resets the presence timeout, so the user will stay in their current presence state.
-    pub fn ping_presence(&self, user_id: &UserId) -> Result<()> {
-        self.timer_sender
-            .send(user_id.into())
-            .map_err(|_| Error::bad_database("Sender errored out"))?;
-        self.db.ping_presence(user_id)
+        self.db.update_presence(user_id, room_id, presence)
     }
 
-    pub fn last_presence_update(&self, user_id: &UserId) -> Result<Option<u64>> {
+    /// Returns the timestamp of when the presence was last updated for the specified user.
+    pub fn last_presence_update(&self, user_id: &UserId) -> Result<Option<(u64, u64)>> {
         self.db.last_presence_update(user_id)
     }
 
+    /// Returns the saved presence event for this user with actual last_active_ago.
     pub fn get_presence_event(
         &self,
         user_id: &UserId,
         room_id: &RoomId,
     ) -> Result<Option<PresenceEvent>> {
         let last_update = match self.db.last_presence_update(user_id)? {
-            Some(last) => last,
+            Some(last) => last.1,
             None => return Ok(None),
         };
 
         self.db.get_presence_event(room_id, user_id, last_update)
     }
 
-    pub fn presence_maintain(
-        &self,
-        timer_receiver: mpsc::UnboundedReceiver<OwnedUserId>,
-    ) -> Result<()> {
-        self.db.presence_maintain(timer_receiver)
-    }
-
     /// Returns the most recent presence updates that happened after the event with id `since`.
#[tracing::instrument(skip(self, since, room_id))] pub fn presence_since( @@ -83,4 +82,21 @@ impl Service { ) -> Result>> { self.db.presence_since(room_id, since) } + + /// Spawns a task maintaining presence data + fn presence_maintain( + &self, + timer_receiver: mpsc::UnboundedReceiver, + ) -> Result<()> { + self.db.presence_maintain(timer_receiver) + } + + /// Spawns a timer for the user used by the maintenance task + fn spawn_timer(&self, user_id: &UserId) -> Result<()> { + self.timer_sender + .send(user_id.into()) + .map_err(|_| Error::bad_database("Sender errored out"))?; + + Ok(()) + } } From 42b27f2f60d93084129240704099f494eab63630 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Fri, 18 Nov 2022 22:39:05 +0100 Subject: [PATCH 16/53] style(presence): reformat with cargo --- src/api/client_server/presence.rs | 8 +- src/api/client_server/profile.rs | 4 +- src/api/client_server/sync.rs | 7 +- src/api/server_server.rs | 2 +- src/database/key_value/rooms/edus/presence.rs | 92 +++++++++++++------ src/service/rooms/edus/presence/data.rs | 7 +- src/service/rooms/edus/presence/mod.rs | 21 +++-- 7 files changed, 93 insertions(+), 48 deletions(-) diff --git a/src/api/client_server/presence.rs b/src/api/client_server/presence.rs index f363933e..583b8798 100644 --- a/src/api/client_server/presence.rs +++ b/src/api/client_server/presence.rs @@ -1,5 +1,9 @@ use crate::{services, Result, Ruma}; -use ruma::{api::client::presence::{get_presence, set_presence}, uint, presence::PresenceState}; +use ruma::{ + api::client::presence::{get_presence, set_presence}, + presence::PresenceState, + uint, +}; use std::time::Duration; /// # `PUT /_matrix/client/r0/presence/{userId}/status` @@ -27,7 +31,7 @@ pub async fn set_presence_route( }, sender: sender_user.clone(), }, - true + true, )?; } diff --git a/src/api/client_server/profile.rs b/src/api/client_server/profile.rs index 0e667290..09f1a5e8 100644 --- a/src/api/client_server/profile.rs +++ b/src/api/client_server/profile.rs @@ -109,7 +109,7 @@ pub async fn set_displayname_route( }, sender: sender_user.clone(), }, - true + true, )?; } @@ -245,7 +245,7 @@ pub async fn set_avatar_url_route( }, sender: sender_user.clone(), }, - true + true, )?; } diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index e3c250c1..03ef17a1 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -170,12 +170,7 @@ async fn sync_helper( .rooms .edus .presence - .ping_presence( - &sender_user, - false, - true, - true - )?; + .ping_presence(&sender_user, false, true, true)?; // Setup watchers, so if there's no response, we can wait for them let watcher = services().globals.watch(&sender_user, &sender_device); diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 564843a6..543bd837 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -770,7 +770,7 @@ pub async fn send_transaction_message_route( }, sender: user_id.clone(), }, - true + true, )?; } } diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs index 159edca6..e23370a0 100644 --- a/src/database/key_value/rooms/edus/presence.rs +++ b/src/database/key_value/rooms/edus/presence.rs @@ -1,6 +1,10 @@ use futures_util::{stream::FuturesUnordered, StreamExt}; use ruma::user_id; -use std::{collections::{HashMap, hash_map::Entry}, time::Duration, mem}; +use std::{ + collections::{hash_map::Entry, HashMap}, + mem, + time::Duration, +}; use tracing::{error, info}; use ruma::{ @@ -23,16 
+27,24 @@ pub struct PresenceUpdate { impl PresenceUpdate { fn to_be_bytes(&self) -> Vec { - [self.count.to_be_bytes(), self.prev_timestamp.to_be_bytes(), self.curr_timestamp.to_be_bytes()].concat() + [ + self.count.to_be_bytes(), + self.prev_timestamp.to_be_bytes(), + self.curr_timestamp.to_be_bytes(), + ] + .concat() } fn from_be_bytes(bytes: &[u8]) -> Result { let (count_bytes, timestamps_bytes) = bytes.split_at(mem::size_of::()); - let (prev_timestamp_bytes, curr_timestamp_bytes) = timestamps_bytes.split_at(mem::size_of::()); + let (prev_timestamp_bytes, curr_timestamp_bytes) = + timestamps_bytes.split_at(mem::size_of::()); Ok(Self { count: u64_from_bytes(count_bytes).expect("count bytes from DB are valid"), - prev_timestamp: u64_from_bytes(prev_timestamp_bytes).expect("timestamp bytes from DB are valid"), - curr_timestamp: u64_from_bytes(curr_timestamp_bytes).expect("timestamp bytes from DB are valid"), + prev_timestamp: u64_from_bytes(prev_timestamp_bytes) + .expect("timestamp bytes from DB are valid"), + curr_timestamp: u64_from_bytes(curr_timestamp_bytes) + .expect("timestamp bytes from DB are valid"), }) } } @@ -69,33 +81,47 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { Ok(()) } - fn ping_presence(&self, user_id: &UserId, update_count: bool, update_timestamp: bool) -> Result<()> { + fn ping_presence( + &self, + user_id: &UserId, + update_count: bool, + update_timestamp: bool, + ) -> Result<()> { let now = millis_since_unix_epoch(); - let presence = self.userid_presenceupdate + let presence = self + .userid_presenceupdate .get(user_id.as_bytes())? .map(|presence_bytes| PresenceUpdate::from_be_bytes(&presence_bytes)) .transpose()?; let new_presence = match presence { - Some(presence) => { - PresenceUpdate { - count: if update_count { services().globals.next_count()? } else { presence.count }, - prev_timestamp: if update_timestamp { presence.curr_timestamp } else { presence.prev_timestamp }, - curr_timestamp: if update_timestamp { now } else { presence.curr_timestamp } - } + Some(presence) => PresenceUpdate { + count: if update_count { + services().globals.next_count()? + } else { + presence.count + }, + prev_timestamp: if update_timestamp { + presence.curr_timestamp + } else { + presence.prev_timestamp + }, + curr_timestamp: if update_timestamp { + now + } else { + presence.curr_timestamp + }, }, None => PresenceUpdate { count: services().globals.current_count()?, prev_timestamp: now, curr_timestamp: now, - } + }, }; - self.userid_presenceupdate.insert( - user_id.as_bytes(), - &*new_presence.to_be_bytes(), - )?; + self.userid_presenceupdate + .insert(user_id.as_bytes(), &*new_presence.to_be_bytes())?; Ok(()) } @@ -103,7 +129,10 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { fn last_presence_update(&self, user_id: &UserId) -> Result> { self.userid_presenceupdate .get(user_id.as_bytes())? 
- .map(|bytes| PresenceUpdate::from_be_bytes(&bytes).map(|update| (update.prev_timestamp, update.curr_timestamp))) + .map(|bytes| { + PresenceUpdate::from_be_bytes(&bytes) + .map(|update| (update.prev_timestamp, update.curr_timestamp)) + }) .transpose() } @@ -132,15 +161,16 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { Some(( UserId::parse( utils::string_from_bytes(&user_id_bytes) - .expect("UserID bytes are a valid string") - ).expect("UserID bytes from database are a valid UserID"), + .expect("UserID bytes are a valid string"), + ) + .expect("UserID bytes from database are a valid UserID"), PresenceUpdate::from_be_bytes(&update_bytes) - .expect("PresenceUpdate bytes from database are a valid PresenceUpdate"), + .expect("PresenceUpdate bytes from database are a valid PresenceUpdate"), )) }) .filter_map(|(user_id, presence_update)| { if presence_update.count <= since - || !services() + || !services() .rooms .state_cache .is_joined(&user_id, room_id) @@ -157,12 +187,15 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { self.roomuserid_presenceevent .scan_prefix(room_id.as_bytes().to_vec()) .filter_map(|(roomuserid_bytes, presence_bytes)| { - let user_id_bytes = roomuserid_bytes.split(|byte| *byte == 0xff as u8).last()?; + let user_id_bytes = + roomuserid_bytes.split(|byte| *byte == 0xff as u8).last()?; Some(( UserId::parse( - utils::string_from_bytes(&user_id_bytes) - .expect("UserID bytes are a valid string") - ).expect("UserID bytes from database are a valid UserID").to_owned(), + utils::string_from_bytes(&user_id_bytes) + .expect("UserID bytes are a valid string"), + ) + .expect("UserID bytes from database are a valid UserID") + .to_owned(), presence_bytes, )) }) @@ -172,8 +205,9 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { Some(( user_id, - parse_presence_event(&presence_bytes, *timestamp) - .expect("PresenceEvent bytes from database are a valid PresenceEvent"), + parse_presence_event(&presence_bytes, *timestamp).expect( + "PresenceEvent bytes from database are a valid PresenceEvent", + ), )) }, ), diff --git a/src/service/rooms/edus/presence/data.rs b/src/service/rooms/edus/presence/data.rs index d90eaece..02c93714 100644 --- a/src/service/rooms/edus/presence/data.rs +++ b/src/service/rooms/edus/presence/data.rs @@ -15,7 +15,12 @@ pub trait Data: Send + Sync { ) -> Result<()>; /// Resets the presence timeout, so the user will stay in their current presence state. - fn ping_presence(&self, user_id: &UserId, update_count: bool, update_timestamp: bool) -> Result<()>; + fn ping_presence( + &self, + user_id: &UserId, + update_count: bool, + update_timestamp: bool, + ) -> Result<()>; /// Returns the timestamp of the last presence update of this user in millis since the unix epoch. fn last_presence_update(&self, user_id: &UserId) -> Result>; diff --git a/src/service/rooms/edus/presence/mod.rs b/src/service/rooms/edus/presence/mod.rs index 427c4fd1..8d3e46aa 100644 --- a/src/service/rooms/edus/presence/mod.rs +++ b/src/service/rooms/edus/presence/mod.rs @@ -21,19 +21,26 @@ impl Service { db, timer_sender: sender, }; - + service.presence_maintain(receiver)?; Ok(service) } /// Resets the presence timeout, so the user will stay in their current presence state. 
- pub fn ping_presence(&self, user_id: &UserId, update_count: bool, update_timestamp: bool, spawn_timer: bool) -> Result<()> { + pub fn ping_presence( + &self, + user_id: &UserId, + update_count: bool, + update_timestamp: bool, + spawn_timer: bool, + ) -> Result<()> { if spawn_timer { self.spawn_timer(user_id)?; } - self.db.ping_presence(user_id, update_count, update_timestamp) + self.db + .ping_presence(user_id, update_count, update_timestamp) } /// Adds a presence event which will be saved until a new event replaces it. @@ -45,7 +52,7 @@ impl Service { user_id: &UserId, room_id: &RoomId, presence: PresenceEvent, - spawn_timer: bool + spawn_timer: bool, ) -> Result<()> { if spawn_timer { self.spawn_timer(user_id)?; @@ -85,9 +92,9 @@ impl Service { /// Spawns a task maintaining presence data fn presence_maintain( - &self, - timer_receiver: mpsc::UnboundedReceiver, - ) -> Result<()> { + &self, + timer_receiver: mpsc::UnboundedReceiver, + ) -> Result<()> { self.db.presence_maintain(timer_receiver) } From 8efcec6283d43efe40bd8081ca1f7ec6e29d90d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Sat, 19 Nov 2022 21:52:26 +0100 Subject: [PATCH 17/53] feat(presence): send presence events for own users unreliably --- src/service/sending/mod.rs | 27 ++++++++++++++++++++++++++- 1 file changed, 26 insertions(+), 1 deletion(-) diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index afa12fc7..3de704a8 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -24,7 +24,7 @@ use ruma::{ federation::{ self, transactions::edu::{ - DeviceListUpdateContent, Edu, ReceiptContent, ReceiptData, ReceiptMap, + DeviceListUpdateContent, Edu, ReceiptContent, ReceiptData, ReceiptMap, PresenceUpdate, PresenceContent, }, }, OutgoingRequest, @@ -283,6 +283,31 @@ impl Service { .filter(|user_id| user_id.server_name() == services().globals.server_name()), ); + // Look for presence updates in this room + let presence_updates: Vec = services() + .rooms + .edus + .presence + .presence_since(&room_id, since)? 
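                // Each (user_id, event) pair from presence_since is folded into
                // the m.presence EDU's `push` array below; last_active_ago and
                // currently_active fall back to 0 / false when the stored event
                // carries no value for them.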
+ .filter(|(user_id, _)| user_id.server_name() == services().globals.server_name()) + .map(|(user_id, presence_event)| PresenceUpdate { + user_id, + presence: presence_event.content.presence, + status_msg: presence_event.content.status_msg, + last_active_ago: presence_event.content.last_active_ago.unwrap_or(uint!(0)), + currently_active: presence_event.content.currently_active.unwrap_or(false), + }) + .collect(); + + let presence_content = PresenceContent { + push: presence_updates, + }; + + events.push( + serde_json::to_vec(&Edu::Presence(presence_content)) + .expect("presence json can be serialized"), + ); + // Look for read receipts in this room for r in services() .rooms From 5bdc0312632cd35fd0cdab5b644ba418a23d2de7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Sat, 19 Nov 2022 22:03:11 +0100 Subject: [PATCH 18/53] style(presence): reformat with cargo fmt --- src/service/sending/mod.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs index 3de704a8..374e6e3c 100644 --- a/src/service/sending/mod.rs +++ b/src/service/sending/mod.rs @@ -24,7 +24,8 @@ use ruma::{ federation::{ self, transactions::edu::{ - DeviceListUpdateContent, Edu, ReceiptContent, ReceiptData, ReceiptMap, PresenceUpdate, PresenceContent, + DeviceListUpdateContent, Edu, PresenceContent, PresenceUpdate, ReceiptContent, + ReceiptData, ReceiptMap, }, }, OutgoingRequest, @@ -304,7 +305,7 @@ impl Service { }; events.push( - serde_json::to_vec(&Edu::Presence(presence_content)) + serde_json::to_vec(&Edu::Presence(presence_content)) .expect("presence json can be serialized"), ); From 5e4e4d0089907499f2ecfb541c05d15fe5b99d1b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Sun, 20 Nov 2022 00:11:58 +0100 Subject: [PATCH 19/53] style(config): remove useless cast --- src/config/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/config/mod.rs b/src/config/mod.rs index 0058dfae..b4dbdfb4 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -263,11 +263,11 @@ fn default_turn_ttl() -> u64 { } fn default_presence_idle_timeout() -> u64 { - 1 * 60 * 1000 as u64 + 1 * 60 * 1000 } fn default_presence_offline_timeout() -> u64 { - 15 * 60 * 1000 as u64 + 15 * 60 * 1000 } // I know, it's a great name From f9d10e8f41d636672198f0a1ceabf22c714a2199 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Mon, 21 Nov 2022 21:24:37 +0100 Subject: [PATCH 20/53] feat(presence): start work on cleanup task --- src/config/mod.rs | 17 +++++++++-- src/database/key_value/rooms/edus/presence.rs | 28 +++++++++++++++---- src/service/globals/mod.rs | 8 ++++++ src/service/rooms/edus/presence/data.rs | 2 ++ src/service/rooms/edus/presence/mod.rs | 4 +++ 5 files changed, 52 insertions(+), 7 deletions(-) diff --git a/src/config/mod.rs b/src/config/mod.rs index b4dbdfb4..7ea551ef 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -81,6 +81,11 @@ pub struct Config { #[serde(default = "default_presence_offline_timeout")] pub presence_offline_timeout: u64, + #[serde(default = "default_presence_cleanup_period")] + pub presence_cleanup_period: u64, + #[serde(default = "default_presence_cleanup_limit")] + pub presence_cleanup_limit: u64, + #[serde(flatten)] pub catchall: BTreeMap, } @@ -263,11 +268,19 @@ fn default_turn_ttl() -> u64 { } fn default_presence_idle_timeout() -> u64 { - 1 * 60 * 1000 + 1 * 60 } fn default_presence_offline_timeout() -> u64 { - 15 * 60 * 1000 + 30 * 60 +} + +fn 
default_presence_cleanup_period() -> u64 { + 24 * 60 * 60 +} + +fn default_presence_cleanup_limit() -> u64 { + 24 * 60 * 60 } // I know, it's a great name diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs index e23370a0..c2348492 100644 --- a/src/database/key_value/rooms/edus/presence.rs +++ b/src/database/key_value/rooms/edus/presence.rs @@ -220,11 +220,13 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { ) -> Result<()> { let mut timers = FuturesUnordered::new(); let mut timers_timestamp: HashMap = HashMap::new(); + let idle_timeout = Duration::from_secs(services().globals.presence_idle_timeout()); + let offline_timeout = Duration::from_secs(services().globals.presence_offline_timeout()); - // TODO: Get rid of this hack + // TODO: Get rid of this hack (hinting correct types to rustc) timers.push(create_presence_timer( - Duration::from_secs(60), - user_id!("@test:test.com").to_owned(), + Duration::from_secs(1), + UserId::parse_with_server_name("conduit", services().globals.server_name()).expect("Conduit user always exists") )); tokio::spawn(async move { @@ -260,6 +262,7 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { } Some(user_id) = timer_receiver.recv() => { let now = millis_since_unix_epoch(); + // Do not create timers if we added timers recently let should_send = match timers_timestamp.entry(user_id.to_owned()) { Entry::Occupied(mut entry) => { if now - entry.get() > 15 * 1000 { @@ -280,10 +283,10 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { } // Idle timeout - timers.push(create_presence_timer(Duration::from_secs(60), user_id.clone())); + timers.push(create_presence_timer(idle_timeout, user_id.clone())); // Offline timeout - timers.push(create_presence_timer(Duration::from_secs(60*15) , user_id.clone())); + timers.push(create_presence_timer(offline_timeout, user_id.clone())); info!("Added timers for user '{}' ({})", user_id, timers.len()); } @@ -293,6 +296,21 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { Ok(()) } + + fn presence_cleanup(&self) -> Result<()> { + let period = Duration::from_secs(services().globals.presence_cleanup_period()); + let age_limit = Duration::from_secs(services().globals.presence_cleanup_limit()); + + tokio::spawn(async move { + loop { + // TODO: Cleanup + + sleep(period).await; + } + }); + + Ok(()) + } } async fn create_presence_timer(duration: Duration, user_id: OwnedUserId) -> OwnedUserId { diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs index 94e3fb97..aa9e832d 100644 --- a/src/service/globals/mod.rs +++ b/src/service/globals/mod.rs @@ -294,6 +294,14 @@ impl Service { self.config.presence_offline_timeout } + pub fn presence_cleanup_period(&self) -> u64 { + self.config.presence_cleanup_period + } + + pub fn presence_cleanup_limit(&self) -> u64 { + self.config.presence_cleanup_limit + } + pub fn supported_room_versions(&self) -> Vec { let mut room_versions: Vec = vec![]; room_versions.extend(self.stable_room_versions.clone()); diff --git a/src/service/rooms/edus/presence/data.rs b/src/service/rooms/edus/presence/data.rs index 02c93714..138258cd 100644 --- a/src/service/rooms/edus/presence/data.rs +++ b/src/service/rooms/edus/presence/data.rs @@ -42,4 +42,6 @@ pub trait Data: Send + Sync { fn presence_maintain(&self, timer_receiver: mpsc::UnboundedReceiver) -> Result<()>; + + fn presence_cleanup(&self) -> Result<()>; } diff --git a/src/service/rooms/edus/presence/mod.rs 
b/src/service/rooms/edus/presence/mod.rs
index 8d3e46aa..7d2520d3 100644
--- a/src/service/rooms/edus/presence/mod.rs
+++ b/src/service/rooms/edus/presence/mod.rs
@@ -98,6 +98,10 @@ impl Service {
         self.db.presence_maintain(timer_receiver)
     }
 
+    fn presence_cleanup(&self) -> Result<()> {
+        self.db.presence_cleanup()
+    }
+
     /// Spawns a timer for the user used by the maintenance task
     fn spawn_timer(&self, user_id: &UserId) -> Result<()> {
         self.timer_sender

From 8d161c6a367137728e84192bded7110b945fa1ca Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jakub=20Kub=C3=ADk?=
Date: Mon, 21 Nov 2022 21:46:21 +0100
Subject: [PATCH 21/53] feat(presence): finish presence cleanup task

---
 src/database/key_value/rooms/edus/presence.rs | 48 +++++++++++++++++--
 src/service/rooms/edus/presence/mod.rs        |  3 +-
 2 files changed, 45 insertions(+), 6 deletions(-)

diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs
index c2348492..f7345f6c 100644
--- a/src/database/key_value/rooms/edus/presence.rs
+++ b/src/database/key_value/rooms/edus/presence.rs
@@ -1,5 +1,4 @@
 use futures_util::{stream::FuturesUnordered, StreamExt};
-use ruma::user_id;
 use std::{
     collections::{hash_map::Entry, HashMap},
     mem,
@@ -157,8 +156,8 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase {
         let user_timestamp: HashMap = self
             .userid_presenceupdate
             .iter()
-            .filter_map(|(user_id_bytes, update_bytes)| {
-                Some((
+            .map(|(user_id_bytes, update_bytes)| {
+                (
                     UserId::parse(
                         utils::string_from_bytes(&user_id_bytes)
                             .expect("UserID bytes are a valid string"),
@@ -166,7 +165,7 @@
                     )
                     .expect("UserID bytes from database are a valid UserID"),
                     PresenceUpdate::from_be_bytes(&update_bytes)
                         .expect("PresenceUpdate bytes from database are a valid PresenceUpdate"),
-                ))
+                )
             })
             .filter_map(|(user_id, presence_update)| {
                 if presence_update.count <= since
@@ -301,10 +300,49 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase {
         let period = Duration::from_secs(services().globals.presence_cleanup_period());
         let age_limit = Duration::from_secs(services().globals.presence_cleanup_limit());
 
+        let userid_presenceupdate = self.userid_presenceupdate.clone();
+        let roomuserid_presenceevent = self.roomuserid_presenceevent.clone();
+
         tokio::spawn(async move {
             loop {
-                // TODO: Cleanup
+                let mut removed_events: u64 = 0;
+                let age_limit_curr = millis_since_unix_epoch().saturating_sub(age_limit.as_millis() as u64);
+
+                for user_id in userid_presenceupdate
+                    .iter()
+                    .map(|(user_id_bytes, update_bytes)| {
+                        (
+                            UserId::parse(
+                                utils::string_from_bytes(&user_id_bytes)
+                                    .expect("UserID bytes are a valid string"),
+                            )
+                            .expect("UserID bytes from database are a valid UserID"),
+                            PresenceUpdate::from_be_bytes(&update_bytes)
+                                .expect("PresenceUpdate bytes from database are a valid PresenceUpdate"),
+                        )
+                    })
+                    .filter_map(|(user_id, presence_update)| {
+                        if presence_update.curr_timestamp < age_limit_curr {
+                            return None;
+                        }
+
+                        Some(user_id)
+                    })
+                {
+                    for room_id in services()
+                        .rooms
+                        .state_cache
+                        .rooms_joined(&user_id)
+                        .filter_map(|room_id| room_id.ok())
+                    {
+                        match roomuserid_presenceevent.remove(&*[room_id.as_bytes(), &[0xff], user_id.as_bytes()].concat()) {
+                            Ok(_) => removed_events += 1,
+                            Err(e) => error!("An error occurred while removing a stale presence event: {e}")
+                        }
+                    }
+                }
 
+                info!("Cleaned up {removed_events} stale presence events!");
                 sleep(period).await;
             }
         });

diff --git a/src/service/rooms/edus/presence/mod.rs
b/src/service/rooms/edus/presence/mod.rs
index 7d2520d3..e14b9322 100644
--- a/src/service/rooms/edus/presence/mod.rs
+++ b/src/service/rooms/edus/presence/mod.rs
@@ -23,7 +23,8 @@ impl Service {
         };
 
         service.presence_maintain(receiver)?;
-
+        service.presence_cleanup()?;
+        
         Ok(service)
     }

From 230f09f8f7f06a431bc01f6756a48e82bfaa3171 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jakub=20Kub=C3=ADk?=
Date: Mon, 21 Nov 2022 21:50:30 +0100
Subject: [PATCH 22/53] style(presence): reformat

---
 src/database/key_value/rooms/edus/presence.rs | 29 ++++++++++++-------
 src/service/rooms/edus/presence/mod.rs        |  2 +-
 2 files changed, 19 insertions(+), 12 deletions(-)

diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs
index f7345f6c..e56e3242 100644
--- a/src/database/key_value/rooms/edus/presence.rs
+++ b/src/database/key_value/rooms/edus/presence.rs
@@ -225,7 +225,8 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase {
         // TODO: Get rid of this hack (hinting correct types to rustc)
         timers.push(create_presence_timer(
             Duration::from_secs(1),
-            UserId::parse_with_server_name("conduit", services().globals.server_name()).expect("Conduit user always exists")
+            UserId::parse_with_server_name("conduit", services().globals.server_name())
+                .expect("Conduit user always exists"),
         ));
 
         tokio::spawn(async move {
@@ -306,19 +307,21 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase {
         tokio::spawn(async move {
             loop {
                 let mut removed_events: u64 = 0;
-                let age_limit_curr = millis_since_unix_epoch().saturating_sub(age_limit.as_millis() as u64);
+                let age_limit_curr =
+                    millis_since_unix_epoch().saturating_sub(age_limit.as_millis() as u64);
 
                 for user_id in userid_presenceupdate
                     .iter()
                     .map(|(user_id_bytes, update_bytes)| {
                         (
-                            UserId::parse(
-                                utils::string_from_bytes(&user_id_bytes)
-                                    .expect("UserID bytes are a valid string"),
-                            )
-                            .expect("UserID bytes from database are a valid UserID"),
-                            PresenceUpdate::from_be_bytes(&update_bytes)
-                                .expect("PresenceUpdate bytes from database are a valid PresenceUpdate"),
+                            UserId::parse(
+                                utils::string_from_bytes(&user_id_bytes)
+                                    .expect("UserID bytes are a valid string"),
+                            )
+                            .expect("UserID bytes from database are a valid UserID"),
+                            PresenceUpdate::from_be_bytes(&update_bytes).expect(
+                                "PresenceUpdate bytes from database are a valid PresenceUpdate",
+                            ),
                         )
                     })
                     .filter_map(|(user_id, presence_update)| {
@@ -335,9 +338,13 @@
                         .rooms_joined(&user_id)
                         .filter_map(|room_id| room_id.ok())
                     {
-                        match roomuserid_presenceevent.remove(&*[room_id.as_bytes(), &[0xff], user_id.as_bytes()].concat()) {
+                        match roomuserid_presenceevent
+                            .remove(&*[room_id.as_bytes(), &[0xff], user_id.as_bytes()].concat())
+                        {
                             Ok(_) => removed_events += 1,
-                            Err(e) => error!("An error occurred while removing a stale presence event: {e}")
+                            Err(e) => error!(
+                                "An error occurred while removing a stale presence event: {e}"
+                            ),
                         }
                     }
                 }

diff --git a/src/service/rooms/edus/presence/mod.rs b/src/service/rooms/edus/presence/mod.rs
index e14b9322..f0e9833d 100644
--- a/src/service/rooms/edus/presence/mod.rs
+++ b/src/service/rooms/edus/presence/mod.rs
@@ -24,7 +24,7 @@ impl Service {
 
         service.presence_maintain(receiver)?;
         service.presence_cleanup()?;
-        
+
         Ok(service)
     }

From 63ac118d11c392f581e56533293a423acc88d80b Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jakub=20Kub=C3=ADk?=
Date: Mon, 21 Nov 2022 22:03:50 +0100
Subject: [PATCH 23/53] fix(presence): move sleep in presence cleanup
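
With the sleep at the end of the loop body, a cleanup pass ran immediately at
startup; moving it to the top delays the first sweep by one full period. The
resulting loop shape, as an illustrative sketch (the helper name below is
hypothetical, not part of this patch):

    loop {
        sleep(period).await;            // wait out one full period first
        cleanup_stale_presence().await; // hypothetical name for the sweep body
    }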
--- src/config/mod.rs | 2 +- src/database/key_value/rooms/edus/presence.rs | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/src/config/mod.rs b/src/config/mod.rs index 7ea551ef..b40037bf 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -276,7 +276,7 @@ fn default_presence_offline_timeout() -> u64 { } fn default_presence_cleanup_period() -> u64 { - 24 * 60 * 60 + 1 * 60 * 60 } fn default_presence_cleanup_limit() -> u64 { diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs index e56e3242..567c72ff 100644 --- a/src/database/key_value/rooms/edus/presence.rs +++ b/src/database/key_value/rooms/edus/presence.rs @@ -306,6 +306,8 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { tokio::spawn(async move { loop { + sleep(period).await; + let mut removed_events: u64 = 0; let age_limit_curr = millis_since_unix_epoch().saturating_sub(age_limit.as_millis() as u64); @@ -350,7 +352,6 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { } info!("Cleaned up {removed_events} stale presence events!"); - sleep(period).await; } }); From 77b555f2d6d4f79b49b3ff371bed57c45f2e5b71 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Mon, 21 Nov 2022 22:13:16 +0100 Subject: [PATCH 24/53] fix(presence): don't cause panic --- src/database/key_value/rooms/edus/presence.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs index 567c72ff..74779cb8 100644 --- a/src/database/key_value/rooms/edus/presence.rs +++ b/src/database/key_value/rooms/edus/presence.rs @@ -224,7 +224,7 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { // TODO: Get rid of this hack (hinting correct types to rustc) timers.push(create_presence_timer( - Duration::from_secs(1), + idle_timeout, UserId::parse_with_server_name("conduit", services().globals.server_name()) .expect("Conduit user always exists"), )); From dd85316bd9153df6af9c86647105994a51252cb1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Mon, 21 Nov 2022 22:39:18 +0100 Subject: [PATCH 25/53] fix(presence): allow services to start before running tasks --- src/config/mod.rs | 2 +- src/database/key_value/rooms/edus/presence.rs | 35 +++++++++++-------- 2 files changed, 22 insertions(+), 15 deletions(-) diff --git a/src/config/mod.rs b/src/config/mod.rs index b40037bf..7ea551ef 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -276,7 +276,7 @@ fn default_presence_offline_timeout() -> u64 { } fn default_presence_cleanup_period() -> u64 { - 1 * 60 * 60 + 24 * 60 * 60 } fn default_presence_cleanup_limit() -> u64 { diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs index 74779cb8..3d6eb208 100644 --- a/src/database/key_value/rooms/edus/presence.rs +++ b/src/database/key_value/rooms/edus/presence.rs @@ -219,17 +219,22 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { ) -> Result<()> { let mut timers = FuturesUnordered::new(); let mut timers_timestamp: HashMap = HashMap::new(); - let idle_timeout = Duration::from_secs(services().globals.presence_idle_timeout()); - let offline_timeout = Duration::from_secs(services().globals.presence_offline_timeout()); - - // TODO: Get rid of this hack (hinting correct types to rustc) - timers.push(create_presence_timer( - idle_timeout, - UserId::parse_with_server_name("conduit", services().globals.server_name()) 
-                .expect("Conduit user always exists"),
-    ));
 
     tokio::spawn(async move {
+        // Wait for services to be created
+        sleep(Duration::from_secs(15)).await;
+
+        let idle_timeout = Duration::from_secs(services().globals.presence_idle_timeout());
+        let offline_timeout =
+            Duration::from_secs(services().globals.presence_offline_timeout());
+
+        // TODO: Get rid of this hack (hinting correct types to rustc)
+        timers.push(create_presence_timer(
+            idle_timeout,
+            UserId::parse_with_server_name("conduit", services().globals.server_name())
+                .expect("Conduit user always exists"),
+        ));
+
         loop {
             tokio::select! {
                 Some(user_id) = timers.next() => {
@@ -298,16 +303,17 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase {
     }
 
     fn presence_cleanup(&self) -> Result<()> {
-        let period = Duration::from_secs(services().globals.presence_cleanup_period());
-        let age_limit = Duration::from_secs(services().globals.presence_cleanup_limit());
-
         let userid_presenceupdate = self.userid_presenceupdate.clone();
         let roomuserid_presenceevent = self.roomuserid_presenceevent.clone();
 
         tokio::spawn(async move {
-            loop {
-                sleep(period).await;
+            // Wait for services to be created
+            sleep(Duration::from_secs(15)).await;
+
+            let period = Duration::from_secs(services().globals.presence_cleanup_period());
+            let age_limit = Duration::from_secs(services().globals.presence_cleanup_limit());
 
+            loop {
                 let mut removed_events: u64 = 0;
                 let age_limit_curr =
                     millis_since_unix_epoch().saturating_sub(age_limit.as_millis() as u64);
@@ -352,6 +358,7 @@
                 }
 
                 info!("Cleaned up {removed_events} stale presence events!");
+                sleep(period).await;
             }
         });

From 4d22cb502e6d20259549cffd4fd4b99d32e0f748 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jakub=20Kub=C3=ADk?=
Date: Mon, 21 Nov 2022 22:48:58 +0100
Subject: [PATCH 26/53] feat(presence): remove old presence updates

---
 src/database/key_value/rooms/edus/presence.rs | 7 +++++++
 src/database/mod.rs                           | 3 ---
 2 files changed, 7 insertions(+), 3 deletions(-)

diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs
index 3d6eb208..117c9da5 100644
--- a/src/database/key_value/rooms/edus/presence.rs
+++ b/src/database/key_value/rooms/edus/presence.rs
@@ -340,6 +340,13 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase {
                         Some(user_id)
                     })
                 {
+                    match userid_presenceupdate.remove(&*user_id.as_bytes()) {
+                        Ok(_) => (),
+                        Err(e) => {
+                            error!("An error occurred while removing a stale presence update: {e}")
+                        }
+                    }
+
                     for room_id in services()
                         .rooms
                         .state_cache

diff --git a/src/database/mod.rs b/src/database/mod.rs
index 563076e1..7baa512a 100644
--- a/src/database/mod.rs
+++ b/src/database/mod.rs
@@ -825,9 +825,6 @@ impl KeyValueDatabase {
             );
         }
 
-        // Flush old presence data
-        db.userid_presenceupdate.clear()?;
-
         services().admin.start_handler();
 
         // Set emergency access for the conduit user

From 2eb5907d95507cea8f0abc79d69215d010730284 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jakub=20Kub=C3=ADk?=
Date: Mon, 21 Nov 2022 22:56:58 +0100
Subject: [PATCH 27/53] fix(presence): fix configuration values for presence status

---
 src/database/key_value/rooms/edus/presence.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs
index 117c9da5..ee41584a 100644
--- a/src/database/key_value/rooms/edus/presence.rs
+++ b/src/database/key_value/rooms/edus/presence.rs
@@ -391,9 +391,9 @@ fn
parse_presence_event(bytes: &[u8], presence_timestamp: u64) -> Result PresenceState { let globals = &services().globals; - return if last_active_ago < globals.presence_idle_timeout() { + return if last_active_ago < globals.presence_idle_timeout() * 1000 { PresenceState::Online - } else if last_active_ago < globals.presence_offline_timeout() { + } else if last_active_ago < globals.presence_offline_timeout() * 1000 { PresenceState::Unavailable } else { PresenceState::Offline From 502526789d27c0c1d460b8e6e3eb0b88a3013ae0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Tue, 22 Nov 2022 18:37:04 +0100 Subject: [PATCH 28/53] style(presence): code cleanup --- src/api/client_server/presence.rs | 7 +--- src/database/key_value/rooms/edus/presence.rs | 39 +++++++------------ 2 files changed, 15 insertions(+), 31 deletions(-) diff --git a/src/api/client_server/presence.rs b/src/api/client_server/presence.rs index 583b8798..9bcd7ba9 100644 --- a/src/api/client_server/presence.rs +++ b/src/api/client_server/presence.rs @@ -79,11 +79,6 @@ pub async fn get_presence_route( presence: presence.content.presence, }) } else { - Ok(get_presence::v3::Response { - status_msg: None, - currently_active: None, - last_active_ago: None, - presence: PresenceState::Offline, - }) + Ok(get_presence::v3::Response::new(PresenceState::Offline)) } } diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs index ee41584a..b5e10e10 100644 --- a/src/database/key_value/rooms/edus/presence.rs +++ b/src/database/key_value/rooms/edus/presence.rs @@ -185,31 +185,20 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { Ok(Box::new( self.roomuserid_presenceevent .scan_prefix(room_id.as_bytes().to_vec()) - .filter_map(|(roomuserid_bytes, presence_bytes)| { - let user_id_bytes = - roomuserid_bytes.split(|byte| *byte == 0xff as u8).last()?; - Some(( - UserId::parse( - utils::string_from_bytes(&user_id_bytes) - .expect("UserID bytes are a valid string"), - ) - .expect("UserID bytes from database are a valid UserID") - .to_owned(), - presence_bytes, - )) - }) - .filter_map( - move |(user_id, presence_bytes)| -> Option<(OwnedUserId, PresenceEvent)> { - let timestamp = user_timestamp.get(&user_id)?; - - Some(( - user_id, - parse_presence_event(&presence_bytes, *timestamp).expect( - "PresenceEvent bytes from database are a valid PresenceEvent", - ), - )) - }, - ), + .filter_map(move |(roomuserid_bytes, presence_bytes)| { + let user_id_bytes = roomuserid_bytes.split(|byte| *byte == 0xff).last()?; + let user_id: OwnedUserId = UserId::parse( + utils::string_from_bytes(&user_id_bytes) + .expect("UserID bytes are a valid string"), + ) + .expect("UserID bytes from database are a valid UserID"); + + let timestamp = user_timestamp.get(&user_id)?; + let presence_event = parse_presence_event(&presence_bytes, *timestamp) + .expect("PresenceEvent bytes from database are a valid PresenceEvent"); + + Some((user_id, presence_event)) + }), )) } From f269a15c230d28670014509b4b97f7d31de82cb8 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sun, 27 Nov 2022 17:42:20 +0100 Subject: [PATCH 29/53] chore: code cleanup / cargo clippy --- src/config/mod.rs | 2 +- src/database/key_value/rooms/edus/presence.rs | 14 +++++++------- src/service/sending/mod.rs | 5 ++++- 3 files changed, 12 insertions(+), 9 deletions(-) diff --git a/src/config/mod.rs b/src/config/mod.rs index 7ea551ef..43bd0179 100644 --- a/src/config/mod.rs +++ b/src/config/mod.rs @@ -268,7 +268,7 @@ fn 
default_turn_ttl() -> u64 {
 }
 
 fn default_presence_idle_timeout() -> u64 {
-    1 * 60
+    60
 }
 
 fn default_presence_offline_timeout() -> u64 {

diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs
index b5e10e10..7a48559b 100644
--- a/src/database/key_value/rooms/edus/presence.rs
+++ b/src/database/key_value/rooms/edus/presence.rs
@@ -69,7 +69,7 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase {
 
         self.userid_presenceupdate.insert(
             user_id.as_bytes(),
-            &*PresenceUpdate {
+            &PresenceUpdate {
                 count: services().globals.next_count()?,
                 prev_timestamp: timestamp,
                 curr_timestamp: timestamp,
@@ -120,7 +120,7 @@
         };
 
         self.userid_presenceupdate
-            .insert(user_id.as_bytes(), &*new_presence.to_be_bytes())?;
+            .insert(user_id.as_bytes(), &new_presence.to_be_bytes())?;
 
         Ok(())
     }
@@ -188,7 +188,7 @@
                 .filter_map(move |(roomuserid_bytes, presence_bytes)| {
                     let user_id_bytes = roomuserid_bytes.split(|byte| *byte == 0xff).last()?;
                     let user_id: OwnedUserId = UserId::parse(
-                        utils::string_from_bytes(&user_id_bytes)
+                        utils::string_from_bytes(user_id_bytes)
                             .expect("UserID bytes are a valid string"),
                     )
                     .expect("UserID bytes from database are a valid UserID");
@@ -329,7 +329,7 @@
                         Some(user_id)
                     })
                 {
-                    match userid_presenceupdate.remove(&*user_id.as_bytes()) {
+                    match userid_presenceupdate.remove(user_id.as_bytes()) {
                         Ok(_) => (),
                         Err(e) => {
                             error!("An error occurred while removing a stale presence update: {e}")
@@ -343,7 +343,7 @@
                         .filter_map(|room_id| room_id.ok())
                     {
                         match roomuserid_presenceevent
-                            .remove(&*[room_id.as_bytes(), &[0xff], user_id.as_bytes()].concat())
+                            .remove(&[room_id.as_bytes(), &[0xff], user_id.as_bytes()].concat())
                         {
                             Ok(_) => removed_events += 1,
                             Err(e) => error!(
                                 "An error occurred while removing a stale presence event: {e}"
@@ -380,13 +380,13 @@ fn parse_presence_event(bytes: &[u8], presence_timestamp: u64) -> Result PresenceState {
     let globals = &services().globals;
-    return if last_active_ago < globals.presence_idle_timeout() * 1000 {
+    if last_active_ago < globals.presence_idle_timeout() * 1000 {
         PresenceState::Online
     } else if last_active_ago < globals.presence_offline_timeout() * 1000 {
         PresenceState::Unavailable
     } else {
         PresenceState::Offline
-    };
+    }
 }
 
 /// Translates the timestamp representing last_active_ago to a diff from now.
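
The `* 1000` factors above exist because `presence_idle_timeout` and
`presence_offline_timeout` are configured in seconds, while `last_active_ago`
is measured in milliseconds. A minimal self-contained sketch of that
comparison (the names are illustrative, not the actual Conduit code):

    // Maps a last_active_ago value (milliseconds) to a presence state,
    // converting the second-based timeouts before comparing.
    fn presence_state_for(last_active_ago_ms: u64, idle_secs: u64, offline_secs: u64) -> &'static str {
        if last_active_ago_ms < idle_secs * 1000 {
            "online"
        } else if last_active_ago_ms < offline_secs * 1000 {
            "unavailable"
        } else {
            "offline"
        }
    }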
diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs
index 374e6e3c..b809fd50 100644
--- a/src/service/sending/mod.rs
+++ b/src/service/sending/mod.rs
@@ -295,7 +295,10 @@ impl Service {
                 user_id,
                 presence: presence_event.content.presence,
                 status_msg: presence_event.content.status_msg,
-                last_active_ago: presence_event.content.last_active_ago.unwrap_or(uint!(0)),
+                last_active_ago: presence_event
+                    .content
+                    .last_active_ago
+                    .unwrap_or_else(|| uint!(0)),
                 currently_active: presence_event.content.currently_active.unwrap_or(false),
             })
             .collect();

From a2cffa9da30b04c02ecb752cc4ba128958dff1ba Mon Sep 17 00:00:00 2001
From: Nyaaori <+@nyaaori.cat>
Date: Mon, 28 Nov 2022 14:17:00 +0100
Subject: [PATCH 30/53] fix: Declare support for msc3827

Not declaring support causes recent matrix-react-sdk versions to set
room_types to null instead of omitting it, which is against the spec and
makes ruma throw an error, leaving room listings entirely unavailable for
users of element-web and other matrix-react-sdk based clients.

We also already pass room_types over federation; without declared support,
compliant clients will not make use of that feature.
---
 src/api/client_server/unversioned.rs | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/src/api/client_server/unversioned.rs b/src/api/client_server/unversioned.rs
index 8a5c3d25..598a2406 100644
--- a/src/api/client_server/unversioned.rs
+++ b/src/api/client_server/unversioned.rs
@@ -24,7 +24,10 @@ pub async fn get_supported_versions_route(
             "v1.1".to_owned(),
             "v1.2".to_owned(),
         ],
-        unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]),
+        unstable_features: BTreeMap::from_iter([
+            ("org.matrix.e2e_cross_signing".to_owned(), true),
+            ("org.matrix.msc3827.stable".to_owned(), true),
+        ]),
     };
 
     Ok(resp)

From 1f698718a04613319a19299840c28d3a09e75f23 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jakub=20Kub=C3=ADk?=
Date: Wed, 7 Dec 2022 18:26:48 +0100
Subject: [PATCH 31/53] feat(presence): add configuration option to disable presence

---
 src/config/mod.rs                      |  3 +++
 src/service/globals/mod.rs             |  4 +++
 src/service/rooms/edus/presence/mod.rs | 34 +++++++++++++++++++++++++-
 3 files changed, 40 insertions(+), 1 deletion(-)

diff --git a/src/config/mod.rs b/src/config/mod.rs
index 43bd0179..025969e3 100644
--- a/src/config/mod.rs
+++ b/src/config/mod.rs
@@ -76,6 +76,9 @@ pub struct Config {
 
     pub emergency_password: Option,
 
+    #[serde(default = "true_fn")]
+    pub allow_presence: bool,
+
     #[serde(default = "default_presence_idle_timeout")]
     pub presence_idle_timeout: u64,
     #[serde(default = "default_presence_offline_timeout")]

diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs
index aa9e832d..50c465ce 100644
--- a/src/service/globals/mod.rs
+++ b/src/service/globals/mod.rs
@@ -286,6 +286,10 @@ impl Service {
         &self.config.emergency_password
     }
 
+    pub fn allow_presence(&self) -> bool {
+        self.config.allow_presence
+    }
+
     pub fn presence_idle_timeout(&self) -> u64 {
         self.config.presence_idle_timeout
     }

diff --git a/src/service/rooms/edus/presence/mod.rs b/src/service/rooms/edus/presence/mod.rs
index f0e9833d..b78bf46e 100644
--- a/src/service/rooms/edus/presence/mod.rs
+++ b/src/service/rooms/edus/presence/mod.rs
@@ -4,7 +4,7 @@ pub use data::Data;
 use ruma::{events::presence::PresenceEvent, OwnedUserId, RoomId, UserId};
 use tokio::sync::mpsc;
 
-use crate::{Error, Result};
+use crate::{Error, Result, services};
 
 pub struct Service {
     pub db: &'static dyn Data,
@@ -36,6 +36,10 @@ impl Service {
update_timestamp: bool, spawn_timer: bool, ) -> Result<()> { + if !services().globals.allow_presence() { + return Ok(()) + } + if spawn_timer { self.spawn_timer(user_id)?; } @@ -55,6 +59,10 @@ impl Service { presence: PresenceEvent, spawn_timer: bool, ) -> Result<()> { + if !services().globals.allow_presence() { + return Ok(()) + } + if spawn_timer { self.spawn_timer(user_id)?; } @@ -64,6 +72,10 @@ impl Service { /// Returns the timestamp of when the presence was last updated for the specified user. pub fn last_presence_update(&self, user_id: &UserId) -> Result> { + if !services().globals.allow_presence() { + return Ok(None) + } + self.db.last_presence_update(user_id) } @@ -73,6 +85,10 @@ impl Service { user_id: &UserId, room_id: &RoomId, ) -> Result> { + if !services().globals.allow_presence() { + return Ok(None) + } + let last_update = match self.db.last_presence_update(user_id)? { Some(last) => last.1, None => return Ok(None), @@ -88,6 +104,10 @@ impl Service { room_id: &RoomId, since: u64, ) -> Result>> { + if !services().globals.allow_presence() { + return Ok(Box::new(std::iter::empty())) + } + self.db.presence_since(room_id, since) } @@ -96,15 +116,27 @@ impl Service { &self, timer_receiver: mpsc::UnboundedReceiver, ) -> Result<()> { + if !services().globals.allow_presence() { + return Ok(()) + } + self.db.presence_maintain(timer_receiver) } fn presence_cleanup(&self) -> Result<()> { + if !services().globals.allow_presence() { + return Ok(()) + } + self.db.presence_cleanup() } /// Spawns a timer for the user used by the maintenance task fn spawn_timer(&self, user_id: &UserId) -> Result<()> { + if !services().globals.allow_presence() { + return Ok(()) + } + self.timer_sender .send(user_id.into()) .map_err(|_| Error::bad_database("Sender errored out"))?; From 70652fe00cec5ff940b89b4474985094ccae470c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Wed, 7 Dec 2022 18:28:27 +0100 Subject: [PATCH 32/53] style(presence): reformat with cargo fmt --- src/service/rooms/edus/presence/mod.rs | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/src/service/rooms/edus/presence/mod.rs b/src/service/rooms/edus/presence/mod.rs index b78bf46e..a4099e80 100644 --- a/src/service/rooms/edus/presence/mod.rs +++ b/src/service/rooms/edus/presence/mod.rs @@ -4,7 +4,7 @@ pub use data::Data; use ruma::{events::presence::PresenceEvent, OwnedUserId, RoomId, UserId}; use tokio::sync::mpsc; -use crate::{Error, Result, services}; +use crate::{services, Error, Result}; pub struct Service { pub db: &'static dyn Data, @@ -37,7 +37,7 @@ impl Service { spawn_timer: bool, ) -> Result<()> { if !services().globals.allow_presence() { - return Ok(()) + return Ok(()); } if spawn_timer { @@ -60,7 +60,7 @@ impl Service { spawn_timer: bool, ) -> Result<()> { if !services().globals.allow_presence() { - return Ok(()) + return Ok(()); } if spawn_timer { @@ -73,7 +73,7 @@ impl Service { /// Returns the timestamp of when the presence was last updated for the specified user. pub fn last_presence_update(&self, user_id: &UserId) -> Result> { if !services().globals.allow_presence() { - return Ok(None) + return Ok(None); } self.db.last_presence_update(user_id) @@ -86,7 +86,7 @@ impl Service { room_id: &RoomId, ) -> Result> { if !services().globals.allow_presence() { - return Ok(None) + return Ok(None); } let last_update = match self.db.last_presence_update(user_id)? 
{ @@ -105,7 +105,7 @@ impl Service { since: u64, ) -> Result>> { if !services().globals.allow_presence() { - return Ok(Box::new(std::iter::empty())) + return Ok(Box::new(std::iter::empty())); } self.db.presence_since(room_id, since) @@ -117,7 +117,7 @@ impl Service { timer_receiver: mpsc::UnboundedReceiver, ) -> Result<()> { if !services().globals.allow_presence() { - return Ok(()) + return Ok(()); } self.db.presence_maintain(timer_receiver) @@ -125,7 +125,7 @@ impl Service { fn presence_cleanup(&self) -> Result<()> { if !services().globals.allow_presence() { - return Ok(()) + return Ok(()); } self.db.presence_cleanup() @@ -134,7 +134,7 @@ impl Service { /// Spawns a timer for the user used by the maintenance task fn spawn_timer(&self, user_id: &UserId) -> Result<()> { if !services().globals.allow_presence() { - return Ok(()) + return Ok(()); } self.timer_sender From b4356917ebe8eb4a6478a7aad7df29dc70a655e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Wed, 7 Dec 2022 18:36:47 +0100 Subject: [PATCH 33/53] style(presence): make type alias for presence_since iter --- src/database/key_value/rooms/edus/presence.rs | 9 +++------ src/service/rooms/edus/presence/data.rs | 8 +++----- src/service/rooms/edus/presence/mod.rs | 8 +++----- 3 files changed, 9 insertions(+), 16 deletions(-) diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs index 7a48559b..5c49b95a 100644 --- a/src/database/key_value/rooms/edus/presence.rs +++ b/src/database/key_value/rooms/edus/presence.rs @@ -13,7 +13,8 @@ use tokio::{sync::mpsc, time::sleep}; use crate::{ database::KeyValueDatabase, - service, services, utils, + service::{self, rooms::edus::presence::PresenceIter}, + services, utils, utils::{millis_since_unix_epoch, u64_from_bytes}, Error, Result, }; @@ -148,11 +149,7 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { .transpose() } - fn presence_since<'a>( - &'a self, - room_id: &RoomId, - since: u64, - ) -> Result + 'a>> { + fn presence_since<'a>(&'a self, room_id: &RoomId, since: u64) -> Result> { let user_timestamp: HashMap = self .userid_presenceupdate .iter() diff --git a/src/service/rooms/edus/presence/data.rs b/src/service/rooms/edus/presence/data.rs index 138258cd..2dd78b6f 100644 --- a/src/service/rooms/edus/presence/data.rs +++ b/src/service/rooms/edus/presence/data.rs @@ -2,6 +2,8 @@ use crate::Result; use ruma::{events::presence::PresenceEvent, OwnedUserId, RoomId, UserId}; use tokio::sync::mpsc; +use super::PresenceIter; + pub trait Data: Send + Sync { /// Adds a presence event which will be saved until a new event replaces it. /// @@ -34,11 +36,7 @@ pub trait Data: Send + Sync { ) -> Result>; /// Returns the most recent presence updates that happened after the event with id `since`. 
-    fn presence_since<'a>(
-        &'a self,
-        room_id: &RoomId,
-        since: u64,
-    ) -> Result + 'a>>;
+    fn presence_since<'a>(&'a self, room_id: &RoomId, since: u64) -> Result>;
 
     fn presence_maintain(&self, timer_receiver: mpsc::UnboundedReceiver) -> Result<()>;
 
diff --git a/src/service/rooms/edus/presence/mod.rs b/src/service/rooms/edus/presence/mod.rs
index a4099e80..38e0f022 100644
--- a/src/service/rooms/edus/presence/mod.rs
+++ b/src/service/rooms/edus/presence/mod.rs
@@ -6,6 +6,8 @@ use tokio::sync::mpsc;
 
 use crate::{services, Error, Result};
 
+pub(crate) type PresenceIter<'a> = Box + 'a>;
+
 pub struct Service {
     pub db: &'static dyn Data,
 
@@ -99,11 +101,7 @@ impl Service {
 
     /// Returns the most recent presence updates that happened after the event with id `since`.
     #[tracing::instrument(skip(self, since, room_id))]
-    pub fn presence_since(
-        &self,
-        room_id: &RoomId,
-        since: u64,
-    ) -> Result>> {
+    pub fn presence_since<'a>(&'a self, room_id: &RoomId, since: u64) -> Result> {
         if !services().globals.allow_presence() {
             return Ok(Box::new(std::iter::empty()));
         }

From 46676267df6fd0c6d6fa90b705f4a80714839d1c Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jakub=20Kub=C3=ADk?=
Date: Wed, 7 Dec 2022 18:39:32 +0100
Subject: [PATCH 34/53] config(example): add allow_presence

---
 conduit-example.toml | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/conduit-example.toml b/conduit-example.toml
index 0549030e..fee31020 100644
--- a/conduit-example.toml
+++ b/conduit-example.toml
@@ -38,8 +38,12 @@ max_request_size = 20_000_000 # in bytes
 # Enables registration. If set to false, no users can register on this server.
 allow_registration = true
 
+# Enables federation. If set to false, this server will not federate with others (rooms from other servers will not be available).
 allow_federation = true
 
+# Enables presence. If set to false, the presence of users (whether they are online, idle or offline) will not be shown or processed.
+allow_presence = true
+
 # Enable the display name lightning bolt on registration.
enable_lightning_bolt = true From 8257d0447a936aa74d04f9f5d5800d609fb9c25d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jakub=20Kub=C3=ADk?= Date: Wed, 7 Dec 2022 18:43:27 +0100 Subject: [PATCH 35/53] fix(presence): check for allow_presence only after services are available --- src/database/key_value/rooms/edus/presence.rs | 8 ++++++++ src/service/rooms/edus/presence/mod.rs | 8 -------- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs index 5c49b95a..7732983a 100644 --- a/src/database/key_value/rooms/edus/presence.rs +++ b/src/database/key_value/rooms/edus/presence.rs @@ -210,6 +210,10 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { // Wait for services to be created sleep(Duration::from_secs(15)).await; + if !services().globals.allow_presence() { + return; + } + let idle_timeout = Duration::from_secs(services().globals.presence_idle_timeout()); let offline_timeout = Duration::from_secs(services().globals.presence_offline_timeout()); @@ -296,6 +300,10 @@ impl service::rooms::edus::presence::Data for KeyValueDatabase { // Wait for services to be created sleep(Duration::from_secs(15)).await; + if !services().globals.allow_presence() { + return; + } + let period = Duration::from_secs(services().globals.presence_cleanup_period()); let age_limit = Duration::from_secs(services().globals.presence_cleanup_limit()); diff --git a/src/service/rooms/edus/presence/mod.rs b/src/service/rooms/edus/presence/mod.rs index 38e0f022..0f3421c9 100644 --- a/src/service/rooms/edus/presence/mod.rs +++ b/src/service/rooms/edus/presence/mod.rs @@ -114,18 +114,10 @@ impl Service { &self, timer_receiver: mpsc::UnboundedReceiver, ) -> Result<()> { - if !services().globals.allow_presence() { - return Ok(()); - } - self.db.presence_maintain(timer_receiver) } fn presence_cleanup(&self) -> Result<()> { - if !services().globals.allow_presence() { - return Ok(()); - } - self.db.presence_cleanup() } From f13673e2b04efd07d2718db2c336cc2e4c552f31 Mon Sep 17 00:00:00 2001 From: "Andriy Kushnir (Orhideous)" Date: Mon, 28 Nov 2022 01:42:24 +0200 Subject: [PATCH 36/53] refactor: Replace specialized interface for user's membership with more generic one in state accessor --- .../key_value/rooms/state_accessor.rs | 52 ++++++------------- src/service/rooms/state_accessor/data.rs | 9 ++-- src/service/rooms/state_accessor/mod.rs | 18 +++++-- 3 files changed, 33 insertions(+), 46 deletions(-) diff --git a/src/database/key_value/rooms/state_accessor.rs b/src/database/key_value/rooms/state_accessor.rs index ffdd8acc..ec6956ce 100644 --- a/src/database/key_value/rooms/state_accessor.rs +++ b/src/database/key_value/rooms/state_accessor.rs @@ -9,6 +9,7 @@ use ruma::{ events::{room::member::MembershipState, StateEventType}, EventId, RoomId, UserId, }; +use serde_json::Value; #[async_trait] impl service::rooms::state_accessor::Data for KeyValueDatabase { @@ -131,7 +132,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { shortstatehash: u64, event_type: &StateEventType, state_key: &str, - ) -> Result> { + ) -> Result> { let content = self .state_get(shortstatehash, event_type, state_key)? 
.map(|event| serde_json::from_str(event.content.get())) @@ -159,40 +160,21 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { }) } - /// The user was a joined member at this state (potentially in the past) - fn user_was_joined(&self, shortstatehash: u64, user_id: &UserId) -> Result { - Ok(self - .state_get_content( - shortstatehash, - &StateEventType::RoomMember, - user_id.as_str(), - )? - .map(|content| match content.get("membership") { - Some(membership) => MembershipState::from(membership.as_str().unwrap_or("")), - None => MembershipState::Leave, - } == MembershipState::Join) - .unwrap_or(false)) - } - - /// The user was an invited or joined room member at this state (potentially - /// in the past) - fn user_was_invited(&self, shortstatehash: u64, user_id: &UserId) -> Result { - Ok(self - .state_get_content( - shortstatehash, - &StateEventType::RoomMember, - user_id.as_str(), - )? - .map(|content| { - let membership = match content.get("membership") { - Some(membership) => MembershipState::from(membership.as_str().unwrap_or("")), - None => MembershipState::Leave, - }; - let joined = membership == MembershipState::Join; - let invited = membership == MembershipState::Invite; - invited || joined - }) - .unwrap_or(false)) + /// Get membership for given user in state + fn user_membership(&self, shortstatehash: u64, user_id: &UserId) -> Result { + self.state_get_content( + shortstatehash, + &StateEventType::RoomMember, + user_id.as_str(), + )? + .map(|content| match content.get("membership") { + Some(Value::String(membership)) => Ok(MembershipState::from(membership.as_str())), + None => Ok(MembershipState::Leave), + _ => Err(Error::bad_database( + "Malformed membership, expected Value::String", + )), + }) + .unwrap_or(Ok(MembershipState::Leave)) } /// Returns the full room state. diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs index 4cff0736..3a5cb49e 100644 --- a/src/service/rooms/state_accessor/data.rs +++ b/src/service/rooms/state_accessor/data.rs @@ -4,6 +4,7 @@ use std::{ }; use async_trait::async_trait; +use ruma::events::room::member::MembershipState; use ruma::{events::StateEventType, EventId, RoomId, UserId}; use crate::{PduEvent, Result}; @@ -45,12 +46,8 @@ pub trait Data: Send + Sync { /// Returns the state hash for this pdu. fn pdu_shortstatehash(&self, event_id: &EventId) -> Result>; - /// The user was a joined member at this state (potentially in the past) - fn user_was_joined(&self, shortstatehash: u64, user_id: &UserId) -> Result; - - /// The user was an invited or joined room member at this state (potentially - /// in the past) - fn user_was_invited(&self, shortstatehash: u64, user_id: &UserId) -> Result; + /// Get membership for given user in state + fn user_membership(&self, shortstatehash: u64, user_id: &UserId) -> Result; /// Returns the full room state. 
async fn room_state_full( diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index b53488d0..fd464d89 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -7,7 +7,9 @@ use std::{ pub use data::Data; use lru_cache::LruCache; use ruma::{ - events::{room::history_visibility::HistoryVisibility, StateEventType}, + events::{ + room::history_visibility::HistoryVisibility, room::member::MembershipState, StateEventType, + }, EventId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, }; @@ -151,14 +153,20 @@ impl Service { } /// The user was a joined member at this state (potentially in the past) - pub fn user_was_joined(&self, shortstatehash: u64, user_id: &UserId) -> Result { - self.db.user_was_joined(shortstatehash, user_id) + pub fn user_was_joined(&self, shortstatehash: u64, user_id: &UserId) -> bool { + self.db + .user_membership(shortstatehash, user_id) + .map(|s| s == MembershipState::Join) + .unwrap_or_default() // Return sensible default, i.e. false } /// The user was an invited or joined room member at this state (potentially /// in the past) - pub fn user_was_invited(&self, shortstatehash: u64, user_id: &UserId) -> Result { - self.db.user_was_invited(shortstatehash, user_id) + pub fn user_was_invited(&self, shortstatehash: u64, user_id: &UserId) -> bool { + self.db + .user_membership(shortstatehash, user_id) + .map(|s| s == MembershipState::Join || s == MembershipState::Invite) + .unwrap_or_default() // Return sensible default, i.e. false } /// Returns the full room state. From 5c0b0cdc6413764e53fc015a4bf89edc92f13ff2 Mon Sep 17 00:00:00 2001 From: "Andriy Kushnir (Orhideous)" Date: Mon, 28 Nov 2022 01:43:17 +0200 Subject: [PATCH 37/53] refactor: Replace re-serialization with plain coercion --- src/api/server_server.rs | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 8dee3974..cce4b931 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -1108,14 +1108,14 @@ fn get_missing_events( // event's prev_events. stop_at_events.insert(queued_events[i].clone()); + let prev_events = pdu + .get("prev_events") + .ok_or_else(|| Error::bad_database("Event in db has no prev_events field."))?; + queued_events.extend_from_slice( - &serde_json::from_value::>( - serde_json::to_value(pdu.get("prev_events").cloned().ok_or_else(|| { - Error::bad_database("Event in db has no prev_events field.") - })?) 
- .expect("canonical json is valid json value"), - ) - .map_err(|_| Error::bad_database("Invalid prev_events content in pdu in db."))?, + &serde_json::from_value::>(prev_events.clone().into()).map_err( + |_| Error::bad_database("Invalid prev_events content in pdu in db."), + )?, ); events.push(PduEvent::convert_to_outgoing_federation_event(pdu)); } From 297c71680731695c18055b1a2285ec5e844646f1 Mon Sep 17 00:00:00 2001 From: "Andriy Kushnir (Orhideous)" Date: Mon, 28 Nov 2022 01:44:05 +0200 Subject: [PATCH 38/53] refactor: Replace imperative style with short-circuit .any() --- src/service/rooms/state_accessor/mod.rs | 26 +++++++------------------ 1 file changed, 7 insertions(+), 19 deletions(-) diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index fd464d89..5a46d7e1 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -119,28 +119,16 @@ impl Service { true } Some(HistoryVisibility::Invited) => { - let mut visible = false; - // Allow if any member on requesting server was invited or joined, else deny - for member in current_server_members { - if self.user_was_invited(shortstatehash, &member)? - || self.user_was_joined(shortstatehash, &member)? - { - visible = true; - break; - } - } - visible + // Allow if any member on requesting server was AT LEAST invited, else deny + current_server_members + .into_iter() + .any(|member| self.user_was_invited(shortstatehash, &member)) } _ => { // Allow if any member on requested server was joined, else deny - let mut visible = false; - for member in current_server_members { - if self.user_was_joined(shortstatehash, &member)? { - visible = true; - break; - } - } - visible + current_server_members + .into_iter() + .any(|member| self.user_was_joined(shortstatehash, &member)) } }; From 65d7df290ff60121e99d1e0f9a124af28b6564d0 Mon Sep 17 00:00:00 2001 From: "Andriy Kushnir (Orhideous)" Date: Mon, 28 Nov 2022 01:58:25 +0200 Subject: [PATCH 39/53] refactor: Do not extract members for room each time --- src/api/server_server.rs | 24 +++++++++++++++++++++--- src/service/rooms/state_accessor/mod.rs | 17 +++-------------- 2 files changed, 24 insertions(+), 17 deletions(-) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index cce4b931..08fecba3 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -1084,8 +1084,9 @@ fn get_missing_events( if event_room_id != room_id { warn!( - "Evil event detected: Event {} found while searching in room {}", - queued_events[i], room_id + ?room_id, + evil_event = ?queued_events[i], + "Evil event detected while searching in room" ); return Err(Error::BadRequest( ErrorKind::InvalidParam, @@ -1093,9 +1094,26 @@ fn get_missing_events( )); } + let (room_members, room_errors): (Vec<_>, Vec<_>) = services() + .rooms + .state_cache + .room_members(room_id) + .partition(Result::is_ok); + + // Just log errors and continue with correct users + if !room_errors.is_empty() { + warn!(?room_id, "Some errors occurred when fetching room members"); + } + + let current_server_members: Vec = room_members + .into_iter() + .map(Result::unwrap) + .filter(|member| member.server_name() == sender_servername) + .collect(); + let event_is_visible = services().rooms.state_accessor.server_can_see_event( sender_servername, - room_id, + current_server_members.as_slice(), &queued_events[i], )?; diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 5a46d7e1..965b7e8d 100644 --- 
a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -12,8 +12,9 @@ use ruma::{ }, EventId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, }; +use tracing::warn; -use crate::{services, PduEvent, Result}; +use crate::{PduEvent, Result}; pub struct Service { pub db: &'static dyn Data, @@ -77,7 +78,7 @@ impl Service { pub fn server_can_see_event( &self, server_name: &ServerName, - room_id: &RoomId, + current_server_members: &[OwnedUserId], event_id: &EventId, ) -> Result { let shortstatehash = match self.pdu_shortstatehash(event_id) { @@ -94,18 +95,6 @@ impl Service { return Ok(*visibility); } - let current_server_members: Vec = services() - .rooms - .state_cache - .room_members(room_id) - .filter(|member| { - member - .as_ref() - .map(|member| member.server_name() == server_name) - .unwrap_or(true) - }) - .collect::>()?; - let history_visibility = self .state_get_content(shortstatehash, &StateEventType::RoomHistoryVisibility, "")? .map(|content| match content.get("history_visibility") { From b0238669032d817a9262ccdeeedf11537eabe71e Mon Sep 17 00:00:00 2001 From: "Andriy Kushnir (Orhideous)" Date: Mon, 28 Nov 2022 13:22:43 +0200 Subject: [PATCH 40/53] refactor: Use same order as in trait --- src/service/pdu.rs | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/service/pdu.rs b/src/service/pdu.rs index 593a687b..4b3c7428 100644 --- a/src/service/pdu.rs +++ b/src/service/pdu.rs @@ -281,6 +281,10 @@ impl state_res::Event for PduEvent { &self.sender } + fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { + MilliSecondsSinceUnixEpoch(self.origin_server_ts) + } + fn event_type(&self) -> &RoomEventType { &self.kind } @@ -289,10 +293,6 @@ impl state_res::Event for PduEvent { &self.content } - fn origin_server_ts(&self) -> MilliSecondsSinceUnixEpoch { - MilliSecondsSinceUnixEpoch(self.origin_server_ts) - } - fn state_key(&self) -> Option<&str> { self.state_key.as_deref() } From 179aafc9748a70562437f28979bb5d73ceb8ca26 Mon Sep 17 00:00:00 2001 From: "Andriy Kushnir (Orhideous)" Date: Mon, 28 Nov 2022 13:43:33 +0200 Subject: [PATCH 41/53] refactor: Take away some methods from public --- src/service/rooms/state_accessor/mod.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs index 965b7e8d..195923e3 100644 --- a/src/service/rooms/state_accessor/mod.rs +++ b/src/service/rooms/state_accessor/mod.rs @@ -130,7 +130,7 @@ impl Service { } /// The user was a joined member at this state (potentially in the past) - pub fn user_was_joined(&self, shortstatehash: u64, user_id: &UserId) -> bool { + fn user_was_joined(&self, shortstatehash: u64, user_id: &UserId) -> bool { self.db .user_membership(shortstatehash, user_id) .map(|s| s == MembershipState::Join) @@ -139,7 +139,7 @@ impl Service { /// The user was an invited or joined room member at this state (potentially /// in the past) - pub fn user_was_invited(&self, shortstatehash: u64, user_id: &UserId) -> bool { + fn user_was_invited(&self, shortstatehash: u64, user_id: &UserId) -> bool { self.db .user_membership(shortstatehash, user_id) .map(|s| s == MembershipState::Join || s == MembershipState::Invite) From 7f8f6b52d98cb834973b57174bcf2111b7f2cb5d Mon Sep 17 00:00:00 2001 From: "Andriy Kushnir (Orhideous)" Date: Thu, 8 Dec 2022 23:41:10 +0200 Subject: [PATCH 42/53] refactor: Pull up invariants from the loop --- src/api/server_server.rs | 34 +++++++++++++++++----------------- 1 
file changed, 17 insertions(+), 17 deletions(-) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 08fecba3..89d1b267 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -1058,6 +1058,23 @@ fn get_missing_events( latest_events: &[OwnedEventId], limit: UInt, ) -> Result>> { + let (room_members, room_errors): (Vec<_>, Vec<_>) = services() + .rooms + .state_cache + .room_members(room_id) + .partition(Result::is_ok); + + // Just log errors and continue with correct users + if !room_errors.is_empty() { + warn!(?room_id, "Some errors occurred when fetching room members"); + } + + let current_server_members: Vec = room_members + .into_iter() + .map(Result::unwrap) + .filter(|member| member.server_name() == sender_servername) + .collect(); + let limit = u64::from(limit) as usize; let mut queued_events = latest_events.to_owned(); @@ -1094,23 +1111,6 @@ fn get_missing_events( )); } - let (room_members, room_errors): (Vec<_>, Vec<_>) = services() - .rooms - .state_cache - .room_members(room_id) - .partition(Result::is_ok); - - // Just log errors and continue with correct users - if !room_errors.is_empty() { - warn!(?room_id, "Some errors occurred when fetching room members"); - } - - let current_server_members: Vec = room_members - .into_iter() - .map(Result::unwrap) - .filter(|member| member.server_name() == sender_servername) - .collect(); - let event_is_visible = services().rooms.state_accessor.server_can_see_event( sender_servername, current_server_members.as_slice(), From 0d14451bfb2cfcfac856c8509f92ce1281752f97 Mon Sep 17 00:00:00 2001 From: "Andriy Kushnir (Orhideous)" Date: Wed, 14 Dec 2022 17:18:13 +0200 Subject: [PATCH 43/53] refactor: Rewrite backfill algorithm according to specification in more readable form --- src/api/server_server.rs | 202 +++++++++++++++++++++++++++------------ 1 file changed, 143 insertions(+), 59 deletions(-) diff --git a/src/api/server_server.rs b/src/api/server_server.rs index 89d1b267..8294b85b 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -43,12 +43,12 @@ use ruma::{ }, serde::{Base64, JsonObject, Raw}, to_device::DeviceIdOrAllDevices, - CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, OwnedRoomId, - OwnedServerName, OwnedServerSigningKeyId, OwnedUserId, RoomId, ServerName, UInt, + CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, + OwnedRoomId, OwnedServerName, OwnedServerSigningKeyId, OwnedUserId, RoomId, ServerName, UInt, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ - collections::{BTreeMap, HashSet}, + collections::{BTreeMap, HashSet, VecDeque}, fmt::Debug, mem, net::{IpAddr, SocketAddr}, @@ -1047,10 +1047,10 @@ pub async fn get_missing_events_route( Ok(get_missing_events::v1::Response { events }) } -// Recursively fetch events starting from `latest_events`, going backwards -// through each event's `prev_events` until reaching the `earliest_events`. -// -// Used by the federation /backfill and /get_missing_events routes. +/// Fetch events starting from `latest_events`, going backwards +/// through each event's `prev_events` until reaching the `earliest_events`. +/// +/// Used by the federation /backfill and /get_missing_events routes. 
fn get_missing_events(
     sender_servername: &ServerName,
     room_id: &RoomId,
     earliest_events: &[OwnedEventId],
     latest_events: &[OwnedEventId],
     limit: UInt,
 ) -> Result>> {
@@ -1075,72 +1075,156 @@ fn get_missing_events(
         .filter(|member| member.server_name() == sender_servername)
         .collect();
 
-    let limit = u64::from(limit) as usize;
+    let event_filter = |event_id: &EventId| {
+        services()
+            .rooms
+            .state_accessor
+            .server_can_see_event(
+                sender_servername,
+                current_server_members.as_slice(),
+                event_id,
+            )
+            .unwrap_or_default()
+    };
 
-    let mut queued_events = latest_events.to_owned();
-    let mut events = Vec::new();
+    let pdu_filter = |pdu: &CanonicalJsonObject| {
+        let event_room_id = pdu
+            .get("room_id")
+            .and_then(|val| val.as_str())
+            .and_then(|room_id_str| <&RoomId>::try_from(room_id_str).ok());
+
+        match event_room_id {
+            Some(event_room_id) => {
+                let valid_event = event_room_id != room_id;
+                if !valid_event {
+                    error!(?room_id, ?event_room_id, "An evil event detected");
+                }
+                valid_event
+            }
+            None => {
+                error!(?pdu, "Can't extract valid `room_id` from pdu");
+                false
+            }
+        }
+    };
 
-    let mut stop_at_events = HashSet::with_capacity(limit);
-    stop_at_events.extend(earliest_events.iter().cloned());
+    #[inline]
+    fn get_pdu(event: &EventId) -> Option {
+        services()
+            .rooms
+            .timeline
+            .get_pdu_json(event)
+            .unwrap_or_default()
+    }
 
-    let mut i = 0;
-    while i < queued_events.len() && events.len() < limit {
-        if stop_at_events.contains(&queued_events[i]) {
-            i += 1;
-            continue;
-        }
+    let events = linearize_previous_events(
+        latest_events.into_iter().cloned(),
+        earliest_events.into_iter().cloned(),
+        limit,
+        get_pdu,
+        event_filter,
+        pdu_filter,
+    );
 
-        if let Some(pdu) = services().rooms.timeline.get_pdu_json(&queued_events[i])? {
-            let room_id_str = pdu
-                .get("room_id")
-                .and_then(|val| val.as_str())
-                .ok_or_else(|| Error::bad_database("Invalid event in database"))?;
+    Ok(events)
+}
 
-            let event_room_id = <&RoomId>::try_from(room_id_str)
-                .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?;
+/// Unwinds previous events by doing a breadth-first walk from the given roots
+///
+/// # Arguments
+///
+/// * `roots`: Starting point to unwind event history
+/// * `excluded`: Skipped events
+/// * `limit`: How many events to extract
+/// * `pdu_extractor`: Closure to extract the PDU for a given event_id, for example, from the DB.
+/// * `event_filter`: Closure to filter events by their visibility. It may or may not hit the DB.
+/// * `pdu_filter`: Closure to do basic validation against malformed PDUs.
+///
+/// # Returns
+///
+/// The previous events for the given roots, without any `excluded` events, up to the provided `limit`.
+///
+/// # Note
+///
+/// In the Matrix specification («Server-Server API», paragraph 8) there is no mention of previous events for excluded events.
+/// Therefore, the algorithm below excludes **only** the events themselves, but still allows their history to be processed.
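+///
+/// # Example
+///
+/// An illustrative walk-through (not part of the original patch): for a chain
+/// `C -> B -> A`, where each event lists the next one in its `prev_events`,
+/// calling this with `roots = [C]`, `excluded = [B]`, a limit of 10 and
+/// filters that accept everything yields the PDUs for `C` and `A`; `B` itself
+/// is skipped, but its history is still walked.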
+fn linearize_previous_events(
+    roots: E,
+    excluded: E,
+    limit: L,
+    pdu_extractor: P,
+    event_filter: F,
+    pdu_filter: V,
+) -> Vec>
+where
+    E: IntoIterator,
+    F: Fn(&EventId) -> bool,
+    L: Into,
+    V: Fn(&CanonicalJsonObject) -> bool,
+    P: Fn(&EventId) -> Option,
+{
+    let limit = limit.into() as usize;
+    assert!(limit > 0, "Limit should be > 0");
 
-        if event_room_id != room_id {
-            warn!(
-                ?room_id,
-                evil_event = ?queued_events[i],
-                "Evil event detected while searching in room"
-            );
-            return Err(Error::BadRequest(
-                ErrorKind::InvalidParam,
-                "Evil event detected",
-            ));
+    #[inline]
+    fn get_previous_events(pdu: &CanonicalJsonObject) -> Option> {
+        match pdu.get("prev_events") {
+            None => {
+                error!(?pdu, "A stored event has no 'prev_events' field");
+                return None;
             }
+            Some(prev_events) => {
+                let val = prev_events.clone().into();
+                let events = serde_json::from_value::>(val);
+                if let Err(error) = events {
+                    error!(?prev_events, ?error, "Broken 'prev_events' field");
+                    return None;
+                }
+                Some(events.unwrap_or_default())
+            }
+        }
+    }
 
-        let event_is_visible = services().rooms.state_accessor.server_can_see_event(
-            sender_servername,
-            current_server_members.as_slice(),
-            &queued_events[i],
-        )?;
+    let mut visited: HashSet = Default::default();
+    let mut history: Vec> = Default::default();
+    let mut queue: VecDeque = Default::default();
+    let excluded: HashSet<_> = excluded.into_iter().collect();
 
-        if !event_is_visible {
-            i += 1;
-            continue;
-        }
+    // Add all roots into the processing queue
+    for root in roots {
+        queue.push_back(root);
+    }
 
-        // Don't send this event again if it comes through some other
-        // event's prev_events.
-        stop_at_events.insert(queued_events[i].clone());
+    while let Some(current_event) = queue.pop_front() {
+        // Return all collected events once the limit is reached
+        if history.len() >= limit {
+            return history;
+        }
 
-        let prev_events = pdu
-            .get("prev_events")
-            .ok_or_else(|| Error::bad_database("Event in db has no prev_events field."))?;
+        // Skip an entire branch containing incorrect events
+        if !event_filter(&current_event) {
+            continue;
+        }
 
-        queued_events.extend_from_slice(
-            &serde_json::from_value::>(prev_events.clone().into()).map_err(
-                |_| Error::bad_database("Invalid prev_events content in pdu in db."),
-            )?,
-        );
-        events.push(PduEvent::convert_to_outgoing_federation_event(pdu));
+        // Process the PDU for the current event if it exists and is valid
+        if let Some(pdu) = pdu_extractor(&current_event).filter(&pdu_filter) {
+            if !&excluded.contains(&current_event) {
+                history.push(PduEvent::convert_to_outgoing_federation_event(pdu.clone()));
+            }
+
+            // Fetch previous events, if they exist
+            if let Some(previous_events) = get_previous_events(&pdu) {
+                for previous_event in previous_events {
+                    if !visited.contains(&previous_event) {
+                        visited.insert(previous_event.clone());
+                        queue.push_back(previous_event);
+                    }
+                }
+            }
         }
-        i += 1;
     }
-
-    Ok(events)
+    // All done, return collected events
+    history
 }
 
 /// # `GET /_matrix/federation/v1/event_auth/{roomId}/{eventId}`

From 060d88ff24e59b191be129eb96f8ab774d82de49 Mon Sep 17 00:00:00 2001
From: "Andriy Kushnir (Orhideous)"
Date: Wed, 14 Dec 2022 17:19:01 +0200
Subject: [PATCH 44/53] refactor: Added tests to backfill

---
 src/api/server_server.rs | 229 +++++++++++++++++++++++++++++++++++++++
 1 file changed, 229 insertions(+)

diff --git a/src/api/server_server.rs b/src/api/server_server.rs
index 8294b85b..2da88e6c 100644
--- a/src/api/server_server.rs
+++ b/src/api/server_server.rs
@@ -1974,7 +1974,13 @@ pub async fn claim_keys_route(
 #[cfg(test)]
 mod
 mod tests {
+    use super::linearize_previous_events;
     use super::{add_port_to_hostname, get_ip_with_port, FedDest};
+    use ruma::{CanonicalJsonObject, CanonicalJsonValue, OwnedEventId};
+    use serde::{Deserialize, Serialize};
+    use serde_json::value::RawValue;
+    use serde_json::Value;
+    use std::collections::HashMap;
 
     #[test]
     fn ips_get_default_ports() {
@@ -2015,4 +2021,227 @@ mod tests {
             FedDest::Named(String::from("example.com"), String::from(":1337"))
         )
     }
+
+    type PduStorage = HashMap<OwnedEventId, CanonicalJsonObject>;
+
+    #[derive(Debug, Serialize, Deserialize)]
+    struct MockPDU {
+        content: i32,
+        prev_events: Vec<OwnedEventId>,
+    }
+
+    fn mock_event_id(id: &i32) -> OwnedEventId {
+        const DOMAIN: &str = "canterlot.eq";
+        <OwnedEventId>::try_from(format!("${id}:{DOMAIN}")).unwrap()
+    }
+
+    fn create_graph(data: Vec<(i32, Vec<i32>)>) -> PduStorage {
+        data.iter()
+            .map(|(head, tail)| {
+                let key = mock_event_id(head);
+                let pdu = MockPDU {
+                    content: *head,
+                    prev_events: tail.iter().map(mock_event_id).collect(),
+                };
+                let value = serde_json::to_value(pdu).unwrap();
+                let value: CanonicalJsonValue = value.try_into().unwrap();
+                (key, value.as_object().unwrap().to_owned())
+            })
+            .collect()
+    }
+
+    fn mock_full_graph() -> PduStorage {
+        /*
+                          (1)
+               ____________|____________
+              /       /         \       \
+            (2)     (3)        (10)    (11)
+            / \     / \          |       |
+          (4) (5) (6) (7)      (12)    (13)
+               |   |                     |
+              (8) (9)                  (14)
+                \ /
+               (15)
+                |
+               (16)
+        */
+        create_graph(vec![
+            (1, vec![2, 3, 10, 11]),
+            (2, vec![4, 5]),
+            (3, vec![6, 7]),
+            (4, vec![]),
+            (5, vec![8]),
+            (6, vec![9]),
+            (7, vec![]),
+            (8, vec![15]),
+            (9, vec![15]),
+            (10, vec![12]),
+            (11, vec![13]),
+            (12, vec![]),
+            (13, vec![14]),
+            (14, vec![]),
+            (15, vec![16]),
+            (16, vec![16]),
+        ])
+    }
+
+    fn extract_events_payload(events: Vec<Box<RawValue>>) -> Vec<i32> {
+        events
+            .iter()
+            .map(|e| serde_json::from_str(e.get()).unwrap())
+            .map(|p: MockPDU| p.content)
+            .collect()
+    }
+
+    #[test]
+    fn backfill_empty() {
+        let events = linearize_previous_events(
+            vec![],
+            vec![],
+            16u64,
+            |_| unreachable!(),
+            |_| true,
+            |_| true,
+        );
+        assert!(events.is_empty());
+    }
+
+    #[test]
+    fn backfill_limit() {
+        /*
+            (5) → (4) → (3) → (2) → (1) → ×
+        */
+        let events = create_graph(vec![
+            (1, vec![]),
+            (2, vec![1]),
+            (3, vec![2]),
+            (4, vec![3]),
+            (5, vec![4]),
+        ]);
+        let roots = vec![mock_event_id(&5)];
+        let result = linearize_previous_events(
+            roots,
+            vec![],
+            3u64,
+            |e| events.get(e).cloned(),
+            |_| true,
+            |_| true,
+        );
+
+        assert_eq!(extract_events_payload(result), vec![5, 4, 3])
+    }
+
+    #[test]
+    fn backfill_bfs() {
+        let events = mock_full_graph();
+        let roots = vec![mock_event_id(&1)];
+        let result = linearize_previous_events(
+            roots,
+            vec![],
+            100u64,
+            |e| events.get(e).cloned(),
+            |_| true,
+            |_| true,
+        );
+        assert_eq!(
+            extract_events_payload(result),
+            vec![1, 2, 3, 10, 11, 4, 5, 6, 7, 12, 13, 8, 9, 14, 15, 16]
+        )
+    }
+
+    #[test]
+    fn backfill_subgraph() {
+        let events = mock_full_graph();
+        let roots = vec![mock_event_id(&3)];
+        let result = linearize_previous_events(
+            roots,
+            vec![],
+            100u64,
+            |e| events.get(e).cloned(),
+            |_| true,
+            |_| true,
+        );
+        assert_eq!(extract_events_payload(result), vec![3, 6, 7, 9, 15, 16])
+    }
+
+    #[test]
+    fn backfill_two_roots() {
+        let events = mock_full_graph();
+        let roots = vec![mock_event_id(&3), mock_event_id(&11)];
+        let result = linearize_previous_events(
+            roots,
+            vec![],
+            100u64,
+            |e| events.get(e).cloned(),
+            |_| true,
+            |_| true,
+        );
+        assert_eq!(
+            extract_events_payload(result),
+            vec![3, 11, 6, 7, 13, 9, 14, 15, 16]
+        )
+    }
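+
+    // A sketch of one more property, assumed from the implementation above
+    // rather than asserted anywhere in the patch series: `excluded` only
+    // suppresses events from the output, it does not stop traversal, so the
+    // ancestors of an excluded root are still returned.
+    #[test]
+    fn backfill_exclude_root() {
+        /*
+            (2) → (1) → ×, with (2) excluded
+        */
+        let events = create_graph(vec![(1, vec![]), (2, vec![1])]);
+        let roots = vec![mock_event_id(&2)];
+        let result = linearize_previous_events(
+            roots,
+            vec![mock_event_id(&2)],
+            10u64,
+            |e| events.get(e).cloned(),
+            |_| true,
+            |_| true,
+        );
+        assert_eq!(extract_events_payload(result), vec![1])
+    }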
+
+    #[test]
+    fn backfill_exclude_events() {
+        let events = mock_full_graph();
+        let roots = vec![mock_event_id(&1)];
+        let excluded_events = vec![
+            mock_event_id(&14),
+            mock_event_id(&15),
+            mock_event_id(&16),
+            mock_event_id(&3),
+        ];
+        let result = linearize_previous_events(
+            roots,
+            excluded_events,
+            100u64,
+            |e| events.get(e).cloned(),
+            |_| true,
+            |_| true,
+        );
+        assert_eq!(
+            extract_events_payload(result),
+            vec![1, 2, 10, 11, 4, 5, 6, 7, 12, 13, 8, 9]
+        )
+    }
+
+    #[test]
+    fn backfill_exclude_branch_with_evil_event() {
+        let events = mock_full_graph();
+        let roots = vec![mock_event_id(&1)];
+        let result = linearize_previous_events(
+            roots,
+            vec![],
+            100u64,
+            |e| events.get(e).cloned(),
+            |_| true,
+            |e| {
+                let value: Value = CanonicalJsonValue::Object(e.clone()).into();
+                let pdu: MockPDU = serde_json::from_value(value).unwrap();
+                pdu.content != 3
+            },
+        );
+        assert_eq!(
+            extract_events_payload(result),
+            vec![1, 2, 10, 11, 4, 5, 12, 13, 8, 14, 15, 16]
+        )
+    }
+
+    #[test]
+    fn backfill_exclude_branch_with_inaccessible_event() {
+        let events = mock_full_graph();
+        let roots = vec![mock_event_id(&1)];
+        let result = linearize_previous_events(
+            roots,
+            vec![],
+            100u64,
+            |e| events.get(e).cloned(),
+            |e| e != mock_event_id(&3),
+            |_| true,
+        );
+        assert_eq!(
+            extract_events_payload(result),
+            vec![1, 2, 10, 11, 4, 5, 12, 13, 8, 14, 15, 16]
+        )
+    }
 }

From f2a9080ba787fac09f0ae40ad9296544d563aef1 Mon Sep 17 00:00:00 2001
From: "Andriy Kushnir (Orhideous)"
Date: Wed, 14 Dec 2022 18:02:35 +0200
Subject: [PATCH 45/53] refactor: Fixed logical condition for evil events

---
 src/api/server_server.rs | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/api/server_server.rs b/src/api/server_server.rs
index 2da88e6c..6604896d 100644
--- a/src/api/server_server.rs
+++ b/src/api/server_server.rs
@@ -1095,7 +1095,7 @@ fn get_missing_events(
 
     match event_room_id {
         Some(event_room_id) => {
-            let valid_event = event_room_id != room_id;
+            let valid_event = event_room_id == room_id;
             if !valid_event {
                 error!(?room_id, ?event_room_id, "An evil event detected");
             }

From d2cc9e105a8e2217b34d73d9a6bf2bc6788887ae Mon Sep 17 00:00:00 2001
From: Nyaaori <+@nyaaori.cat>
Date: Fri, 16 Dec 2022 08:37:28 +0100
Subject: [PATCH 46/53] chore: clippy

---
 src/api/client_server/membership.rs | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs
index f6e94e67..19ee3f20 100644
--- a/src/api/client_server/membership.rs
+++ b/src/api/client_server/membership.rs
@@ -584,7 +584,7 @@ async fn join_room_by_id_helper(
 
         if let Some(signed_raw) = &send_join_response.room_state.event {
             let (signed_event_id, signed_value) =
-                match gen_event_id_canonical_json(&signed_raw, &room_version_id) {
+                match gen_event_id_canonical_json(signed_raw, &room_version_id) {
                     Ok(t) => t,
                     Err(_) => {
                         // Event could not be converted to canonical json
@@ -784,7 +784,7 @@ async fn join_room_by_id_helper(
         // TODO: Conduit does not implement restricted join rules yet, we always ask over
        // federation
         let join_rules_event = services().rooms.state_accessor.room_state_get(
-            &room_id,
+            room_id,
             &StateEventType::RoomJoinRules,
             "",
         )?;
@@ -1114,7 +1114,7 @@ pub(crate) async fn invite_helper<'a>(
             create_invite::v2::Request {
                 room_id,
                 event_id: &pdu.event_id,
-                room_version: &room_version_id,
+                room_version: room_version_id,
                 event: &PduEvent::convert_to_outgoing_federation_event(pdu_json.clone()),
                 invite_room_state: &invite_room_state,
             },
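
The next patch adds the client-server hierarchy endpoint. Its pagination
token `from` is a plain '{suggested_only}|{max_depth}|{skip}' string; a
minimal, self-contained sketch of that round-trip (the helper name is
illustrative, not part of the patch):

fn parse_from_token(from: &str) -> Option<(bool, u64, u64)> {
    let mut p = from.split('|');
    Some((
        p.next()?.trim().parse().ok()?,
        p.next()?.trim().parse().ok()?,
        p.next()?.trim().parse().ok()?,
    ))
}

fn main() {
    // Matches the token produced by `next_batch: format!("{}|{}|{}", ...)` below.
    assert_eq!(parse_from_token("true|3|10"), Some((true, 3, 10)));
    assert_eq!(parse_from_token("nonsense"), None);
}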
From 03029711fe53d29c7c95f2f50e2c1face97256f5 Mon Sep 17 00:00:00 2001
From: chenyuqide
Date: Sun, 10 Apr 2022 18:57:15 +0800
Subject: [PATCH 47/53] Add client space api '/rooms/{roomId}/hierarchy'

---
 src/api/client_server/mod.rs   |   2 +
 src/api/client_server/space.rs | 242 +++++++++++++++++++++++++++++++++
 src/main.rs                    |   1 +
 3 files changed, 245 insertions(+)
 create mode 100644 src/api/client_server/space.rs

diff --git a/src/api/client_server/mod.rs b/src/api/client_server/mod.rs
index 6ed17e76..4cc000ed 100644
--- a/src/api/client_server/mod.rs
+++ b/src/api/client_server/mod.rs
@@ -20,6 +20,7 @@ mod report;
 mod room;
 mod search;
 mod session;
+mod space;
 mod state;
 mod sync;
 mod tag;
@@ -52,6 +53,7 @@ pub use report::*;
 pub use room::*;
 pub use search::*;
 pub use session::*;
+pub use space::*;
 pub use state::*;
 pub use sync::*;
 pub use tag::*;
diff --git a/src/api/client_server/space.rs b/src/api/client_server/space.rs
new file mode 100644
index 00000000..8cc9221f
--- /dev/null
+++ b/src/api/client_server/space.rs
@@ -0,0 +1,242 @@
+use std::{collections::HashSet, sync::Arc};
+
+use crate::{services, Error, PduEvent, Result, Ruma};
+use ruma::{
+    api::client::{
+        error::ErrorKind,
+        space::{get_hierarchy, SpaceHierarchyRoomsChunk, SpaceRoomJoinRule},
+    },
+    events::{
+        room::{
+            avatar::RoomAvatarEventContent,
+            canonical_alias::RoomCanonicalAliasEventContent,
+            create::RoomCreateEventContent,
+            guest_access::{GuestAccess, RoomGuestAccessEventContent},
+            history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent},
+            join_rules::{JoinRule, RoomJoinRulesEventContent},
+            name::RoomNameEventContent,
+            topic::RoomTopicEventContent,
+        },
+        space::child::{HierarchySpaceChildEvent, SpaceChildEventContent},
+        StateEventType,
+    },
+    serde::Raw,
+    MilliSecondsSinceUnixEpoch, OwnedRoomId, RoomId,
+};
+use serde_json;
+use tracing::warn;
+
+/// # `GET /_matrix/client/v1/rooms/{room_id}/hierarchy`
+///
+/// Paginates over the space tree in a depth-first manner to locate child rooms of a given space.
+///
+/// - TODO: Use federation for unknown room.
+///
+pub async fn get_hierarchy_route(
+    body: Ruma<get_hierarchy::v1::Request>,
+) -> Result<get_hierarchy::v1::Response> {
+    // from format is '{suggested_only}|{max_depth}|{skip}'
+    let (suggested_only, max_depth, start) = body
+        .from
+        .as_ref()
+        .map_or(
+            Some((
+                body.suggested_only,
+                body.max_depth
+                    .map_or(services().globals.hierarchy_max_depth(), |v| v.into())
+                    .min(services().globals.hierarchy_max_depth()),
+                0,
+            )),
+            |from| {
+                let mut p = from.split('|');
+                Some((
+                    p.next()?.trim().parse().ok()?,
+                    p.next()?
+                        .trim()
+                        .parse::<u64>()
+                        .ok()?
+                        .min(services().globals.hierarchy_max_depth()),
+                    p.next()?.trim().parse().ok()?,
+                ))
+            },
+        )
+        .ok_or(Error::BadRequest(ErrorKind::InvalidParam, "Invalid from"))?;
+
+    let limit = body.limit.map_or(20u64, |v| v.into()) as usize;
+    let mut skip = start;
+
+    // Set used to avoid visiting the same room again in the loop below.
+    let mut room_set = HashSet::new();
+    let mut rooms_chunk: Vec<SpaceHierarchyRoomsChunk> = vec![];
+    let mut stack = vec![(0, body.room_id.clone())];
+
+    while let (Some((depth, room_id)), true) = (stack.pop(), rooms_chunk.len() < limit) {
+        let (childern, pdus): (Vec<_>, Vec<_>) = services()
+            .rooms
+            .state_accessor
+            .room_state_full(&room_id)
+            .await?
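+            // Keep only m.space.child state events, and only for rooms that
+            // have not already been added to this response.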
+            .into_iter()
+            .filter_map(|((e_type, key), pdu)| {
+                (e_type == StateEventType::SpaceChild && !room_set.contains(&room_id))
+                    .then_some((key, pdu))
+            })
+            .unzip();
+
+        if skip == 0 {
+            if rooms_chunk.len() < limit {
+                room_set.insert(room_id.clone());
+                rooms_chunk.push(get_room_chunk(room_id, suggested_only, pdus).await?);
+            }
+        } else {
+            skip -= 1;
+        }
+
+        if depth < max_depth {
+            childern.into_iter().rev().for_each(|key| {
+                stack.push((depth + 1, RoomId::parse(key).unwrap()));
+            });
+        }
+    }
+
+    Ok(get_hierarchy::v1::Response {
+        next_batch: (!stack.is_empty()).then_some(format!(
+            "{}|{}|{}",
+            suggested_only,
+            max_depth,
+            start + limit
+        )),
+        rooms: rooms_chunk,
+    })
+}
+
+async fn get_room_chunk(
+    room_id: OwnedRoomId,
+    suggested_only: bool,
+    phus: Vec<Arc<PduEvent>>,
+) -> Result<SpaceHierarchyRoomsChunk> {
+    Ok(SpaceHierarchyRoomsChunk {
+        canonical_alias: services()
+            .rooms
+            .state_accessor
+            .room_state_get(&room_id, &StateEventType::RoomCanonicalAlias, "")
+            .ok()
+            .and_then(|s| {
+                serde_json::from_str(s?.content.get())
+                    .map(|c: RoomCanonicalAliasEventContent| c.alias)
+                    .ok()?
+            }),
+        name: services()
+            .rooms
+            .state_accessor
+            .room_state_get(&room_id, &StateEventType::RoomName, "")
+            .ok()
+            .flatten()
+            .and_then(|s| {
+                serde_json::from_str(s.content.get())
+                    .map(|c: RoomNameEventContent| c.name)
+                    .ok()?
+            }),
+        num_joined_members: services()
+            .rooms
+            .state_cache
+            .room_joined_count(&room_id)?
+            .unwrap_or_else(|| {
+                warn!("Room {} has no member count", &room_id);
+                0
+            })
+            .try_into()
+            .expect("user count should not be that big"),
+        topic: services()
+            .rooms
+            .state_accessor
+            .room_state_get(&room_id, &StateEventType::RoomTopic, "")
+            .ok()
+            .and_then(|s| {
+                serde_json::from_str(s?.content.get())
+                    .ok()
+                    .map(|c: RoomTopicEventContent| c.topic)
+            }),
+        world_readable: services()
+            .rooms
+            .state_accessor
+            .room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")?
+            .map_or(Ok(false), |s| {
+                serde_json::from_str(s.content.get())
+                    .map(|c: RoomHistoryVisibilityEventContent| {
+                        c.history_visibility == HistoryVisibility::WorldReadable
+                    })
+                    .map_err(|_| {
+                        Error::bad_database("Invalid room history visibility event in database.")
+                    })
+            })?,
+        guest_can_join: services()
+            .rooms
+            .state_accessor
+            .room_state_get(&room_id, &StateEventType::RoomGuestAccess, "")?
+            .map_or(Ok(false), |s| {
+                serde_json::from_str(s.content.get())
+                    .map(|c: RoomGuestAccessEventContent| c.guest_access == GuestAccess::CanJoin)
+                    .map_err(|_| {
+                        Error::bad_database("Invalid room guest access event in database.")
+                    })
+            })?,
+        avatar_url: services()
+            .rooms
+            .state_accessor
+            .room_state_get(&room_id, &StateEventType::RoomAvatar, "")
+            .ok()
+            .and_then(|s| {
+                serde_json::from_str(s?.content.get())
+                    .map(|c: RoomAvatarEventContent| c.url)
+                    .ok()?
+            }),
+        join_rule: services()
+            .rooms
+            .state_accessor
+            .room_state_get(&room_id, &StateEventType::RoomJoinRules, "")?
+            .map(|s| {
+                serde_json::from_str(s.content.get())
+                    .map(|c: RoomJoinRulesEventContent| match c.join_rule {
+                        JoinRule::Invite => SpaceRoomJoinRule::Invite,
+                        JoinRule::Knock => SpaceRoomJoinRule::Knock,
+                        JoinRule::Private => SpaceRoomJoinRule::Private,
+                        JoinRule::Public => SpaceRoomJoinRule::Public,
+                        JoinRule::Restricted(_) => SpaceRoomJoinRule::Restricted,
+                        // Can't convert two type.
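+                        // (Custom join rules have no SpaceRoomJoinRule
+                        // counterpart, hence the conservative fallback below.)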
+                        JoinRule::_Custom(_) => SpaceRoomJoinRule::Private,
+                    })
+                    .map_err(|_| Error::bad_database("Invalid room join rules event in database."))
+            })
+            .ok_or_else(|| Error::bad_database("Invalid room join rules event in database."))??,
+        room_type: services()
+            .rooms
+            .state_accessor
+            .room_state_get(&room_id, &StateEventType::RoomCreate, "")
+            .map(|s| {
+                serde_json::from_str(s?.content.get())
+                    .map(|c: RoomCreateEventContent| c.room_type)
+                    .ok()?
+            })
+            .ok()
+            .flatten(),
+        children_state: phus
+            .into_iter()
+            .flat_map(|pdu| {
+                Some(HierarchySpaceChildEvent {
+                    // Ignore unsuggested rooms if suggested_only is set
+                    content: serde_json::from_str(pdu.content.get()).ok().filter(
+                        |pdu: &SpaceChildEventContent| {
+                            !suggested_only || pdu.suggested.unwrap_or(false)
+                        },
+                    )?,
+                    sender: pdu.sender.clone(),
+                    state_key: pdu.state_key.clone()?,
+                    origin_server_ts: MilliSecondsSinceUnixEpoch(pdu.origin_server_ts),
+                })
+            })
+            .filter_map(|hsce| Raw::<HierarchySpaceChildEvent>::new(&hsce).ok())
+            .collect::<Vec<_>>(),
+        room_id,
+    })
+}
diff --git a/src/main.rs b/src/main.rs
index e754b84f..75550231 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -318,6 +318,7 @@ fn routes() -> Router {
         .ruma_route(client_server::send_state_event_for_key_route)
         .ruma_route(client_server::get_state_events_route)
         .ruma_route(client_server::get_state_events_for_key_route)
+        .ruma_route(client_server::get_hierarchy_route)
         // Ruma doesn't have support for multiple paths for a single endpoint yet, and these routes
         // share one Ruma request / response type pair with {get,send}_state_event_for_key_route
         .route(

From 3ff6d54be642fb9cdfebcc67a3ff78b0af57c5e2 Mon Sep 17 00:00:00 2001
From: Nyaaori <+@nyaaori.cat>
Date: Fri, 2 Sep 2022 16:28:24 +0200
Subject: [PATCH 48/53] Add hierarchy_max_depth config option

---
 src/config/mod.rs          | 6 ++++++
 src/service/globals/mod.rs | 4 ++++
 2 files changed, 10 insertions(+)

diff --git a/src/config/mod.rs b/src/config/mod.rs
index 6b862bb6..eb15db48 100644
--- a/src/config/mod.rs
+++ b/src/config/mod.rs
@@ -52,6 +52,8 @@ pub struct Config {
     pub allow_unstable_room_versions: bool,
     #[serde(default = "default_default_room_version")]
     pub default_room_version: RoomVersionId,
+    #[serde(default = "default_hierarchy_max_depth")]
+    pub hierarchy_max_depth: u64,
     #[serde(default = "false_fn")]
     pub allow_jaeger: bool,
     #[serde(default = "false_fn")]
@@ -261,3 +263,7 @@ fn default_turn_ttl() -> u64 {
 pub fn default_default_room_version() -> RoomVersionId {
     RoomVersionId::V9
 }
+
+fn default_hierarchy_max_depth() -> u64 {
+    6
+}
diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs
index affc0516..7aa729f9 100644
--- a/src/service/globals/mod.rs
+++ b/src/service/globals/mod.rs
@@ -250,6 +250,10 @@ impl Service {
         self.config.enable_lightning_bolt
     }
 
+    pub fn hierarchy_max_depth(&self) -> u64 {
+        self.config.hierarchy_max_depth
+    }
+
     pub fn trusted_servers(&self) -> &[OwnedServerName] {
         &self.config.trusted_servers
     }

From 833d8f1b70b75062f2e3610b9903d6044c474ce7 Mon Sep 17 00:00:00 2001
From: Nyaaori <+@nyaaori.cat>
Date: Wed, 17 Aug 2022 19:16:21 +0200
Subject: [PATCH 49/53] Clean up client-server room hierarchy route

---
 src/api/client_server/space.rs | 35 ++++++++++++++++++++++++++++++++--
 1 file changed, 33 insertions(+), 2 deletions(-)

diff --git a/src/api/client_server/space.rs b/src/api/client_server/space.rs
index 8cc9221f..0a7392e4 100644
--- a/src/api/client_server/space.rs
+++ b/src/api/client_server/space.rs
@@ -35,6 +35,37 @@ use tracing::warn;
 pub async fn get_hierarchy_route(
     body: Ruma<get_hierarchy::v1::Request>,
 ) -> Result<get_hierarchy::v1::Response> {
+
let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + // Check if room is world readable + let is_world_readable = services() + .rooms + .state_accessor + .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? + .map_or(Ok(false), |s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomHistoryVisibilityEventContent| { + c.history_visibility == HistoryVisibility::WorldReadable + }) + .map_err(|_| { + Error::bad_database("Invalid room history visibility event in database.") + }) + }) + .unwrap_or(false); + + // Reject if user not in room and not world readable + if !services() + .rooms + .state_cache + .is_joined(sender_user, &body.room_id)? + && !is_world_readable + { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You don't have permission to view this room.", + )); + } + // from format is '{suggested_only}|{max_depth}|{skip}' let (suggested_only, max_depth, start) = body .from @@ -200,11 +231,11 @@ async fn get_room_chunk( .map(|c: RoomJoinRulesEventContent| match c.join_rule { JoinRule::Invite => SpaceRoomJoinRule::Invite, JoinRule::Knock => SpaceRoomJoinRule::Knock, + JoinRule::KnockRestricted(_) => SpaceRoomJoinRule::KnockRestricted, JoinRule::Private => SpaceRoomJoinRule::Private, JoinRule::Public => SpaceRoomJoinRule::Public, JoinRule::Restricted(_) => SpaceRoomJoinRule::Restricted, - // Can't convert two type. - JoinRule::_Custom(_) => SpaceRoomJoinRule::Private, + JoinRule::_Custom(_) => SpaceRoomJoinRule::from(c.join_rule.as_str()), }) .map_err(|_| Error::bad_database("Invalid room join rules event in database.")) }) From 03c02133a2b2280714dcbe05ac4c23d11f3c9f3a Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Sat, 15 Oct 2022 14:29:20 +0200 Subject: [PATCH 50/53] Cleanly handle invalid rooms on hierarchy endpoint --- src/api/client_server/space.rs | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/src/api/client_server/space.rs b/src/api/client_server/space.rs index 0a7392e4..17bd5005 100644 --- a/src/api/client_server/space.rs +++ b/src/api/client_server/space.rs @@ -117,7 +117,9 @@ pub async fn get_hierarchy_route( if skip == 0 { if rooms_chunk.len() < limit { room_set.insert(room_id.clone()); - rooms_chunk.push(get_room_chunk(room_id, suggested_only, pdus).await?); + if let Ok(chunk) = get_room_chunk(room_id, suggested_only, pdus).await { + rooms_chunk.push(chunk) + }; } } else { skip -= 1; @@ -144,7 +146,7 @@ pub async fn get_hierarchy_route( async fn get_room_chunk( room_id: OwnedRoomId, suggested_only: bool, - phus: Vec>, + pdus: Vec>, ) -> Result { Ok(SpaceHierarchyRoomsChunk { canonical_alias: services() @@ -251,7 +253,7 @@ async fn get_room_chunk( }) .ok() .flatten(), - children_state: phus + children_state: pdus .into_iter() .flat_map(|pdu| { Some(HierarchySpaceChildEvent { From aa725b0ecb85324b87602b260ee7d754e5f64c2e Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Fri, 16 Dec 2022 08:50:17 +0100 Subject: [PATCH 51/53] chore: clippy --- src/api/client_server/membership.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index 19ee3f20..38e1240b 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -595,7 +595,7 @@ async fn join_room_by_id_helper( } }; - if &signed_event_id != event_id { + if signed_event_id != event_id { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Server sent event with wrong event id", @@ -916,7 
+916,7 @@ async fn join_room_by_id_helper( } }; - if &signed_event_id != event_id { + if signed_event_id != event_id { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Server sent event with wrong event id", From 3ac3bdbac026d641c707fb2cc112a809c394f949 Mon Sep 17 00:00:00 2001 From: Nyaaori <+@nyaaori.cat> Date: Wed, 21 Dec 2022 17:28:21 +0100 Subject: [PATCH 52/53] feat: Keep track of avatar urls, displaynames, and blurhashes of remote users for the room directory --- src/api/client_server/membership.rs | 245 +++++++------ src/api/client_server/message.rs | 30 +- src/api/client_server/profile.rs | 80 +++- src/api/client_server/redact.rs | 34 +- src/api/client_server/room.rs | 487 ++++++++++++++----------- src/api/client_server/state.rs | 28 +- src/api/client_server/sync.rs | 6 +- src/api/server_server.rs | 20 +- src/service/admin/mod.rs | 477 +++++++++++++----------- src/service/globals/mod.rs | 2 +- src/service/rooms/event_handler/mod.rs | 40 +- src/service/rooms/state/mod.rs | 37 +- src/service/rooms/state_cache/mod.rs | 36 +- src/service/rooms/timeline/mod.rs | 66 ++-- 14 files changed, 908 insertions(+), 680 deletions(-) diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index 87954ed3..6615547a 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -198,18 +198,22 @@ pub async fn kick_user_route( ); let state_lock = mutex_state.lock().await; - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(body.user_id.to_string()), - redacts: None, - }, - sender_user, - &body.room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomMember, + content: to_raw_value(&event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(body.user_id.to_string()), + redacts: None, + }, + sender_user, + &body.room_id, + &state_lock, + ) + .await?; drop(state_lock); @@ -264,18 +268,22 @@ pub async fn ban_user_route(body: Ruma) -> Result return Ok(join_room_by_id::v3::Response::new(room_id.to_owned())), Err(e) => e, }; @@ -1259,28 +1280,32 @@ pub(crate) async fn invite_helper<'a>( ); let state_lock = mutex_state.lock().await; - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Invite, - displayname: services().users.displayname(user_id)?, - avatar_url: services().users.avatar_url(user_id)?, - is_direct: Some(is_direct), - third_party_invite: None, - blurhash: services().users.blurhash(user_id)?, - reason: None, - join_authorized_via_users_server: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - sender_user, - room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Invite, + displayname: services().users.displayname(user_id)?, + avatar_url: services().users.avatar_url(user_id)?, + is_direct: Some(is_direct), + third_party_invite: None, + blurhash: services().users.blurhash(user_id)?, + reason: None, + join_authorized_via_users_server: None, + }) + .expect("event is valid, 
we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + sender_user, + room_id, + &state_lock, + ) + .await?; drop(state_lock); @@ -1334,14 +1359,18 @@ pub async fn leave_room(user_id: &UserId, room_id: &RoomId) -> Result<()> { )?; // We always drop the invite, we can't rely on other servers - services().rooms.state_cache.update_membership( - room_id, - user_id, - MembershipState::Leave, - user_id, - last_state, - true, - )?; + services() + .rooms + .state_cache + .update_membership( + room_id, + user_id, + RoomMemberEventContent::new(MembershipState::Leave), + user_id, + last_state, + true, + ) + .await?; } else { let mutex_state = Arc::clone( services() @@ -1365,14 +1394,18 @@ pub async fn leave_room(user_id: &UserId, room_id: &RoomId) -> Result<()> { None => { error!("Trying to leave a room you are not a member of."); - services().rooms.state_cache.update_membership( - room_id, - user_id, - MembershipState::Leave, - user_id, - None, - true, - )?; + services() + .rooms + .state_cache + .update_membership( + room_id, + user_id, + RoomMemberEventContent::new(MembershipState::Leave), + user_id, + None, + true, + ) + .await?; return Ok(()); } Some(e) => e, @@ -1383,18 +1416,22 @@ pub async fn leave_room(user_id: &UserId, room_id: &RoomId) -> Result<()> { event.membership = MembershipState::Leave; - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - user_id, - room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomMember, + content: to_raw_value(&event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + user_id, + room_id, + &state_lock, + ) + .await?; } Ok(()) diff --git a/src/api/client_server/message.rs b/src/api/client_server/message.rs index 6ad07517..a7c62a99 100644 --- a/src/api/client_server/message.rs +++ b/src/api/client_server/message.rs @@ -70,19 +70,23 @@ pub async fn send_message_event_route( let mut unsigned = BTreeMap::new(); unsigned.insert("transaction_id".to_owned(), body.txn_id.to_string().into()); - let event_id = services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: body.event_type.to_string().into(), - content: serde_json::from_str(body.body.body.json().get()) - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?, - unsigned: Some(unsigned), - state_key: None, - redacts: None, - }, - sender_user, - &body.room_id, - &state_lock, - )?; + let event_id = services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: body.event_type.to_string().into(), + content: serde_json::from_str(body.body.body.json().get()) + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?, + unsigned: Some(unsigned), + state_key: None, + redacts: None, + }, + sender_user, + &body.room_id, + &state_lock, + ) + .await?; services().transaction_ids.add_txnid( sender_user, diff --git a/src/api/client_server/profile.rs b/src/api/client_server/profile.rs index 6400e891..f1d3ac5b 100644 --- a/src/api/client_server/profile.rs +++ b/src/api/client_server/profile.rs @@ -83,12 +83,11 @@ pub async fn set_displayname_route( ); let state_lock = mutex_state.lock().await; - let _ = 
services().rooms.timeline.build_and_append_pdu( - pdu_builder, - sender_user, - &room_id, - &state_lock, - ); + let _ = services() + .rooms + .timeline + .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock) + .await; // Presence update services().rooms.edus.presence.update_presence( @@ -115,15 +114,17 @@ pub async fn set_displayname_route( Ok(set_display_name::v3::Response {}) } -/// # `GET /_matrix/client/r0/profile/{userId}/displayname` +/// # `GET /_matrix/client/v3/profile/{userId}/displayname` /// /// Returns the displayname of the user. /// -/// - If user is on another server: Fetches displayname over federation +/// - If user is on another server and we do not have a copy, fetch over federation pub async fn get_displayname_route( body: Ruma, ) -> Result { - if body.user_id.server_name() != services().globals.server_name() { + if (services().users.exists(&body.user_id)?) + && (body.user_id.server_name() != services().globals.server_name()) + { let response = services() .sending .send_federation_request( @@ -135,6 +136,18 @@ pub async fn get_displayname_route( ) .await?; + // Create and update our local copy of the user + let _ = services().users.create(&body.user_id, None); + let _ = services() + .users + .set_displayname(&body.user_id, response.displayname.clone()); + let _ = services() + .users + .set_avatar_url(&body.user_id, response.avatar_url); + let _ = services() + .users + .set_blurhash(&body.user_id, response.blurhash); + return Ok(get_display_name::v3::Response { displayname: response.displayname, }); @@ -218,12 +231,11 @@ pub async fn set_avatar_url_route( ); let state_lock = mutex_state.lock().await; - let _ = services().rooms.timeline.build_and_append_pdu( - pdu_builder, - sender_user, - &room_id, - &state_lock, - ); + let _ = services() + .rooms + .timeline + .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock) + .await; // Presence update services().rooms.edus.presence.update_presence( @@ -250,15 +262,17 @@ pub async fn set_avatar_url_route( Ok(set_avatar_url::v3::Response {}) } -/// # `GET /_matrix/client/r0/profile/{userId}/avatar_url` +/// # `GET /_matrix/client/v3/profile/{userId}/avatar_url` /// /// Returns the avatar_url and blurhash of the user. /// -/// - If user is on another server: Fetches avatar_url and blurhash over federation +/// - If user is on another server and we do not have a copy, fetch over federation pub async fn get_avatar_url_route( body: Ruma, ) -> Result { - if body.user_id.server_name() != services().globals.server_name() { + if (services().users.exists(&body.user_id)?) + && (body.user_id.server_name() != services().globals.server_name()) + { let response = services() .sending .send_federation_request( @@ -270,6 +284,18 @@ pub async fn get_avatar_url_route( ) .await?; + // Create and update our local copy of the user + let _ = services().users.create(&body.user_id, None); + let _ = services() + .users + .set_displayname(&body.user_id, response.displayname); + let _ = services() + .users + .set_avatar_url(&body.user_id, response.avatar_url.clone()); + let _ = services() + .users + .set_blurhash(&body.user_id, response.blurhash.clone()); + return Ok(get_avatar_url::v3::Response { avatar_url: response.avatar_url, blurhash: response.blurhash, @@ -286,11 +312,13 @@ pub async fn get_avatar_url_route( /// /// Returns the displayname, avatar_url and blurhash of the user. 
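+/// Once fetched over federation, the remote profile is cached locally, so
+/// later lookups can be served from the database.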
/// -/// - If user is on another server: Fetches profile over federation +/// - If user is on another server and we do not have a copy, fetch over federation pub async fn get_profile_route( body: Ruma, ) -> Result { - if body.user_id.server_name() != services().globals.server_name() { + if (services().users.exists(&body.user_id)?) + && (body.user_id.server_name() != services().globals.server_name()) + { let response = services() .sending .send_federation_request( @@ -302,6 +330,18 @@ pub async fn get_profile_route( ) .await?; + // Create and update our local copy of the user + let _ = services().users.create(&body.user_id, None); + let _ = services() + .users + .set_displayname(&body.user_id, response.displayname.clone()); + let _ = services() + .users + .set_avatar_url(&body.user_id, response.avatar_url.clone()); + let _ = services() + .users + .set_blurhash(&body.user_id, response.blurhash.clone()); + return Ok(get_profile::v3::Response { displayname: response.displayname, avatar_url: response.avatar_url, diff --git a/src/api/client_server/redact.rs b/src/api/client_server/redact.rs index a29a5610..88059d6b 100644 --- a/src/api/client_server/redact.rs +++ b/src/api/client_server/redact.rs @@ -30,21 +30,25 @@ pub async fn redact_event_route( ); let state_lock = mutex_state.lock().await; - let event_id = services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomRedaction, - content: to_raw_value(&RoomRedactionEventContent { - reason: body.reason.clone(), - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: None, - redacts: Some(body.event_id.into()), - }, - sender_user, - &body.room_id, - &state_lock, - )?; + let event_id = services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomRedaction, + content: to_raw_value(&RoomRedactionEventContent { + reason: body.reason.clone(), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: None, + redacts: Some(body.event_id.into()), + }, + sender_user, + &body.room_id, + &state_lock, + ) + .await?; drop(state_lock); diff --git a/src/api/client_server/room.rs b/src/api/client_server/room.rs index c77cfa9b..d2238b44 100644 --- a/src/api/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -173,42 +173,50 @@ pub async fn create_room_route( } // 1. The room create event - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomCreate, - content: to_raw_value(&content).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - sender_user, - &room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomCreate, + content: to_raw_value(&content).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + sender_user, + &room_id, + &state_lock, + ) + .await?; // 2. 
Let the room creator join - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Join, - displayname: services().users.displayname(sender_user)?, - avatar_url: services().users.avatar_url(sender_user)?, - is_direct: Some(body.is_direct), - third_party_invite: None, - blurhash: services().users.blurhash(sender_user)?, - reason: None, - join_authorized_via_users_server: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_user.to_string()), - redacts: None, - }, - sender_user, - &room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Join, + displayname: services().users.displayname(sender_user)?, + avatar_url: services().users.avatar_url(sender_user)?, + is_direct: Some(body.is_direct), + third_party_invite: None, + blurhash: services().users.blurhash(sender_user)?, + reason: None, + join_authorized_via_users_server: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_user.to_string()), + redacts: None, + }, + sender_user, + &room_id, + &state_lock, + ) + .await?; // 3. Power levels @@ -245,30 +253,14 @@ pub async fn create_room_route( } } - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomPowerLevels, - content: to_raw_value(&power_levels_content) - .expect("to_raw_value always works on serde_json::Value"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - sender_user, - &room_id, - &state_lock, - )?; - - // 4. Canonical room alias - if let Some(room_alias_id) = &alias { - services().rooms.timeline.build_and_append_pdu( + services() + .rooms + .timeline + .build_and_append_pdu( PduBuilder { - event_type: RoomEventType::RoomCanonicalAlias, - content: to_raw_value(&RoomCanonicalAliasEventContent { - alias: Some(room_alias_id.to_owned()), - alt_aliases: vec![], - }) - .expect("We checked that alias earlier, it must be fine"), + event_type: RoomEventType::RoomPowerLevels, + content: to_raw_value(&power_levels_content) + .expect("to_raw_value always works on serde_json::Value"), unsigned: None, state_key: Some("".to_owned()), redacts: None, @@ -276,64 +268,100 @@ pub async fn create_room_route( sender_user, &room_id, &state_lock, - )?; + ) + .await?; + + // 4. Canonical room alias + if let Some(room_alias_id) = &alias { + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomCanonicalAlias, + content: to_raw_value(&RoomCanonicalAliasEventContent { + alias: Some(room_alias_id.to_owned()), + alt_aliases: vec![], + }) + .expect("We checked that alias earlier, it must be fine"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + sender_user, + &room_id, + &state_lock, + ) + .await?; } // 5. 
Events set by preset // 5.1 Join Rules - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomJoinRules, - content: to_raw_value(&RoomJoinRulesEventContent::new(match preset { - RoomPreset::PublicChat => JoinRule::Public, - // according to spec "invite" is the default - _ => JoinRule::Invite, - })) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - sender_user, - &room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomJoinRules, + content: to_raw_value(&RoomJoinRulesEventContent::new(match preset { + RoomPreset::PublicChat => JoinRule::Public, + // according to spec "invite" is the default + _ => JoinRule::Invite, + })) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + sender_user, + &room_id, + &state_lock, + ) + .await?; // 5.2 History Visibility - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomHistoryVisibility, - content: to_raw_value(&RoomHistoryVisibilityEventContent::new( - HistoryVisibility::Shared, - )) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - sender_user, - &room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomHistoryVisibility, + content: to_raw_value(&RoomHistoryVisibilityEventContent::new( + HistoryVisibility::Shared, + )) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + sender_user, + &room_id, + &state_lock, + ) + .await?; // 5.3 Guest Access - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomGuestAccess, - content: to_raw_value(&RoomGuestAccessEventContent::new(match preset { - RoomPreset::PublicChat => GuestAccess::Forbidden, - _ => GuestAccess::CanJoin, - })) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - sender_user, - &room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomGuestAccess, + content: to_raw_value(&RoomGuestAccessEventContent::new(match preset { + RoomPreset::PublicChat => GuestAccess::Forbidden, + _ => GuestAccess::CanJoin, + })) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + sender_user, + &room_id, + &state_lock, + ) + .await?; // 6. Events listed in initial_state for event in &body.initial_state { @@ -352,47 +380,54 @@ pub async fn create_room_route( continue; } - services().rooms.timeline.build_and_append_pdu( - pdu_builder, - sender_user, - &room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu(pdu_builder, sender_user, &room_id, &state_lock) + .await?; } // 7. 
Events implied by name and topic if let Some(name) = &body.name { - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomName, - content: to_raw_value(&RoomNameEventContent::new(Some(name.clone()))) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - sender_user, - &room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomName, + content: to_raw_value(&RoomNameEventContent::new(Some(name.clone()))) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + sender_user, + &room_id, + &state_lock, + ) + .await?; } if let Some(topic) = &body.topic { - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomTopic, - content: to_raw_value(&RoomTopicEventContent { - topic: topic.clone(), - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - sender_user, - &room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomTopic, + content: to_raw_value(&RoomTopicEventContent { + topic: topic.clone(), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + sender_user, + &room_id, + &state_lock, + ) + .await?; } // 8. Events implied by invite (and TODO: invite_3pid) @@ -523,22 +558,26 @@ pub async fn upgrade_room_route( // Send a m.room.tombstone event to the old room to indicate that it is not intended to be used any further // Fail if the sender does not have the required permissions - let tombstone_event_id = services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomTombstone, - content: to_raw_value(&RoomTombstoneEventContent { - body: "This room has been replaced".to_owned(), - replacement_room: replacement_room.clone(), - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - sender_user, - &body.room_id, - &state_lock, - )?; + let tombstone_event_id = services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomTombstone, + content: to_raw_value(&RoomTombstoneEventContent { + body: "This room has been replaced".to_owned(), + replacement_room: replacement_room.clone(), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + sender_user, + &body.room_id, + &state_lock, + ) + .await?; // Change lock to replacement room drop(state_lock); @@ -605,43 +644,51 @@ pub async fn upgrade_room_route( )); } - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomCreate, - content: to_raw_value(&create_event_content) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - sender_user, - &replacement_room, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomCreate, + content: to_raw_value(&create_event_content) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + sender_user, + &replacement_room, + &state_lock, + ) 
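+        // Make sure the tombstone is fully appended before state moves on to
+        // the replacement room.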
+ .await?; // Join the new room - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Join, - displayname: services().users.displayname(sender_user)?, - avatar_url: services().users.avatar_url(sender_user)?, - is_direct: None, - third_party_invite: None, - blurhash: services().users.blurhash(sender_user)?, - reason: None, - join_authorized_via_users_server: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_user.to_string()), - redacts: None, - }, - sender_user, - &replacement_room, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Join, + displayname: services().users.displayname(sender_user)?, + avatar_url: services().users.avatar_url(sender_user)?, + is_direct: None, + third_party_invite: None, + blurhash: services().users.blurhash(sender_user)?, + reason: None, + join_authorized_via_users_server: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(sender_user.to_string()), + redacts: None, + }, + sender_user, + &replacement_room, + &state_lock, + ) + .await?; // Recommended transferable state events list from the specs let transferable_state_events = vec![ @@ -668,18 +715,22 @@ pub async fn upgrade_room_route( None => continue, // Skipping missing events. }; - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: event_type.to_string().into(), - content: event_content, - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - sender_user, - &replacement_room, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: event_type.to_string().into(), + content: event_content, + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + sender_user, + &replacement_room, + &state_lock, + ) + .await?; } // Moves any local aliases to the new room @@ -713,19 +764,23 @@ pub async fn upgrade_room_route( power_levels_event_content.invite = new_level; // Modify the power levels in the old room to prevent sending of events and inviting new users - let _ = services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomPowerLevels, - content: to_raw_value(&power_levels_event_content) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - sender_user, - &body.room_id, - &state_lock, - )?; + let _ = services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomPowerLevels, + content: to_raw_value(&power_levels_event_content) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + sender_user, + &body.room_id, + &state_lock, + ) + .await?; drop(state_lock); diff --git a/src/api/client_server/state.rs b/src/api/client_server/state.rs index d9c14648..12af5199 100644 --- a/src/api/client_server/state.rs +++ b/src/api/client_server/state.rs @@ -287,18 +287,22 @@ async fn send_state_event_for_key_helper( ); let state_lock = mutex_state.lock().await; - let event_id = services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: event_type.to_string().into(), - content: 
serde_json::from_str(json.json().get()).expect("content is valid json"), - unsigned: None, - state_key: Some(state_key), - redacts: None, - }, - sender_user, - room_id, - &state_lock, - )?; + let event_id = services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: event_type.to_string().into(), + content: serde_json::from_str(json.json().get()).expect("content is valid json"), + unsigned: None, + state_key: Some(state_key), + redacts: None, + }, + sender_user, + room_id, + &state_lock, + ) + .await?; Ok(event_id) } diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs index 568a23ce..07f8400c 100644 --- a/src/api/client_server/sync.rs +++ b/src/api/client_server/sync.rs @@ -231,7 +231,7 @@ async fn sync_helper( .entry(room_id.clone()) .or_default(), ); - let insert_lock = mutex_insert.lock().unwrap(); + let insert_lock = mutex_insert.lock().await; drop(insert_lock); } @@ -847,7 +847,7 @@ async fn sync_helper( .entry(room_id.clone()) .or_default(), ); - let insert_lock = mutex_insert.lock().unwrap(); + let insert_lock = mutex_insert.lock().await; drop(insert_lock); } @@ -979,7 +979,7 @@ async fn sync_helper( .entry(room_id.clone()) .or_default(), ); - let insert_lock = mutex_insert.lock().unwrap(); + let insert_lock = mutex_insert.lock().await; drop(insert_lock); } diff --git a/src/api/server_server.rs b/src/api/server_server.rs index fc3e2c0f..72f6cae7 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -1615,14 +1615,18 @@ pub async fn create_invite_route( .state_cache .server_in_room(services().globals.server_name(), &body.room_id)? { - services().rooms.state_cache.update_membership( - &body.room_id, - &invited_user, - MembershipState::Invite, - &sender, - Some(invite_state), - true, - )?; + services() + .rooms + .state_cache + .update_membership( + &body.room_id, + &invited_user, + RoomMemberEventContent::new(MembershipState::Invite), + &sender, + Some(invite_state), + true, + ) + .await?; } Ok(create_invite::v2::Response { diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index 77f351a9..2df3180f 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -26,7 +26,7 @@ use ruma::{ EventId, OwnedRoomAliasId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, }; use serde_json::value::to_raw_value; -use tokio::sync::{mpsc, Mutex, MutexGuard}; +use tokio::sync::{mpsc, Mutex}; use crate::{ api::client_server::{leave_all_rooms, AUTO_GEN_PASSWORD_LENGTH}, @@ -206,26 +206,6 @@ impl Service { .expect("Database data for admin room alias must be valid") .expect("Admin room must exist"); - let send_message = |message: RoomMessageEventContent, mutex_lock: &MutexGuard<'_, ()>| { - services() - .rooms - .timeline - .build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMessage, - content: to_raw_value(&message) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: None, - redacts: None, - }, - &conduit_user, - &conduit_room, - mutex_lock, - ) - .unwrap(); - }; - loop { tokio::select! 
{ Some(event) = receiver.recv() => { @@ -245,7 +225,20 @@ impl Service { let state_lock = mutex_state.lock().await; - send_message(message_content, &state_lock); + services().rooms.timeline.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomMessage, + content: to_raw_value(&message_content) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: None, + redacts: None, + }, + &conduit_user, + &conduit_room, + &state_lock) + .await + .unwrap(); drop(state_lock); } @@ -853,164 +846,202 @@ impl Service { content.room_version = services().globals.default_room_version(); // 1. The room create event - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomCreate, - content: to_raw_value(&content).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomCreate, + content: to_raw_value(&content).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + ) + .await?; // 2. Make conduit bot join - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Join, - displayname: None, - avatar_url: None, - is_direct: None, - third_party_invite: None, - blurhash: None, - reason: None, - join_authorized_via_users_server: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(conduit_user.to_string()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Join, + displayname: None, + avatar_url: None, + is_direct: None, + third_party_invite: None, + blurhash: None, + reason: None, + join_authorized_via_users_server: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(conduit_user.to_string()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + ) + .await?; // 3. 
Power levels let mut users = BTreeMap::new(); users.insert(conduit_user.clone(), 100.into()); - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomPowerLevels, - content: to_raw_value(&RoomPowerLevelsEventContent { - users, - ..Default::default() - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomPowerLevels, + content: to_raw_value(&RoomPowerLevelsEventContent { + users, + ..Default::default() + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + ) + .await?; // 4.1 Join Rules - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomJoinRules, - content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite)) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomJoinRules, + content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite)) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + ) + .await?; // 4.2 History Visibility - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomHistoryVisibility, - content: to_raw_value(&RoomHistoryVisibilityEventContent::new( - HistoryVisibility::Shared, - )) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomHistoryVisibility, + content: to_raw_value(&RoomHistoryVisibilityEventContent::new( + HistoryVisibility::Shared, + )) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + ) + .await?; // 4.3 Guest Access - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomGuestAccess, - content: to_raw_value(&RoomGuestAccessEventContent::new(GuestAccess::Forbidden)) + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomGuestAccess, + content: to_raw_value(&RoomGuestAccessEventContent::new( + GuestAccess::Forbidden, + )) .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + ) + .await?; // 5. 
Events implied by name and topic let room_name = format!("{} Admin Room", services().globals.server_name()); - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomName, - content: to_raw_value(&RoomNameEventContent::new(Some(room_name))) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomName, + content: to_raw_value(&RoomNameEventContent::new(Some(room_name))) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + ) + .await?; - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomTopic, - content: to_raw_value(&RoomTopicEventContent { - topic: format!("Manage {}", services().globals.server_name()), - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomTopic, + content: to_raw_value(&RoomTopicEventContent { + topic: format!("Manage {}", services().globals.server_name()), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + ) + .await?; // 6. Room alias let alias: OwnedRoomAliasId = format!("#admins:{}", services().globals.server_name()) .try_into() .expect("#admins:server_name is a valid alias name"); - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomCanonicalAlias, - content: to_raw_value(&RoomCanonicalAliasEventContent { - alias: Some(alias.clone()), - alt_aliases: Vec::new(), - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomCanonicalAlias, + content: to_raw_value(&RoomCanonicalAliasEventContent { + alias: Some(alias.clone()), + alt_aliases: Vec::new(), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + ) + .await?; services().rooms.alias.set_alias(&alias, &room_id)?; @@ -1052,72 +1083,84 @@ impl Service { .expect("@conduit:server_name is valid"); // Invite and join the real user - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Invite, - displayname: None, - avatar_url: None, - is_direct: None, - third_party_invite: None, - blurhash: None, - reason: None, - join_authorized_via_users_server: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - &conduit_user, - &room_id, - &state_lock, - )?; - services().rooms.timeline.build_and_append_pdu( - PduBuilder { - event_type: RoomEventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Join, - displayname: 
-                    avatar_url: None,
-                    is_direct: None,
-                    third_party_invite: None,
-                    blurhash: None,
-                    reason: None,
-                    join_authorized_via_users_server: None,
-                })
-                .expect("event is valid, we just created it"),
-                unsigned: None,
-                state_key: Some(user_id.to_string()),
-                redacts: None,
-            },
-            user_id,
-            &room_id,
-            &state_lock,
-        )?;
+        services()
+            .rooms
+            .timeline
+            .build_and_append_pdu(
+                PduBuilder {
+                    event_type: RoomEventType::RoomMember,
+                    content: to_raw_value(&RoomMemberEventContent {
+                        membership: MembershipState::Invite,
+                        displayname: None,
+                        avatar_url: None,
+                        is_direct: None,
+                        third_party_invite: None,
+                        blurhash: None,
+                        reason: None,
+                        join_authorized_via_users_server: None,
+                    })
+                    .expect("event is valid, we just created it"),
+                    unsigned: None,
+                    state_key: Some(user_id.to_string()),
+                    redacts: None,
+                },
+                &conduit_user,
+                &room_id,
+                &state_lock,
+            )
+            .await?;
+        services()
+            .rooms
+            .timeline
+            .build_and_append_pdu(
+                PduBuilder {
+                    event_type: RoomEventType::RoomMember,
+                    content: to_raw_value(&RoomMemberEventContent {
+                        membership: MembershipState::Join,
+                        displayname: Some(displayname),
+                        avatar_url: None,
+                        is_direct: None,
+                        third_party_invite: None,
+                        blurhash: None,
+                        reason: None,
+                        join_authorized_via_users_server: None,
+                    })
+                    .expect("event is valid, we just created it"),
+                    unsigned: None,
+                    state_key: Some(user_id.to_string()),
+                    redacts: None,
+                },
+                user_id,
+                &room_id,
+                &state_lock,
+            )
+            .await?;
 
         // Set power level
         let mut users = BTreeMap::new();
         users.insert(conduit_user.to_owned(), 100.into());
         users.insert(user_id.to_owned(), 100.into());
 
-        services().rooms.timeline.build_and_append_pdu(
-            PduBuilder {
-                event_type: RoomEventType::RoomPowerLevels,
-                content: to_raw_value(&RoomPowerLevelsEventContent {
-                    users,
-                    ..Default::default()
-                })
-                .expect("event is valid, we just created it"),
-                unsigned: None,
-                state_key: Some("".to_owned()),
-                redacts: None,
-            },
-            &conduit_user,
-            &room_id,
-            &state_lock,
-        )?;
+        services()
+            .rooms
+            .timeline
+            .build_and_append_pdu(
+                PduBuilder {
+                    event_type: RoomEventType::RoomPowerLevels,
+                    content: to_raw_value(&RoomPowerLevelsEventContent {
+                        users,
+                        ..Default::default()
+                    })
+                    .expect("event is valid, we just created it"),
+                    unsigned: None,
+                    state_key: Some("".to_owned()),
+                    redacts: None,
+                },
+                &conduit_user,
+                &room_id,
+                &state_lock,
+            )
+            .await?;
 
         // Send welcome message
         services().rooms.timeline.build_and_append_pdu(
@@ -1135,7 +1178,7 @@ impl Service {
             &conduit_user,
             &room_id,
             &state_lock,
-        )?;
+        ).await?;
 
         Ok(())
     }
diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs
index c0fcb4bd..fc5268da 100644
--- a/src/service/globals/mod.rs
+++ b/src/service/globals/mod.rs
@@ -52,7 +52,7 @@ pub struct Service {
     pub bad_signature_ratelimiter: Arc<RwLock<HashMap<Vec<String>, RateLimitState>>>,
     pub servername_ratelimiter: Arc<RwLock<HashMap<OwnedServerName, Arc<Semaphore>>>>,
     pub sync_receivers: RwLock<HashMap<(OwnedUserId, OwnedDeviceId), SyncHandle>>,
-    pub roomid_mutex_insert: RwLock<HashMap<OwnedRoomId, Arc<Mutex<()>>>>,
+    pub roomid_mutex_insert: RwLock<HashMap<OwnedRoomId, Arc<TokioMutex<()>>>>,
     pub roomid_mutex_state: RwLock<HashMap<OwnedRoomId, Arc<TokioMutex<()>>>>,
     pub roomid_mutex_federation: RwLock<HashMap<OwnedRoomId, Arc<TokioMutex<()>>>>, // this lock will be held longer
     pub roomid_federationhandletime: RwLock<HashMap<OwnedRoomId, (OwnedEventId, Instant)>>,
diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs
index 7531674b..7180008d 100644
--- a/src/service/rooms/event_handler/mod.rs
+++ b/src/service/rooms/event_handler/mod.rs
@@ -801,14 +801,18 @@ impl Service {
             .map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed."))?;
 
         if soft_fail {
-            services().rooms.timeline.append_incoming_pdu(
-                &incoming_pdu,
-                val,
-                extremities.iter().map(|e| (**e).to_owned()).collect(),
-                state_ids_compressed,
-                soft_fail,
-                &state_lock,
-            )?;
+            services()
+                .rooms
+                .timeline
+                .append_incoming_pdu(
+                    &incoming_pdu,
+                    val,
+                    extremities.iter().map(|e| (**e).to_owned()).collect(),
+                    state_ids_compressed,
+                    soft_fail,
+                    &state_lock,
+                )
+                .await?;
 
             // Soft fail, we keep the event as an outlier but don't add it to the timeline
             warn!("Event was soft failed: {:?}", incoming_pdu);
@@ -1004,14 +1008,18 @@ impl Service {
 
         // We use the `state_at_event` instead of `state_after` so we accurately
         // represent the state for this event.
-        let pdu_id = services().rooms.timeline.append_incoming_pdu(
-            &incoming_pdu,
-            val,
-            extremities.iter().map(|e| (**e).to_owned()).collect(),
-            state_ids_compressed,
-            soft_fail,
-            &state_lock,
-        )?;
+        let pdu_id = services()
+            .rooms
+            .timeline
+            .append_incoming_pdu(
+                &incoming_pdu,
+                val,
+                extremities.iter().map(|e| (**e).to_owned()).collect(),
+                state_ids_compressed,
+                soft_fail,
+                &state_lock,
+            )
+            .await?;
 
         info!("Appended incoming pdu");
 
diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs
index 3072b80f..b7a2cb79 100644
--- a/src/service/rooms/state/mod.rs
+++ b/src/service/rooms/state/mod.rs
@@ -7,14 +7,13 @@ use std::{
 pub use data::Data;
 use ruma::{
     events::{
-        room::{create::RoomCreateEventContent, member::MembershipState},
+        room::{create::RoomCreateEventContent, member::RoomMemberEventContent},
         AnyStrippedStateEvent, RoomEventType, StateEventType,
     },
     serde::Raw,
     state_res::{self, StateMap},
     EventId, OwnedEventId, RoomId, RoomVersionId, UserId,
 };
-use serde::Deserialize;
 use tokio::sync::MutexGuard;
 use tracing::warn;
 
@@ -60,15 +59,11 @@ impl Service {
                 Err(_) => continue,
             };
 
-            #[derive(Deserialize)]
-            struct ExtractMembership {
-                membership: MembershipState,
-            }
-
-            let membership = match serde_json::from_str::<ExtractMembership>(pdu.content.get()) {
-                Ok(e) => e.membership,
-                Err(_) => continue,
-            };
+            let membership_event =
+                match serde_json::from_str::<RoomMemberEventContent>(pdu.content.get()) {
+                    Ok(e) => e,
+                    Err(_) => continue,
+                };
 
             let state_key = match pdu.state_key {
                 Some(k) => k,
@@ -80,14 +75,18 @@ impl Service {
                 Err(_) => continue,
             };
 
-            services().rooms.state_cache.update_membership(
-                room_id,
-                &user_id,
-                membership,
-                &pdu.sender,
-                None,
-                false,
-            )?;
+            services()
+                .rooms
+                .state_cache
+                .update_membership(
+                    room_id,
+                    &user_id,
+                    membership_event,
+                    &pdu.sender,
+                    None,
+                    false,
+                )
+                .await?;
         }
 
         services().rooms.state_cache.update_joined_count(room_id)?;
diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs
index 32afdd4e..dfbe5db7 100644
--- a/src/service/rooms/state_cache/mod.rs
+++ b/src/service/rooms/state_cache/mod.rs
@@ -4,10 +4,14 @@ use std::{collections::HashSet, sync::Arc};
 pub use data::Data;
 use ruma::{
+    api::federation::{self, query::get_profile_information::v1::ProfileField},
     events::{
         direct::DirectEvent,
         ignored_user_list::IgnoredUserListEvent,
-        room::{create::RoomCreateEventContent, member::MembershipState},
+        room::{
+            create::RoomCreateEventContent,
+            member::{MembershipState, RoomMemberEventContent},
+        },
         AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType,
         RoomAccountDataEventType, StateEventType,
     },
@@ -24,19 +28,43 @@ pub struct Service {
 
 impl Service {
     /// Update current membership data.
     #[tracing::instrument(skip(self, last_state))]
-    pub fn update_membership(
+    pub async fn update_membership(
         &self,
         room_id: &RoomId,
         user_id: &UserId,
-        membership: MembershipState,
+        membership_event: RoomMemberEventContent,
         sender: &UserId,
         last_state: Option<Vec<Raw<AnyStrippedStateEvent>>>,
         update_joined_count: bool,
     ) -> Result<()> {
+        let membership = membership_event.membership;
         // Keep track what remote users exist by adding them as "deactivated" users
         if user_id.server_name() != services().globals.server_name() {
             services().users.create(user_id, None)?;
-            // TODO: displayname, avatar url
+            // Try to update our local copy of the user if ours does not match
+            if ((services().users.displayname(user_id)? != membership_event.displayname)
+                || (services().users.avatar_url(user_id)? != membership_event.avatar_url)
+                || (services().users.blurhash(user_id)? != membership_event.blurhash))
+                && (membership != MembershipState::Leave)
+            {
+                let response = services()
+                    .sending
+                    .send_federation_request(
+                        user_id.server_name(),
+                        federation::query::get_profile_information::v1::Request {
+                            user_id: user_id.into(),
+                            field: Some(ProfileField::AvatarUrl),
+                        },
+                    )
+                    .await?;
+                let _ = services()
+                    .users
+                    .set_displayname(user_id, response.displayname.clone());
+                let _ = services()
+                    .users
+                    .set_avatar_url(user_id, response.avatar_url);
+                let _ = services().users.set_blurhash(user_id, response.blurhash);
+            };
         }
 
         match &membership {
diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs
index 34399d46..d34e80f9 100644
--- a/src/service/rooms/timeline/mod.rs
+++ b/src/service/rooms/timeline/mod.rs
@@ -15,7 +15,8 @@ use ruma::{
     events::{
         push_rules::PushRulesEvent,
         room::{
-            create::RoomCreateEventContent, member::MembershipState,
+            create::RoomCreateEventContent,
+            member::{MembershipState, RoomMemberEventContent},
             power_levels::RoomPowerLevelsEventContent,
         },
         GlobalAccountDataEventType, RoomEventType, StateEventType,
@@ -145,7 +146,7 @@ impl Service {
     ///
     /// Returns pdu id
     #[tracing::instrument(skip(self, pdu, pdu_json, leaves))]
-    pub fn append_pdu<'a>(
+    pub async fn append_pdu<'a>(
         &self,
         pdu: &PduEvent,
         mut pdu_json: CanonicalJsonObject,
@@ -211,7 +212,7 @@ impl Service {
                 .entry(pdu.room_id.clone())
                 .or_default(),
         );
-        let insert_lock = mutex_insert.lock().unwrap();
+        let insert_lock = mutex_insert.lock().await;
 
         let count1 = services().globals.next_count()?;
         // Mark as read first so the sending client doesn't get a notification even if appending
@@ -323,16 +324,11 @@ impl Service {
             }
             RoomEventType::RoomMember => {
                 if let Some(state_key) = &pdu.state_key {
-                    #[derive(Deserialize)]
-                    struct ExtractMembership {
-                        membership: MembershipState,
-                    }
-
                    // if the state_key fails
                     let target_user_id = UserId::parse(state_key.clone())
                         .expect("This state_key was previously validated");
 
-                    let content = serde_json::from_str::<ExtractMembership>(pdu.content.get())
+                    let content = serde_json::from_str::<RoomMemberEventContent>(pdu.content.get())
                         .map_err(|_| Error::bad_database("Invalid content in pdu."))?;
 
                     let invite_state = match content.membership {
@@ -345,14 +341,18 @@ impl Service {
 
                     // Update our membership info, we do this here incase a user is invited
                    // and immediately leaves we need the DB to record the invite event for auth
-                    services().rooms.state_cache.update_membership(
-                        &pdu.room_id,
-                        &target_user_id,
-                        content.membership,
-                        &pdu.sender,
-                        invite_state,
-                        true,
-                    )?;
+                    services()
+                        .rooms
+                        .state_cache
+                        .update_membership(
+                            &pdu.room_id,
+                            &target_user_id,
+                            content,
+                            &pdu.sender,
+                            invite_state,
+                            true,
+                        )
+                        .await?;
                 }
             }
             RoomEventType::RoomMessage => {
@@ -673,7 +673,7 @@ impl Service {
     /// Creates a new persisted data unit and adds it to a room. This function takes a
     /// roomid_mutex_state, meaning that only this function is able to mutate the room state.
     #[tracing::instrument(skip(self, state_lock))]
-    pub fn build_and_append_pdu(
+    pub async fn build_and_append_pdu(
        &self,
         pdu_builder: PduBuilder,
         sender: &UserId,
@@ -687,14 +687,16 @@ impl Service {
         // pdu without it's state. This is okay because append_pdu can't fail.
         let statehashid = services().rooms.state.append_to_state(&pdu)?;
 
-        let pdu_id = self.append_pdu(
-            &pdu,
-            pdu_json,
-            // Since this PDU references all pdu_leaves we can update the leaves
-            // of the room
-            vec![(*pdu.event_id).to_owned()],
-            state_lock,
-        )?;
+        let pdu_id = self
+            .append_pdu(
+                &pdu,
+                pdu_json,
+                // Since this PDU references all pdu_leaves we can update the leaves
+                // of the room
+                vec![(*pdu.event_id).to_owned()],
+                state_lock,
+            )
+            .await?;
 
         // We set the room state after inserting the pdu, so that we never have a moment in time
         // where events in the current room state do not exist
@@ -732,7 +734,7 @@ impl Service {
     /// Append the incoming event setting the state snapshot to the state from the
     /// server that sent the event.
     #[tracing::instrument(skip_all)]
-    pub fn append_incoming_pdu<'a>(
+    pub async fn append_incoming_pdu<'a>(
         &self,
         pdu: &PduEvent,
         pdu_json: CanonicalJsonObject,
@@ -762,11 +764,11 @@ impl Service {
             return Ok(None);
         }
 
-        let pdu_id =
-            services()
-                .rooms
-                .timeline
-                .append_pdu(pdu, pdu_json, new_room_leaves, state_lock)?;
+        let pdu_id = services()
+            .rooms
+            .timeline
+            .append_pdu(pdu, pdu_json, new_room_leaves, state_lock)
+            .await?;
 
         Ok(Some(pdu_id))
     }

From f458916919375674124d120f2618c928479fe4cd Mon Sep 17 00:00:00 2001
From: Nyaaori <+@nyaaori.cat>
Date: Wed, 21 Dec 2022 17:46:01 +0100
Subject: [PATCH 53/53] fix: Do not allow fetching cached remote users'
 profiles over federation

---
 src/api/server_server.rs | 7 +++++++
 1 file changed, 7 insertions(+)

diff --git a/src/api/server_server.rs b/src/api/server_server.rs
index 72f6cae7..7f14c4a7 100644
--- a/src/api/server_server.rs
+++ b/src/api/server_server.rs
@@ -1716,6 +1716,13 @@ pub async fn get_profile_information_route(
         return Err(Error::bad_config("Federation is disabled."));
     }
 
+    if body.user_id.server_name() != services().globals.server_name() {
+        return Err(Error::BadRequest(
+            ErrorKind::NotFound,
+            "User does not belong to this server",
+        ));
+    }
+
     let mut displayname = None;
     let mut avatar_url = None;
     let mut blurhash = None;