improvement: better memory usage and admin commands to analyze it

merge-requests/508/head
Timo Kösters 1 year ago
parent bac13d08ae
commit a2c3256ced

@ -105,7 +105,7 @@ async-trait = "0.1.68"
 sd-notify = { version = "0.4.1", optional = true }

 [features]
-default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "jemalloc", "systemd"]
+default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "systemd"]
 #backend_sled = ["sled"]
 backend_persy = ["persy", "parking_lot"]
 backend_sqlite = ["sqlite"]
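
With jemalloc dropped from the default feature set, stock builds fall back to the system allocator. Assuming the crate still defines a jemalloc feature elsewhere in [features] (that part is outside this hunk), operators can opt back in at build time:

cargo build --release --features jemalloc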

@ -653,213 +653,133 @@ async fn load_joined_room(
@ -867,146 +787,234 @@ async fn load_joined_room(

        .user
        .get_token_shortstatehash(&room_id, since)?;

These two hunks restructure load_joined_room: the cheap "no state changes" check now short-circuits first, and the member-count/heroes closure, the since_sender_member lookup, and both the initial-sync and incremental-sync state walks move inside the else branch, so none of that work runs when the timeline is empty and the state hash is unchanged. The resulting code:

    let (heroes, joined_member_count, invited_member_count, joined_since_last_sync, state_events) =
        if timeline_pdus.is_empty() && since_shortstatehash == Some(current_shortstatehash) {
            // No state changes
            (Vec::new(), None, None, false, Vec::new())
        } else {
            // Calculates joined_member_count, invited_member_count and heroes
            let calculate_counts = || {
                let joined_member_count = services()
                    .rooms
                    .state_cache
                    .room_joined_count(&room_id)?
                    .unwrap_or(0);
                let invited_member_count = services()
                    .rooms
                    .state_cache
                    .room_invited_count(&room_id)?
                    .unwrap_or(0);

                // Recalculate heroes (first 5 members)
                let mut heroes = Vec::new();

                if joined_member_count + invited_member_count <= 5 {
                    // Go through all PDUs and for each member event, check if the user is still joined or
                    // invited until we have 5 or we reach the end

                    for hero in services()
                        .rooms
                        .timeline
                        .all_pdus(&sender_user, &room_id)?
                        .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus
                        .filter(|(_, pdu)| pdu.kind == TimelineEventType::RoomMember)
                        .map(|(_, pdu)| {
                            let content: RoomMemberEventContent =
                                serde_json::from_str(pdu.content.get()).map_err(|_| {
                                    Error::bad_database("Invalid member event in database.")
                                })?;

                            if let Some(state_key) = &pdu.state_key {
                                let user_id = UserId::parse(state_key.clone()).map_err(|_| {
                                    Error::bad_database("Invalid UserId in member PDU.")
                                })?;

                                // The membership was and still is invite or join
                                if matches!(
                                    content.membership,
                                    MembershipState::Join | MembershipState::Invite
                                ) && (services()
                                    .rooms
                                    .state_cache
                                    .is_joined(&user_id, &room_id)?
                                    || services()
                                        .rooms
                                        .state_cache
                                        .is_invited(&user_id, &room_id)?)
                                {
                                    Ok::<_, Error>(Some(state_key.clone()))
                                } else {
                                    Ok(None)
                                }
                            } else {
                                Ok(None)
                            }
                        })
                        // Filter out buggy users
                        .filter_map(|u| u.ok())
                        // Filter for possible heroes
                        .flatten()
                    {
                        if heroes.contains(&hero) || hero == sender_user.as_str() {
                            continue;
                        }

                        heroes.push(hero);
                    }
                }

                Ok::<_, Error>((
                    Some(joined_member_count),
                    Some(invited_member_count),
                    heroes,
                ))
            };

            let since_sender_member: Option<RoomMemberEventContent> = since_shortstatehash
                .and_then(|shortstatehash| {
                    services()
                        .rooms
                        .state_accessor
                        .state_get(
                            shortstatehash,
                            &StateEventType::RoomMember,
                            sender_user.as_str(),
                        )
                        .transpose()
                })
                .transpose()?
                .and_then(|pdu| {
                    serde_json::from_str(pdu.content.get())
                        .map_err(|_| Error::bad_database("Invalid PDU in database."))
                        .ok()
                });

            let joined_since_last_sync = since_sender_member
                .map_or(true, |member| member.membership != MembershipState::Join);

            if since_shortstatehash.is_none() || joined_since_last_sync {
                // Probably since = 0, we will do an initial sync

                let (joined_member_count, invited_member_count, heroes) = calculate_counts()?;

                let current_state_ids = services()
                    .rooms
                    .state_accessor
                    .state_full_ids(current_shortstatehash)
                    .await?;

                let mut state_events = Vec::new();
                let mut lazy_loaded = HashSet::new();

                let mut i = 0;
                for (shortstatekey, id) in current_state_ids {
                    let (event_type, state_key) = services()
                        .rooms
                        .short
                        .get_statekey_from_short(shortstatekey)?;

                    if event_type != StateEventType::RoomMember {
                        let pdu = match services().rooms.timeline.get_pdu(&id)? {
                            Some(pdu) => pdu,
                            None => {
                                error!("Pdu in state not found: {}", id);
                                continue;
                            }
                        };
                        state_events.push(pdu);

                        i += 1;
                        if i % 100 == 0 {
                            tokio::task::yield_now().await;
                        }
                    } else if !lazy_load_enabled
                        || full_state
                        || timeline_users.contains(&state_key)
                        // TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565
                        || *sender_user == state_key
                    {
                        let pdu = match services().rooms.timeline.get_pdu(&id)? {
                            Some(pdu) => pdu,
                            None => {
                                error!("Pdu in state not found: {}", id);
                                continue;
                            }
                        };

                        // This check is in case a bad user ID made it into the database
                        if let Ok(uid) = UserId::parse(&state_key) {
                            lazy_loaded.insert(uid);
                        }
                        state_events.push(pdu);

                        i += 1;
                        if i % 100 == 0 {
                            tokio::task::yield_now().await;
                        }
                    }
                }

                // Reset lazy loading because this is an initial sync
                services().rooms.lazy_loading.lazy_load_reset(
                    &sender_user,
                    &sender_device,
                    &room_id,
                )?;

                // The state_events above should contain all timeline_users, let's mark them as lazy
                // loaded.
                services().rooms.lazy_loading.lazy_load_mark_sent(
                    &sender_user,
                    &sender_device,
                    &room_id,
                    lazy_loaded,
                    next_batchcount,
                );

                (
                    heroes,
                    joined_member_count,
                    invited_member_count,
                    true,
                    state_events,
                )
            } else {
                // Incremental /sync
                let since_shortstatehash = since_shortstatehash.unwrap();

                let mut state_events = Vec::new();
                let mut lazy_loaded = HashSet::new();

                if since_shortstatehash != current_shortstatehash {
                    let current_state_ids = services()
                        .rooms
                        .state_accessor
                        .state_full_ids(current_shortstatehash)
                        .await?;
                    let since_state_ids = services()
                        .rooms
                        .state_accessor
                        .state_full_ids(since_shortstatehash)
                        .await?;

                    for (key, id) in current_state_ids {
                        if full_state || since_state_ids.get(&key) != Some(&id) {
                            let pdu = match services().rooms.timeline.get_pdu(&id)? {
                                Some(pdu) => pdu,
                                None => {
                                    error!("Pdu in state not found: {}", id);
                                    continue;
                                }
                            };

                            if pdu.kind == TimelineEventType::RoomMember {
                                match UserId::parse(
                                    pdu.state_key
                                        .as_ref()
                                        .expect("State event has state key")
                                        .clone(),
                                ) {
                                    Ok(state_key_userid) => {
                                        lazy_loaded.insert(state_key_userid);
                                    }
                                    Err(e) => error!("Invalid state key for member event: {}", e),
                                }
                            }

                            state_events.push(pdu);
                            tokio::task::yield_now().await;
                        }
                    }
                }

                for (_, event) in &timeline_pdus {
                    if lazy_loaded.contains(&event.sender) {
                        continue;
                    }

                    if !services().rooms.lazy_loading.lazy_load_was_sent_before(
                        &sender_user,
                        &sender_device,
                        &room_id,
                        &event.sender,
                    )? || lazy_load_send_redundant
                    {
                        if let Some(member_event) = services().rooms.state_accessor.room_state_get(
                            &room_id,
                            &StateEventType::RoomMember,
                            event.sender.as_str(),
                        )? {
                            lazy_loaded.insert(event.sender.clone());
                            state_events.push(member_event);
                        }
                    }
                }

                services().rooms.lazy_loading.lazy_load_mark_sent(
                    &sender_user,
                    &sender_device,
                    &room_id,
                    lazy_loaded,
                    next_batchcount,
                );

                let encrypted_room = services()
                    .rooms
                    .state_accessor
                    .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")?
                    .is_some();

                let since_encryption = services().rooms.state_accessor.state_get(
                    since_shortstatehash,
                    &StateEventType::RoomEncryption,
                    "",
                )?;

                // Calculations:
                let new_encrypted_room = encrypted_room && since_encryption.is_none();

                let send_member_count = state_events
                    .iter()
                    .any(|event| event.kind == TimelineEventType::RoomMember);

                if encrypted_room {
                    for state_event in &state_events {
                        if state_event.kind != TimelineEventType::RoomMember {
                            continue;
                        }

                        if let Some(state_key) = &state_event.state_key {
                            let user_id = UserId::parse(state_key.clone()).map_err(|_| {
                                Error::bad_database("Invalid UserId in member PDU.")
                            })?;

                            if user_id == sender_user {
                                continue;
                            }

                            let new_membership = serde_json::from_str::<RoomMemberEventContent>(
                                state_event.content.get(),
                            )
                            .map_err(|_| Error::bad_database("Invalid PDU in database."))?
                            .membership;

                            match new_membership {
                                MembershipState::Join => {
                                    // A new user joined an encrypted room
                                    if !share_encrypted_room(&sender_user, &user_id, &room_id)? {
                                        device_list_updates.insert(user_id);
                                    }
                                }
                                MembershipState::Leave => {
                                    // Write down users that have left encrypted rooms we are in
                                    left_encrypted_users.insert(user_id);
                                }
                                _ => {}
                            }
                        }
                    }
                }

                if joined_since_last_sync && encrypted_room || new_encrypted_room {
                    // If the user is in a new encrypted room, give them all joined users
                    device_list_updates.extend(
                        services()
                            .rooms
                            .state_cache
                            .room_members(&room_id)
                            .flatten()
                            .filter(|user_id| {
                                // Don't send key updates from the sender to the sender
                                &sender_user != user_id
                            })
                            .filter(|user_id| {
                                // Only send keys if the sender doesn't share an encrypted room with the target already
                                !share_encrypted_room(&sender_user, user_id, &room_id)
                                    .unwrap_or(false)
                            }),
                    );
                }

                let (joined_member_count, invited_member_count, heroes) = if send_member_count {
                    calculate_counts()?
                } else {
                    (None, None, Vec::new())
                };

                (
                    heroes,
                    joined_member_count,
                    invited_member_count,
                    joined_since_last_sync,
                    state_events,
                )
            }
        };

    // Look for device list updates in this room
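
Why the reorder matters: state_full_ids materializes every state-event ID in the room, and the hero calculation walks all_pdus. Long-polling clients produce exactly this no-op shape (empty timeline, unchanged state hash) on most requests, so returning early there is presumably where much of the memory and CPU saving behind the commit title comes from.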

@ -224,7 +224,7 @@ fn default_database_backend() -> String {
 }

 fn default_db_cache_capacity_mb() -> f64 {
-    1000.0
+    300.0
 }

 fn default_conduit_cache_capacity_modifier() -> f64 {
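
The default database cache thus shrinks from 1000 MB to 300 MB. Operators who want the old behaviour can override it; assuming the usual [global] section of conduit.toml, the override would look like:

[global]
db_cache_capacity_mb = 1000.0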

@ -38,6 +38,7 @@ pub trait KeyValueDatabaseEngine: Send + Sync {
     fn memory_usage(&self) -> Result<String> {
         Ok("Current database engine does not support memory usage reporting.".to_owned())
     }
+    fn clear_caches(&self) {}
 }

 pub trait KvTree: Send + Sync {
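
Giving clear_caches an empty default body keeps every existing backend compiling while letting engines with process-local caches opt in. A minimal sketch of the pattern, with hypothetical engine types and the Result alias simplified away:

trait KeyValueDatabaseEngine {
    // Engines without introspection inherit this stub.
    fn memory_usage(&self) -> String {
        "Current database engine does not support memory usage reporting.".to_owned()
    }
    // No-op unless an engine has something to drop.
    fn clear_caches(&self) {}
}

struct NoCacheEngine; // hypothetical backend with no process-local caches
impl KeyValueDatabaseEngine for NoCacheEngine {}

struct CachingEngine {
    cache: std::sync::Mutex<Vec<u8>>, // stand-in for a real in-process cache
}
impl KeyValueDatabaseEngine for CachingEngine {
    fn clear_caches(&self) {
        self.cache.lock().unwrap().clear(); // release the memory on demand
    }
}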

@ -45,6 +45,10 @@ fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> rocksdb::O
     db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level);
     db_opts.optimize_level_style_compaction(10 * 1024 * 1024);

+    // https://github.com/facebook/rocksdb/wiki/Setup-Options-and-Basic-Tuning
+    db_opts.set_max_background_jobs(6);
+    db_opts.set_bytes_per_sync(1048576);
+
     let prefix_extractor = rocksdb::SliceTransform::create_fixed_prefix(1);
     db_opts.set_prefix_extractor(prefix_extractor);
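
Both options come from the RocksDB tuning guide linked in the comment: set_max_background_jobs caps the thread pool shared by flushes and compactions, and set_bytes_per_sync(1048576) asks the OS to write file data back in 1 MiB steps so dirty pages do not pile up and then stall on one large sync.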
@ -121,6 +125,8 @@ impl KeyValueDatabaseEngine for Arc<Engine> {
             self.cache.get_pinned_usage() as f64 / 1024.0 / 1024.0,
         ))
     }
+
+    fn clear_caches(&self) {}
 }

 impl RocksDbEngineTree<'_> {
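
The RocksDB engine overrides clear_caches as a deliberate no-op, presumably because its block cache is the size-capped rocksdb_cache passed into db_options, which RocksDB evicts on its own; the caches worth dropping by hand live above the engine, in the hunks below.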

@ -118,8 +118,59 @@ impl service::globals::Data for KeyValueDatabase {
        self._db.cleanup()
    }

The old implementation delegated straight to the engine (fn memory_usage(&self) -> Result<String> { self._db.memory_usage() }). It is replaced by a report over Conduit's own caches, with the engine's numbers appended, plus a tiered clear_caches:

    fn memory_usage(&self) -> String {
        let pdu_cache = self.pdu_cache.lock().unwrap().len();
        let shorteventid_cache = self.shorteventid_cache.lock().unwrap().len();
        let auth_chain_cache = self.auth_chain_cache.lock().unwrap().len();
        let eventidshort_cache = self.eventidshort_cache.lock().unwrap().len();
        let statekeyshort_cache = self.statekeyshort_cache.lock().unwrap().len();
        let our_real_users_cache = self.our_real_users_cache.read().unwrap().len();
        let appservice_in_room_cache = self.appservice_in_room_cache.read().unwrap().len();
        let lasttimelinecount_cache = self.lasttimelinecount_cache.lock().unwrap().len();
        let mut response = format!(
            "\
pdu_cache: {pdu_cache}
shorteventid_cache: {shorteventid_cache}
auth_chain_cache: {auth_chain_cache}
eventidshort_cache: {eventidshort_cache}
statekeyshort_cache: {statekeyshort_cache}
our_real_users_cache: {our_real_users_cache}
appservice_in_room_cache: {appservice_in_room_cache}
lasttimelinecount_cache: {lasttimelinecount_cache}\n"
        );
        if let Ok(db_stats) = self._db.memory_usage() {
            response += &db_stats;
        }
        response
    }

    fn clear_caches(&self, amount: u32) {
        if amount > 0 {
            self.pdu_cache.lock().unwrap().clear();
        }
        if amount > 1 {
            self.shorteventid_cache.lock().unwrap().clear();
        }
        if amount > 2 {
            self.auth_chain_cache.lock().unwrap().clear();
        }
        if amount > 3 {
            self.eventidshort_cache.lock().unwrap().clear();
        }
        if amount > 4 {
            self.statekeyshort_cache.lock().unwrap().clear();
        }
        if amount > 5 {
            self.our_real_users_cache.write().unwrap().clear();
        }
        if amount > 6 {
            self.appservice_in_room_cache.write().unwrap().clear();
        }
        if amount > 7 {
            self.lasttimelinecount_cache.lock().unwrap().clear();
        }
    }

    fn load_keypair(&self) -> Result<Ed25519KeyPair> {
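
Note the tier semantics: clear_caches(amount) empties every cache whose index is below amount, so the argument is an aggressiveness dial rather than a selector. For example, amount = 3 drops pdu_cache, shorteventid_cache, and auth_chain_cache, while amount = 8 or more drops all eight.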

@ -134,7 +134,13 @@ enum AdminCommand {
     },

     /// Print database memory usage statistics
-    DatabaseMemoryUsage,
+    MemoryUsage,
+
+    /// Clears all of Conduit's database caches with index smaller than the amount
+    ClearDatabaseCaches { amount: u32 },
+
+    /// Clears all of Conduit's service caches with index smaller than the amount
+    ClearServiceCaches { amount: u32 },

     /// Show configuration values
     ShowConfig,

@ -531,12 +537,24 @@ impl Service {
             None => RoomMessageEventContent::text_plain("PDU not found."),
         }
     }
-    AdminCommand::DatabaseMemoryUsage => match services().globals.db.memory_usage() {
-        Ok(response) => RoomMessageEventContent::text_plain(response),
-        Err(e) => RoomMessageEventContent::text_plain(format!(
-            "Failed to get database memory usage: {e}"
-        )),
-    },
+    AdminCommand::MemoryUsage => {
+        let response1 = services().memory_usage();
+        let response2 = services().globals.db.memory_usage();
+
+        RoomMessageEventContent::text_plain(format!(
+            "Services:\n{response1}\n\nDatabase:\n{response2}"
+        ))
+    }
+    AdminCommand::ClearDatabaseCaches { amount } => {
+        services().globals.db.clear_caches(amount);
+        RoomMessageEventContent::text_plain("Done.")
+    }
+    AdminCommand::ClearServiceCaches { amount } => {
+        services().clear_caches(amount);
+        RoomMessageEventContent::text_plain("Done.")
+    }
     AdminCommand::ShowConfig => {
         // Construct and send the response
         RoomMessageEventContent::text_plain(format!("{}", services().globals.config))
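
With clap's kebab-case convention these variants surface as memory-usage, clear-database-caches <amount>, and clear-service-caches <amount>. Assuming the usual way of addressing Conduit's admin bot in the admin room (server name here is a placeholder), a session could look like:

@conduit:example.com: memory-usage
@conduit:example.com: clear-database-caches 8
@conduit:example.com: clear-service-caches 6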

@ -15,7 +15,8 @@ pub trait Data: Send + Sync {
     fn current_count(&self) -> Result<u64>;
     async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()>;
     fn cleanup(&self) -> Result<()>;
-    fn memory_usage(&self) -> Result<String>;
+    fn memory_usage(&self) -> String;
+    fn clear_caches(&self, amount: u32);
     fn load_keypair(&self) -> Result<Ed25519KeyPair>;
     fn remove_keypair(&self) -> Result<()>;
     fn add_signing_key(

@ -214,10 +214,6 @@ impl Service {
         self.db.cleanup()
     }

-    pub fn memory_usage(&self) -> Result<String> {
-        self.db.memory_usage()
-    }
-
     pub fn server_name(&self) -> &ServerName {
         self.config.server_name.as_ref()
     }
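
The pass-through wrapper on globals::Service is no longer needed: the admin handler above reaches the database report via services().globals.db.memory_usage() directly, and the service-layer report now lives on Services itself, in the next hunk.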

@ -90,7 +90,7 @@ impl Services {
             state_compressor: rooms::state_compressor::Service {
                 db,
                 stateinfo_cache: Mutex::new(LruCache::new(
-                    (300.0 * config.conduit_cache_capacity_modifier) as usize,
+                    (100.0 * config.conduit_cache_capacity_modifier) as usize,
                 )),
             },
             timeline: rooms::timeline::Service {

@ -115,4 +115,109 @@ impl Services {
            globals: globals::Service::load(db, config)?,
        })
    }

The second hunk appends the service-layer counterparts of the database report, both new:

    fn memory_usage(&self) -> String {
        let lazy_load_waiting = self
            .rooms
            .lazy_loading
            .lazy_load_waiting
            .lock()
            .unwrap()
            .len();
        let server_visibility_cache = self
            .rooms
            .state_accessor
            .server_visibility_cache
            .lock()
            .unwrap()
            .len();
        let user_visibility_cache = self
            .rooms
            .state_accessor
            .user_visibility_cache
            .lock()
            .unwrap()
            .len();
        let stateinfo_cache = self
            .rooms
            .state_compressor
            .stateinfo_cache
            .lock()
            .unwrap()
            .len();
        let lasttimelinecount_cache = self
            .rooms
            .timeline
            .lasttimelinecount_cache
            .lock()
            .unwrap()
            .len();
        let roomid_spacechunk_cache = self
            .rooms
            .spaces
            .roomid_spacechunk_cache
            .lock()
            .unwrap()
            .len();

        format!(
            "\
lazy_load_waiting: {lazy_load_waiting}
server_visibility_cache: {server_visibility_cache}
user_visibility_cache: {user_visibility_cache}
stateinfo_cache: {stateinfo_cache}
lasttimelinecount_cache: {lasttimelinecount_cache}
roomid_spacechunk_cache: {roomid_spacechunk_cache}\
            "
        )
    }

    fn clear_caches(&self, amount: u32) {
        if amount > 0 {
            self.rooms
                .lazy_loading
                .lazy_load_waiting
                .lock()
                .unwrap()
                .clear();
        }
        if amount > 1 {
            self.rooms
                .state_accessor
                .server_visibility_cache
                .lock()
                .unwrap()
                .clear();
        }
        if amount > 2 {
            self.rooms
                .state_accessor
                .user_visibility_cache
                .lock()
                .unwrap()
                .clear();
        }
        if amount > 3 {
            self.rooms
                .state_compressor
                .stateinfo_cache
                .lock()
                .unwrap()
                .clear();
        }
        if amount > 4 {
            self.rooms
                .timeline
                .lasttimelinecount_cache
                .lock()
                .unwrap()
                .clear();
        }
        if amount > 5 {
            self.rooms
                .spaces
                .roomid_spacechunk_cache
                .lock()
                .unwrap()
                .clear();
        }
    }
}
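
A worked example of the two knobs together: assuming the default conduit_cache_capacity_modifier of 1.0, stateinfo_cache now holds 100 entries instead of 300, and clear-service-caches 4 would empty lazy_load_waiting, both visibility caches, and stateinfo_cache while leaving the timeline and space caches untouched.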
