mirror of https://gitlab.com/famedly/conduit
Merge branch 'conduit-next' into next
commit
13052388a7
@ -0,0 +1,11 @@
|
|||||||
|
{
|
||||||
|
"recommendations": [
|
||||||
|
"rust-lang.rust-analyzer",
|
||||||
|
"bungcip.better-toml",
|
||||||
|
"ms-azuretools.vscode-docker",
|
||||||
|
"eamodio.gitlens",
|
||||||
|
"serayuzgur.crates",
|
||||||
|
"vadimcn.vscode-lldb",
|
||||||
|
"timonwong.shellcheck"
|
||||||
|
]
|
||||||
|
}
|
@ -0,0 +1,35 @@
|
|||||||
|
{
|
||||||
|
// Use IntelliSense to learn about possible attributes.
|
||||||
|
// Hover to view descriptions of existing attributes.
|
||||||
|
// For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
|
||||||
|
"version": "0.2.0",
|
||||||
|
"configurations": [
|
||||||
|
{
|
||||||
|
"type": "lldb",
|
||||||
|
"request": "launch",
|
||||||
|
"name": "Debug conduit",
|
||||||
|
"sourceLanguages": ["rust"],
|
||||||
|
"cargo": {
|
||||||
|
"args": [
|
||||||
|
"build",
|
||||||
|
"--bin=conduit",
|
||||||
|
"--package=conduit"
|
||||||
|
],
|
||||||
|
"filter": {
|
||||||
|
"name": "conduit",
|
||||||
|
"kind": "bin"
|
||||||
|
}
|
||||||
|
},
|
||||||
|
"args": [],
|
||||||
|
"env": {
|
||||||
|
"RUST_BACKTRACE": "1",
|
||||||
|
"CONDUIT_CONFIG": "",
|
||||||
|
"CONDUIT_SERVER_NAME": "localhost",
|
||||||
|
"CONDUIT_DATABASE_PATH": "/tmp",
|
||||||
|
"CONDUIT_ADDRESS": "0.0.0.0",
|
||||||
|
"CONDUIT_PORT": "6167"
|
||||||
|
},
|
||||||
|
"cwd": "${workspaceFolder}"
|
||||||
|
}
|
||||||
|
]
|
||||||
|
}
|
@ -1,3 +1,3 @@
|
|||||||
{
|
{
|
||||||
"rust-analyzer.procMacro.enable": true
|
"rust-analyzer.procMacro.enable": true,
|
||||||
}
|
}
|
@ -1,11 +0,0 @@
|
|||||||
Install docker:
|
|
||||||
|
|
||||||
```
|
|
||||||
$ sudo apt install docker
|
|
||||||
$ sudo usermod -aG docker $USER
|
|
||||||
$ exec sudo su -l $USER
|
|
||||||
$ sudo systemctl start docker
|
|
||||||
$ cargo install cross
|
|
||||||
$ cross build --release --target armv7-unknown-linux-musleabihf
|
|
||||||
```
|
|
||||||
The cross-compiled binary is at target/armv7-unknown-linux-musleabihf/release/conduit
|
|
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,23 @@
|
|||||||
|
[build.env]
|
||||||
|
# CI uses an S3 endpoint to store sccache artifacts, so their config needs to
|
||||||
|
# be available in the cross container as well
|
||||||
|
passthrough = [
|
||||||
|
"RUSTC_WRAPPER",
|
||||||
|
"AWS_ACCESS_KEY_ID",
|
||||||
|
"AWS_SECRET_ACCESS_KEY",
|
||||||
|
"SCCACHE_BUCKET",
|
||||||
|
"SCCACHE_ENDPOINT",
|
||||||
|
"SCCACHE_S3_USE_SSL",
|
||||||
|
]
|
||||||
|
|
||||||
|
[target.aarch64-unknown-linux-musl]
|
||||||
|
image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-aarch64-unknown-linux-musl:latest"
|
||||||
|
|
||||||
|
[target.arm-unknown-linux-musleabihf]
|
||||||
|
image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-arm-unknown-linux-musleabihf:latest"
|
||||||
|
|
||||||
|
[target.armv7-unknown-linux-musleabihf]
|
||||||
|
image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-armv7-unknown-linux-musleabihf:latest"
|
||||||
|
|
||||||
|
[target.x86_64-unknown-linux-musl]
|
||||||
|
image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-x86_64-unknown-linux-musl@sha256:b6d689e42f0236c8a38b961bca2a12086018b85ed20e0826310421daf182e2bb"
|
@ -1,97 +1,84 @@
|
|||||||
# Using multistage build:
|
# syntax=docker/dockerfile:1
|
||||||
# https://docs.docker.com/develop/develop-images/multistage-build/
|
FROM docker.io/rust:1.63-bullseye AS builder
|
||||||
# https://whitfin.io/speeding-up-rust-docker-builds/
|
WORKDIR /usr/src/conduit
|
||||||
|
|
||||||
|
# Install required packages to build Conduit and it's dependencies
|
||||||
########################## BUILD IMAGE ##########################
|
RUN apt-get update && \
|
||||||
# Alpine build image to build Conduit's statically compiled binary
|
apt-get -y --no-install-recommends install libclang-dev=1:11.0-51+nmu5
|
||||||
FROM alpine:3.14 as builder
|
|
||||||
|
# == Build dependencies without our own code separately for caching ==
|
||||||
# Install packages needed for building all crates
|
#
|
||||||
RUN apk add --no-cache \
|
# Need a fake main.rs since Cargo refuses to build anything otherwise.
|
||||||
cargo \
|
#
|
||||||
openssl-dev
|
# See https://github.com/rust-lang/cargo/issues/2644 for a Cargo feature
|
||||||
|
# request that would allow just dependencies to be compiled, presumably
|
||||||
# Specifies if the local project is build or if Conduit gets build
|
# regardless of whether source files are available.
|
||||||
# from the official git repository. Defaults to the git repo.
|
RUN mkdir src && touch src/lib.rs && echo 'fn main() {}' > src/main.rs
|
||||||
ARG LOCAL=false
|
COPY Cargo.toml Cargo.lock ./
|
||||||
# Specifies which revision/commit is build. Defaults to HEAD
|
RUN cargo build --release && rm -r src
|
||||||
ARG GIT_REF=origin/master
|
|
||||||
|
# Copy over actual Conduit sources
|
||||||
# Copy project files from current folder
|
COPY src src
|
||||||
COPY . .
|
|
||||||
# Build it from the copied local files or from the official git repository
|
# main.rs and lib.rs need their timestamp updated for this to work correctly since
|
||||||
RUN if [[ $LOCAL == "true" ]]; then \
|
# otherwise the build with the fake main.rs from above is newer than the
|
||||||
mv ./docker/healthcheck.sh . ; \
|
# source files (COPY preserves timestamps).
|
||||||
echo "Building from local source..." ; \
|
#
|
||||||
cargo install --path . ; \
|
# Builds conduit and places the binary at /usr/src/conduit/target/release/conduit
|
||||||
else \
|
RUN touch src/main.rs && touch src/lib.rs && cargo build --release
|
||||||
echo "Building revision '${GIT_REF}' from online source..." ; \
|
|
||||||
cargo install --git "https://gitlab.com/famedly/conduit.git" --rev ${GIT_REF} ; \
|
# ---------------------------------------------------------------------------------------------------------------
|
||||||
echo "Loadings healthcheck script from online source..." ; \
|
# Stuff below this line actually ends up in the resulting docker image
|
||||||
wget "https://gitlab.com/famedly/conduit/-/raw/${GIT_REF#origin/}/docker/healthcheck.sh" ; \
|
# ---------------------------------------------------------------------------------------------------------------
|
||||||
fi
|
FROM docker.io/debian:bullseye-slim AS runner
|
||||||
|
|
||||||
########################## RUNTIME IMAGE ##########################
|
# Standard port on which Conduit launches.
|
||||||
# Create new stage with a minimal image for the actual
|
# You still need to map the port when using the docker command or docker-compose.
|
||||||
# runtime image/container
|
|
||||||
FROM alpine:3.14
|
|
||||||
|
|
||||||
ARG CREATED
|
|
||||||
ARG VERSION
|
|
||||||
ARG GIT_REF=origin/master
|
|
||||||
|
|
||||||
ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml"
|
|
||||||
|
|
||||||
# Labels according to https://github.com/opencontainers/image-spec/blob/master/annotations.md
|
|
||||||
# including a custom label specifying the build command
|
|
||||||
LABEL org.opencontainers.image.created=${CREATED} \
|
|
||||||
org.opencontainers.image.authors="Conduit Contributors" \
|
|
||||||
org.opencontainers.image.title="Conduit" \
|
|
||||||
org.opencontainers.image.version=${VERSION} \
|
|
||||||
org.opencontainers.image.vendor="Conduit Contributors" \
|
|
||||||
org.opencontainers.image.description="A Matrix homeserver written in Rust" \
|
|
||||||
org.opencontainers.image.url="https://conduit.rs/" \
|
|
||||||
org.opencontainers.image.revision=${GIT_REF} \
|
|
||||||
org.opencontainers.image.source="https://gitlab.com/famedly/conduit.git" \
|
|
||||||
org.opencontainers.image.licenses="Apache-2.0" \
|
|
||||||
org.opencontainers.image.documentation="" \
|
|
||||||
org.opencontainers.image.ref.name="" \
|
|
||||||
org.label-schema.docker.build="docker build . -t matrixconduit/matrix-conduit:latest --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)" \
|
|
||||||
maintainer="Weasy666"
|
|
||||||
|
|
||||||
# Standard port on which Conduit launches. You still need to map the port when using the docker command or docker-compose.
|
|
||||||
EXPOSE 6167
|
EXPOSE 6167
|
||||||
|
|
||||||
# Copy config files from context and the binary from
|
ARG DEFAULT_DB_PATH=/var/lib/matrix-conduit
|
||||||
# the "builder" stage to the current stage into folder
|
|
||||||
# /srv/conduit and create data folder for database
|
|
||||||
RUN mkdir -p /srv/conduit/.local/share/conduit
|
|
||||||
COPY --from=builder /root/.cargo/bin/conduit /srv/conduit/
|
|
||||||
COPY --from=builder ./healthcheck.sh /srv/conduit/
|
|
||||||
|
|
||||||
# Add www-data user and group with UID 82, as used by alpine
|
|
||||||
# https://git.alpinelinux.org/aports/tree/main/nginx/nginx.pre-install
|
|
||||||
RUN set -x ; \
|
|
||||||
addgroup -Sg 82 www-data 2>/dev/null ; \
|
|
||||||
adduser -S -D -H -h /srv/conduit -G www-data -g www-data www-data 2>/dev/null ; \
|
|
||||||
addgroup www-data www-data 2>/dev/null && exit 0 ; exit 1
|
|
||||||
|
|
||||||
# Change ownership of Conduit files to www-data user and group
|
ENV CONDUIT_PORT=6167 \
|
||||||
RUN chown -cR www-data:www-data /srv/conduit
|
CONDUIT_ADDRESS="0.0.0.0" \
|
||||||
|
CONDUIT_DATABASE_PATH=${DEFAULT_DB_PATH} \
|
||||||
|
CONDUIT_CONFIG=''
|
||||||
|
# └─> Set no config file to do all configuration with env vars
|
||||||
|
|
||||||
# Install packages needed to run Conduit
|
# Conduit needs:
|
||||||
RUN apk add --no-cache \
|
# ca-certificates: for https
|
||||||
ca-certificates \
|
# iproute2 & wget: for the healthcheck script
|
||||||
curl \
|
RUN apt-get update && apt-get -y --no-install-recommends install \
|
||||||
libgcc
|
ca-certificates \
|
||||||
|
iproute2 \
|
||||||
|
wget \
|
||||||
|
&& rm -rf /var/lib/apt/lists/*
|
||||||
|
|
||||||
# Test if Conduit is still alive, uses the same endpoint as Element
|
# Test if Conduit is still alive, uses the same endpoint as Element
|
||||||
HEALTHCHECK --start-period=5s --interval=60s CMD ./healthcheck.sh
|
COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh
|
||||||
|
HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh
|
||||||
|
|
||||||
# Set user to www-data
|
# Copy over the actual Conduit binary from the builder stage
|
||||||
USER www-data
|
COPY --from=builder /usr/src/conduit/target/release/conduit /srv/conduit/conduit
|
||||||
|
|
||||||
|
# Improve security: Don't run stuff as root, that does not need to run as root
|
||||||
|
# Most distros also use 1000:1000 for the first real user, so this should resolve volume mounting problems.
|
||||||
|
ARG USER_ID=1000
|
||||||
|
ARG GROUP_ID=1000
|
||||||
|
RUN set -x ; \
|
||||||
|
groupadd -r -g ${GROUP_ID} conduit ; \
|
||||||
|
useradd -l -r -M -d /srv/conduit -o -u ${USER_ID} -g conduit conduit && exit 0 ; exit 1
|
||||||
|
|
||||||
|
# Create database directory, change ownership of Conduit files to conduit user and group and make the healthcheck executable:
|
||||||
|
RUN chown -cR conduit:conduit /srv/conduit && \
|
||||||
|
chmod +x /srv/conduit/healthcheck.sh && \
|
||||||
|
mkdir -p ${DEFAULT_DB_PATH} && \
|
||||||
|
chown -cR conduit:conduit ${DEFAULT_DB_PATH}
|
||||||
|
|
||||||
|
# Change user to conduit, no root permissions afterwards:
|
||||||
|
USER conduit
|
||||||
# Set container home directory
|
# Set container home directory
|
||||||
WORKDIR /srv/conduit
|
WORKDIR /srv/conduit
|
||||||
# Run Conduit
|
|
||||||
|
# Run Conduit and print backtraces on panics
|
||||||
|
ENV RUST_BACKTRACE=1
|
||||||
ENTRYPOINT [ "/srv/conduit/conduit" ]
|
ENTRYPOINT [ "/srv/conduit/conduit" ]
|
||||||
|
@ -0,0 +1,25 @@
|
|||||||
|
# Setting up TURN/STURN
|
||||||
|
|
||||||
|
## General instructions
|
||||||
|
|
||||||
|
* It is assumed you have a [Coturn server](https://github.com/coturn/coturn) up and running. See [Synapse reference implementation](https://github.com/matrix-org/synapse/blob/develop/docs/turn-howto.md).
|
||||||
|
|
||||||
|
## Edit/Add a few settings to your existing conduit.toml
|
||||||
|
|
||||||
|
```
|
||||||
|
# Refer to your Coturn settings.
|
||||||
|
# `your.turn.url` has to match the REALM setting of your Coturn as well as `transport`.
|
||||||
|
turn_uris = ["turn:your.turn.url?transport=udp", "turn:your.turn.url?transport=tcp"]
|
||||||
|
|
||||||
|
# static-auth-secret of your turnserver
|
||||||
|
turn_secret = "ADD SECRET HERE"
|
||||||
|
|
||||||
|
# If you have your TURN server configured to use a username and password
|
||||||
|
# you can provide these information too. In this case comment out `turn_secret above`!
|
||||||
|
#turn_username = ""
|
||||||
|
#turn_password = ""
|
||||||
|
```
|
||||||
|
|
||||||
|
## Apply settings
|
||||||
|
|
||||||
|
Restart Conduit.
|
@ -1,69 +1,84 @@
|
|||||||
|
# syntax=docker/dockerfile:1
|
||||||
# ---------------------------------------------------------------------------------------------------------
|
# ---------------------------------------------------------------------------------------------------------
|
||||||
# This Dockerfile is intended to be built as part of Conduit's CI pipeline.
|
# This Dockerfile is intended to be built as part of Conduit's CI pipeline.
|
||||||
# It does not build Conduit in Docker, but just copies the matching build artifact from the build job.
|
# It does not build Conduit in Docker, but just copies the matching build artifact from the build jobs.
|
||||||
# As a consequence, this is not a multiarch capable image. It always expects and packages a x86_64 binary.
|
|
||||||
#
|
#
|
||||||
# It is mostly based on the normal Conduit Dockerfile, but adjusted in a few places to maximise caching.
|
# It is mostly based on the normal Conduit Dockerfile, but adjusted in a few places to maximise caching.
|
||||||
# Credit's for the original Dockerfile: Weasy666.
|
# Credit's for the original Dockerfile: Weasy666.
|
||||||
# ---------------------------------------------------------------------------------------------------------
|
# ---------------------------------------------------------------------------------------------------------
|
||||||
|
|
||||||
FROM alpine:3.14
|
FROM docker.io/alpine:3.16.0@sha256:4ff3ca91275773af45cb4b0834e12b7eb47d1c18f770a0b151381cd227f4c253 AS runner
|
||||||
|
|
||||||
# Install packages needed to run Conduit
|
|
||||||
|
# Standard port on which Conduit launches.
|
||||||
|
# You still need to map the port when using the docker command or docker-compose.
|
||||||
|
EXPOSE 6167
|
||||||
|
|
||||||
|
# Users are expected to mount a volume to this directory:
|
||||||
|
ARG DEFAULT_DB_PATH=/var/lib/matrix-conduit
|
||||||
|
|
||||||
|
ENV CONDUIT_PORT=6167 \
|
||||||
|
CONDUIT_ADDRESS="0.0.0.0" \
|
||||||
|
CONDUIT_DATABASE_PATH=${DEFAULT_DB_PATH} \
|
||||||
|
CONDUIT_CONFIG=''
|
||||||
|
# └─> Set no config file to do all configuration with env vars
|
||||||
|
|
||||||
|
# Conduit needs:
|
||||||
|
# ca-certificates: for https
|
||||||
|
# iproute2: for `ss` for the healthcheck script
|
||||||
RUN apk add --no-cache \
|
RUN apk add --no-cache \
|
||||||
ca-certificates \
|
ca-certificates \
|
||||||
curl \
|
iproute2
|
||||||
libgcc
|
|
||||||
|
|
||||||
ARG CREATED
|
ARG CREATED
|
||||||
ARG VERSION
|
ARG VERSION
|
||||||
ARG GIT_REF
|
ARG GIT_REF
|
||||||
|
|
||||||
ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml"
|
|
||||||
|
|
||||||
# Labels according to https://github.com/opencontainers/image-spec/blob/master/annotations.md
|
# Labels according to https://github.com/opencontainers/image-spec/blob/master/annotations.md
|
||||||
# including a custom label specifying the build command
|
# including a custom label specifying the build command
|
||||||
LABEL org.opencontainers.image.created=${CREATED} \
|
LABEL org.opencontainers.image.created=${CREATED} \
|
||||||
org.opencontainers.image.authors="Conduit Contributors" \
|
org.opencontainers.image.authors="Conduit Contributors" \
|
||||||
org.opencontainers.image.title="Conduit" \
|
org.opencontainers.image.title="Conduit" \
|
||||||
org.opencontainers.image.version=${VERSION} \
|
org.opencontainers.image.version=${VERSION} \
|
||||||
org.opencontainers.image.vendor="Conduit Contributors" \
|
org.opencontainers.image.vendor="Conduit Contributors" \
|
||||||
org.opencontainers.image.description="A Matrix homeserver written in Rust" \
|
org.opencontainers.image.description="A Matrix homeserver written in Rust" \
|
||||||
org.opencontainers.image.url="https://conduit.rs/" \
|
org.opencontainers.image.url="https://conduit.rs/" \
|
||||||
org.opencontainers.image.revision=${GIT_REF} \
|
org.opencontainers.image.revision=${GIT_REF} \
|
||||||
org.opencontainers.image.source="https://gitlab.com/famedly/conduit.git" \
|
org.opencontainers.image.source="https://gitlab.com/famedly/conduit.git" \
|
||||||
org.opencontainers.image.licenses="Apache-2.0" \
|
org.opencontainers.image.licenses="Apache-2.0" \
|
||||||
org.opencontainers.image.documentation="" \
|
org.opencontainers.image.documentation="https://gitlab.com/famedly/conduit" \
|
||||||
org.opencontainers.image.ref.name=""
|
org.opencontainers.image.ref.name=""
|
||||||
|
|
||||||
# Standard port on which Conduit launches. You still need to map the port when using the docker command or docker-compose.
|
|
||||||
EXPOSE 6167
|
|
||||||
|
|
||||||
# create data folder for database
|
# Test if Conduit is still alive, uses the same endpoint as Element
|
||||||
RUN mkdir -p /srv/conduit/.local/share/conduit
|
COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh
|
||||||
|
HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh
|
||||||
# Copy the Conduit binary into the image at the latest possible moment to maximise caching:
|
|
||||||
COPY ./conduit-x86_64-unknown-linux-musl /srv/conduit/conduit
|
|
||||||
COPY ./docker/healthcheck.sh /srv/conduit/
|
|
||||||
|
|
||||||
# Add www-data user and group with UID 82, as used by alpine
|
# Improve security: Don't run stuff as root, that does not need to run as root:
|
||||||
# https://git.alpinelinux.org/aports/tree/main/nginx/nginx.pre-install
|
# Most distros also use 1000:1000 for the first real user, so this should resolve volume mounting problems.
|
||||||
|
ARG USER_ID=1000
|
||||||
|
ARG GROUP_ID=1000
|
||||||
RUN set -x ; \
|
RUN set -x ; \
|
||||||
addgroup -Sg 82 www-data 2>/dev/null ; \
|
deluser --remove-home www-data ; \
|
||||||
adduser -S -D -H -h /srv/conduit -G www-data -g www-data www-data 2>/dev/null ; \
|
addgroup -S -g ${GROUP_ID} conduit 2>/dev/null ; \
|
||||||
addgroup www-data www-data 2>/dev/null && exit 0 ; exit 1
|
adduser -S -u ${USER_ID} -D -H -h /srv/conduit -G conduit -g conduit conduit 2>/dev/null ; \
|
||||||
|
addgroup conduit conduit 2>/dev/null && exit 0 ; exit 1
|
||||||
|
|
||||||
# Change ownership of Conduit files to www-data user and group
|
# Change ownership of Conduit files to conduit user and group
|
||||||
RUN chown -cR www-data:www-data /srv/conduit
|
RUN chown -cR conduit:conduit /srv/conduit && \
|
||||||
RUN chmod +x /srv/conduit/healthcheck.sh
|
chmod +x /srv/conduit/healthcheck.sh && \
|
||||||
|
mkdir -p ${DEFAULT_DB_PATH} && \
|
||||||
|
chown -cR conduit:conduit ${DEFAULT_DB_PATH}
|
||||||
|
|
||||||
|
# Change user to conduit
|
||||||
# Test if Conduit is still alive, uses the same endpoint as Element
|
USER conduit
|
||||||
HEALTHCHECK --start-period=5s --interval=60s CMD ./healthcheck.sh
|
|
||||||
|
|
||||||
# Set user to www-data
|
|
||||||
USER www-data
|
|
||||||
# Set container home directory
|
# Set container home directory
|
||||||
WORKDIR /srv/conduit
|
WORKDIR /srv/conduit
|
||||||
# Run Conduit
|
|
||||||
|
# Run Conduit and print backtraces on panics
|
||||||
|
ENV RUST_BACKTRACE=1
|
||||||
ENTRYPOINT [ "/srv/conduit/conduit" ]
|
ENTRYPOINT [ "/srv/conduit/conduit" ]
|
||||||
|
|
||||||
|
# Depending on the target platform (e.g. "linux/arm/v7", "linux/arm64/v8", or "linux/amd64")
|
||||||
|
# copy the matching binary into this docker image
|
||||||
|
ARG TARGETPLATFORM
|
||||||
|
COPY --chown=conduit:conduit ./$TARGETPLATFORM /srv/conduit/conduit
|
||||||
|
@ -0,0 +1,68 @@
|
|||||||
|
# Conduit - Behind Traefik Reverse Proxy
|
||||||
|
version: '3'
|
||||||
|
|
||||||
|
services:
|
||||||
|
homeserver:
|
||||||
|
### If you already built the Conduit image with 'docker build' or want to use the Docker Hub image,
|
||||||
|
### then you are ready to go.
|
||||||
|
image: matrixconduit/matrix-conduit:latest
|
||||||
|
### If you want to build a fresh image from the sources, then comment the image line and uncomment the
|
||||||
|
### build lines. If you want meaningful labels in your built Conduit image, you should run docker-compose like this:
|
||||||
|
### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d
|
||||||
|
# build:
|
||||||
|
# context: .
|
||||||
|
# args:
|
||||||
|
# CREATED: '2021-03-16T08:18:27Z'
|
||||||
|
# VERSION: '0.1.0'
|
||||||
|
# LOCAL: 'false'
|
||||||
|
# GIT_REF: origin/master
|
||||||
|
restart: unless-stopped
|
||||||
|
volumes:
|
||||||
|
- db:/var/lib/matrix-conduit/
|
||||||
|
networks:
|
||||||
|
- proxy
|
||||||
|
environment:
|
||||||
|
CONDUIT_SERVER_NAME: your.server.name # EDIT THIS
|
||||||
|
CONDUIT_DATABASE_PATH: /var/lib/matrix-conduit/
|
||||||
|
CONDUIT_DATABASE_BACKEND: rocksdb
|
||||||
|
CONDUIT_PORT: 6167
|
||||||
|
CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB
|
||||||
|
CONDUIT_ALLOW_REGISTRATION: 'true'
|
||||||
|
CONDUIT_ALLOW_FEDERATION: 'true'
|
||||||
|
CONDUIT_TRUSTED_SERVERS: '["matrix.org"]'
|
||||||
|
#CONDUIT_MAX_CONCURRENT_REQUESTS: 100
|
||||||
|
#CONDUIT_LOG: warn,rocket=off,_=off,sled=off
|
||||||
|
CONDUIT_ADDRESS: 0.0.0.0
|
||||||
|
CONDUIT_CONFIG: '' # Ignore this
|
||||||
|
|
||||||
|
# We need some way to server the client and server .well-known json. The simplest way is to use a nginx container
|
||||||
|
# to serve those two as static files. If you want to use a different way, delete or comment the below service, here
|
||||||
|
# and in the docker-compose override file.
|
||||||
|
well-known:
|
||||||
|
image: nginx:latest
|
||||||
|
restart: unless-stopped
|
||||||
|
volumes:
|
||||||
|
- ./nginx/matrix.conf:/etc/nginx/conf.d/matrix.conf # the config to serve the .well-known/matrix files
|
||||||
|
- ./nginx/www:/var/www/ # location of the client and server .well-known-files
|
||||||
|
### Uncomment if you want to use your own Element-Web App.
|
||||||
|
### Note: You need to provide a config.json for Element and you also need a second
|
||||||
|
### Domain or Subdomain for the communication between Element and Conduit
|
||||||
|
### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md
|
||||||
|
# element-web:
|
||||||
|
# image: vectorim/element-web:latest
|
||||||
|
# restart: unless-stopped
|
||||||
|
# volumes:
|
||||||
|
# - ./element_config.json:/app/config.json
|
||||||
|
# networks:
|
||||||
|
# - proxy
|
||||||
|
# depends_on:
|
||||||
|
# - homeserver
|
||||||
|
|
||||||
|
volumes:
|
||||||
|
db:
|
||||||
|
|
||||||
|
networks:
|
||||||
|
# This is the network Traefik listens to, if your network has a different
|
||||||
|
# name, don't forget to change it here and in the docker-compose.override.yml
|
||||||
|
proxy:
|
||||||
|
external: true
|
@ -1,13 +1,14 @@
|
|||||||
#!/bin/sh
|
#!/bin/sh
|
||||||
|
|
||||||
# If the port is not specified as env var, take it from the config file
|
# If the config file does not contain a default port and the CONDUIT_PORT env is not set, create
|
||||||
if [ -z ${CONDUIT_PORT} ]; then
|
# try to get port from process list
|
||||||
CONDUIT_PORT=$(grep -m1 -o 'port\s=\s[0-9]*' conduit.toml | grep -m1 -o '[0-9]*')
|
if [ -z "${CONDUIT_PORT}" ]; then
|
||||||
|
CONDUIT_PORT=$(ss -tlpn | grep conduit | grep -m1 -o ':[0-9]*' | grep -m1 -o '[0-9]*')
|
||||||
fi
|
fi
|
||||||
|
|
||||||
# The actual health check.
|
# The actual health check.
|
||||||
# We try to first get a response on HTTP and when that fails on HTTPS and when that fails, we exit with code 1.
|
# We try to first get a response on HTTP and when that fails on HTTPS and when that fails, we exit with code 1.
|
||||||
# TODO: Change this to a single curl call. Do we have a config value that we can check for that?
|
# TODO: Change this to a single wget call. Do we have a config value that we can check for that?
|
||||||
curl --fail -s "http://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \
|
wget --no-verbose --tries=1 --spider "http://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \
|
||||||
curl -k --fail -s "https://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \
|
wget --no-verbose --tries=1 --spider "https://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \
|
||||||
exit 1
|
exit 1
|
||||||
|
@ -1 +0,0 @@
|
|||||||
1.53
|
|
@ -0,0 +1,420 @@
|
|||||||
|
use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH};
|
||||||
|
use crate::{api::client_server, services, utils, Error, Result, Ruma};
|
||||||
|
use ruma::{
|
||||||
|
api::client::{
|
||||||
|
account::{
|
||||||
|
change_password, deactivate, get_3pids, get_username_availability, register, whoami,
|
||||||
|
ThirdPartyIdRemovalStatus,
|
||||||
|
},
|
||||||
|
error::ErrorKind,
|
||||||
|
uiaa::{AuthFlow, AuthType, UiaaInfo},
|
||||||
|
},
|
||||||
|
events::{room::message::RoomMessageEventContent, GlobalAccountDataEventType},
|
||||||
|
push, UserId,
|
||||||
|
};
|
||||||
|
use tracing::{info, warn};
|
||||||
|
|
||||||
|
use register::RegistrationKind;
|
||||||
|
|
||||||
|
const RANDOM_USER_ID_LENGTH: usize = 10;
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/register/available`
|
||||||
|
///
|
||||||
|
/// Checks if a username is valid and available on this server.
|
||||||
|
///
|
||||||
|
/// Conditions for returning true:
|
||||||
|
/// - The user id is not historical
|
||||||
|
/// - The server name of the user id matches this server
|
||||||
|
/// - No user or appservice on this server already claimed this username
|
||||||
|
///
|
||||||
|
/// Note: This will not reserve the username, so the username might become invalid when trying to register
|
||||||
|
pub async fn get_register_available_route(
|
||||||
|
body: Ruma<get_username_availability::v3::IncomingRequest>,
|
||||||
|
) -> Result<get_username_availability::v3::Response> {
|
||||||
|
// Validate user id
|
||||||
|
let user_id = UserId::parse_with_server_name(
|
||||||
|
body.username.to_lowercase(),
|
||||||
|
services().globals.server_name(),
|
||||||
|
)
|
||||||
|
.ok()
|
||||||
|
.filter(|user_id| {
|
||||||
|
!user_id.is_historical() && user_id.server_name() == services().globals.server_name()
|
||||||
|
})
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidUsername,
|
||||||
|
"Username is invalid.",
|
||||||
|
))?;
|
||||||
|
|
||||||
|
// Check if username is creative enough
|
||||||
|
if services().users.exists(&user_id)? {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::UserInUse,
|
||||||
|
"Desired user ID is already taken.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO add check for appservice namespaces
|
||||||
|
|
||||||
|
// If no if check is true we have an username that's available to be used.
|
||||||
|
Ok(get_username_availability::v3::Response { available: true })
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `POST /_matrix/client/r0/register`
|
||||||
|
///
|
||||||
|
/// Register an account on this homeserver.
|
||||||
|
///
|
||||||
|
/// You can use [`GET /_matrix/client/r0/register/available`](fn.get_register_available_route.html)
|
||||||
|
/// to check if the user id is valid and available.
|
||||||
|
///
|
||||||
|
/// - Only works if registration is enabled
|
||||||
|
/// - If type is guest: ignores all parameters except initial_device_display_name
|
||||||
|
/// - If sender is not appservice: Requires UIAA (but we only use a dummy stage)
|
||||||
|
/// - If type is not guest and no username is given: Always fails after UIAA check
|
||||||
|
/// - Creates a new account and populates it with default account data
|
||||||
|
/// - If `inhibit_login` is false: Creates a device and returns device id and access_token
|
||||||
|
pub async fn register_route(
|
||||||
|
body: Ruma<register::v3::IncomingRequest>,
|
||||||
|
) -> Result<register::v3::Response> {
|
||||||
|
if !services().globals.allow_registration() && !body.from_appservice {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::Forbidden,
|
||||||
|
"Registration has been disabled.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
let is_guest = body.kind == RegistrationKind::Guest;
|
||||||
|
|
||||||
|
let user_id = match (&body.username, is_guest) {
|
||||||
|
(Some(username), false) => {
|
||||||
|
let proposed_user_id = UserId::parse_with_server_name(
|
||||||
|
username.to_lowercase(),
|
||||||
|
services().globals.server_name(),
|
||||||
|
)
|
||||||
|
.ok()
|
||||||
|
.filter(|user_id| {
|
||||||
|
!user_id.is_historical()
|
||||||
|
&& user_id.server_name() == services().globals.server_name()
|
||||||
|
})
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidUsername,
|
||||||
|
"Username is invalid.",
|
||||||
|
))?;
|
||||||
|
if services().users.exists(&proposed_user_id)? {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::UserInUse,
|
||||||
|
"Desired user ID is already taken.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
proposed_user_id
|
||||||
|
}
|
||||||
|
_ => loop {
|
||||||
|
let proposed_user_id = UserId::parse_with_server_name(
|
||||||
|
utils::random_string(RANDOM_USER_ID_LENGTH).to_lowercase(),
|
||||||
|
services().globals.server_name(),
|
||||||
|
)
|
||||||
|
.unwrap();
|
||||||
|
if !services().users.exists(&proposed_user_id)? {
|
||||||
|
break proposed_user_id;
|
||||||
|
}
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
// UIAA
|
||||||
|
let mut uiaainfo = UiaaInfo {
|
||||||
|
flows: vec![AuthFlow {
|
||||||
|
stages: vec![AuthType::Dummy],
|
||||||
|
}],
|
||||||
|
completed: Vec::new(),
|
||||||
|
params: Default::default(),
|
||||||
|
session: None,
|
||||||
|
auth_error: None,
|
||||||
|
};
|
||||||
|
|
||||||
|
if !body.from_appservice {
|
||||||
|
if let Some(auth) = &body.auth {
|
||||||
|
let (worked, uiaainfo) = services().uiaa.try_auth(
|
||||||
|
&UserId::parse_with_server_name("", services().globals.server_name())
|
||||||
|
.expect("we know this is valid"),
|
||||||
|
"".into(),
|
||||||
|
auth,
|
||||||
|
&uiaainfo,
|
||||||
|
)?;
|
||||||
|
if !worked {
|
||||||
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
|
}
|
||||||
|
// Success!
|
||||||
|
} else if let Some(json) = body.json_body {
|
||||||
|
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
||||||
|
services().uiaa.create(
|
||||||
|
&UserId::parse_with_server_name("", services().globals.server_name())
|
||||||
|
.expect("we know this is valid"),
|
||||||
|
"".into(),
|
||||||
|
&uiaainfo,
|
||||||
|
&json,
|
||||||
|
)?;
|
||||||
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
|
} else {
|
||||||
|
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let password = if is_guest {
|
||||||
|
None
|
||||||
|
} else {
|
||||||
|
body.password.as_deref()
|
||||||
|
};
|
||||||
|
|
||||||
|
// Create user
|
||||||
|
services().users.create(&user_id, password)?;
|
||||||
|
|
||||||
|
// Default to pretty displayname
|
||||||
|
let mut displayname = user_id.localpart().to_owned();
|
||||||
|
|
||||||
|
// If enabled append lightning bolt to display name (default true)
|
||||||
|
if services().globals.enable_lightning_bolt() {
|
||||||
|
displayname.push_str(" ⚡️");
|
||||||
|
}
|
||||||
|
|
||||||
|
services()
|
||||||
|
.users
|
||||||
|
.set_displayname(&user_id, Some(displayname.clone()))?;
|
||||||
|
|
||||||
|
// Initial account data
|
||||||
|
services().account_data.update(
|
||||||
|
None,
|
||||||
|
&user_id,
|
||||||
|
GlobalAccountDataEventType::PushRules.to_string().into(),
|
||||||
|
&serde_json::to_value(ruma::events::push_rules::PushRulesEvent {
|
||||||
|
content: ruma::events::push_rules::PushRulesEventContent {
|
||||||
|
global: push::Ruleset::server_default(&user_id),
|
||||||
|
},
|
||||||
|
})
|
||||||
|
.expect("to json always works"),
|
||||||
|
)?;
|
||||||
|
|
||||||
|
// Inhibit login does not work for guests
|
||||||
|
if !is_guest && body.inhibit_login {
|
||||||
|
return Ok(register::v3::Response {
|
||||||
|
access_token: None,
|
||||||
|
user_id,
|
||||||
|
device_id: None,
|
||||||
|
refresh_token: None,
|
||||||
|
expires_in: None,
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
|
// Generate new device id if the user didn't specify one
|
||||||
|
let device_id = if is_guest {
|
||||||
|
None
|
||||||
|
} else {
|
||||||
|
body.device_id.clone()
|
||||||
|
}
|
||||||
|
.unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into());
|
||||||
|
|
||||||
|
// Generate new token for the device
|
||||||
|
let token = utils::random_string(TOKEN_LENGTH);
|
||||||
|
|
||||||
|
// Create device for this account
|
||||||
|
services().users.create_device(
|
||||||
|
&user_id,
|
||||||
|
&device_id,
|
||||||
|
&token,
|
||||||
|
body.initial_device_display_name.clone(),
|
||||||
|
)?;
|
||||||
|
|
||||||
|
info!("New user {} registered on this server.", user_id);
|
||||||
|
services()
|
||||||
|
.admin
|
||||||
|
.send_message(RoomMessageEventContent::notice_plain(format!(
|
||||||
|
"New user {} registered on this server.",
|
||||||
|
user_id
|
||||||
|
)));
|
||||||
|
|
||||||
|
// If this is the first real user, grant them admin privileges
|
||||||
|
// Note: the server user, @conduit:servername, is generated first
|
||||||
|
if services().users.count()? == 2 {
|
||||||
|
services()
|
||||||
|
.admin
|
||||||
|
.make_user_admin(&user_id, displayname)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
warn!("Granting {} admin privileges as the first user", user_id);
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(register::v3::Response {
|
||||||
|
access_token: Some(token),
|
||||||
|
user_id,
|
||||||
|
device_id: Some(device_id),
|
||||||
|
refresh_token: None,
|
||||||
|
expires_in: None,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `POST /_matrix/client/r0/account/password`
|
||||||
|
///
|
||||||
|
/// Changes the password of this account.
|
||||||
|
///
|
||||||
|
/// - Requires UIAA to verify user password
|
||||||
|
/// - Changes the password of the sender user
|
||||||
|
/// - The password hash is calculated using argon2 with 32 character salt, the plain password is
|
||||||
|
/// not saved
|
||||||
|
///
|
||||||
|
/// If logout_devices is true it does the following for each device except the sender device:
|
||||||
|
/// - Invalidates access token
|
||||||
|
/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts)
|
||||||
|
/// - Forgets to-device events
|
||||||
|
/// - Triggers device list updates
|
||||||
|
pub async fn change_password_route(
|
||||||
|
body: Ruma<change_password::v3::IncomingRequest>,
|
||||||
|
) -> Result<change_password::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
let mut uiaainfo = UiaaInfo {
|
||||||
|
flows: vec![AuthFlow {
|
||||||
|
stages: vec![AuthType::Password],
|
||||||
|
}],
|
||||||
|
completed: Vec::new(),
|
||||||
|
params: Default::default(),
|
||||||
|
session: None,
|
||||||
|
auth_error: None,
|
||||||
|
};
|
||||||
|
|
||||||
|
if let Some(auth) = &body.auth {
|
||||||
|
let (worked, uiaainfo) =
|
||||||
|
services()
|
||||||
|
.uiaa
|
||||||
|
.try_auth(sender_user, sender_device, auth, &uiaainfo)?;
|
||||||
|
if !worked {
|
||||||
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
|
}
|
||||||
|
// Success!
|
||||||
|
} else if let Some(json) = body.json_body {
|
||||||
|
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
||||||
|
services()
|
||||||
|
.uiaa
|
||||||
|
.create(sender_user, sender_device, &uiaainfo, &json)?;
|
||||||
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
|
} else {
|
||||||
|
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
|
||||||
|
}
|
||||||
|
|
||||||
|
services()
|
||||||
|
.users
|
||||||
|
.set_password(sender_user, Some(&body.new_password))?;
|
||||||
|
|
||||||
|
if body.logout_devices {
|
||||||
|
// Logout all devices except the current one
|
||||||
|
for id in services()
|
||||||
|
.users
|
||||||
|
.all_device_ids(sender_user)
|
||||||
|
.filter_map(|id| id.ok())
|
||||||
|
.filter(|id| id != sender_device)
|
||||||
|
{
|
||||||
|
services().users.remove_device(sender_user, &id)?;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
info!("User {} changed their password.", sender_user);
|
||||||
|
services()
|
||||||
|
.admin
|
||||||
|
.send_message(RoomMessageEventContent::notice_plain(format!(
|
||||||
|
"User {} changed their password.",
|
||||||
|
sender_user
|
||||||
|
)));
|
||||||
|
|
||||||
|
Ok(change_password::v3::Response {})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `GET _matrix/client/r0/account/whoami`
|
||||||
|
///
|
||||||
|
/// Get user_id of the sender user.
|
||||||
|
///
|
||||||
|
/// Note: Also works for Application Services
|
||||||
|
pub async fn whoami_route(body: Ruma<whoami::v3::Request>) -> Result<whoami::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
let device_id = body.sender_device.as_ref().cloned();
|
||||||
|
|
||||||
|
Ok(whoami::v3::Response {
|
||||||
|
user_id: sender_user.clone(),
|
||||||
|
device_id,
|
||||||
|
is_guest: services().users.is_deactivated(sender_user)? && !body.from_appservice,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `POST /_matrix/client/r0/account/deactivate`
|
||||||
|
///
|
||||||
|
/// Deactivate sender user account.
|
||||||
|
///
|
||||||
|
/// - Leaves all rooms and rejects all invitations
|
||||||
|
/// - Invalidates all access tokens
|
||||||
|
/// - Deletes all device metadata (device id, device display name, last seen ip, last seen ts)
|
||||||
|
/// - Forgets all to-device events
|
||||||
|
/// - Triggers device list updates
|
||||||
|
/// - Removes ability to log in again
|
||||||
|
pub async fn deactivate_route(
|
||||||
|
body: Ruma<deactivate::v3::IncomingRequest>,
|
||||||
|
) -> Result<deactivate::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
let mut uiaainfo = UiaaInfo {
|
||||||
|
flows: vec![AuthFlow {
|
||||||
|
stages: vec![AuthType::Password],
|
||||||
|
}],
|
||||||
|
completed: Vec::new(),
|
||||||
|
params: Default::default(),
|
||||||
|
session: None,
|
||||||
|
auth_error: None,
|
||||||
|
};
|
||||||
|
|
||||||
|
if let Some(auth) = &body.auth {
|
||||||
|
let (worked, uiaainfo) =
|
||||||
|
services()
|
||||||
|
.uiaa
|
||||||
|
.try_auth(sender_user, sender_device, auth, &uiaainfo)?;
|
||||||
|
if !worked {
|
||||||
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
|
}
|
||||||
|
// Success!
|
||||||
|
} else if let Some(json) = body.json_body {
|
||||||
|
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
||||||
|
services()
|
||||||
|
.uiaa
|
||||||
|
.create(sender_user, sender_device, &uiaainfo, &json)?;
|
||||||
|
return Err(Error::Uiaa(uiaainfo));
|
||||||
|
} else {
|
||||||
|
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
|
||||||
|
}
|
||||||
|
|
||||||
|
// Make the user leave all rooms before deactivation
|
||||||
|
client_server::leave_all_rooms(sender_user).await?;
|
||||||
|
|
||||||
|
// Remove devices and mark account as deactivated
|
||||||
|
services().users.deactivate_account(sender_user)?;
|
||||||
|
|
||||||
|
info!("User {} deactivated their account.", sender_user);
|
||||||
|
services()
|
||||||
|
.admin
|
||||||
|
.send_message(RoomMessageEventContent::notice_plain(format!(
|
||||||
|
"User {} deactivated their account.",
|
||||||
|
sender_user
|
||||||
|
)));
|
||||||
|
|
||||||
|
Ok(deactivate::v3::Response {
|
||||||
|
id_server_unbind_result: ThirdPartyIdRemovalStatus::NoSupport,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `GET _matrix/client/r0/account/3pid`
|
||||||
|
///
|
||||||
|
/// Get a list of third party identifiers associated with this account.
|
||||||
|
///
|
||||||
|
/// - Currently always returns empty list
|
||||||
|
pub async fn third_party_route(
|
||||||
|
body: Ruma<get_3pids::v3::Request>,
|
||||||
|
) -> Result<get_3pids::v3::Response> {
|
||||||
|
let _sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
Ok(get_3pids::v3::Response::new(Vec::new()))
|
||||||
|
}
|
@ -0,0 +1,362 @@
|
|||||||
|
use crate::{services, Error, Result, Ruma};
|
||||||
|
use ruma::api::client::{
|
||||||
|
backup::{
|
||||||
|
add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session,
|
||||||
|
create_backup_version, delete_backup_keys, delete_backup_keys_for_room,
|
||||||
|
delete_backup_keys_for_session, delete_backup_version, get_backup_info, get_backup_keys,
|
||||||
|
get_backup_keys_for_room, get_backup_keys_for_session, get_latest_backup_info,
|
||||||
|
update_backup_version,
|
||||||
|
},
|
||||||
|
error::ErrorKind,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// # `POST /_matrix/client/r0/room_keys/version`
|
||||||
|
///
|
||||||
|
/// Creates a new backup.
|
||||||
|
pub async fn create_backup_version_route(
|
||||||
|
body: Ruma<create_backup_version::v3::Request>,
|
||||||
|
) -> Result<create_backup_version::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
let version = services()
|
||||||
|
.key_backups
|
||||||
|
.create_backup(sender_user, &body.algorithm)?;
|
||||||
|
|
||||||
|
Ok(create_backup_version::v3::Response { version })
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `PUT /_matrix/client/r0/room_keys/version/{version}`
|
||||||
|
///
|
||||||
|
/// Update information about an existing backup. Only `auth_data` can be modified.
|
||||||
|
pub async fn update_backup_version_route(
|
||||||
|
body: Ruma<update_backup_version::v3::IncomingRequest>,
|
||||||
|
) -> Result<update_backup_version::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
services()
|
||||||
|
.key_backups
|
||||||
|
.update_backup(sender_user, &body.version, &body.algorithm)?;
|
||||||
|
|
||||||
|
Ok(update_backup_version::v3::Response {})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/room_keys/version`
|
||||||
|
///
|
||||||
|
/// Get information about the latest backup version.
|
||||||
|
pub async fn get_latest_backup_info_route(
|
||||||
|
body: Ruma<get_latest_backup_info::v3::Request>,
|
||||||
|
) -> Result<get_latest_backup_info::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
let (version, algorithm) = services()
|
||||||
|
.key_backups
|
||||||
|
.get_latest_backup(sender_user)?
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"Key backup does not exist.",
|
||||||
|
))?;
|
||||||
|
|
||||||
|
Ok(get_latest_backup_info::v3::Response {
|
||||||
|
algorithm,
|
||||||
|
count: (services().key_backups.count_keys(sender_user, &version)? as u32).into(),
|
||||||
|
etag: services().key_backups.get_etag(sender_user, &version)?,
|
||||||
|
version,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/room_keys/version`
|
||||||
|
///
|
||||||
|
/// Get information about an existing backup.
|
||||||
|
pub async fn get_backup_info_route(
|
||||||
|
body: Ruma<get_backup_info::v3::IncomingRequest>,
|
||||||
|
) -> Result<get_backup_info::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
let algorithm = services()
|
||||||
|
.key_backups
|
||||||
|
.get_backup(sender_user, &body.version)?
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"Key backup does not exist.",
|
||||||
|
))?;
|
||||||
|
|
||||||
|
Ok(get_backup_info::v3::Response {
|
||||||
|
algorithm,
|
||||||
|
count: (services()
|
||||||
|
.key_backups
|
||||||
|
.count_keys(sender_user, &body.version)? as u32)
|
||||||
|
.into(),
|
||||||
|
etag: services()
|
||||||
|
.key_backups
|
||||||
|
.get_etag(sender_user, &body.version)?,
|
||||||
|
version: body.version.to_owned(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `DELETE /_matrix/client/r0/room_keys/version/{version}`
|
||||||
|
///
|
||||||
|
/// Delete an existing key backup.
|
||||||
|
///
|
||||||
|
/// - Deletes both information about the backup, as well as all key data related to the backup
|
||||||
|
pub async fn delete_backup_version_route(
|
||||||
|
body: Ruma<delete_backup_version::v3::IncomingRequest>,
|
||||||
|
) -> Result<delete_backup_version::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
services()
|
||||||
|
.key_backups
|
||||||
|
.delete_backup(sender_user, &body.version)?;
|
||||||
|
|
||||||
|
Ok(delete_backup_version::v3::Response {})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `PUT /_matrix/client/r0/room_keys/keys`
|
||||||
|
///
|
||||||
|
/// Add the received backup keys to the database.
|
||||||
|
///
|
||||||
|
/// - Only manipulating the most recently created version of the backup is allowed
|
||||||
|
/// - Adds the keys to the backup
|
||||||
|
/// - Returns the new number of keys in this backup and the etag
|
||||||
|
pub async fn add_backup_keys_route(
|
||||||
|
body: Ruma<add_backup_keys::v3::IncomingRequest>,
|
||||||
|
) -> Result<add_backup_keys::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
if Some(&body.version)
|
||||||
|
!= services()
|
||||||
|
.key_backups
|
||||||
|
.get_latest_backup_version(sender_user)?
|
||||||
|
.as_ref()
|
||||||
|
{
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"You may only manipulate the most recently created version of the backup.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
for (room_id, room) in &body.rooms {
|
||||||
|
for (session_id, key_data) in &room.sessions {
|
||||||
|
services().key_backups.add_key(
|
||||||
|
sender_user,
|
||||||
|
&body.version,
|
||||||
|
room_id,
|
||||||
|
session_id,
|
||||||
|
key_data,
|
||||||
|
)?
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(add_backup_keys::v3::Response {
|
||||||
|
count: (services()
|
||||||
|
.key_backups
|
||||||
|
.count_keys(sender_user, &body.version)? as u32)
|
||||||
|
.into(),
|
||||||
|
etag: services()
|
||||||
|
.key_backups
|
||||||
|
.get_etag(sender_user, &body.version)?,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}`
|
||||||
|
///
|
||||||
|
/// Add the received backup keys to the database.
|
||||||
|
///
|
||||||
|
/// - Only manipulating the most recently created version of the backup is allowed
|
||||||
|
/// - Adds the keys to the backup
|
||||||
|
/// - Returns the new number of keys in this backup and the etag
|
||||||
|
pub async fn add_backup_keys_for_room_route(
|
||||||
|
body: Ruma<add_backup_keys_for_room::v3::IncomingRequest>,
|
||||||
|
) -> Result<add_backup_keys_for_room::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
if Some(&body.version)
|
||||||
|
!= services()
|
||||||
|
.key_backups
|
||||||
|
.get_latest_backup_version(sender_user)?
|
||||||
|
.as_ref()
|
||||||
|
{
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"You may only manipulate the most recently created version of the backup.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
for (session_id, key_data) in &body.sessions {
|
||||||
|
services().key_backups.add_key(
|
||||||
|
sender_user,
|
||||||
|
&body.version,
|
||||||
|
&body.room_id,
|
||||||
|
session_id,
|
||||||
|
key_data,
|
||||||
|
)?
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(add_backup_keys_for_room::v3::Response {
|
||||||
|
count: (services()
|
||||||
|
.key_backups
|
||||||
|
.count_keys(sender_user, &body.version)? as u32)
|
||||||
|
.into(),
|
||||||
|
etag: services()
|
||||||
|
.key_backups
|
||||||
|
.get_etag(sender_user, &body.version)?,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
|
||||||
|
///
|
||||||
|
/// Add the received backup key to the database.
|
||||||
|
///
|
||||||
|
/// - Only manipulating the most recently created version of the backup is allowed
|
||||||
|
/// - Adds the keys to the backup
|
||||||
|
/// - Returns the new number of keys in this backup and the etag
|
||||||
|
pub async fn add_backup_keys_for_session_route(
|
||||||
|
body: Ruma<add_backup_keys_for_session::v3::IncomingRequest>,
|
||||||
|
) -> Result<add_backup_keys_for_session::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
if Some(&body.version)
|
||||||
|
!= services()
|
||||||
|
.key_backups
|
||||||
|
.get_latest_backup_version(sender_user)?
|
||||||
|
.as_ref()
|
||||||
|
{
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::InvalidParam,
|
||||||
|
"You may only manipulate the most recently created version of the backup.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
services().key_backups.add_key(
|
||||||
|
sender_user,
|
||||||
|
&body.version,
|
||||||
|
&body.room_id,
|
||||||
|
&body.session_id,
|
||||||
|
&body.session_data,
|
||||||
|
)?;
|
||||||
|
|
||||||
|
Ok(add_backup_keys_for_session::v3::Response {
|
||||||
|
count: (services()
|
||||||
|
.key_backups
|
||||||
|
.count_keys(sender_user, &body.version)? as u32)
|
||||||
|
.into(),
|
||||||
|
etag: services()
|
||||||
|
.key_backups
|
||||||
|
.get_etag(sender_user, &body.version)?,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/room_keys/keys`
|
||||||
|
///
|
||||||
|
/// Retrieves all keys from the backup.
|
||||||
|
pub async fn get_backup_keys_route(
|
||||||
|
body: Ruma<get_backup_keys::v3::IncomingRequest>,
|
||||||
|
) -> Result<get_backup_keys::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
let rooms = services().key_backups.get_all(sender_user, &body.version)?;
|
||||||
|
|
||||||
|
Ok(get_backup_keys::v3::Response { rooms })
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/room_keys/keys/{roomId}`
|
||||||
|
///
|
||||||
|
/// Retrieves all keys from the backup for a given room.
|
||||||
|
pub async fn get_backup_keys_for_room_route(
|
||||||
|
body: Ruma<get_backup_keys_for_room::v3::IncomingRequest>,
|
||||||
|
) -> Result<get_backup_keys_for_room::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
let sessions = services()
|
||||||
|
.key_backups
|
||||||
|
.get_room(sender_user, &body.version, &body.room_id)?;
|
||||||
|
|
||||||
|
Ok(get_backup_keys_for_room::v3::Response { sessions })
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
|
||||||
|
///
|
||||||
|
/// Retrieves a key from the backup.
|
||||||
|
pub async fn get_backup_keys_for_session_route(
|
||||||
|
body: Ruma<get_backup_keys_for_session::v3::IncomingRequest>,
|
||||||
|
) -> Result<get_backup_keys_for_session::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
let key_data = services()
|
||||||
|
.key_backups
|
||||||
|
.get_session(sender_user, &body.version, &body.room_id, &body.session_id)?
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"Backup key not found for this user's session.",
|
||||||
|
))?;
|
||||||
|
|
||||||
|
Ok(get_backup_keys_for_session::v3::Response { key_data })
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `DELETE /_matrix/client/r0/room_keys/keys`
|
||||||
|
///
|
||||||
|
/// Delete the keys from the backup.
|
||||||
|
pub async fn delete_backup_keys_route(
|
||||||
|
body: Ruma<delete_backup_keys::v3::IncomingRequest>,
|
||||||
|
) -> Result<delete_backup_keys::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
services()
|
||||||
|
.key_backups
|
||||||
|
.delete_all_keys(sender_user, &body.version)?;
|
||||||
|
|
||||||
|
Ok(delete_backup_keys::v3::Response {
|
||||||
|
count: (services()
|
||||||
|
.key_backups
|
||||||
|
.count_keys(sender_user, &body.version)? as u32)
|
||||||
|
.into(),
|
||||||
|
etag: services()
|
||||||
|
.key_backups
|
||||||
|
.get_etag(sender_user, &body.version)?,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}`
|
||||||
|
///
|
||||||
|
/// Delete the keys from the backup for a given room.
|
||||||
|
pub async fn delete_backup_keys_for_room_route(
|
||||||
|
body: Ruma<delete_backup_keys_for_room::v3::IncomingRequest>,
|
||||||
|
) -> Result<delete_backup_keys_for_room::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
services()
|
||||||
|
.key_backups
|
||||||
|
.delete_room_keys(sender_user, &body.version, &body.room_id)?;
|
||||||
|
|
||||||
|
Ok(delete_backup_keys_for_room::v3::Response {
|
||||||
|
count: (services()
|
||||||
|
.key_backups
|
||||||
|
.count_keys(sender_user, &body.version)? as u32)
|
||||||
|
.into(),
|
||||||
|
etag: services()
|
||||||
|
.key_backups
|
||||||
|
.get_etag(sender_user, &body.version)?,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
|
||||||
|
///
|
||||||
|
/// Delete a key from the backup.
|
||||||
|
pub async fn delete_backup_keys_for_session_route(
|
||||||
|
body: Ruma<delete_backup_keys_for_session::v3::IncomingRequest>,
|
||||||
|
) -> Result<delete_backup_keys_for_session::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
services().key_backups.delete_room_key(
|
||||||
|
sender_user,
|
||||||
|
&body.version,
|
||||||
|
&body.room_id,
|
||||||
|
&body.session_id,
|
||||||
|
)?;
|
||||||
|
|
||||||
|
Ok(delete_backup_keys_for_session::v3::Response {
|
||||||
|
count: (services()
|
||||||
|
.key_backups
|
||||||
|
.count_keys(sender_user, &body.version)? as u32)
|
||||||
|
.into(),
|
||||||
|
etag: services()
|
||||||
|
.key_backups
|
||||||
|
.get_etag(sender_user, &body.version)?,
|
||||||
|
})
|
||||||
|
}
|
@ -0,0 +1,28 @@
|
|||||||
|
use crate::{services, Result, Ruma};
|
||||||
|
use ruma::api::client::discovery::get_capabilities::{
|
||||||
|
self, Capabilities, RoomVersionStability, RoomVersionsCapability,
|
||||||
|
};
|
||||||
|
use std::collections::BTreeMap;
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/capabilities`
|
||||||
|
///
|
||||||
|
/// Get information on the supported feature set and other relevent capabilities of this server.
|
||||||
|
pub async fn get_capabilities_route(
|
||||||
|
_body: Ruma<get_capabilities::v3::IncomingRequest>,
|
||||||
|
) -> Result<get_capabilities::v3::Response> {
|
||||||
|
let mut available = BTreeMap::new();
|
||||||
|
for room_version in &services().globals.unstable_room_versions {
|
||||||
|
available.insert(room_version.clone(), RoomVersionStability::Unstable);
|
||||||
|
}
|
||||||
|
for room_version in &services().globals.stable_room_versions {
|
||||||
|
available.insert(room_version.clone(), RoomVersionStability::Stable);
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut capabilities = Capabilities::new();
|
||||||
|
capabilities.room_versions = RoomVersionsCapability {
|
||||||
|
default: services().globals.default_room_version(),
|
||||||
|
available,
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(get_capabilities::v3::Response { capabilities })
|
||||||
|
}
|
@ -0,0 +1,203 @@
|
|||||||
|
use crate::{services, Error, Result, Ruma};
|
||||||
|
use ruma::{
|
||||||
|
api::client::{context::get_context, error::ErrorKind, filter::LazyLoadOptions},
|
||||||
|
events::StateEventType,
|
||||||
|
};
|
||||||
|
use std::{collections::HashSet, convert::TryFrom};
|
||||||
|
use tracing::error;
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/rooms/{roomId}/context`
|
||||||
|
///
|
||||||
|
/// Allows loading room history around an event.
|
||||||
|
///
|
||||||
|
/// - Only works if the user is joined (TODO: always allow, but only show events if the user was
|
||||||
|
/// joined, depending on history_visibility)
|
||||||
|
pub async fn get_context_route(
|
||||||
|
body: Ruma<get_context::v3::IncomingRequest>,
|
||||||
|
) -> Result<get_context::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
let (lazy_load_enabled, lazy_load_send_redundant) = match &body.filter.lazy_load_options {
|
||||||
|
LazyLoadOptions::Enabled {
|
||||||
|
include_redundant_members,
|
||||||
|
} => (true, *include_redundant_members),
|
||||||
|
_ => (false, false),
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut lazy_loaded = HashSet::new();
|
||||||
|
|
||||||
|
let base_pdu_id = services()
|
||||||
|
.rooms
|
||||||
|
.timeline
|
||||||
|
.get_pdu_id(&body.event_id)?
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"Base event id not found.",
|
||||||
|
))?;
|
||||||
|
|
||||||
|
let base_token = services().rooms.timeline.pdu_count(&base_pdu_id)?;
|
||||||
|
|
||||||
|
let base_event = services()
|
||||||
|
.rooms
|
||||||
|
.timeline
|
||||||
|
.get_pdu_from_id(&base_pdu_id)?
|
||||||
|
.ok_or(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"Base event not found.",
|
||||||
|
))?;
|
||||||
|
|
||||||
|
let room_id = base_event.room_id.clone();
|
||||||
|
|
||||||
|
if !services()
|
||||||
|
.rooms
|
||||||
|
.state_cache
|
||||||
|
.is_joined(sender_user, &room_id)?
|
||||||
|
{
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::Forbidden,
|
||||||
|
"You don't have permission to view this room.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
if !services().rooms.lazy_loading.lazy_load_was_sent_before(
|
||||||
|
sender_user,
|
||||||
|
sender_device,
|
||||||
|
&room_id,
|
||||||
|
&base_event.sender,
|
||||||
|
)? || lazy_load_send_redundant
|
||||||
|
{
|
||||||
|
lazy_loaded.insert(base_event.sender.as_str().to_owned());
|
||||||
|
}
|
||||||
|
|
||||||
|
let base_event = base_event.to_room_event();
|
||||||
|
|
||||||
|
let events_before: Vec<_> = services()
|
||||||
|
.rooms
|
||||||
|
.timeline
|
||||||
|
.pdus_until(sender_user, &room_id, base_token)?
|
||||||
|
.take(
|
||||||
|
u32::try_from(body.limit).map_err(|_| {
|
||||||
|
Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.")
|
||||||
|
})? as usize
|
||||||
|
/ 2,
|
||||||
|
)
|
||||||
|
.filter_map(|r| r.ok()) // Remove buggy events
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
for (_, event) in &events_before {
|
||||||
|
if !services().rooms.lazy_loading.lazy_load_was_sent_before(
|
||||||
|
sender_user,
|
||||||
|
sender_device,
|
||||||
|
&room_id,
|
||||||
|
&event.sender,
|
||||||
|
)? || lazy_load_send_redundant
|
||||||
|
{
|
||||||
|
lazy_loaded.insert(event.sender.as_str().to_owned());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let start_token = events_before
|
||||||
|
.last()
|
||||||
|
.and_then(|(pdu_id, _)| services().rooms.timeline.pdu_count(pdu_id).ok())
|
||||||
|
.map(|count| count.to_string());
|
||||||
|
|
||||||
|
let events_before: Vec<_> = events_before
|
||||||
|
.into_iter()
|
||||||
|
.map(|(_, pdu)| pdu.to_room_event())
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
let events_after: Vec<_> = services()
|
||||||
|
.rooms
|
||||||
|
.timeline
|
||||||
|
.pdus_after(sender_user, &room_id, base_token)?
|
||||||
|
.take(
|
||||||
|
u32::try_from(body.limit).map_err(|_| {
|
||||||
|
Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.")
|
||||||
|
})? as usize
|
||||||
|
/ 2,
|
||||||
|
)
|
||||||
|
.filter_map(|r| r.ok()) // Remove buggy events
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
for (_, event) in &events_after {
|
||||||
|
if !services().rooms.lazy_loading.lazy_load_was_sent_before(
|
||||||
|
sender_user,
|
||||||
|
sender_device,
|
||||||
|
&room_id,
|
||||||
|
&event.sender,
|
||||||
|
)? || lazy_load_send_redundant
|
||||||
|
{
|
||||||
|
lazy_loaded.insert(event.sender.as_str().to_owned());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let shortstatehash = match services().rooms.state_accessor.pdu_shortstatehash(
|
||||||
|
events_after
|
||||||
|
.last()
|
||||||
|
.map_or(&*body.event_id, |(_, e)| &*e.event_id),
|
||||||
|
)? {
|
||||||
|
Some(s) => s,
|
||||||
|
None => services()
|
||||||
|
.rooms
|
||||||
|
.state
|
||||||
|
.get_room_shortstatehash(&room_id)?
|
||||||
|
.expect("All rooms have state"),
|
||||||
|
};
|
||||||
|
|
||||||
|
let state_ids = services()
|
||||||
|
.rooms
|
||||||
|
.state_accessor
|
||||||
|
.state_full_ids(shortstatehash)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
let end_token = events_after
|
||||||
|
.last()
|
||||||
|
.and_then(|(pdu_id, _)| services().rooms.timeline.pdu_count(pdu_id).ok())
|
||||||
|
.map(|count| count.to_string());
|
||||||
|
|
||||||
|
let events_after: Vec<_> = events_after
|
||||||
|
.into_iter()
|
||||||
|
.map(|(_, pdu)| pdu.to_room_event())
|
||||||
|
.collect();
|
||||||
|
|
||||||
|
let mut state = Vec::new();
|
||||||
|
|
||||||
|
for (shortstatekey, id) in state_ids {
|
||||||
|
let (event_type, state_key) = services()
|
||||||
|
.rooms
|
||||||
|
.short
|
||||||
|
.get_statekey_from_short(shortstatekey)?;
|
||||||
|
|
||||||
|
if event_type != StateEventType::RoomMember {
|
||||||
|
let pdu = match services().rooms.timeline.get_pdu(&id)? {
|
||||||
|
Some(pdu) => pdu,
|
||||||
|
None => {
|
||||||
|
error!("Pdu in state not found: {}", id);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
state.push(pdu.to_state_event());
|
||||||
|
} else if !lazy_load_enabled || lazy_loaded.contains(&state_key) {
|
||||||
|
let pdu = match services().rooms.timeline.get_pdu(&id)? {
|
||||||
|
Some(pdu) => pdu,
|
||||||
|
None => {
|
||||||
|
error!("Pdu in state not found: {}", id);
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
};
|
||||||
|
state.push(pdu.to_state_event());
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let resp = get_context::v3::Response {
|
||||||
|
start: start_token,
|
||||||
|
end: end_token,
|
||||||
|
events_before,
|
||||||
|
event: Some(base_event),
|
||||||
|
events_after,
|
||||||
|
state,
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(resp)
|
||||||
|
}
|
@ -0,0 +1,34 @@
|
|||||||
|
use crate::{services, Error, Result, Ruma};
|
||||||
|
use ruma::api::client::{
|
||||||
|
error::ErrorKind,
|
||||||
|
filter::{create_filter, get_filter},
|
||||||
|
};
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/user/{userId}/filter/{filterId}`
|
||||||
|
///
|
||||||
|
/// Loads a filter that was previously created.
|
||||||
|
///
|
||||||
|
/// - A user can only access their own filters
|
||||||
|
pub async fn get_filter_route(
|
||||||
|
body: Ruma<get_filter::v3::IncomingRequest>,
|
||||||
|
) -> Result<get_filter::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
let filter = match services().users.get_filter(sender_user, &body.filter_id)? {
|
||||||
|
Some(filter) => filter,
|
||||||
|
None => return Err(Error::BadRequest(ErrorKind::NotFound, "Filter not found.")),
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(get_filter::v3::Response::new(filter))
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `PUT /_matrix/client/r0/user/{userId}/filter`
|
||||||
|
///
|
||||||
|
/// Creates a new filter to be used by other endpoints.
|
||||||
|
pub async fn create_filter_route(
|
||||||
|
body: Ruma<create_filter::v3::IncomingRequest>,
|
||||||
|
) -> Result<create_filter::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
Ok(create_filter::v3::Response::new(
|
||||||
|
services().users.create_filter(sender_user, &body.filter)?,
|
||||||
|
))
|
||||||
|
}
|
@ -0,0 +1,217 @@
|
|||||||
|
use crate::{service::media::FileMeta, services, utils, Error, Result, Ruma};
|
||||||
|
use ruma::api::client::{
|
||||||
|
error::ErrorKind,
|
||||||
|
media::{
|
||||||
|
create_content, get_content, get_content_as_filename, get_content_thumbnail,
|
||||||
|
get_media_config,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
const MXC_LENGTH: usize = 32;
|
||||||
|
|
||||||
|
/// # `GET /_matrix/media/r0/config`
|
||||||
|
///
|
||||||
|
/// Returns max upload size.
|
||||||
|
pub async fn get_media_config_route(
|
||||||
|
_body: Ruma<get_media_config::v3::Request>,
|
||||||
|
) -> Result<get_media_config::v3::Response> {
|
||||||
|
Ok(get_media_config::v3::Response {
|
||||||
|
upload_size: services().globals.max_request_size().into(),
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `POST /_matrix/media/r0/upload`
|
||||||
|
///
|
||||||
|
/// Permanently save media in the server.
|
||||||
|
///
|
||||||
|
/// - Some metadata will be saved in the database
|
||||||
|
/// - Media will be saved in the media/ directory
|
||||||
|
pub async fn create_content_route(
|
||||||
|
body: Ruma<create_content::v3::IncomingRequest>,
|
||||||
|
) -> Result<create_content::v3::Response> {
|
||||||
|
let mxc = format!(
|
||||||
|
"mxc://{}/{}",
|
||||||
|
services().globals.server_name(),
|
||||||
|
utils::random_string(MXC_LENGTH)
|
||||||
|
);
|
||||||
|
|
||||||
|
services()
|
||||||
|
.media
|
||||||
|
.create(
|
||||||
|
mxc.clone(),
|
||||||
|
body.filename
|
||||||
|
.as_ref()
|
||||||
|
.map(|filename| "inline; filename=".to_owned() + filename)
|
||||||
|
.as_deref(),
|
||||||
|
body.content_type.as_deref(),
|
||||||
|
&body.file,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
Ok(create_content::v3::Response {
|
||||||
|
content_uri: mxc.try_into().expect("Invalid mxc:// URI"),
|
||||||
|
blurhash: None,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
/// Fetches a piece of media from a remote homeserver over federation and
/// caches it in the local media store.
///
/// * `mxc` - the full `mxc://` URI, used as the cache key for the local store
/// * `server_name` - the remote homeserver to fetch from
/// * `media_id` - the media id on that server
///
/// Returns the remote server's response (file bytes plus metadata).
pub async fn get_remote_content(
    mxc: &str,
    server_name: &ruma::ServerName,
    media_id: &str,
) -> Result<get_content::v3::Response, Error> {
    let content_response = services()
        .sending
        .send_federation_request(
            server_name,
            get_content::v3::Request {
                // Ask the remote server not to forward the request further;
                // this avoids unbounded federation fetch chains.
                allow_remote: false,
                server_name,
                media_id,
            },
        )
        .await?;

    // Store the downloaded file locally so later requests for the same mxc
    // URI are served from our own media store instead of re-fetching.
    services()
        .media
        .create(
            mxc.to_string(),
            content_response.content_disposition.as_deref(),
            content_response.content_type.as_deref(),
            &content_response.file,
        )
        .await?;

    Ok(content_response)
}
|
||||||
|
|
||||||
|
/// # `GET /_matrix/media/r0/download/{serverName}/{mediaId}`
///
/// Load media from our server or over federation.
///
/// - Only allows federation if `allow_remote` is true
pub async fn get_content_route(
    body: Ruma<get_content::v3::IncomingRequest>,
) -> Result<get_content::v3::Response> {
    // Reconstruct the mxc URI that keys the local media store.
    let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);

    if let Some(FileMeta {
        content_disposition,
        content_type,
        file,
    }) = services().media.get(mxc.clone()).await?
    {
        // Found locally (uploaded here, or cached from an earlier remote
        // fetch).
        Ok(get_content::v3::Response {
            file,
            content_type,
            content_disposition,
            cross_origin_resource_policy: Some("cross-origin".to_owned()),
        })
    } else if &*body.server_name != services().globals.server_name() && body.allow_remote {
        // Not cached and owned by another server: fetch over federation.
        // get_remote_content also caches the file for future requests.
        let remote_content_response =
            get_remote_content(&mxc, &body.server_name, &body.media_id).await?;
        Ok(remote_content_response)
    } else {
        // Local media that doesn't exist, or remote fetching was disallowed.
        Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
    }
}
|
||||||
|
|
||||||
|
/// # `GET /_matrix/media/r0/download/{serverName}/{mediaId}/{fileName}`
|
||||||
|
///
|
||||||
|
/// Load media from our server or over federation, permitting desired filename.
|
||||||
|
///
|
||||||
|
/// - Only allows federation if `allow_remote` is true
|
||||||
|
pub async fn get_content_as_filename_route(
|
||||||
|
body: Ruma<get_content_as_filename::v3::IncomingRequest>,
|
||||||
|
) -> Result<get_content_as_filename::v3::Response> {
|
||||||
|
let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
|
||||||
|
|
||||||
|
if let Some(FileMeta {
|
||||||
|
content_disposition: _,
|
||||||
|
content_type,
|
||||||
|
file,
|
||||||
|
}) = services().media.get(mxc.clone()).await?
|
||||||
|
{
|
||||||
|
Ok(get_content_as_filename::v3::Response {
|
||||||
|
file,
|
||||||
|
content_type,
|
||||||
|
content_disposition: Some(format!("inline; filename={}", body.filename)),
|
||||||
|
cross_origin_resource_policy: Some("cross-origin".to_owned()),
|
||||||
|
})
|
||||||
|
} else if &*body.server_name != services().globals.server_name() && body.allow_remote {
|
||||||
|
let remote_content_response =
|
||||||
|
get_remote_content(&mxc, &body.server_name, &body.media_id).await?;
|
||||||
|
|
||||||
|
Ok(get_content_as_filename::v3::Response {
|
||||||
|
content_disposition: Some(format!("inline: filename={}", body.filename)),
|
||||||
|
content_type: remote_content_response.content_type,
|
||||||
|
file: remote_content_response.file,
|
||||||
|
cross_origin_resource_policy: Some("cross-origin".to_owned()),
|
||||||
|
})
|
||||||
|
} else {
|
||||||
|
Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
/// # `GET /_matrix/media/r0/thumbnail/{serverName}/{mediaId}`
|
||||||
|
///
|
||||||
|
/// Load media thumbnail from our server or over federation.
|
||||||
|
///
|
||||||
|
/// - Only allows federation if `allow_remote` is true
|
||||||
|
pub async fn get_content_thumbnail_route(
|
||||||
|
body: Ruma<get_content_thumbnail::v3::IncomingRequest>,
|
||||||
|
) -> Result<get_content_thumbnail::v3::Response> {
|
||||||
|
let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
|
||||||
|
|
||||||
|
if let Some(FileMeta {
|
||||||
|
content_type, file, ..
|
||||||
|
}) = services()
|
||||||
|
.media
|
||||||
|
.get_thumbnail(
|
||||||
|
mxc.clone(),
|
||||||
|
body.width
|
||||||
|
.try_into()
|
||||||
|
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?,
|
||||||
|
body.height
|
||||||
|
.try_into()
|
||||||
|
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?,
|
||||||
|
)
|
||||||
|
.await?
|
||||||
|
{
|
||||||
|
Ok(get_content_thumbnail::v3::Response {
|
||||||
|
file,
|
||||||
|
content_type,
|
||||||
|
cross_origin_resource_policy: Some("cross-origin".to_owned()),
|
||||||
|
})
|
||||||
|
} else if &*body.server_name != services().globals.server_name() && body.allow_remote {
|
||||||
|
let get_thumbnail_response = services()
|
||||||
|
.sending
|
||||||
|
.send_federation_request(
|
||||||
|
&body.server_name,
|
||||||
|
get_content_thumbnail::v3::Request {
|
||||||
|
allow_remote: false,
|
||||||
|
height: body.height,
|
||||||
|
width: body.width,
|
||||||
|
method: body.method.clone(),
|
||||||
|
server_name: &body.server_name,
|
||||||
|
media_id: &body.media_id,
|
||||||
|
},
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
services()
|
||||||
|
.media
|
||||||
|
.upload_thumbnail(
|
||||||
|
mxc,
|
||||||
|
None,
|
||||||
|
get_thumbnail_response.content_type.as_deref(),
|
||||||
|
body.width.try_into().expect("all UInts are valid u32s"),
|
||||||
|
body.height.try_into().expect("all UInts are valid u32s"),
|
||||||
|
&get_thumbnail_response.file,
|
||||||
|
)
|
||||||
|
.await?;
|
||||||
|
|
||||||
|
Ok(get_thumbnail_response)
|
||||||
|
} else {
|
||||||
|
Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
|
||||||
|
}
|
||||||
|
}
|
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,271 @@
|
|||||||
|
use crate::{service::pdu::PduBuilder, services, utils, Error, Result, Ruma};
|
||||||
|
use ruma::{
|
||||||
|
api::client::{
|
||||||
|
error::ErrorKind,
|
||||||
|
message::{get_message_events, send_message_event},
|
||||||
|
},
|
||||||
|
events::{RoomEventType, StateEventType},
|
||||||
|
};
|
||||||
|
use std::{
|
||||||
|
collections::{BTreeMap, HashSet},
|
||||||
|
sync::Arc,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// # `PUT /_matrix/client/r0/rooms/{roomId}/send/{eventType}/{txnId}`
///
/// Send a message event into the room.
///
/// - Is a NOOP if the txn id was already used before and returns the same event id again
/// - The only requirement for the content is that it has to be valid json
/// - Tries to send the event into the room, auth rules will determine if it is allowed
pub async fn send_message_event_route(
    body: Ruma<send_message_event::v3::IncomingRequest>,
) -> Result<send_message_event::v3::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
    let sender_device = body.sender_device.as_deref();

    // Take the per-room state mutex so event creation in this room is
    // serialized against other state changes.
    let mutex_state = Arc::clone(
        services()
            .globals
            .roomid_mutex_state
            .write()
            .unwrap()
            .entry(body.room_id.clone())
            .or_default(),
    );
    let state_lock = mutex_state.lock().await;

    // Forbid m.room.encrypted if encryption is disabled
    if RoomEventType::RoomEncrypted == body.event_type.to_string().into()
        && !services().globals.allow_encryption()
    {
        return Err(Error::BadRequest(
            ErrorKind::Forbidden,
            "Encryption has been disabled",
        ));
    }

    // Check if this is a new transaction id
    if let Some(response) =
        services()
            .transaction_ids
            .existing_txnid(sender_user, sender_device, &body.txn_id)?
    {
        // The client might have sent a txnid of the /sendToDevice endpoint
        // This txnid has no response associated with it
        if response.is_empty() {
            return Err(Error::BadRequest(
                ErrorKind::InvalidParam,
                "Tried to use txn id already used for an incompatible endpoint.",
            ));
        }

        // Idempotency: replay the event id recorded for this txn id.
        let event_id = utils::string_from_bytes(&response)
            .map_err(|_| Error::bad_database("Invalid txnid bytes in database."))?
            .try_into()
            .map_err(|_| Error::bad_database("Invalid event id in txnid data."))?;
        return Ok(send_message_event::v3::Response { event_id });
    }

    // Echo the transaction id in the event's unsigned data so the sending
    // client can match its own event in /sync responses.
    let mut unsigned = BTreeMap::new();
    unsigned.insert("transaction_id".to_owned(), body.txn_id.to_string().into());

    let event_id = services().rooms.timeline.build_and_append_pdu(
        PduBuilder {
            event_type: body.event_type.to_string().into(),
            content: serde_json::from_str(body.body.body.json().get())
                .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?,
            unsigned: Some(unsigned),
            state_key: None,
            redacts: None,
        },
        sender_user,
        &body.room_id,
        &state_lock,
    )?;

    // Record the txn id -> event id mapping for later deduplication.
    services().transaction_ids.add_txnid(
        sender_user,
        sender_device,
        &body.txn_id,
        event_id.as_bytes(),
    )?;

    drop(state_lock);

    Ok(send_message_event::v3::Response::new(
        (*event_id).to_owned(),
    ))
}
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/rooms/{roomId}/messages`
///
/// Allows paginating through room history.
///
/// - Only works if the user is joined (TODO: always allow, but only show events where the user was
/// joined, depending on history_visibility)
pub async fn get_message_events_route(
    body: Ruma<get_message_events::v3::IncomingRequest>,
) -> Result<get_message_events::v3::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
    let sender_device = body.sender_device.as_ref().expect("user is authenticated");

    if !services()
        .rooms
        .state_cache
        .is_joined(sender_user, &body.room_id)?
    {
        return Err(Error::BadRequest(
            ErrorKind::Forbidden,
            "You don't have permission to view this room.",
        ));
    }

    // `from` is a pdu count; when absent, start at the edge of the timeline
    // that matches the pagination direction.
    let from = match body.from.clone() {
        Some(from) => from
            .parse()
            .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from` value."))?,

        None => match body.dir {
            ruma::api::client::Direction::Forward => 0,
            ruma::api::client::Direction::Backward => u64::MAX,
        },
    };

    // `to` stays as Option<Result<..>> so a parse failure simply never
    // matches in the take_while below.
    let to = body.to.as_ref().map(|t| t.parse());

    services().rooms.lazy_loading.lazy_load_confirm_delivery(
        sender_user,
        sender_device,
        &body.room_id,
        from,
    )?;

    // Use limit or else 10
    let limit = body.limit.try_into().map_or(10_usize, |l: u32| l as usize);

    let next_token;

    let mut resp = get_message_events::v3::Response::new();

    // Senders of the returned events; their membership events are attached
    // to the response for lazy-loading clients.
    let mut lazy_loaded = HashSet::new();

    match body.dir {
        ruma::api::client::Direction::Forward => {
            // Collect up to `limit` events after `from`, pairing each pdu
            // with its count so pagination tokens can be produced.
            let events_after: Vec<_> = services()
                .rooms
                .timeline
                .pdus_after(sender_user, &body.room_id, from)?
                .take(limit)
                .filter_map(|r| r.ok()) // Filter out buggy events
                .filter_map(|(pdu_id, pdu)| {
                    services()
                        .rooms
                        .timeline
                        .pdu_count(&pdu_id)
                        .map(|pdu_count| (pdu_count, pdu))
                        .ok()
                })
                .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to`
                .collect();

            for (_, event) in &events_after {
                /* TODO: Remove this when these are resolved:
                 * https://github.com/vector-im/element-android/issues/3417
                 * https://github.com/vector-im/element-web/issues/21034
                if !services().rooms.lazy_loading.lazy_load_was_sent_before(
                    sender_user,
                    sender_device,
                    &body.room_id,
                    &event.sender,
                )? {
                    lazy_loaded.insert(event.sender.clone());
                }
                */
                lazy_loaded.insert(event.sender.clone());
            }

            // The last event's count becomes the `end` pagination token.
            next_token = events_after.last().map(|(count, _)| count).copied();

            let events_after: Vec<_> = events_after
                .into_iter()
                .map(|(_, pdu)| pdu.to_room_event())
                .collect();

            resp.start = from.to_string();
            resp.end = next_token.map(|count| count.to_string());
            resp.chunk = events_after;
        }
        ruma::api::client::Direction::Backward => {
            // Mirror of the Forward arm, walking the timeline backwards.
            let events_before: Vec<_> = services()
                .rooms
                .timeline
                .pdus_until(sender_user, &body.room_id, from)?
                .take(limit)
                .filter_map(|r| r.ok()) // Filter out buggy events
                .filter_map(|(pdu_id, pdu)| {
                    services()
                        .rooms
                        .timeline
                        .pdu_count(&pdu_id)
                        .map(|pdu_count| (pdu_count, pdu))
                        .ok()
                })
                .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to`
                .collect();

            for (_, event) in &events_before {
                /* TODO: Remove this when these are resolved:
                 * https://github.com/vector-im/element-android/issues/3417
                 * https://github.com/vector-im/element-web/issues/21034
                if !services().rooms.lazy_loading.lazy_load_was_sent_before(
                    sender_user,
                    sender_device,
                    &body.room_id,
                    &event.sender,
                )? {
                    lazy_loaded.insert(event.sender.clone());
                }
                */
                lazy_loaded.insert(event.sender.clone());
            }

            next_token = events_before.last().map(|(count, _)| count).copied();

            let events_before: Vec<_> = events_before
                .into_iter()
                .map(|(_, pdu)| pdu.to_room_event())
                .collect();

            resp.start = from.to_string();
            resp.end = next_token.map(|count| count.to_string());
            resp.chunk = events_before;
        }
    }

    // Attach the current membership event of every sender seen above.
    resp.state = Vec::new();
    for ll_id in &lazy_loaded {
        if let Some(member_event) = services().rooms.state_accessor.room_state_get(
            &body.room_id,
            &StateEventType::RoomMember,
            ll_id.as_str(),
        )? {
            resp.state.push(member_event.to_state_event());
        }
    }

    // TODO: enable again when we are sure clients can handle it
    /*
    if let Some(next_token) = next_token {
        services().rooms.lazy_loading.lazy_load_mark_sent(
            sender_user,
            sender_device,
            &body.room_id,
            lazy_loaded,
            next_token,
        );
    }
    */

    Ok(resp)
}
|
@ -0,0 +1,162 @@
|
|||||||
|
use crate::{services, Error, Result, Ruma};
|
||||||
|
use ruma::{
|
||||||
|
api::client::{error::ErrorKind, read_marker::set_read_marker, receipt::create_receipt},
|
||||||
|
events::{
|
||||||
|
receipt::{ReceiptThread, ReceiptType},
|
||||||
|
RoomAccountDataEventType,
|
||||||
|
},
|
||||||
|
MilliSecondsSinceUnixEpoch,
|
||||||
|
};
|
||||||
|
use std::collections::BTreeMap;
|
||||||
|
|
||||||
|
/// # `POST /_matrix/client/r0/rooms/{roomId}/read_markers`
///
/// Sets different types of read markers.
///
/// - Updates fully-read account data event to `fully_read`
/// - If `read_receipt` is set: Update private marker and public read receipt EDU
pub async fn set_read_marker_route(
    body: Ruma<set_read_marker::v3::IncomingRequest>,
) -> Result<set_read_marker::v3::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");

    if let Some(fully_read) = &body.fully_read {
        // The fully-read marker is stored as per-user, per-room account data.
        let fully_read_event = ruma::events::fully_read::FullyReadEvent {
            content: ruma::events::fully_read::FullyReadEventContent {
                event_id: fully_read.clone(),
            },
        };
        services().account_data.update(
            Some(&body.room_id),
            sender_user,
            RoomAccountDataEventType::FullyRead,
            &serde_json::to_value(fully_read_event).expect("to json value always works"),
        )?;
    }

    // Reading (publicly or privately) resets this room's notification counts.
    if body.private_read_receipt.is_some() || body.read_receipt.is_some() {
        services()
            .rooms
            .user
            .reset_notification_counts(sender_user, &body.room_id)?;
    }

    if let Some(event) = &body.private_read_receipt {
        // Private markers are stored as pdu counts, so the referenced event
        // must exist on this server.
        services().rooms.edus.read_receipt.private_read_set(
            &body.room_id,
            sender_user,
            services()
                .rooms
                .timeline
                .get_pdu_count(event)?
                .ok_or(Error::BadRequest(
                    ErrorKind::InvalidParam,
                    "Event does not exist.",
                ))?,
        )?;
    }

    if let Some(event) = &body.read_receipt {
        // Public read receipts are EDUs, built as
        // event id -> receipt type -> user -> receipt.
        let mut user_receipts = BTreeMap::new();
        user_receipts.insert(
            sender_user.clone(),
            ruma::events::receipt::Receipt {
                ts: Some(MilliSecondsSinceUnixEpoch::now()),
                thread: ReceiptThread::Unthreaded,
            },
        );

        let mut receipts = BTreeMap::new();
        receipts.insert(ReceiptType::Read, user_receipts);

        let mut receipt_content = BTreeMap::new();
        receipt_content.insert(event.to_owned(), receipts);

        services().rooms.edus.read_receipt.readreceipt_update(
            sender_user,
            &body.room_id,
            ruma::events::receipt::ReceiptEvent {
                content: ruma::events::receipt::ReceiptEventContent(receipt_content),
                room_id: body.room_id.clone(),
            },
        )?;
    }

    Ok(set_read_marker::v3::Response {})
}
|
||||||
|
|
||||||
|
/// # `POST /_matrix/client/r0/rooms/{roomId}/receipt/{receiptType}/{eventId}`
///
/// Sets private read marker and public read receipt EDU.
pub async fn create_receipt_route(
    body: Ruma<create_receipt::v3::IncomingRequest>,
) -> Result<create_receipt::v3::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");

    // Both public and private read receipts reset the room's notification
    // counts; fully-read markers do not.
    if matches!(
        &body.receipt_type,
        create_receipt::v3::ReceiptType::Read | create_receipt::v3::ReceiptType::ReadPrivate
    ) {
        services()
            .rooms
            .user
            .reset_notification_counts(sender_user, &body.room_id)?;
    }

    match body.receipt_type {
        create_receipt::v3::ReceiptType::FullyRead => {
            // Stored as per-room account data, same as /read_markers.
            let fully_read_event = ruma::events::fully_read::FullyReadEvent {
                content: ruma::events::fully_read::FullyReadEventContent {
                    event_id: body.event_id.clone(),
                },
            };
            services().account_data.update(
                Some(&body.room_id),
                sender_user,
                RoomAccountDataEventType::FullyRead,
                &serde_json::to_value(fully_read_event).expect("to json value always works"),
            )?;
        }
        create_receipt::v3::ReceiptType::Read => {
            // Public read receipt: an EDU built as
            // event id -> receipt type -> user -> receipt.
            let mut user_receipts = BTreeMap::new();
            user_receipts.insert(
                sender_user.clone(),
                ruma::events::receipt::Receipt {
                    ts: Some(MilliSecondsSinceUnixEpoch::now()),
                    thread: ReceiptThread::Unthreaded,
                },
            );
            let mut receipts = BTreeMap::new();
            receipts.insert(ReceiptType::Read, user_receipts);

            let mut receipt_content = BTreeMap::new();
            receipt_content.insert(body.event_id.to_owned(), receipts);

            services().rooms.edus.read_receipt.readreceipt_update(
                sender_user,
                &body.room_id,
                ruma::events::receipt::ReceiptEvent {
                    content: ruma::events::receipt::ReceiptEventContent(receipt_content),
                    room_id: body.room_id.clone(),
                },
            )?;
        }
        create_receipt::v3::ReceiptType::ReadPrivate => {
            // Private markers are stored as pdu counts; the event must exist
            // on this server.
            services().rooms.edus.read_receipt.private_read_set(
                &body.room_id,
                sender_user,
                services()
                    .rooms
                    .timeline
                    .get_pdu_count(&body.event_id)?
                    .ok_or(Error::BadRequest(
                        ErrorKind::InvalidParam,
                        "Event does not exist.",
                    ))?,
            )?;
        }
        // ReceiptType is non-exhaustive; reject anything we don't handle.
        _ => return Err(Error::bad_database("Unsupported receipt type")),
    }

    Ok(create_receipt::v3::Response {})
}
|
@ -0,0 +1,69 @@
|
|||||||
|
use crate::{services, utils::HtmlEscape, Error, Result, Ruma};
|
||||||
|
use ruma::{
|
||||||
|
api::client::{error::ErrorKind, room::report_content},
|
||||||
|
events::room::message,
|
||||||
|
int,
|
||||||
|
};
|
||||||
|
|
||||||
|
/// # `POST /_matrix/client/r0/rooms/{roomId}/report/{eventId}`
///
/// Reports an inappropriate event to homeserver admins
///
pub async fn report_event_route(
    body: Ruma<report_content::v3::IncomingRequest>,
) -> Result<report_content::v3::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");

    // The reported event must exist on this server.
    let pdu = match services().rooms.timeline.get_pdu(&body.event_id)? {
        Some(pdu) => pdu,
        _ => {
            return Err(Error::BadRequest(
                ErrorKind::InvalidParam,
                "Invalid Event ID",
            ))
        }
    };

    // The spec defines scores as -100 (most offensive) up to 0; anything
    // outside that range is rejected.
    if let Some(true) = body.score.map(|s| s > int!(0) || s < int!(-100)) {
        return Err(Error::BadRequest(
            ErrorKind::InvalidParam,
            "Invalid score, must be within 0 to -100",
        ));
    };

    if let Some(true) = body.reason.clone().map(|s| s.chars().count() > 250) {
        return Err(Error::BadRequest(
            ErrorKind::InvalidParam,
            "Reason too long, should be 250 characters or fewer",
        ));
    };

    // Forward the report into the admin room, once as plain text and once as
    // HTML. The user-controlled reason is HTML-escaped in the HTML body.
    services().admin
        .send_message(message::RoomMessageEventContent::text_html(
            format!(
                "Report received from: {}\n\n\
                Event ID: {:?}\n\
                Room ID: {:?}\n\
                Sent By: {:?}\n\n\
                Report Score: {:?}\n\
                Report Reason: {:?}",
                sender_user, pdu.event_id, pdu.room_id, pdu.sender, body.score, body.reason
            ),
            format!(
                "<details><summary>Report received from: <a href=\"https://matrix.to/#/{0:?}\">{0:?}\
                </a></summary><ul><li>Event Info<ul><li>Event ID: <code>{1:?}</code>\
                <a href=\"https://matrix.to/#/{2:?}/{1:?}\">🔗</a></li><li>Room ID: <code>{2:?}</code>\
                </li><li>Sent By: <a href=\"https://matrix.to/#/{3:?}\">{3:?}</a></li></ul></li><li>\
                Report Info<ul><li>Report Score: {4:?}</li><li>Report Reason: {5}</li></ul></li>\
                </ul></details>",
                sender_user,
                pdu.event_id,
                pdu.room_id,
                pdu.sender,
                body.score,
                HtmlEscape(body.reason.as_deref().unwrap_or(""))
            ),
        ));

    Ok(report_content::v3::Response {})
}
|
File diff suppressed because it is too large
Load Diff
@ -0,0 +1,128 @@
|
|||||||
|
use crate::{services, Error, Result, Ruma};
|
||||||
|
use ruma::{
|
||||||
|
api::client::tag::{create_tag, delete_tag, get_tags},
|
||||||
|
events::{
|
||||||
|
tag::{TagEvent, TagEventContent},
|
||||||
|
RoomAccountDataEventType,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
use std::collections::BTreeMap;
|
||||||
|
|
||||||
|
/// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}`
///
/// Adds a tag to the room.
///
/// - Inserts the tag into the tag event of the room account data.
pub async fn update_tag_route(
    body: Ruma<create_tag::v3::IncomingRequest>,
) -> Result<create_tag::v3::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");

    // Load the existing m.tag account data event for this room, if any.
    let event = services().account_data.get(
        Some(&body.room_id),
        sender_user,
        RoomAccountDataEventType::Tag,
    )?;

    // Deserialize it, falling back to an empty tag map when the user has no
    // tags on this room yet.
    let mut tags_event = event
        .map(|e| {
            serde_json::from_str(e.get())
                .map_err(|_| Error::bad_database("Invalid account data event in db."))
        })
        .unwrap_or_else(|| {
            Ok(TagEvent {
                content: TagEventContent {
                    tags: BTreeMap::new(),
                },
            })
        })?;

    // Insert (or overwrite) the tag, then write the whole event back.
    tags_event
        .content
        .tags
        .insert(body.tag.clone().into(), body.tag_info.clone());

    services().account_data.update(
        Some(&body.room_id),
        sender_user,
        RoomAccountDataEventType::Tag,
        &serde_json::to_value(tags_event).expect("to json value always works"),
    )?;

    Ok(create_tag::v3::Response {})
}
|
||||||
|
|
||||||
|
/// # `DELETE /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}`
///
/// Deletes a tag from the room.
///
/// - Removes the tag from the tag event of the room account data.
pub async fn delete_tag_route(
    body: Ruma<delete_tag::v3::IncomingRequest>,
) -> Result<delete_tag::v3::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");

    // Load the existing m.tag account data event for this room, if any.
    let event = services().account_data.get(
        Some(&body.room_id),
        sender_user,
        RoomAccountDataEventType::Tag,
    )?;

    // Deserialize it, falling back to an empty tag map when the user has no
    // tags on this room yet (removing from the empty map is a no-op).
    let mut tags_event = event
        .map(|e| {
            serde_json::from_str(e.get())
                .map_err(|_| Error::bad_database("Invalid account data event in db."))
        })
        .unwrap_or_else(|| {
            Ok(TagEvent {
                content: TagEventContent {
                    tags: BTreeMap::new(),
                },
            })
        })?;

    // Remove the tag, then write the whole event back.
    tags_event.content.tags.remove(&body.tag.clone().into());

    services().account_data.update(
        Some(&body.room_id),
        sender_user,
        RoomAccountDataEventType::Tag,
        &serde_json::to_value(tags_event).expect("to json value always works"),
    )?;

    Ok(delete_tag::v3::Response {})
}
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags`
|
||||||
|
///
|
||||||
|
/// Returns tags on the room.
|
||||||
|
///
|
||||||
|
/// - Gets the tag event of the room account data.
|
||||||
|
pub async fn get_tags_route(
|
||||||
|
body: Ruma<get_tags::v3::IncomingRequest>,
|
||||||
|
) -> Result<get_tags::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
let event = services().account_data.get(
|
||||||
|
Some(&body.room_id),
|
||||||
|
sender_user,
|
||||||
|
RoomAccountDataEventType::Tag,
|
||||||
|
)?;
|
||||||
|
|
||||||
|
let tags_event = event
|
||||||
|
.map(|e| {
|
||||||
|
serde_json::from_str(e.get())
|
||||||
|
.map_err(|_| Error::bad_database("Invalid account data event in db."))
|
||||||
|
})
|
||||||
|
.unwrap_or_else(|| {
|
||||||
|
Ok(TagEvent {
|
||||||
|
content: TagEventContent {
|
||||||
|
tags: BTreeMap::new(),
|
||||||
|
},
|
||||||
|
})
|
||||||
|
})?;
|
||||||
|
|
||||||
|
Ok(get_tags::v3::Response {
|
||||||
|
tags: tags_event.content.tags,
|
||||||
|
})
|
||||||
|
}
|
@ -0,0 +1,16 @@
|
|||||||
|
use crate::{Result, Ruma};
|
||||||
|
use ruma::api::client::thirdparty::get_protocols;
|
||||||
|
|
||||||
|
use std::collections::BTreeMap;
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/thirdparty/protocols`
|
||||||
|
///
|
||||||
|
/// TODO: Fetches all metadata about protocols supported by the homeserver.
|
||||||
|
pub async fn get_protocols_route(
|
||||||
|
_body: Ruma<get_protocols::v3::IncomingRequest>,
|
||||||
|
) -> Result<get_protocols::v3::Response> {
|
||||||
|
// TODO
|
||||||
|
Ok(get_protocols::v3::Response {
|
||||||
|
protocols: BTreeMap::new(),
|
||||||
|
})
|
||||||
|
}
|
@ -0,0 +1,40 @@
|
|||||||
|
use crate::{services, utils, Error, Result, Ruma};
|
||||||
|
use ruma::api::client::{error::ErrorKind, typing::create_typing_event};
|
||||||
|
|
||||||
|
/// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}`
|
||||||
|
///
|
||||||
|
/// Sets the typing state of the sender user.
|
||||||
|
pub async fn create_typing_event_route(
|
||||||
|
body: Ruma<create_typing_event::v3::IncomingRequest>,
|
||||||
|
) -> Result<create_typing_event::v3::Response> {
|
||||||
|
use create_typing_event::v3::Typing;
|
||||||
|
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
if !services()
|
||||||
|
.rooms
|
||||||
|
.state_cache
|
||||||
|
.is_joined(sender_user, &body.room_id)?
|
||||||
|
{
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::Forbidden,
|
||||||
|
"You are not in this room.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
if let Typing::Yes(duration) = body.state {
|
||||||
|
services().rooms.edus.typing.typing_add(
|
||||||
|
sender_user,
|
||||||
|
&body.room_id,
|
||||||
|
duration.as_millis() as u64 + utils::millis_since_unix_epoch(),
|
||||||
|
)?;
|
||||||
|
} else {
|
||||||
|
services()
|
||||||
|
.rooms
|
||||||
|
.edus
|
||||||
|
.typing
|
||||||
|
.typing_remove(sender_user, &body.room_id)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(create_typing_event::v3::Response {})
|
||||||
|
}
|
@ -0,0 +1,31 @@
|
|||||||
|
use std::{collections::BTreeMap, iter::FromIterator};
|
||||||
|
|
||||||
|
use ruma::api::client::discovery::get_supported_versions;
|
||||||
|
|
||||||
|
use crate::{Result, Ruma};
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/versions`
|
||||||
|
///
|
||||||
|
/// Get the versions of the specification and unstable features supported by this server.
|
||||||
|
///
|
||||||
|
/// - Versions take the form MAJOR.MINOR.PATCH
|
||||||
|
/// - Only the latest PATCH release will be reported for each MAJOR.MINOR value
|
||||||
|
/// - Unstable features are namespaced and may include version information in their name
|
||||||
|
///
|
||||||
|
/// Note: Unstable features are used while developing new features. Clients should avoid using
|
||||||
|
/// unstable features in their stable releases
|
||||||
|
pub async fn get_supported_versions_route(
|
||||||
|
_body: Ruma<get_supported_versions::IncomingRequest>,
|
||||||
|
) -> Result<get_supported_versions::Response> {
|
||||||
|
let resp = get_supported_versions::Response {
|
||||||
|
versions: vec![
|
||||||
|
"r0.5.0".to_owned(),
|
||||||
|
"r0.6.0".to_owned(),
|
||||||
|
"v1.1".to_owned(),
|
||||||
|
"v1.2".to_owned(),
|
||||||
|
],
|
||||||
|
unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]),
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(resp)
|
||||||
|
}
|
@ -0,0 +1,94 @@
|
|||||||
|
use crate::{services, Result, Ruma};
|
||||||
|
use ruma::{
|
||||||
|
api::client::user_directory::search_users,
|
||||||
|
events::{
|
||||||
|
room::join_rules::{JoinRule, RoomJoinRulesEventContent},
|
||||||
|
StateEventType,
|
||||||
|
},
|
||||||
|
};
|
||||||
|
|
||||||
|
/// # `POST /_matrix/client/r0/user_directory/search`
|
||||||
|
///
|
||||||
|
/// Searches all known users for a match.
|
||||||
|
///
|
||||||
|
/// - Hides any local users that aren't in any public rooms (i.e. those that have the join rule set to public)
|
||||||
|
/// and don't share a room with the sender
|
||||||
|
pub async fn search_users_route(
|
||||||
|
body: Ruma<search_users::v3::IncomingRequest>,
|
||||||
|
) -> Result<search_users::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
let limit = u64::from(body.limit) as usize;
|
||||||
|
|
||||||
|
let mut users = services().users.iter().filter_map(|user_id| {
|
||||||
|
// Filter out buggy users (they should not exist, but you never know...)
|
||||||
|
let user_id = user_id.ok()?;
|
||||||
|
|
||||||
|
let user = search_users::v3::User {
|
||||||
|
user_id: user_id.clone(),
|
||||||
|
display_name: services().users.displayname(&user_id).ok()?,
|
||||||
|
avatar_url: services().users.avatar_url(&user_id).ok()?,
|
||||||
|
};
|
||||||
|
|
||||||
|
let user_id_matches = user
|
||||||
|
.user_id
|
||||||
|
.to_string()
|
||||||
|
.to_lowercase()
|
||||||
|
.contains(&body.search_term.to_lowercase());
|
||||||
|
|
||||||
|
let user_displayname_matches = user
|
||||||
|
.display_name
|
||||||
|
.as_ref()
|
||||||
|
.filter(|name| {
|
||||||
|
name.to_lowercase()
|
||||||
|
.contains(&body.search_term.to_lowercase())
|
||||||
|
})
|
||||||
|
.is_some();
|
||||||
|
|
||||||
|
if !user_id_matches && !user_displayname_matches {
|
||||||
|
return None;
|
||||||
|
}
|
||||||
|
|
||||||
|
let user_is_in_public_rooms = services()
|
||||||
|
.rooms
|
||||||
|
.state_cache
|
||||||
|
.rooms_joined(&user_id)
|
||||||
|
.filter_map(|r| r.ok())
|
||||||
|
.any(|room| {
|
||||||
|
services()
|
||||||
|
.rooms
|
||||||
|
.state_accessor
|
||||||
|
.room_state_get(&room, &StateEventType::RoomJoinRules, "")
|
||||||
|
.map_or(false, |event| {
|
||||||
|
event.map_or(false, |event| {
|
||||||
|
serde_json::from_str(event.content.get())
|
||||||
|
.map_or(false, |r: RoomJoinRulesEventContent| {
|
||||||
|
r.join_rule == JoinRule::Public
|
||||||
|
})
|
||||||
|
})
|
||||||
|
})
|
||||||
|
});
|
||||||
|
|
||||||
|
if user_is_in_public_rooms {
|
||||||
|
return Some(user);
|
||||||
|
}
|
||||||
|
|
||||||
|
let user_is_in_shared_rooms = services()
|
||||||
|
.rooms
|
||||||
|
.user
|
||||||
|
.get_shared_rooms(vec![sender_user.clone(), user_id])
|
||||||
|
.ok()?
|
||||||
|
.next()
|
||||||
|
.is_some();
|
||||||
|
|
||||||
|
if user_is_in_shared_rooms {
|
||||||
|
return Some(user);
|
||||||
|
}
|
||||||
|
|
||||||
|
None
|
||||||
|
});
|
||||||
|
|
||||||
|
let results = users.by_ref().take(limit).collect();
|
||||||
|
let limited = users.next().is_some();
|
||||||
|
|
||||||
|
Ok(search_users::v3::Response { results, limited })
|
||||||
|
}
|
@ -0,0 +1,47 @@
|
|||||||
|
use crate::{services, Result, Ruma};
|
||||||
|
use hmac::{Hmac, Mac};
|
||||||
|
use ruma::{api::client::voip::get_turn_server_info, SecondsSinceUnixEpoch};
|
||||||
|
use sha1::Sha1;
|
||||||
|
use std::time::{Duration, SystemTime};
|
||||||
|
|
||||||
|
type HmacSha1 = Hmac<Sha1>;
|
||||||
|
|
||||||
|
/// # `GET /_matrix/client/r0/voip/turnServer`
|
||||||
|
///
|
||||||
|
/// TODO: Returns information about the recommended turn server.
|
||||||
|
pub async fn turn_server_route(
|
||||||
|
body: Ruma<get_turn_server_info::v3::IncomingRequest>,
|
||||||
|
) -> Result<get_turn_server_info::v3::Response> {
|
||||||
|
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
||||||
|
|
||||||
|
let turn_secret = services().globals.turn_secret().clone();
|
||||||
|
|
||||||
|
let (username, password) = if !turn_secret.is_empty() {
|
||||||
|
let expiry = SecondsSinceUnixEpoch::from_system_time(
|
||||||
|
SystemTime::now() + Duration::from_secs(services().globals.turn_ttl()),
|
||||||
|
)
|
||||||
|
.expect("time is valid");
|
||||||
|
|
||||||
|
let username: String = format!("{}:{}", expiry.get(), sender_user);
|
||||||
|
|
||||||
|
let mut mac = HmacSha1::new_from_slice(turn_secret.as_bytes())
|
||||||
|
.expect("HMAC can take key of any size");
|
||||||
|
mac.update(username.as_bytes());
|
||||||
|
|
||||||
|
let password: String = base64::encode_config(mac.finalize().into_bytes(), base64::STANDARD);
|
||||||
|
|
||||||
|
(username, password)
|
||||||
|
} else {
|
||||||
|
(
|
||||||
|
services().globals.turn_username().clone(),
|
||||||
|
services().globals.turn_password().clone(),
|
||||||
|
)
|
||||||
|
};
|
||||||
|
|
||||||
|
Ok(get_turn_server_info::v3::Response {
|
||||||
|
username,
|
||||||
|
password,
|
||||||
|
uris: services().globals.turn_uris().to_vec(),
|
||||||
|
ttl: Duration::from_secs(services().globals.turn_ttl()),
|
||||||
|
})
|
||||||
|
}
|
@ -0,0 +1,4 @@
|
|||||||
|
pub mod appservice_server;
|
||||||
|
pub mod client_server;
|
||||||
|
pub mod ruma_wrapper;
|
||||||
|
pub mod server_server;
|
@ -0,0 +1,364 @@
|
|||||||
|
use std::{collections::BTreeMap, iter::FromIterator, str};
|
||||||
|
|
||||||
|
use axum::{
|
||||||
|
async_trait,
|
||||||
|
body::{Full, HttpBody},
|
||||||
|
extract::{
|
||||||
|
rejection::TypedHeaderRejectionReason, FromRequest, Path, RequestParts, TypedHeader,
|
||||||
|
},
|
||||||
|
headers::{
|
||||||
|
authorization::{Bearer, Credentials},
|
||||||
|
Authorization,
|
||||||
|
},
|
||||||
|
response::{IntoResponse, Response},
|
||||||
|
BoxError,
|
||||||
|
};
|
||||||
|
use bytes::{BufMut, Bytes, BytesMut};
|
||||||
|
use http::StatusCode;
|
||||||
|
use ruma::{
|
||||||
|
api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse},
|
||||||
|
CanonicalJsonValue, OwnedDeviceId, OwnedServerName, UserId,
|
||||||
|
};
|
||||||
|
use serde::Deserialize;
|
||||||
|
use tracing::{debug, error, warn};
|
||||||
|
|
||||||
|
use super::{Ruma, RumaResponse};
|
||||||
|
use crate::{services, Error, Result};
|
||||||
|
|
||||||
|
#[async_trait]
|
||||||
|
impl<T, B> FromRequest<B> for Ruma<T>
|
||||||
|
where
|
||||||
|
T: IncomingRequest,
|
||||||
|
B: HttpBody + Send,
|
||||||
|
B::Data: Send,
|
||||||
|
B::Error: Into<BoxError>,
|
||||||
|
{
|
||||||
|
type Rejection = Error;
|
||||||
|
|
||||||
|
async fn from_request(req: &mut RequestParts<B>) -> Result<Self, Self::Rejection> {
|
||||||
|
#[derive(Deserialize)]
|
||||||
|
struct QueryParams {
|
||||||
|
access_token: Option<String>,
|
||||||
|
user_id: Option<String>,
|
||||||
|
}
|
||||||
|
|
||||||
|
let metadata = T::METADATA;
|
||||||
|
let auth_header = Option::<TypedHeader<Authorization<Bearer>>>::from_request(req).await?;
|
||||||
|
let path_params = Path::<Vec<String>>::from_request(req).await?;
|
||||||
|
|
||||||
|
let query = req.uri().query().unwrap_or_default();
|
||||||
|
let query_params: QueryParams = match ruma::serde::urlencoded::from_str(query) {
|
||||||
|
Ok(params) => params,
|
||||||
|
Err(e) => {
|
||||||
|
error!(%query, "Failed to deserialize query parameters: {}", e);
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::Unknown,
|
||||||
|
"Failed to read query parameters",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let token = match &auth_header {
|
||||||
|
Some(TypedHeader(Authorization(bearer))) => Some(bearer.token()),
|
||||||
|
None => query_params.access_token.as_deref(),
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut body = Bytes::from_request(req)
|
||||||
|
.await
|
||||||
|
.map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?;
|
||||||
|
|
||||||
|
let mut json_body = serde_json::from_slice::<CanonicalJsonValue>(&body).ok();
|
||||||
|
|
||||||
|
let appservices = services().appservice.all().unwrap();
|
||||||
|
let appservice_registration = appservices.iter().find(|(_id, registration)| {
|
||||||
|
registration
|
||||||
|
.get("as_token")
|
||||||
|
.and_then(|as_token| as_token.as_str())
|
||||||
|
.map_or(false, |as_token| token == Some(as_token))
|
||||||
|
});
|
||||||
|
|
||||||
|
let (sender_user, sender_device, sender_servername, from_appservice) =
|
||||||
|
if let Some((_id, registration)) = appservice_registration {
|
||||||
|
match metadata.authentication {
|
||||||
|
AuthScheme::AccessToken => {
|
||||||
|
let user_id = query_params.user_id.map_or_else(
|
||||||
|
|| {
|
||||||
|
UserId::parse_with_server_name(
|
||||||
|
registration
|
||||||
|
.get("sender_localpart")
|
||||||
|
.unwrap()
|
||||||
|
.as_str()
|
||||||
|
.unwrap(),
|
||||||
|
services().globals.server_name(),
|
||||||
|
)
|
||||||
|
.unwrap()
|
||||||
|
},
|
||||||
|
|s| UserId::parse(s).unwrap(),
|
||||||
|
);
|
||||||
|
|
||||||
|
if !services().users.exists(&user_id).unwrap() {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::Forbidden,
|
||||||
|
"User does not exist.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
|
||||||
|
// TODO: Check if appservice is allowed to be that user
|
||||||
|
(Some(user_id), None, None, true)
|
||||||
|
}
|
||||||
|
AuthScheme::ServerSignatures => (None, None, None, true),
|
||||||
|
AuthScheme::None => (None, None, None, true),
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
match metadata.authentication {
|
||||||
|
AuthScheme::AccessToken => {
|
||||||
|
let token = match token {
|
||||||
|
Some(token) => token,
|
||||||
|
_ => {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::MissingToken,
|
||||||
|
"Missing access token.",
|
||||||
|
))
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
match services().users.find_from_token(token).unwrap() {
|
||||||
|
None => {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::UnknownToken { soft_logout: false },
|
||||||
|
"Unknown access token.",
|
||||||
|
))
|
||||||
|
}
|
||||||
|
Some((user_id, device_id)) => (
|
||||||
|
Some(user_id),
|
||||||
|
Some(OwnedDeviceId::from(device_id)),
|
||||||
|
None,
|
||||||
|
false,
|
||||||
|
),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
AuthScheme::ServerSignatures => {
|
||||||
|
let TypedHeader(Authorization(x_matrix)) =
|
||||||
|
TypedHeader::<Authorization<XMatrix>>::from_request(req)
|
||||||
|
.await
|
||||||
|
.map_err(|e| {
|
||||||
|
warn!("Missing or invalid Authorization header: {}", e);
|
||||||
|
|
||||||
|
let msg = match e.reason() {
|
||||||
|
TypedHeaderRejectionReason::Missing => {
|
||||||
|
"Missing Authorization header."
|
||||||
|
}
|
||||||
|
TypedHeaderRejectionReason::Error(_) => {
|
||||||
|
"Invalid X-Matrix signatures."
|
||||||
|
}
|
||||||
|
_ => "Unknown header-related error",
|
||||||
|
};
|
||||||
|
|
||||||
|
Error::BadRequest(ErrorKind::Forbidden, msg)
|
||||||
|
})?;
|
||||||
|
|
||||||
|
let origin_signatures = BTreeMap::from_iter([(
|
||||||
|
x_matrix.key.clone(),
|
||||||
|
CanonicalJsonValue::String(x_matrix.sig),
|
||||||
|
)]);
|
||||||
|
|
||||||
|
let signatures = BTreeMap::from_iter([(
|
||||||
|
x_matrix.origin.as_str().to_owned(),
|
||||||
|
CanonicalJsonValue::Object(origin_signatures),
|
||||||
|
)]);
|
||||||
|
|
||||||
|
let mut request_map = BTreeMap::from_iter([
|
||||||
|
(
|
||||||
|
"method".to_owned(),
|
||||||
|
CanonicalJsonValue::String(req.method().to_string()),
|
||||||
|
),
|
||||||
|
(
|
||||||
|
"uri".to_owned(),
|
||||||
|
CanonicalJsonValue::String(req.uri().to_string()),
|
||||||
|
),
|
||||||
|
(
|
||||||
|
"origin".to_owned(),
|
||||||
|
CanonicalJsonValue::String(x_matrix.origin.as_str().to_owned()),
|
||||||
|
),
|
||||||
|
(
|
||||||
|
"destination".to_owned(),
|
||||||
|
CanonicalJsonValue::String(
|
||||||
|
services().globals.server_name().as_str().to_owned(),
|
||||||
|
),
|
||||||
|
),
|
||||||
|
(
|
||||||
|
"signatures".to_owned(),
|
||||||
|
CanonicalJsonValue::Object(signatures),
|
||||||
|
),
|
||||||
|
]);
|
||||||
|
|
||||||
|
if let Some(json_body) = &json_body {
|
||||||
|
request_map.insert("content".to_owned(), json_body.clone());
|
||||||
|
};
|
||||||
|
|
||||||
|
let keys_result = services()
|
||||||
|
.rooms
|
||||||
|
.event_handler
|
||||||
|
.fetch_signing_keys(&x_matrix.origin, vec![x_matrix.key.to_owned()])
|
||||||
|
.await;
|
||||||
|
|
||||||
|
let keys = match keys_result {
|
||||||
|
Ok(b) => b,
|
||||||
|
Err(e) => {
|
||||||
|
warn!("Failed to fetch signing keys: {}", e);
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::Forbidden,
|
||||||
|
"Failed to fetch signing keys.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let pub_key_map =
|
||||||
|
BTreeMap::from_iter([(x_matrix.origin.as_str().to_owned(), keys)]);
|
||||||
|
|
||||||
|
match ruma::signatures::verify_json(&pub_key_map, &request_map) {
|
||||||
|
Ok(()) => (None, None, Some(x_matrix.origin), false),
|
||||||
|
Err(e) => {
|
||||||
|
warn!(
|
||||||
|
"Failed to verify json request from {}: {}\n{:?}",
|
||||||
|
x_matrix.origin, e, request_map
|
||||||
|
);
|
||||||
|
|
||||||
|
if req.uri().to_string().contains('@') {
|
||||||
|
warn!(
|
||||||
|
"Request uri contained '@' character. Make sure your \
|
||||||
|
reverse proxy gives Conduit the raw uri (apache: use \
|
||||||
|
nocanon)"
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::Forbidden,
|
||||||
|
"Failed to verify X-Matrix signatures.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
AuthScheme::None => (None, None, None, false),
|
||||||
|
}
|
||||||
|
};
|
||||||
|
|
||||||
|
let mut http_request = http::Request::builder().uri(req.uri()).method(req.method());
|
||||||
|
*http_request.headers_mut().unwrap() = req.headers().clone();
|
||||||
|
|
||||||
|
if let Some(CanonicalJsonValue::Object(json_body)) = &mut json_body {
|
||||||
|
let user_id = sender_user.clone().unwrap_or_else(|| {
|
||||||
|
UserId::parse_with_server_name("", services().globals.server_name())
|
||||||
|
.expect("we know this is valid")
|
||||||
|
});
|
||||||
|
|
||||||
|
let uiaa_request = json_body
|
||||||
|
.get("auth")
|
||||||
|
.and_then(|auth| auth.as_object())
|
||||||
|
.and_then(|auth| auth.get("session"))
|
||||||
|
.and_then(|session| session.as_str())
|
||||||
|
.and_then(|session| {
|
||||||
|
services().uiaa.get_uiaa_request(
|
||||||
|
&user_id,
|
||||||
|
&sender_device.clone().unwrap_or_else(|| "".into()),
|
||||||
|
session,
|
||||||
|
)
|
||||||
|
});
|
||||||
|
|
||||||
|
if let Some(CanonicalJsonValue::Object(initial_request)) = uiaa_request {
|
||||||
|
for (key, value) in initial_request {
|
||||||
|
json_body.entry(key).or_insert(value);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
let mut buf = BytesMut::new().writer();
|
||||||
|
serde_json::to_writer(&mut buf, json_body).expect("value serialization can't fail");
|
||||||
|
body = buf.into_inner().freeze();
|
||||||
|
}
|
||||||
|
|
||||||
|
let http_request = http_request.body(&*body).unwrap();
|
||||||
|
|
||||||
|
debug!("{:?}", http_request);
|
||||||
|
|
||||||
|
let body = T::try_from_http_request(http_request, &path_params).map_err(|e| {
|
||||||
|
warn!("{:?}\n{:?}", e, json_body);
|
||||||
|
Error::BadRequest(ErrorKind::BadJson, "Failed to deserialize request.")
|
||||||
|
})?;
|
||||||
|
|
||||||
|
Ok(Ruma {
|
||||||
|
body,
|
||||||
|
sender_user,
|
||||||
|
sender_device,
|
||||||
|
sender_servername,
|
||||||
|
from_appservice,
|
||||||
|
json_body,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
struct XMatrix {
|
||||||
|
origin: OwnedServerName,
|
||||||
|
key: String, // KeyName?
|
||||||
|
sig: String,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl Credentials for XMatrix {
|
||||||
|
const SCHEME: &'static str = "X-Matrix";
|
||||||
|
|
||||||
|
fn decode(value: &http::HeaderValue) -> Option<Self> {
|
||||||
|
debug_assert!(
|
||||||
|
value.as_bytes().starts_with(b"X-Matrix "),
|
||||||
|
"HeaderValue to decode should start with \"X-Matrix ..\", received = {:?}",
|
||||||
|
value,
|
||||||
|
);
|
||||||
|
|
||||||
|
let parameters = str::from_utf8(&value.as_bytes()["X-Matrix ".len()..])
|
||||||
|
.ok()?
|
||||||
|
.trim_start();
|
||||||
|
|
||||||
|
let mut origin = None;
|
||||||
|
let mut key = None;
|
||||||
|
let mut sig = None;
|
||||||
|
|
||||||
|
for entry in parameters.split_terminator(',') {
|
||||||
|
let (name, value) = entry.split_once('=')?;
|
||||||
|
|
||||||
|
// It's not at all clear why some fields are quoted and others not in the spec,
|
||||||
|
// let's simply accept either form for every field.
|
||||||
|
let value = value
|
||||||
|
.strip_prefix('"')
|
||||||
|
.and_then(|rest| rest.strip_suffix('"'))
|
||||||
|
.unwrap_or(value);
|
||||||
|
|
||||||
|
// FIXME: Catch multiple fields of the same name
|
||||||
|
match name {
|
||||||
|
"origin" => origin = Some(value.try_into().ok()?),
|
||||||
|
"key" => key = Some(value.to_owned()),
|
||||||
|
"sig" => sig = Some(value.to_owned()),
|
||||||
|
_ => debug!(
|
||||||
|
"Unexpected field `{}` in X-Matrix Authorization header",
|
||||||
|
name
|
||||||
|
),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
Some(Self {
|
||||||
|
origin: origin?,
|
||||||
|
key: key?,
|
||||||
|
sig: sig?,
|
||||||
|
})
|
||||||
|
}
|
||||||
|
|
||||||
|
fn encode(&self) -> http::HeaderValue {
|
||||||
|
todo!()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T: OutgoingResponse> IntoResponse for RumaResponse<T> {
|
||||||
|
fn into_response(self) -> Response {
|
||||||
|
match self.0.try_into_http_response::<BytesMut>() {
|
||||||
|
Ok(res) => res.map(BytesMut::freeze).map(Full::new).into_response(),
|
||||||
|
Err(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(),
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
@ -0,0 +1,43 @@
|
|||||||
|
use crate::Error;
|
||||||
|
use ruma::{
|
||||||
|
api::client::uiaa::UiaaResponse, CanonicalJsonValue, OwnedDeviceId, OwnedServerName,
|
||||||
|
OwnedUserId,
|
||||||
|
};
|
||||||
|
use std::ops::Deref;
|
||||||
|
|
||||||
|
#[cfg(feature = "conduit_bin")]
|
||||||
|
mod axum;
|
||||||
|
|
||||||
|
/// Extractor for Ruma request structs
|
||||||
|
pub struct Ruma<T> {
|
||||||
|
pub body: T,
|
||||||
|
pub sender_user: Option<OwnedUserId>,
|
||||||
|
pub sender_device: Option<OwnedDeviceId>,
|
||||||
|
pub sender_servername: Option<OwnedServerName>,
|
||||||
|
// This is None when body is not a valid string
|
||||||
|
pub json_body: Option<CanonicalJsonValue>,
|
||||||
|
pub from_appservice: bool,
|
||||||
|
}
|
||||||
|
|
||||||
|
impl<T> Deref for Ruma<T> {
|
||||||
|
type Target = T;
|
||||||
|
|
||||||
|
fn deref(&self) -> &Self::Target {
|
||||||
|
&self.body
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#[derive(Clone)]
|
||||||
|
pub struct RumaResponse<T>(pub T);
|
||||||
|
|
||||||
|
impl<T> From<T> for RumaResponse<T> {
|
||||||
|
fn from(t: T) -> Self {
|
||||||
|
Self(t)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl From<Error> for RumaResponse<UiaaResponse> {
|
||||||
|
fn from(t: Error) -> Self {
|
||||||
|
t.to_response()
|
||||||
|
}
|
||||||
|
}
|
File diff suppressed because it is too large
Load Diff
@ -1,765 +0,0 @@
|
|||||||
use std::{
|
|
||||||
collections::BTreeMap,
|
|
||||||
convert::{TryFrom, TryInto},
|
|
||||||
sync::Arc,
|
|
||||||
};
|
|
||||||
|
|
||||||
use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH};
|
|
||||||
use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma};
|
|
||||||
use ruma::{
|
|
||||||
api::client::{
|
|
||||||
error::ErrorKind,
|
|
||||||
r0::{
|
|
||||||
account::{
|
|
||||||
change_password, deactivate, get_username_availability, register, whoami,
|
|
||||||
ThirdPartyIdRemovalStatus,
|
|
||||||
},
|
|
||||||
contact::get_contacts,
|
|
||||||
uiaa::{AuthFlow, AuthType, UiaaInfo},
|
|
||||||
},
|
|
||||||
},
|
|
||||||
events::{
|
|
||||||
room::{
|
|
||||||
canonical_alias::RoomCanonicalAliasEventContent,
|
|
||||||
create::RoomCreateEventContent,
|
|
||||||
guest_access::{GuestAccess, RoomGuestAccessEventContent},
|
|
||||||
history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent},
|
|
||||||
join_rules::{JoinRule, RoomJoinRulesEventContent},
|
|
||||||
member::{MembershipState, RoomMemberEventContent},
|
|
||||||
message::RoomMessageEventContent,
|
|
||||||
name::RoomNameEventContent,
|
|
||||||
power_levels::RoomPowerLevelsEventContent,
|
|
||||||
topic::RoomTopicEventContent,
|
|
||||||
},
|
|
||||||
EventType,
|
|
||||||
},
|
|
||||||
identifiers::RoomName,
|
|
||||||
push, RoomAliasId, RoomId, RoomVersionId, UserId,
|
|
||||||
};
|
|
||||||
use serde_json::value::to_raw_value;
|
|
||||||
use tracing::info;
|
|
||||||
|
|
||||||
use register::RegistrationKind;
|
|
||||||
#[cfg(feature = "conduit_bin")]
|
|
||||||
use rocket::{get, post};
|
|
||||||
|
|
||||||
const GUEST_NAME_LENGTH: usize = 10;
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/register/available`
|
|
||||||
///
|
|
||||||
/// Checks if a username is valid and available on this server.
|
|
||||||
///
|
|
||||||
/// Conditions for returning true:
|
|
||||||
/// - The user id is not historical
|
|
||||||
/// - The server name of the user id matches this server
|
|
||||||
/// - No user or appservice on this server already claimed this username
|
|
||||||
///
|
|
||||||
/// Note: This will not reserve the username, so the username might become invalid when trying to register
|
|
||||||
#[cfg_attr(
|
|
||||||
feature = "conduit_bin",
|
|
||||||
get("/_matrix/client/r0/register/available", data = "<body>")
|
|
||||||
)]
|
|
||||||
#[tracing::instrument(skip(db, body))]
|
|
||||||
pub async fn get_register_available_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<get_username_availability::Request<'_>>,
|
|
||||||
) -> ConduitResult<get_username_availability::Response> {
|
|
||||||
// Validate user id
|
|
||||||
let user_id = UserId::parse_with_server_name(body.username.clone(), db.globals.server_name())
|
|
||||||
.ok()
|
|
||||||
.filter(|user_id| {
|
|
||||||
!user_id.is_historical() && user_id.server_name() == db.globals.server_name()
|
|
||||||
})
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidUsername,
|
|
||||||
"Username is invalid.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
// Check if username is creative enough
|
|
||||||
if db.users.exists(&user_id)? {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::UserInUse,
|
|
||||||
"Desired user ID is already taken.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
// TODO add check for appservice namespaces
|
|
||||||
|
|
||||||
// If no if check is true we have an username that's available to be used.
|
|
||||||
Ok(get_username_availability::Response { available: true }.into())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/register`
|
|
||||||
///
|
|
||||||
/// Register an account on this homeserver.
|
|
||||||
///
|
|
||||||
/// You can use [`GET /_matrix/client/r0/register/available`](fn.get_register_available_route.html)
|
|
||||||
/// to check if the user id is valid and available.
|
|
||||||
///
|
|
||||||
/// - Only works if registration is enabled
|
|
||||||
/// - If type is guest: ignores all parameters except initial_device_display_name
|
|
||||||
/// - If sender is not appservice: Requires UIAA (but we only use a dummy stage)
|
|
||||||
/// - If type is not guest and no username is given: Always fails after UIAA check
|
|
||||||
/// - Creates a new account and populates it with default account data
|
|
||||||
/// - If `inhibit_login` is false: Creates a device and returns device id and access_token
|
|
||||||
#[cfg_attr(
|
|
||||||
feature = "conduit_bin",
|
|
||||||
post("/_matrix/client/r0/register", data = "<body>")
|
|
||||||
)]
|
|
||||||
#[tracing::instrument(skip(db, body))]
|
|
||||||
pub async fn register_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<register::Request<'_>>,
|
|
||||||
) -> ConduitResult<register::Response> {
|
|
||||||
if !db.globals.allow_registration() && !body.from_appservice {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::Forbidden,
|
|
||||||
"Registration has been disabled.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let is_guest = body.kind == RegistrationKind::Guest;
|
|
||||||
|
|
||||||
let mut missing_username = false;
|
|
||||||
|
|
||||||
// Validate user id
|
|
||||||
let user_id = UserId::parse_with_server_name(
|
|
||||||
if is_guest {
|
|
||||||
utils::random_string(GUEST_NAME_LENGTH)
|
|
||||||
} else {
|
|
||||||
body.username.clone().unwrap_or_else(|| {
|
|
||||||
// If the user didn't send a username field, that means the client is just trying
|
|
||||||
// the get an UIAA error to see available flows
|
|
||||||
missing_username = true;
|
|
||||||
// Just give the user a random name. He won't be able to register with it anyway.
|
|
||||||
utils::random_string(GUEST_NAME_LENGTH)
|
|
||||||
})
|
|
||||||
}
|
|
||||||
.to_lowercase(),
|
|
||||||
db.globals.server_name(),
|
|
||||||
)
|
|
||||||
.ok()
|
|
||||||
.filter(|user_id| !user_id.is_historical() && user_id.server_name() == db.globals.server_name())
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidUsername,
|
|
||||||
"Username is invalid.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
// Check if username is creative enough
|
|
||||||
if db.users.exists(&user_id)? {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::UserInUse,
|
|
||||||
"Desired user ID is already taken.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
// UIAA
|
|
||||||
let mut uiaainfo = UiaaInfo {
|
|
||||||
flows: vec![AuthFlow {
|
|
||||||
stages: vec![AuthType::Dummy],
|
|
||||||
}],
|
|
||||||
completed: Vec::new(),
|
|
||||||
params: Default::default(),
|
|
||||||
session: None,
|
|
||||||
auth_error: None,
|
|
||||||
};
|
|
||||||
|
|
||||||
if !body.from_appservice {
|
|
||||||
if let Some(auth) = &body.auth {
|
|
||||||
let (worked, uiaainfo) = db.uiaa.try_auth(
|
|
||||||
&UserId::parse_with_server_name("", db.globals.server_name())
|
|
||||||
.expect("we know this is valid"),
|
|
||||||
"".into(),
|
|
||||||
auth,
|
|
||||||
&uiaainfo,
|
|
||||||
&db.users,
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
|
||||||
if !worked {
|
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
|
||||||
}
|
|
||||||
// Success!
|
|
||||||
} else if let Some(json) = body.json_body {
|
|
||||||
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
|
||||||
db.uiaa.create(
|
|
||||||
&UserId::parse_with_server_name("", db.globals.server_name())
|
|
||||||
.expect("we know this is valid"),
|
|
||||||
"".into(),
|
|
||||||
&uiaainfo,
|
|
||||||
&json,
|
|
||||||
)?;
|
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
|
||||||
} else {
|
|
||||||
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if missing_username {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::MissingParam,
|
|
||||||
"Missing username field.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let password = if is_guest {
|
|
||||||
None
|
|
||||||
} else {
|
|
||||||
body.password.as_deref()
|
|
||||||
};
|
|
||||||
|
|
||||||
// Create user
|
|
||||||
db.users.create(&user_id, password)?;
|
|
||||||
|
|
||||||
// Default to pretty displayname
|
|
||||||
let displayname = format!("{} ⚡️", user_id.localpart());
|
|
||||||
db.users
|
|
||||||
.set_displayname(&user_id, Some(displayname.clone()))?;
|
|
||||||
|
|
||||||
// Initial account data
|
|
||||||
db.account_data.update(
|
|
||||||
None,
|
|
||||||
&user_id,
|
|
||||||
EventType::PushRules,
|
|
||||||
&ruma::events::push_rules::PushRulesEvent {
|
|
||||||
content: ruma::events::push_rules::PushRulesEventContent {
|
|
||||||
global: push::Ruleset::server_default(&user_id),
|
|
||||||
},
|
|
||||||
},
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
// Inhibit login does not work for guests
|
|
||||||
if !is_guest && body.inhibit_login {
|
|
||||||
return Ok(register::Response {
|
|
||||||
access_token: None,
|
|
||||||
user_id,
|
|
||||||
device_id: None,
|
|
||||||
}
|
|
||||||
.into());
|
|
||||||
}
|
|
||||||
|
|
||||||
// Generate new device id if the user didn't specify one
|
|
||||||
let device_id = if is_guest {
|
|
||||||
None
|
|
||||||
} else {
|
|
||||||
body.device_id.clone()
|
|
||||||
}
|
|
||||||
.unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into());
|
|
||||||
|
|
||||||
// Generate new token for the device
|
|
||||||
let token = utils::random_string(TOKEN_LENGTH);
|
|
||||||
|
|
||||||
// Create device for this account
|
|
||||||
db.users.create_device(
|
|
||||||
&user_id,
|
|
||||||
&device_id,
|
|
||||||
&token,
|
|
||||||
body.initial_device_display_name.clone(),
|
|
||||||
)?;
|
|
||||||
|
|
||||||
// If this is the first user on this server, create the admin room
|
|
||||||
if db.users.count()? == 1 {
|
|
||||||
// Create a user for the server
|
|
||||||
let conduit_user = UserId::parse_with_server_name("conduit", db.globals.server_name())
|
|
||||||
.expect("@conduit:server_name is valid");
|
|
||||||
|
|
||||||
db.users.create(&conduit_user, None)?;
|
|
||||||
|
|
||||||
let room_id = RoomId::new(db.globals.server_name());
|
|
||||||
|
|
||||||
db.rooms.get_or_create_shortroomid(&room_id, &db.globals)?;
|
|
||||||
|
|
||||||
let mutex_state = Arc::clone(
|
|
||||||
db.globals
|
|
||||||
.roomid_mutex_state
|
|
||||||
.write()
|
|
||||||
.unwrap()
|
|
||||||
.entry(room_id.clone())
|
|
||||||
.or_default(),
|
|
||||||
);
|
|
||||||
let state_lock = mutex_state.lock().await;
|
|
||||||
|
|
||||||
let mut content = RoomCreateEventContent::new(conduit_user.clone());
|
|
||||||
content.federate = true;
|
|
||||||
content.predecessor = None;
|
|
||||||
content.room_version = RoomVersionId::Version6;
|
|
||||||
|
|
||||||
// 1. The room create event
|
|
||||||
db.rooms.build_and_append_pdu(
|
|
||||||
PduBuilder {
|
|
||||||
event_type: EventType::RoomCreate,
|
|
||||||
content: to_raw_value(&content).expect("event is valid, we just created it"),
|
|
||||||
unsigned: None,
|
|
||||||
state_key: Some("".to_owned()),
|
|
||||||
redacts: None,
|
|
||||||
},
|
|
||||||
&conduit_user,
|
|
||||||
&room_id,
|
|
||||||
&db,
|
|
||||||
&state_lock,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
// 2. Make conduit bot join
|
|
||||||
db.rooms.build_and_append_pdu(
|
|
||||||
PduBuilder {
|
|
||||||
event_type: EventType::RoomMember,
|
|
||||||
content: to_raw_value(&RoomMemberEventContent {
|
|
||||||
membership: MembershipState::Join,
|
|
||||||
displayname: None,
|
|
||||||
avatar_url: None,
|
|
||||||
is_direct: None,
|
|
||||||
third_party_invite: None,
|
|
||||||
blurhash: None,
|
|
||||||
reason: None,
|
|
||||||
})
|
|
||||||
.expect("event is valid, we just created it"),
|
|
||||||
unsigned: None,
|
|
||||||
state_key: Some(conduit_user.to_string()),
|
|
||||||
redacts: None,
|
|
||||||
},
|
|
||||||
&conduit_user,
|
|
||||||
&room_id,
|
|
||||||
&db,
|
|
||||||
&state_lock,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
// 3. Power levels
|
|
||||||
let mut users = BTreeMap::new();
|
|
||||||
users.insert(conduit_user.clone(), 100.into());
|
|
||||||
users.insert(user_id.clone(), 100.into());
|
|
||||||
|
|
||||||
db.rooms.build_and_append_pdu(
|
|
||||||
PduBuilder {
|
|
||||||
event_type: EventType::RoomPowerLevels,
|
|
||||||
content: to_raw_value(&RoomPowerLevelsEventContent {
|
|
||||||
users,
|
|
||||||
..Default::default()
|
|
||||||
})
|
|
||||||
.expect("event is valid, we just created it"),
|
|
||||||
unsigned: None,
|
|
||||||
state_key: Some("".to_owned()),
|
|
||||||
redacts: None,
|
|
||||||
},
|
|
||||||
&conduit_user,
|
|
||||||
&room_id,
|
|
||||||
&db,
|
|
||||||
&state_lock,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
// 4.1 Join Rules
|
|
||||||
db.rooms.build_and_append_pdu(
|
|
||||||
PduBuilder {
|
|
||||||
event_type: EventType::RoomJoinRules,
|
|
||||||
content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite))
|
|
||||||
.expect("event is valid, we just created it"),
|
|
||||||
unsigned: None,
|
|
||||||
state_key: Some("".to_owned()),
|
|
||||||
redacts: None,
|
|
||||||
},
|
|
||||||
&conduit_user,
|
|
||||||
&room_id,
|
|
||||||
&db,
|
|
||||||
&state_lock,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
// 4.2 History Visibility
|
|
||||||
db.rooms.build_and_append_pdu(
|
|
||||||
PduBuilder {
|
|
||||||
event_type: EventType::RoomHistoryVisibility,
|
|
||||||
content: to_raw_value(&RoomHistoryVisibilityEventContent::new(
|
|
||||||
HistoryVisibility::Shared,
|
|
||||||
))
|
|
||||||
.expect("event is valid, we just created it"),
|
|
||||||
unsigned: None,
|
|
||||||
state_key: Some("".to_owned()),
|
|
||||||
redacts: None,
|
|
||||||
},
|
|
||||||
&conduit_user,
|
|
||||||
&room_id,
|
|
||||||
&db,
|
|
||||||
&state_lock,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
// 4.3 Guest Access
|
|
||||||
db.rooms.build_and_append_pdu(
|
|
||||||
PduBuilder {
|
|
||||||
event_type: EventType::RoomGuestAccess,
|
|
||||||
content: to_raw_value(&RoomGuestAccessEventContent::new(GuestAccess::Forbidden))
|
|
||||||
.expect("event is valid, we just created it"),
|
|
||||||
unsigned: None,
|
|
||||||
state_key: Some("".to_owned()),
|
|
||||||
redacts: None,
|
|
||||||
},
|
|
||||||
&conduit_user,
|
|
||||||
&room_id,
|
|
||||||
&db,
|
|
||||||
&state_lock,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
// 6. Events implied by name and topic
|
|
||||||
let room_name =
|
|
||||||
Box::<RoomName>::try_from(format!("{} Admin Room", db.globals.server_name()))
|
|
||||||
.expect("Room name is valid");
|
|
||||||
db.rooms.build_and_append_pdu(
|
|
||||||
PduBuilder {
|
|
||||||
event_type: EventType::RoomName,
|
|
||||||
content: to_raw_value(&RoomNameEventContent::new(Some(room_name)))
|
|
||||||
.expect("event is valid, we just created it"),
|
|
||||||
unsigned: None,
|
|
||||||
state_key: Some("".to_owned()),
|
|
||||||
redacts: None,
|
|
||||||
},
|
|
||||||
&conduit_user,
|
|
||||||
&room_id,
|
|
||||||
&db,
|
|
||||||
&state_lock,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
db.rooms.build_and_append_pdu(
|
|
||||||
PduBuilder {
|
|
||||||
event_type: EventType::RoomTopic,
|
|
||||||
content: to_raw_value(&RoomTopicEventContent {
|
|
||||||
topic: format!("Manage {}", db.globals.server_name()),
|
|
||||||
})
|
|
||||||
.expect("event is valid, we just created it"),
|
|
||||||
unsigned: None,
|
|
||||||
state_key: Some("".to_owned()),
|
|
||||||
redacts: None,
|
|
||||||
},
|
|
||||||
&conduit_user,
|
|
||||||
&room_id,
|
|
||||||
&db,
|
|
||||||
&state_lock,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
// Room alias
|
|
||||||
let alias: RoomAliasId = format!("#admins:{}", db.globals.server_name())
|
|
||||||
.try_into()
|
|
||||||
.expect("#admins:server_name is a valid alias name");
|
|
||||||
|
|
||||||
db.rooms.build_and_append_pdu(
|
|
||||||
PduBuilder {
|
|
||||||
event_type: EventType::RoomCanonicalAlias,
|
|
||||||
content: to_raw_value(&RoomCanonicalAliasEventContent {
|
|
||||||
alias: Some(alias.clone()),
|
|
||||||
alt_aliases: Vec::new(),
|
|
||||||
})
|
|
||||||
.expect("event is valid, we just created it"),
|
|
||||||
unsigned: None,
|
|
||||||
state_key: Some("".to_owned()),
|
|
||||||
redacts: None,
|
|
||||||
},
|
|
||||||
&conduit_user,
|
|
||||||
&room_id,
|
|
||||||
&db,
|
|
||||||
&state_lock,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
db.rooms.set_alias(&alias, Some(&room_id), &db.globals)?;
|
|
||||||
|
|
||||||
// Invite and join the real user
|
|
||||||
db.rooms.build_and_append_pdu(
|
|
||||||
PduBuilder {
|
|
||||||
event_type: EventType::RoomMember,
|
|
||||||
content: to_raw_value(&RoomMemberEventContent {
|
|
||||||
membership: MembershipState::Invite,
|
|
||||||
displayname: None,
|
|
||||||
avatar_url: None,
|
|
||||||
is_direct: None,
|
|
||||||
third_party_invite: None,
|
|
||||||
blurhash: None,
|
|
||||||
reason: None,
|
|
||||||
})
|
|
||||||
.expect("event is valid, we just created it"),
|
|
||||||
unsigned: None,
|
|
||||||
state_key: Some(user_id.to_string()),
|
|
||||||
redacts: None,
|
|
||||||
},
|
|
||||||
&conduit_user,
|
|
||||||
&room_id,
|
|
||||||
&db,
|
|
||||||
&state_lock,
|
|
||||||
)?;
|
|
||||||
db.rooms.build_and_append_pdu(
|
|
||||||
PduBuilder {
|
|
||||||
event_type: EventType::RoomMember,
|
|
||||||
content: to_raw_value(&RoomMemberEventContent {
|
|
||||||
membership: MembershipState::Join,
|
|
||||||
displayname: Some(displayname),
|
|
||||||
avatar_url: None,
|
|
||||||
is_direct: None,
|
|
||||||
third_party_invite: None,
|
|
||||||
blurhash: None,
|
|
||||||
reason: None,
|
|
||||||
})
|
|
||||||
.expect("event is valid, we just created it"),
|
|
||||||
unsigned: None,
|
|
||||||
state_key: Some(user_id.to_string()),
|
|
||||||
redacts: None,
|
|
||||||
},
|
|
||||||
&user_id,
|
|
||||||
&room_id,
|
|
||||||
&db,
|
|
||||||
&state_lock,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
// Send welcome message
|
|
||||||
db.rooms.build_and_append_pdu(
|
|
||||||
PduBuilder {
|
|
||||||
event_type: EventType::RoomMessage,
|
|
||||||
content: to_raw_value(&RoomMessageEventContent::text_html(
|
|
||||||
"## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`".to_owned(),
|
|
||||||
"<h2>Thank you for trying out Conduit!</h2>\n<p>Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Website: https://conduit.rs<br>Git and Documentation: https://gitlab.com/famedly/conduit<br>Report issues: https://gitlab.com/famedly/conduit/-/issues</p>\n</blockquote>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>Conduit room (Ask questions and get notified on updates):<br><code>/join #conduit:fachschaften.org</code></p>\n<p>Conduit lounge (Off-topic, only Conduit users are allowed to join)<br><code>/join #conduit-lounge:conduit.rs</code></p>\n".to_owned(),
|
|
||||||
))
|
|
||||||
.expect("event is valid, we just created it"),
|
|
||||||
unsigned: None,
|
|
||||||
state_key: None,
|
|
||||||
redacts: None,
|
|
||||||
},
|
|
||||||
&conduit_user,
|
|
||||||
&room_id,
|
|
||||||
&db,
|
|
||||||
&state_lock,
|
|
||||||
)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
info!("{} registered on this server", user_id);
|
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(register::Response {
|
|
||||||
access_token: Some(token),
|
|
||||||
user_id,
|
|
||||||
device_id: Some(device_id),
|
|
||||||
}
|
|
||||||
.into())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/account/password`
|
|
||||||
///
|
|
||||||
/// Changes the password of this account.
|
|
||||||
///
|
|
||||||
/// - Requires UIAA to verify user password
|
|
||||||
/// - Changes the password of the sender user
|
|
||||||
/// - The password hash is calculated using argon2 with 32 character salt, the plain password is
|
|
||||||
/// not saved
|
|
||||||
///
|
|
||||||
/// If logout_devices is true it does the following for each device except the sender device:
|
|
||||||
/// - Invalidates access token
|
|
||||||
/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts)
|
|
||||||
/// - Forgets to-device events
|
|
||||||
/// - Triggers device list updates
|
|
||||||
#[cfg_attr(
|
|
||||||
feature = "conduit_bin",
|
|
||||||
post("/_matrix/client/r0/account/password", data = "<body>")
|
|
||||||
)]
|
|
||||||
#[tracing::instrument(skip(db, body))]
|
|
||||||
pub async fn change_password_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<change_password::Request<'_>>,
|
|
||||||
) -> ConduitResult<change_password::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let mut uiaainfo = UiaaInfo {
|
|
||||||
flows: vec![AuthFlow {
|
|
||||||
stages: vec![AuthType::Password],
|
|
||||||
}],
|
|
||||||
completed: Vec::new(),
|
|
||||||
params: Default::default(),
|
|
||||||
session: None,
|
|
||||||
auth_error: None,
|
|
||||||
};
|
|
||||||
|
|
||||||
if let Some(auth) = &body.auth {
|
|
||||||
let (worked, uiaainfo) = db.uiaa.try_auth(
|
|
||||||
sender_user,
|
|
||||||
sender_device,
|
|
||||||
auth,
|
|
||||||
&uiaainfo,
|
|
||||||
&db.users,
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
|
||||||
if !worked {
|
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
|
||||||
}
|
|
||||||
// Success!
|
|
||||||
} else if let Some(json) = body.json_body {
|
|
||||||
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
|
||||||
db.uiaa
|
|
||||||
.create(sender_user, sender_device, &uiaainfo, &json)?;
|
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
|
||||||
} else {
|
|
||||||
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
|
|
||||||
}
|
|
||||||
|
|
||||||
db.users
|
|
||||||
.set_password(sender_user, Some(&body.new_password))?;
|
|
||||||
|
|
||||||
if body.logout_devices {
|
|
||||||
// Logout all devices except the current one
|
|
||||||
for id in db
|
|
||||||
.users
|
|
||||||
.all_device_ids(sender_user)
|
|
||||||
.filter_map(|id| id.ok())
|
|
||||||
.filter(|id| id != sender_device)
|
|
||||||
{
|
|
||||||
db.users.remove_device(sender_user, &id)?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(change_password::Response {}.into())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET _matrix/client/r0/account/whoami`
|
|
||||||
///
|
|
||||||
/// Get user_id of the sender user.
|
|
||||||
///
|
|
||||||
/// Note: Also works for Application Services
|
|
||||||
#[cfg_attr(
|
|
||||||
feature = "conduit_bin",
|
|
||||||
get("/_matrix/client/r0/account/whoami", data = "<body>")
|
|
||||||
)]
|
|
||||||
#[tracing::instrument(skip(body))]
|
|
||||||
pub async fn whoami_route(body: Ruma<whoami::Request>) -> ConduitResult<whoami::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
Ok(whoami::Response {
|
|
||||||
user_id: sender_user.clone(),
|
|
||||||
}
|
|
||||||
.into())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/account/deactivate`
|
|
||||||
///
|
|
||||||
/// Deactivate sender user account.
|
|
||||||
///
|
|
||||||
/// - Leaves all rooms and rejects all invitations
|
|
||||||
/// - Invalidates all access tokens
|
|
||||||
/// - Deletes all device metadata (device id, device display name, last seen ip, last seen ts)
|
|
||||||
/// - Forgets all to-device events
|
|
||||||
/// - Triggers device list updates
|
|
||||||
/// - Removes ability to log in again
|
|
||||||
#[cfg_attr(
|
|
||||||
feature = "conduit_bin",
|
|
||||||
post("/_matrix/client/r0/account/deactivate", data = "<body>")
|
|
||||||
)]
|
|
||||||
#[tracing::instrument(skip(db, body))]
|
|
||||||
pub async fn deactivate_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<deactivate::Request<'_>>,
|
|
||||||
) -> ConduitResult<deactivate::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
let sender_device = body.sender_device.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let mut uiaainfo = UiaaInfo {
|
|
||||||
flows: vec![AuthFlow {
|
|
||||||
stages: vec![AuthType::Password],
|
|
||||||
}],
|
|
||||||
completed: Vec::new(),
|
|
||||||
params: Default::default(),
|
|
||||||
session: None,
|
|
||||||
auth_error: None,
|
|
||||||
};
|
|
||||||
|
|
||||||
if let Some(auth) = &body.auth {
|
|
||||||
let (worked, uiaainfo) = db.uiaa.try_auth(
|
|
||||||
sender_user,
|
|
||||||
sender_device,
|
|
||||||
auth,
|
|
||||||
&uiaainfo,
|
|
||||||
&db.users,
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
|
||||||
if !worked {
|
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
|
||||||
}
|
|
||||||
// Success!
|
|
||||||
} else if let Some(json) = body.json_body {
|
|
||||||
uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH));
|
|
||||||
db.uiaa
|
|
||||||
.create(sender_user, sender_device, &uiaainfo, &json)?;
|
|
||||||
return Err(Error::Uiaa(uiaainfo));
|
|
||||||
} else {
|
|
||||||
return Err(Error::BadRequest(ErrorKind::NotJson, "Not json."));
|
|
||||||
}
|
|
||||||
|
|
||||||
// Leave all joined rooms and reject all invitations
|
|
||||||
// TODO: work over federation invites
|
|
||||||
let all_rooms = db
|
|
||||||
.rooms
|
|
||||||
.rooms_joined(sender_user)
|
|
||||||
.chain(
|
|
||||||
db.rooms
|
|
||||||
.rooms_invited(sender_user)
|
|
||||||
.map(|t| t.map(|(r, _)| r)),
|
|
||||||
)
|
|
||||||
.collect::<Vec<_>>();
|
|
||||||
|
|
||||||
for room_id in all_rooms {
|
|
||||||
let room_id = room_id?;
|
|
||||||
let event = RoomMemberEventContent {
|
|
||||||
membership: MembershipState::Leave,
|
|
||||||
displayname: None,
|
|
||||||
avatar_url: None,
|
|
||||||
is_direct: None,
|
|
||||||
third_party_invite: None,
|
|
||||||
blurhash: None,
|
|
||||||
reason: None,
|
|
||||||
};
|
|
||||||
|
|
||||||
let mutex_state = Arc::clone(
|
|
||||||
db.globals
|
|
||||||
.roomid_mutex_state
|
|
||||||
.write()
|
|
||||||
.unwrap()
|
|
||||||
.entry(room_id.clone())
|
|
||||||
.or_default(),
|
|
||||||
);
|
|
||||||
let state_lock = mutex_state.lock().await;
|
|
||||||
|
|
||||||
db.rooms.build_and_append_pdu(
|
|
||||||
PduBuilder {
|
|
||||||
event_type: EventType::RoomMember,
|
|
||||||
content: to_raw_value(&event).expect("event is valid, we just created it"),
|
|
||||||
unsigned: None,
|
|
||||||
state_key: Some(sender_user.to_string()),
|
|
||||||
redacts: None,
|
|
||||||
},
|
|
||||||
sender_user,
|
|
||||||
&room_id,
|
|
||||||
&db,
|
|
||||||
&state_lock,
|
|
||||||
)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove devices and mark account as deactivated
|
|
||||||
db.users.deactivate_account(sender_user)?;
|
|
||||||
|
|
||||||
info!("{} deactivated their account", sender_user);
|
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(deactivate::Response {
|
|
||||||
id_server_unbind_result: ThirdPartyIdRemovalStatus::NoSupport,
|
|
||||||
}
|
|
||||||
.into())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET _matrix/client/r0/account/3pid`
|
|
||||||
///
|
|
||||||
/// Get a list of third party identifiers associated with this account.
|
|
||||||
///
|
|
||||||
/// - Currently always returns empty list
|
|
||||||
#[cfg_attr(
|
|
||||||
feature = "conduit_bin",
|
|
||||||
get("/_matrix/client/r0/account/3pid", data = "<body>")
|
|
||||||
)]
|
|
||||||
pub async fn third_party_route(
|
|
||||||
body: Ruma<get_contacts::Request>,
|
|
||||||
) -> ConduitResult<get_contacts::Response> {
|
|
||||||
let _sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
Ok(get_contacts::Response::new(Vec::new()).into())
|
|
||||||
}
|
|
@ -1,432 +0,0 @@
|
|||||||
use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma};
|
|
||||||
use ruma::api::client::{
|
|
||||||
error::ErrorKind,
|
|
||||||
r0::backup::{
|
|
||||||
add_backup_key_session, add_backup_key_sessions, add_backup_keys, create_backup,
|
|
||||||
delete_backup, delete_backup_key_session, delete_backup_key_sessions, delete_backup_keys,
|
|
||||||
get_backup, get_backup_key_session, get_backup_key_sessions, get_backup_keys,
|
|
||||||
get_latest_backup, update_backup,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
#[cfg(feature = "conduit_bin")]
|
|
||||||
use rocket::{delete, get, post, put};
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/room_keys/version`
|
|
||||||
///
|
|
||||||
/// Creates a new backup.
|
|
||||||
#[cfg_attr(
|
|
||||||
feature = "conduit_bin",
|
|
||||||
post("/_matrix/client/unstable/room_keys/version", data = "<body>")
|
|
||||||
)]
|
|
||||||
#[tracing::instrument(skip(db, body))]
|
|
||||||
pub async fn create_backup_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<create_backup::Request>,
|
|
||||||
) -> ConduitResult<create_backup::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
let version = db
|
|
||||||
.key_backups
|
|
||||||
.create_backup(sender_user, &body.algorithm, &db.globals)?;
|
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(create_backup::Response { version }.into())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/room_keys/version/{version}`
|
|
||||||
///
|
|
||||||
/// Update information about an existing backup. Only `auth_data` can be modified.
|
|
||||||
#[cfg_attr(
|
|
||||||
feature = "conduit_bin",
|
|
||||||
put("/_matrix/client/unstable/room_keys/version/<_>", data = "<body>")
|
|
||||||
)]
|
|
||||||
#[tracing::instrument(skip(db, body))]
|
|
||||||
pub async fn update_backup_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<update_backup::Request<'_>>,
|
|
||||||
) -> ConduitResult<update_backup::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
db.key_backups
|
|
||||||
.update_backup(sender_user, &body.version, &body.algorithm, &db.globals)?;
|
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(update_backup::Response {}.into())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/room_keys/version`
|
|
||||||
///
|
|
||||||
/// Get information about the latest backup version.
|
|
||||||
#[cfg_attr(
|
|
||||||
feature = "conduit_bin",
|
|
||||||
get("/_matrix/client/unstable/room_keys/version", data = "<body>")
|
|
||||||
)]
|
|
||||||
#[tracing::instrument(skip(db, body))]
|
|
||||||
pub async fn get_latest_backup_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<get_latest_backup::Request>,
|
|
||||||
) -> ConduitResult<get_latest_backup::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let (version, algorithm) =
|
|
||||||
db.key_backups
|
|
||||||
.get_latest_backup(sender_user)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"Key backup does not exist.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
Ok(get_latest_backup::Response {
|
|
||||||
algorithm,
|
|
||||||
count: (db.key_backups.count_keys(sender_user, &version)? as u32).into(),
|
|
||||||
etag: db.key_backups.get_etag(sender_user, &version)?,
|
|
||||||
version,
|
|
||||||
}
|
|
||||||
.into())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/room_keys/version`
|
|
||||||
///
|
|
||||||
/// Get information about an existing backup.
|
|
||||||
#[cfg_attr(
|
|
||||||
feature = "conduit_bin",
|
|
||||||
get("/_matrix/client/unstable/room_keys/version/<_>", data = "<body>")
|
|
||||||
)]
|
|
||||||
#[tracing::instrument(skip(db, body))]
|
|
||||||
pub async fn get_backup_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<get_backup::Request<'_>>,
|
|
||||||
) -> ConduitResult<get_backup::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
let algorithm = db
|
|
||||||
.key_backups
|
|
||||||
.get_backup(sender_user, &body.version)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"Key backup does not exist.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
Ok(get_backup::Response {
|
|
||||||
algorithm,
|
|
||||||
count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
|
|
||||||
etag: db.key_backups.get_etag(sender_user, &body.version)?,
|
|
||||||
version: body.version.to_owned(),
|
|
||||||
}
|
|
||||||
.into())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `DELETE /_matrix/client/r0/room_keys/version/{version}`
|
|
||||||
///
|
|
||||||
/// Delete an existing key backup.
|
|
||||||
///
|
|
||||||
/// - Deletes both information about the backup, as well as all key data related to the backup
|
|
||||||
#[cfg_attr(
|
|
||||||
feature = "conduit_bin",
|
|
||||||
delete("/_matrix/client/unstable/room_keys/version/<_>", data = "<body>")
|
|
||||||
)]
|
|
||||||
#[tracing::instrument(skip(db, body))]
|
|
||||||
pub async fn delete_backup_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<delete_backup::Request<'_>>,
|
|
||||||
) -> ConduitResult<delete_backup::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
db.key_backups.delete_backup(sender_user, &body.version)?;
|
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(delete_backup::Response {}.into())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/room_keys/keys`
|
|
||||||
///
|
|
||||||
/// Add the received backup keys to the database.
|
|
||||||
///
|
|
||||||
/// - Only manipulating the most recently created version of the backup is allowed
|
|
||||||
/// - Adds the keys to the backup
|
|
||||||
/// - Returns the new number of keys in this backup and the etag
|
|
||||||
#[cfg_attr(
|
|
||||||
feature = "conduit_bin",
|
|
||||||
put("/_matrix/client/unstable/room_keys/keys", data = "<body>")
|
|
||||||
)]
|
|
||||||
#[tracing::instrument(skip(db, body))]
|
|
||||||
pub async fn add_backup_keys_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<add_backup_keys::Request<'_>>,
|
|
||||||
) -> ConduitResult<add_backup_keys::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if Some(&body.version)
|
|
||||||
!= db
|
|
||||||
.key_backups
|
|
||||||
.get_latest_backup_version(sender_user)?
|
|
||||||
.as_ref()
|
|
||||||
{
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"You may only manipulate the most recently created version of the backup.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
for (room_id, room) in &body.rooms {
|
|
||||||
for (session_id, key_data) in &room.sessions {
|
|
||||||
db.key_backups.add_key(
|
|
||||||
sender_user,
|
|
||||||
&body.version,
|
|
||||||
room_id,
|
|
||||||
session_id,
|
|
||||||
key_data,
|
|
||||||
&db.globals,
|
|
||||||
)?
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(add_backup_keys::Response {
|
|
||||||
count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
|
|
||||||
etag: db.key_backups.get_etag(sender_user, &body.version)?,
|
|
||||||
}
|
|
||||||
.into())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}`
|
|
||||||
///
|
|
||||||
/// Add the received backup keys to the database.
|
|
||||||
///
|
|
||||||
/// - Only manipulating the most recently created version of the backup is allowed
|
|
||||||
/// - Adds the keys to the backup
|
|
||||||
/// - Returns the new number of keys in this backup and the etag
|
|
||||||
#[cfg_attr(
|
|
||||||
feature = "conduit_bin",
|
|
||||||
put("/_matrix/client/unstable/room_keys/keys/<_>", data = "<body>")
|
|
||||||
)]
|
|
||||||
#[tracing::instrument(skip(db, body))]
|
|
||||||
pub async fn add_backup_key_sessions_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<add_backup_key_sessions::Request<'_>>,
|
|
||||||
) -> ConduitResult<add_backup_key_sessions::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if Some(&body.version)
|
|
||||||
!= db
|
|
||||||
.key_backups
|
|
||||||
.get_latest_backup_version(sender_user)?
|
|
||||||
.as_ref()
|
|
||||||
{
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"You may only manipulate the most recently created version of the backup.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
for (session_id, key_data) in &body.sessions {
|
|
||||||
db.key_backups.add_key(
|
|
||||||
sender_user,
|
|
||||||
&body.version,
|
|
||||||
&body.room_id,
|
|
||||||
session_id,
|
|
||||||
key_data,
|
|
||||||
&db.globals,
|
|
||||||
)?
|
|
||||||
}
|
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(add_backup_key_sessions::Response {
|
|
||||||
count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
|
|
||||||
etag: db.key_backups.get_etag(sender_user, &body.version)?,
|
|
||||||
}
|
|
||||||
.into())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
|
|
||||||
///
|
|
||||||
/// Add the received backup key to the database.
|
|
||||||
///
|
|
||||||
/// - Only manipulating the most recently created version of the backup is allowed
|
|
||||||
/// - Adds the keys to the backup
|
|
||||||
/// - Returns the new number of keys in this backup and the etag
|
|
||||||
#[cfg_attr(
|
|
||||||
feature = "conduit_bin",
|
|
||||||
put("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "<body>")
|
|
||||||
)]
|
|
||||||
#[tracing::instrument(skip(db, body))]
|
|
||||||
pub async fn add_backup_key_session_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<add_backup_key_session::Request<'_>>,
|
|
||||||
) -> ConduitResult<add_backup_key_session::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if Some(&body.version)
|
|
||||||
!= db
|
|
||||||
.key_backups
|
|
||||||
.get_latest_backup_version(sender_user)?
|
|
||||||
.as_ref()
|
|
||||||
{
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::InvalidParam,
|
|
||||||
"You may only manipulate the most recently created version of the backup.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
db.key_backups.add_key(
|
|
||||||
sender_user,
|
|
||||||
&body.version,
|
|
||||||
&body.room_id,
|
|
||||||
&body.session_id,
|
|
||||||
&body.session_data,
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(add_backup_key_session::Response {
|
|
||||||
count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
|
|
||||||
etag: db.key_backups.get_etag(sender_user, &body.version)?,
|
|
||||||
}
|
|
||||||
.into())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/room_keys/keys`
|
|
||||||
///
|
|
||||||
/// Retrieves all keys from the backup.
|
|
||||||
#[cfg_attr(
|
|
||||||
feature = "conduit_bin",
|
|
||||||
get("/_matrix/client/unstable/room_keys/keys", data = "<body>")
|
|
||||||
)]
|
|
||||||
#[tracing::instrument(skip(db, body))]
|
|
||||||
pub async fn get_backup_keys_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<get_backup_keys::Request<'_>>,
|
|
||||||
) -> ConduitResult<get_backup_keys::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let rooms = db.key_backups.get_all(sender_user, &body.version)?;
|
|
||||||
|
|
||||||
Ok(get_backup_keys::Response { rooms }.into())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/room_keys/keys/{roomId}`
|
|
||||||
///
|
|
||||||
/// Retrieves all keys from the backup for a given room.
|
|
||||||
#[cfg_attr(
|
|
||||||
feature = "conduit_bin",
|
|
||||||
get("/_matrix/client/unstable/room_keys/keys/<_>", data = "<body>")
|
|
||||||
)]
|
|
||||||
#[tracing::instrument(skip(db, body))]
|
|
||||||
pub async fn get_backup_key_sessions_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<get_backup_key_sessions::Request<'_>>,
|
|
||||||
) -> ConduitResult<get_backup_key_sessions::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let sessions = db
|
|
||||||
.key_backups
|
|
||||||
.get_room(sender_user, &body.version, &body.room_id)?;
|
|
||||||
|
|
||||||
Ok(get_backup_key_sessions::Response { sessions }.into())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
|
|
||||||
///
|
|
||||||
/// Retrieves a key from the backup.
|
|
||||||
#[cfg_attr(
|
|
||||||
feature = "conduit_bin",
|
|
||||||
get("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "<body>")
|
|
||||||
)]
|
|
||||||
#[tracing::instrument(skip(db, body))]
|
|
||||||
pub async fn get_backup_key_session_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<get_backup_key_session::Request<'_>>,
|
|
||||||
) -> ConduitResult<get_backup_key_session::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let key_data = db
|
|
||||||
.key_backups
|
|
||||||
.get_session(sender_user, &body.version, &body.room_id, &body.session_id)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"Backup key not found for this user's session.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
Ok(get_backup_key_session::Response { key_data }.into())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `DELETE /_matrix/client/r0/room_keys/keys`
|
|
||||||
///
|
|
||||||
/// Delete the keys from the backup.
|
|
||||||
#[cfg_attr(
|
|
||||||
feature = "conduit_bin",
|
|
||||||
delete("/_matrix/client/unstable/room_keys/keys", data = "<body>")
|
|
||||||
)]
|
|
||||||
#[tracing::instrument(skip(db, body))]
|
|
||||||
pub async fn delete_backup_keys_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<delete_backup_keys::Request<'_>>,
|
|
||||||
) -> ConduitResult<delete_backup_keys::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
db.key_backups.delete_all_keys(sender_user, &body.version)?;
|
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(delete_backup_keys::Response {
|
|
||||||
count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
|
|
||||||
etag: db.key_backups.get_etag(sender_user, &body.version)?,
|
|
||||||
}
|
|
||||||
.into())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}`
|
|
||||||
///
|
|
||||||
/// Delete the keys from the backup for a given room.
|
|
||||||
#[cfg_attr(
|
|
||||||
feature = "conduit_bin",
|
|
||||||
delete("/_matrix/client/unstable/room_keys/keys/<_>", data = "<body>")
|
|
||||||
)]
|
|
||||||
#[tracing::instrument(skip(db, body))]
|
|
||||||
pub async fn delete_backup_key_sessions_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<delete_backup_key_sessions::Request<'_>>,
|
|
||||||
) -> ConduitResult<delete_backup_key_sessions::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
db.key_backups
|
|
||||||
.delete_room_keys(sender_user, &body.version, &body.room_id)?;
|
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(delete_backup_key_sessions::Response {
|
|
||||||
count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
|
|
||||||
etag: db.key_backups.get_etag(sender_user, &body.version)?,
|
|
||||||
}
|
|
||||||
.into())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
|
|
||||||
///
|
|
||||||
/// Delete a key from the backup.
|
|
||||||
#[cfg_attr(
|
|
||||||
feature = "conduit_bin",
|
|
||||||
delete("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "<body>")
|
|
||||||
)]
|
|
||||||
#[tracing::instrument(skip(db, body))]
|
|
||||||
pub async fn delete_backup_key_session_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<delete_backup_key_session::Request<'_>>,
|
|
||||||
) -> ConduitResult<delete_backup_key_session::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
db.key_backups
|
|
||||||
.delete_room_key(sender_user, &body.version, &body.room_id, &body.session_id)?;
|
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(delete_backup_key_session::Response {
|
|
||||||
count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(),
|
|
||||||
etag: db.key_backups.get_etag(sender_user, &body.version)?,
|
|
||||||
}
|
|
||||||
.into())
|
|
||||||
}
|
|
@ -1,35 +0,0 @@
|
|||||||
use crate::{ConduitResult, Ruma};
|
|
||||||
use ruma::{
|
|
||||||
api::client::r0::capabilities::{
|
|
||||||
get_capabilities, Capabilities, RoomVersionStability, RoomVersionsCapability,
|
|
||||||
},
|
|
||||||
RoomVersionId,
|
|
||||||
};
|
|
||||||
use std::collections::BTreeMap;
|
|
||||||
|
|
||||||
#[cfg(feature = "conduit_bin")]
|
|
||||||
use rocket::get;
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/capabilities`
|
|
||||||
///
|
|
||||||
/// Get information on the supported feature set and other relevent capabilities of this server.
|
|
||||||
#[cfg_attr(
|
|
||||||
feature = "conduit_bin",
|
|
||||||
get("/_matrix/client/r0/capabilities", data = "<_body>")
|
|
||||||
)]
|
|
||||||
#[tracing::instrument(skip(_body))]
|
|
||||||
pub async fn get_capabilities_route(
|
|
||||||
_body: Ruma<get_capabilities::Request>,
|
|
||||||
) -> ConduitResult<get_capabilities::Response> {
|
|
||||||
let mut available = BTreeMap::new();
|
|
||||||
available.insert(RoomVersionId::Version5, RoomVersionStability::Stable);
|
|
||||||
available.insert(RoomVersionId::Version6, RoomVersionStability::Stable);
|
|
||||||
|
|
||||||
let mut capabilities = Capabilities::new();
|
|
||||||
capabilities.room_versions = RoomVersionsCapability {
|
|
||||||
default: RoomVersionId::Version6,
|
|
||||||
available,
|
|
||||||
};
|
|
||||||
|
|
||||||
Ok(get_capabilities::Response { capabilities }.into())
|
|
||||||
}
|
|
@ -1,109 +0,0 @@
|
|||||||
use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma};
|
|
||||||
use ruma::api::client::{error::ErrorKind, r0::context::get_context};
|
|
||||||
use std::convert::TryFrom;
|
|
||||||
|
|
||||||
#[cfg(feature = "conduit_bin")]
|
|
||||||
use rocket::get;
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/rooms/{roomId}/context`
|
|
||||||
///
|
|
||||||
/// Allows loading room history around an event.
|
|
||||||
///
|
|
||||||
/// - Only works if the user is joined (TODO: always allow, but only show events if the user was
|
|
||||||
/// joined, depending on history_visibility)
|
|
||||||
#[cfg_attr(
|
|
||||||
feature = "conduit_bin",
|
|
||||||
get("/_matrix/client/r0/rooms/<_>/context/<_>", data = "<body>")
|
|
||||||
)]
|
|
||||||
#[tracing::instrument(skip(db, body))]
|
|
||||||
pub async fn get_context_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<get_context::Request<'_>>,
|
|
||||||
) -> ConduitResult<get_context::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
if !db.rooms.is_joined(sender_user, &body.room_id)? {
|
|
||||||
return Err(Error::BadRequest(
|
|
||||||
ErrorKind::Forbidden,
|
|
||||||
"You don't have permission to view this room.",
|
|
||||||
));
|
|
||||||
}
|
|
||||||
|
|
||||||
let base_pdu_id = db
|
|
||||||
.rooms
|
|
||||||
.get_pdu_id(&body.event_id)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"Base event id not found.",
|
|
||||||
))?;
|
|
||||||
|
|
||||||
let base_token = db.rooms.pdu_count(&base_pdu_id)?;
|
|
||||||
|
|
||||||
let base_event = db
|
|
||||||
.rooms
|
|
||||||
.get_pdu_from_id(&base_pdu_id)?
|
|
||||||
.ok_or(Error::BadRequest(
|
|
||||||
ErrorKind::NotFound,
|
|
||||||
"Base event not found.",
|
|
||||||
))?
|
|
||||||
.to_room_event();
|
|
||||||
|
|
||||||
let events_before: Vec<_> = db
|
|
||||||
.rooms
|
|
||||||
.pdus_until(sender_user, &body.room_id, base_token)?
|
|
||||||
.take(
|
|
||||||
u32::try_from(body.limit).map_err(|_| {
|
|
||||||
Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.")
|
|
||||||
})? as usize
|
|
||||||
/ 2,
|
|
||||||
)
|
|
||||||
.filter_map(|r| r.ok()) // Remove buggy events
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
let start_token = events_before
|
|
||||||
.last()
|
|
||||||
.and_then(|(pdu_id, _)| db.rooms.pdu_count(pdu_id).ok())
|
|
||||||
.map(|count| count.to_string());
|
|
||||||
|
|
||||||
let events_before: Vec<_> = events_before
|
|
||||||
.into_iter()
|
|
||||||
.map(|(_, pdu)| pdu.to_room_event())
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
let events_after: Vec<_> = db
|
|
||||||
.rooms
|
|
||||||
.pdus_after(sender_user, &body.room_id, base_token)?
|
|
||||||
.take(
|
|
||||||
u32::try_from(body.limit).map_err(|_| {
|
|
||||||
Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.")
|
|
||||||
})? as usize
|
|
||||||
/ 2,
|
|
||||||
)
|
|
||||||
.filter_map(|r| r.ok()) // Remove buggy events
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
let end_token = events_after
|
|
||||||
.last()
|
|
||||||
.and_then(|(pdu_id, _)| db.rooms.pdu_count(pdu_id).ok())
|
|
||||||
.map(|count| count.to_string());
|
|
||||||
|
|
||||||
let events_after: Vec<_> = events_after
|
|
||||||
.into_iter()
|
|
||||||
.map(|(_, pdu)| pdu.to_room_event())
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
let mut resp = get_context::Response::new();
|
|
||||||
resp.start = start_token;
|
|
||||||
resp.end = end_token;
|
|
||||||
resp.events_before = events_before;
|
|
||||||
resp.event = Some(base_event);
|
|
||||||
resp.events_after = events_after;
|
|
||||||
resp.state = db // TODO: State at event
|
|
||||||
.rooms
|
|
||||||
.room_state_full(&body.room_id)?
|
|
||||||
.values()
|
|
||||||
.map(|pdu| pdu.to_state_event())
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
Ok(resp.into())
|
|
||||||
}
|
|
@ -1,32 +0,0 @@
|
|||||||
use crate::{utils, ConduitResult};
|
|
||||||
use ruma::api::client::r0::filter::{self, create_filter, get_filter};
|
|
||||||
|
|
||||||
#[cfg(feature = "conduit_bin")]
|
|
||||||
use rocket::{get, post};
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/user/{userId}/filter/{filterId}`
|
|
||||||
///
|
|
||||||
/// TODO: Loads a filter that was previously created.
|
|
||||||
#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/user/<_>/filter/<_>"))]
|
|
||||||
#[tracing::instrument]
|
|
||||||
pub async fn get_filter_route() -> ConduitResult<get_filter::Response> {
|
|
||||||
// TODO
|
|
||||||
Ok(get_filter::Response::new(filter::IncomingFilterDefinition {
|
|
||||||
event_fields: None,
|
|
||||||
event_format: filter::EventFormat::default(),
|
|
||||||
account_data: filter::IncomingFilter::default(),
|
|
||||||
room: filter::IncomingRoomFilter::default(),
|
|
||||||
presence: filter::IncomingFilter::default(),
|
|
||||||
})
|
|
||||||
.into())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/user/{userId}/filter`
///
/// TODO: Creates a new filter to be used by other endpoints.
#[cfg_attr(feature = "conduit_bin", post("/_matrix/client/r0/user/<_>/filter"))]
#[tracing::instrument]
pub async fn create_filter_route() -> ConduitResult<create_filter::Response> {
    // TODO: the submitted filter is not stored yet; only a random filter id is
    // handed back to the client.
    Ok(create_filter::Response::new(utils::random_string(10)).into())
}
|
|
@ -1,198 +0,0 @@
|
|||||||
use crate::{
|
|
||||||
database::{media::FileMeta, DatabaseGuard},
|
|
||||||
utils, ConduitResult, Error, Ruma,
|
|
||||||
};
|
|
||||||
use ruma::api::client::{
|
|
||||||
error::ErrorKind,
|
|
||||||
r0::media::{create_content, get_content, get_content_thumbnail, get_media_config},
|
|
||||||
};
|
|
||||||
use std::convert::TryInto;
|
|
||||||
|
|
||||||
#[cfg(feature = "conduit_bin")]
|
|
||||||
use rocket::{get, post};
|
|
||||||
|
|
||||||
const MXC_LENGTH: usize = 32;
|
|
||||||
|
|
||||||
/// # `GET /_matrix/media/r0/config`
|
|
||||||
///
|
|
||||||
/// Returns max upload size.
|
|
||||||
#[cfg_attr(feature = "conduit_bin", get("/_matrix/media/r0/config"))]
|
|
||||||
#[tracing::instrument(skip(db))]
|
|
||||||
pub async fn get_media_config_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
) -> ConduitResult<get_media_config::Response> {
|
|
||||||
Ok(get_media_config::Response {
|
|
||||||
upload_size: db.globals.max_request_size().into(),
|
|
||||||
}
|
|
||||||
.into())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `POST /_matrix/media/r0/upload`
|
|
||||||
///
|
|
||||||
/// Permanently save media in the server.
|
|
||||||
///
|
|
||||||
/// - Some metadata will be saved in the database
|
|
||||||
/// - Media will be saved in the media/ directory
|
|
||||||
#[cfg_attr(
|
|
||||||
feature = "conduit_bin",
|
|
||||||
post("/_matrix/media/r0/upload", data = "<body>")
|
|
||||||
)]
|
|
||||||
#[tracing::instrument(skip(db, body))]
|
|
||||||
pub async fn create_content_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<create_content::Request<'_>>,
|
|
||||||
) -> ConduitResult<create_content::Response> {
|
|
||||||
let mxc = format!(
|
|
||||||
"mxc://{}/{}",
|
|
||||||
db.globals.server_name(),
|
|
||||||
utils::random_string(MXC_LENGTH)
|
|
||||||
);
|
|
||||||
|
|
||||||
db.media
|
|
||||||
.create(
|
|
||||||
mxc.clone(),
|
|
||||||
&db.globals,
|
|
||||||
&body
|
|
||||||
.filename
|
|
||||||
.as_ref()
|
|
||||||
.map(|filename| "inline; filename=".to_owned() + filename)
|
|
||||||
.as_deref(),
|
|
||||||
&body.content_type.as_deref(),
|
|
||||||
&body.file,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(create_content::Response {
|
|
||||||
content_uri: mxc.try_into().expect("Invalid mxc:// URI"),
|
|
||||||
blurhash: None,
|
|
||||||
}
|
|
||||||
.into())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/media/r0/download/{serverName}/{mediaId}`
///
/// Load media from our server or over federation.
///
/// - Only allows federation if `allow_remote` is true
#[cfg_attr(
    feature = "conduit_bin",
    get("/_matrix/media/r0/download/<_>/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_content_route(
    db: DatabaseGuard,
    body: Ruma<get_content::Request<'_>>,
) -> ConduitResult<get_content::Response> {
    let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);

    // Serve straight from the local media store when the file is cached.
    if let Some(FileMeta {
        content_disposition,
        content_type,
        file,
    }) = db.media.get(&db.globals, &mxc).await?
    {
        Ok(get_content::Response {
            file,
            content_type,
            content_disposition,
        }
        .into())
    } else if &*body.server_name != db.globals.server_name() && body.allow_remote {
        // Not cached and hosted remotely: fetch it over federation.
        // `allow_remote: false` prevents the remote from recursing further.
        let get_content_response = db
            .sending
            .send_federation_request(
                &db.globals,
                &body.server_name,
                get_content::Request {
                    allow_remote: false,
                    server_name: &body.server_name,
                    media_id: &body.media_id,
                },
            )
            .await?;

        // Cache the fetched file locally so the next request is served above.
        db.media
            .create(
                mxc,
                &db.globals,
                &get_content_response.content_disposition.as_deref(),
                &get_content_response.content_type.as_deref(),
                &get_content_response.file,
            )
            .await?;

        Ok(get_content_response.into())
    } else {
        Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
    }
}
|
|
||||||
|
|
||||||
/// # `POST /_matrix/media/r0/thumbnail/{serverName}/{mediaId}`
|
|
||||||
///
|
|
||||||
/// Load media thumbnail from our server or over federation.
|
|
||||||
///
|
|
||||||
/// - Only allows federation if `allow_remote` is true
|
|
||||||
#[cfg_attr(
|
|
||||||
feature = "conduit_bin",
|
|
||||||
get("/_matrix/media/r0/thumbnail/<_>/<_>", data = "<body>")
|
|
||||||
)]
|
|
||||||
#[tracing::instrument(skip(db, body))]
|
|
||||||
pub async fn get_content_thumbnail_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<get_content_thumbnail::Request<'_>>,
|
|
||||||
) -> ConduitResult<get_content_thumbnail::Response> {
|
|
||||||
let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
|
|
||||||
|
|
||||||
if let Some(FileMeta {
|
|
||||||
content_type, file, ..
|
|
||||||
}) = db
|
|
||||||
.media
|
|
||||||
.get_thumbnail(
|
|
||||||
mxc.clone(),
|
|
||||||
&db.globals,
|
|
||||||
body.width
|
|
||||||
.try_into()
|
|
||||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?,
|
|
||||||
body.height
|
|
||||||
.try_into()
|
|
||||||
.map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?,
|
|
||||||
)
|
|
||||||
.await?
|
|
||||||
{
|
|
||||||
Ok(get_content_thumbnail::Response { file, content_type }.into())
|
|
||||||
} else if &*body.server_name != db.globals.server_name() && body.allow_remote {
|
|
||||||
let get_thumbnail_response = db
|
|
||||||
.sending
|
|
||||||
.send_federation_request(
|
|
||||||
&db.globals,
|
|
||||||
&body.server_name,
|
|
||||||
get_content_thumbnail::Request {
|
|
||||||
allow_remote: false,
|
|
||||||
height: body.height,
|
|
||||||
width: body.width,
|
|
||||||
method: body.method.clone(),
|
|
||||||
server_name: &body.server_name,
|
|
||||||
media_id: &body.media_id,
|
|
||||||
},
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
db.media
|
|
||||||
.upload_thumbnail(
|
|
||||||
mxc,
|
|
||||||
&db.globals,
|
|
||||||
&None,
|
|
||||||
&get_thumbnail_response.content_type,
|
|
||||||
body.width.try_into().expect("all UInts are valid u32s"),
|
|
||||||
body.height.try_into().expect("all UInts are valid u32s"),
|
|
||||||
&get_thumbnail_response.file,
|
|
||||||
)
|
|
||||||
.await?;
|
|
||||||
|
|
||||||
Ok(get_thumbnail_response.into())
|
|
||||||
} else {
|
|
||||||
Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
|
|
||||||
}
|
|
||||||
}
|
|
@ -1,207 +0,0 @@
|
|||||||
use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma};
|
|
||||||
use ruma::{
|
|
||||||
api::client::{
|
|
||||||
error::ErrorKind,
|
|
||||||
r0::message::{get_message_events, send_message_event},
|
|
||||||
},
|
|
||||||
events::EventType,
|
|
||||||
EventId,
|
|
||||||
};
|
|
||||||
use std::{
|
|
||||||
collections::BTreeMap,
|
|
||||||
convert::{TryFrom, TryInto},
|
|
||||||
sync::Arc,
|
|
||||||
};
|
|
||||||
|
|
||||||
#[cfg(feature = "conduit_bin")]
|
|
||||||
use rocket::{get, put};
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/rooms/{roomId}/send/{eventType}/{txnId}`
///
/// Send a message event into the room.
///
/// - Is a NOOP if the txn id was already used before and returns the same event id again
/// - The only requirement for the content is that it has to be valid json
/// - Tries to send the event into the room, auth rules will determine if it is allowed
#[cfg_attr(
    feature = "conduit_bin",
    put("/_matrix/client/r0/rooms/<_>/send/<_>/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn send_message_event_route(
    db: DatabaseGuard,
    body: Ruma<send_message_event::Request<'_>>,
) -> ConduitResult<send_message_event::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
    let sender_device = body.sender_device.as_deref();

    // Serialize state changes per room: take (creating on first use) this
    // room's state mutex and hold it until the pdu has been appended.
    let mutex_state = Arc::clone(
        db.globals
            .roomid_mutex_state
            .write()
            .unwrap()
            .entry(body.room_id.clone())
            .or_default(),
    );
    let state_lock = mutex_state.lock().await;

    // Forbid m.room.encrypted if encryption is disabled
    if &body.event_type == "m.room.encrypted" && !db.globals.allow_encryption() {
        return Err(Error::BadRequest(
            ErrorKind::Forbidden,
            "Encryption has been disabled",
        ));
    }

    // Check if this is a new transaction id
    if let Some(response) =
        db.transaction_ids
            .existing_txnid(sender_user, sender_device, &body.txn_id)?
    {
        // The client might have sent a txnid of the /sendToDevice endpoint
        // This txnid has no response associated with it
        if response.is_empty() {
            return Err(Error::BadRequest(
                ErrorKind::InvalidParam,
                "Tried to use txn id already used for an incompatible endpoint.",
            ));
        }

        // Replay: the stored bytes are the event id we answered with before.
        let event_id = EventId::try_from(
            utils::string_from_bytes(&response)
                .map_err(|_| Error::bad_database("Invalid txnid bytes in database."))?,
        )
        .map_err(|_| Error::bad_database("Invalid event id in txnid data."))?;
        return Ok(send_message_event::Response { event_id }.into());
    }

    // Attach the transaction id as unsigned data so the sending client can
    // match the event when it comes down /sync.
    let mut unsigned = BTreeMap::new();
    unsigned.insert("transaction_id".to_owned(), body.txn_id.clone().into());

    let event_id = db.rooms.build_and_append_pdu(
        PduBuilder {
            event_type: EventType::from(&body.event_type),
            content: serde_json::from_str(body.body.body.json().get())
                .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?,
            unsigned: Some(unsigned),
            state_key: None,
            redacts: None,
        },
        sender_user,
        &body.room_id,
        &db,
        &state_lock,
    )?;

    // Remember the txn id -> event id mapping so that retries are idempotent.
    db.transaction_ids.add_txnid(
        sender_user,
        sender_device,
        &body.txn_id,
        event_id.as_bytes(),
    )?;

    drop(state_lock);

    db.flush()?;

    Ok(send_message_event::Response::new(event_id).into())
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/rooms/{roomId}/messages`
///
/// Allows paginating through room history.
///
/// - Only works if the user is joined (TODO: always allow, but only show events where the user was
/// joined, depending on history_visibility)
#[cfg_attr(
    feature = "conduit_bin",
    get("/_matrix/client/r0/rooms/<_>/messages", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn get_message_events_route(
    db: DatabaseGuard,
    body: Ruma<get_message_events::Request<'_>>,
) -> ConduitResult<get_message_events::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");

    if !db.rooms.is_joined(sender_user, &body.room_id)? {
        return Err(Error::BadRequest(
            ErrorKind::Forbidden,
            "You don't have permission to view this room.",
        ));
    }

    // Pagination tokens are stringified pdu counts; parse the start token.
    let from = body
        .from
        .clone()
        .parse()
        .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from` value."))?;

    // `to` stays an Option<Result<..>> so a malformed token simply never
    // matches in the take_while below rather than erroring out.
    let to = body.to.as_ref().map(|t| t.parse());

    // Use limit or else 10
    let limit = body.limit.try_into().map_or(10_usize, |l: u32| l as usize);

    match body.dir {
        get_message_events::Direction::Forward => {
            let events_after: Vec<_> = db
                .rooms
                .pdus_after(sender_user, &body.room_id, from)?
                .take(limit)
                .filter_map(|r| r.ok()) // Filter out buggy events
                .filter_map(|(pdu_id, pdu)| {
                    db.rooms
                        .pdu_count(&pdu_id)
                        .map(|pdu_count| (pdu_count, pdu))
                        .ok()
                })
                .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to`
                .collect();

            // The `end` token points at the last event we returned.
            let end_token = events_after.last().map(|(count, _)| count.to_string());

            let events_after: Vec<_> = events_after
                .into_iter()
                .map(|(_, pdu)| pdu.to_room_event())
                .collect();

            let mut resp = get_message_events::Response::new();
            resp.start = Some(body.from.to_owned());
            resp.end = end_token;
            resp.chunk = events_after;
            resp.state = Vec::new();

            Ok(resp.into())
        }
        get_message_events::Direction::Backward => {
            let events_before: Vec<_> = db
                .rooms
                .pdus_until(sender_user, &body.room_id, from)?
                .take(limit)
                .filter_map(|r| r.ok()) // Filter out buggy events
                .filter_map(|(pdu_id, pdu)| {
                    db.rooms
                        .pdu_count(&pdu_id)
                        .map(|pdu_count| (pdu_count, pdu))
                        .ok()
                })
                .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to`
                .collect();

            // When paginating backwards, the oldest returned event becomes
            // the `end` token for the next request.
            let start_token = events_before.last().map(|(count, _)| count.to_string());

            let events_before: Vec<_> = events_before
                .into_iter()
                .map(|(_, pdu)| pdu.to_room_event())
                .collect();

            let mut resp = get_message_events::Response::new();
            resp.start = Some(body.from.to_owned());
            resp.end = start_token;
            resp.chunk = events_before;
            resp.state = Vec::new();

            Ok(resp.into())
        }
    }
}
|
|
@ -1,143 +0,0 @@
|
|||||||
use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma};
|
|
||||||
use ruma::{
|
|
||||||
api::client::{
|
|
||||||
error::ErrorKind,
|
|
||||||
r0::{read_marker::set_read_marker, receipt::create_receipt},
|
|
||||||
},
|
|
||||||
events::{AnyEphemeralRoomEvent, EventType},
|
|
||||||
receipt::ReceiptType,
|
|
||||||
MilliSecondsSinceUnixEpoch,
|
|
||||||
};
|
|
||||||
use std::collections::BTreeMap;
|
|
||||||
|
|
||||||
#[cfg(feature = "conduit_bin")]
|
|
||||||
use rocket::post;
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/rooms/{roomId}/read_markers`
///
/// Sets different types of read markers.
///
/// - Updates fully-read account data event to `fully_read`
/// - If `read_receipt` is set: Update private marker and public read receipt EDU
#[cfg_attr(
    feature = "conduit_bin",
    post("/_matrix/client/r0/rooms/<_>/read_markers", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn set_read_marker_route(
    db: DatabaseGuard,
    body: Ruma<set_read_marker::Request<'_>>,
) -> ConduitResult<set_read_marker::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");

    // The fully-read marker is stored as per-room account data.
    let fully_read_event = ruma::events::fully_read::FullyReadEvent {
        content: ruma::events::fully_read::FullyReadEventContent {
            event_id: body.fully_read.clone(),
        },
    };
    db.account_data.update(
        Some(&body.room_id),
        sender_user,
        EventType::FullyRead,
        &fully_read_event,
        &db.globals,
    )?;

    if let Some(event) = &body.read_receipt {
        // Advance the private read marker to the receipted event's pdu count.
        db.rooms.edus.private_read_set(
            &body.room_id,
            sender_user,
            db.rooms.get_pdu_count(event)?.ok_or(Error::BadRequest(
                ErrorKind::InvalidParam,
                "Event does not exist.",
            ))?,
            &db.globals,
        )?;
        // Everything up to the marker is now read.
        db.rooms
            .reset_notification_counts(sender_user, &body.room_id)?;

        // Build the m.receipt EDU: event id -> receipt type -> user -> receipt.
        let mut user_receipts = BTreeMap::new();
        user_receipts.insert(
            sender_user.clone(),
            ruma::events::receipt::Receipt {
                ts: Some(MilliSecondsSinceUnixEpoch::now()),
            },
        );

        let mut receipts = BTreeMap::new();
        receipts.insert(ReceiptType::Read, user_receipts);

        let mut receipt_content = BTreeMap::new();
        receipt_content.insert(event.to_owned(), receipts);

        // Publish the public read receipt so other room members see it.
        db.rooms.edus.readreceipt_update(
            sender_user,
            &body.room_id,
            AnyEphemeralRoomEvent::Receipt(ruma::events::receipt::ReceiptEvent {
                content: ruma::events::receipt::ReceiptEventContent(receipt_content),
                room_id: body.room_id.clone(),
            }),
            &db.globals,
        )?;
    }

    db.flush()?;

    Ok(set_read_marker::Response {}.into())
}
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/rooms/{roomId}/receipt/{receiptType}/{eventId}`
///
/// Sets private read marker and public read receipt EDU.
#[cfg_attr(
    feature = "conduit_bin",
    post("/_matrix/client/r0/rooms/<_>/receipt/<_>/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn create_receipt_route(
    db: DatabaseGuard,
    body: Ruma<create_receipt::Request<'_>>,
) -> ConduitResult<create_receipt::Response> {
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");

    // Advance the private read marker to the receipted event's pdu count.
    db.rooms.edus.private_read_set(
        &body.room_id,
        sender_user,
        db.rooms
            .get_pdu_count(&body.event_id)?
            .ok_or(Error::BadRequest(
                ErrorKind::InvalidParam,
                "Event does not exist.",
            ))?,
        &db.globals,
    )?;
    // Everything up to the marker is now read.
    db.rooms
        .reset_notification_counts(sender_user, &body.room_id)?;

    // Build the m.receipt EDU: event id -> receipt type -> user -> receipt.
    let mut user_receipts = BTreeMap::new();
    user_receipts.insert(
        sender_user.clone(),
        ruma::events::receipt::Receipt {
            ts: Some(MilliSecondsSinceUnixEpoch::now()),
        },
    );
    let mut receipts = BTreeMap::new();
    receipts.insert(ReceiptType::Read, user_receipts);

    let mut receipt_content = BTreeMap::new();
    receipt_content.insert(body.event_id.to_owned(), receipts);

    // Publish the public read receipt so other room members see it.
    db.rooms.edus.readreceipt_update(
        sender_user,
        &body.room_id,
        AnyEphemeralRoomEvent::Receipt(ruma::events::receipt::ReceiptEvent {
            content: ruma::events::receipt::ReceiptEventContent(receipt_content),
            room_id: body.room_id.clone(),
        }),
        &db.globals,
    )?;

    db.flush()?;

    Ok(create_receipt::Response {}.into())
}
|
|
@ -1,808 +0,0 @@
|
|||||||
use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Result, Ruma, RumaResponse};
|
|
||||||
use ruma::{
|
|
||||||
api::client::r0::{sync::sync_events, uiaa::UiaaResponse},
|
|
||||||
events::{
|
|
||||||
room::member::{MembershipState, RoomMemberEventContent},
|
|
||||||
AnySyncEphemeralRoomEvent, EventType,
|
|
||||||
},
|
|
||||||
serde::Raw,
|
|
||||||
DeviceId, RoomId, UserId,
|
|
||||||
};
|
|
||||||
use std::{
|
|
||||||
collections::{hash_map::Entry, BTreeMap, HashMap, HashSet},
|
|
||||||
convert::{TryFrom, TryInto},
|
|
||||||
sync::Arc,
|
|
||||||
time::Duration,
|
|
||||||
};
|
|
||||||
use tokio::sync::watch::Sender;
|
|
||||||
use tracing::error;
|
|
||||||
|
|
||||||
#[cfg(feature = "conduit_bin")]
|
|
||||||
use rocket::{get, tokio};
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/sync`
///
/// Synchronize the client's state with the latest state on the server.
///
/// - This endpoint takes a `since` parameter which should be the `next_batch` value from a
/// previous request for incremental syncs.
///
/// Calling this endpoint without a `since` parameter returns:
/// - Some of the most recent events of each timeline
/// - Notification counts for each room
/// - Joined and invited member counts, heroes
/// - All state events
///
/// Calling this endpoint with a `since` parameter from a previous `next_batch` returns:
/// For joined rooms:
/// - Some of the most recent events of each timeline that happened after since
/// - If user joined the room after since: All state events and device list updates in that room
/// - If the user was already in the room: A list of all events that are in the state now, but were
/// not in the state at `since`
/// - If the state we send contains a member event: Joined and invited member counts, heroes
/// - Device list updates that happened after `since`
/// - If there are events in the timeline we send or the user updated their read marker: Notification counts
/// - EDUs that are active now (read receipts, typing updates, presence)
///
/// For invited rooms:
/// - If the user was invited after `since`: A subset of the state of the room at the point of the invite
///
/// For left rooms:
/// - If the user left after `since`: prev_batch token, empty state (TODO: subset of the state at the point of the leave)
///
/// - Sync is handled in an async task; multiple requests from the same device with the same
/// `since` share one in-flight computation via a cached watch channel.
#[cfg_attr(
    feature = "conduit_bin",
    get("/_matrix/client/r0/sync", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub async fn sync_events_route(
    db: DatabaseGuard,
    body: Ruma<sync_events::Request<'_>>,
) -> Result<RumaResponse<sync_events::Response>, RumaResponse<UiaaResponse>> {
    // Both are guaranteed by the authenticated-endpoint middleware.
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
    let sender_device = body.sender_device.as_ref().expect("user is authenticated");

    // The database guard is shared with the spawned sync task, so it must be
    // reference-counted.
    let arc_db = Arc::new(db);

    // One watch channel per (user, device): if a /sync for the same `since`
    // token is already running (or cached), reuse its receiver instead of
    // recomputing the response.
    let mut rx = match arc_db
        .globals
        .sync_receivers
        .write()
        .unwrap()
        .entry((sender_user.clone(), sender_device.clone()))
    {
        Entry::Vacant(v) => {
            // No sync in flight for this device: start one in the background.
            let (tx, rx) = tokio::sync::watch::channel(None);

            tokio::spawn(sync_helper_wrapper(
                Arc::clone(&arc_db),
                sender_user.clone(),
                sender_device.clone(),
                body.since.clone(),
                body.full_state,
                body.timeout,
                tx,
            ));

            v.insert((body.since.clone(), rx)).1.clone()
        }
        Entry::Occupied(mut o) => {
            if o.get().0 != body.since {
                // The cached task was for a different `since` token — the
                // client made progress, so start a fresh computation and
                // replace the cache entry.
                let (tx, rx) = tokio::sync::watch::channel(None);

                tokio::spawn(sync_helper_wrapper(
                    Arc::clone(&arc_db),
                    sender_user.clone(),
                    sender_device.clone(),
                    body.since.clone(),
                    body.full_state,
                    body.timeout,
                    tx,
                ));

                o.insert((body.since.clone(), rx.clone()));

                rx
            } else {
                // Same `since`: piggyback on the existing computation.
                o.get().1.clone()
            }
        }
    };

    // `None` means the helper task has not produced a response yet; wait for
    // the channel to change in that case.
    let we_have_to_wait = rx.borrow().is_none();
    if we_have_to_wait {
        if let Err(e) = rx.changed().await {
            error!("Error waiting for sync: {}", e);
        }
    }

    // Bind the result before returning so the `rx.borrow()` guard is dropped
    // at the end of this statement, not held across the return.
    let result = match rx
        .borrow()
        .as_ref()
        .expect("When sync channel changes it's always set to some")
    {
        Ok(response) => Ok(response.clone()),
        Err(error) => Err(error.to_response()),
    };

    result
}
|
|
||||||
|
|
||||||
/// Runs [`sync_helper`] and publishes its result on the watch channel that
/// `sync_events_route` handed out to waiting clients.
///
/// If the helper reports that its response must not be cached (e.g. it made no
/// progress), the cached channel entry for this (user, device) pair is removed
/// so the next request recomputes — but only if no newer /sync with a
/// different `since` token has replaced the entry in the meantime.
async fn sync_helper_wrapper(
    db: Arc<DatabaseGuard>,
    sender_user: UserId,
    sender_device: Box<DeviceId>,
    since: Option<String>,
    full_state: bool,
    timeout: Option<Duration>,
    tx: Sender<Option<ConduitResult<sync_events::Response>>>,
) {
    let r = sync_helper(
        Arc::clone(&db),
        sender_user.clone(),
        sender_device.clone(),
        since.clone(),
        full_state,
        timeout,
    )
    .await;

    // `caching_allowed` is the bool half of the helper's result; `_` does not
    // move the response out, so `r` stays usable below.
    if let Ok((_, caching_allowed)) = r {
        if !caching_allowed {
            match db
                .globals
                .sync_receivers
                .write()
                .unwrap()
                .entry((sender_user, sender_device))
            {
                Entry::Occupied(o) => {
                    // Only remove if the device didn't start a different /sync already
                    if o.get().0 == since {
                        o.remove();
                    }
                }
                Entry::Vacant(_) => {}
            }
        }
    }

    // Release the database guard before notifying receivers.
    drop(db);

    // Receivers may all have gone away; a send error is not actionable here.
    let _ = tx.send(Some(r.map(|(r, _)| r.into())));
}
|
|
||||||
|
|
||||||
async fn sync_helper(
|
|
||||||
db: Arc<DatabaseGuard>,
|
|
||||||
sender_user: UserId,
|
|
||||||
sender_device: Box<DeviceId>,
|
|
||||||
since: Option<String>,
|
|
||||||
full_state: bool,
|
|
||||||
timeout: Option<Duration>,
|
|
||||||
// bool = caching allowed
|
|
||||||
) -> Result<(sync_events::Response, bool), Error> {
|
|
||||||
// TODO: match body.set_presence {
|
|
||||||
db.rooms.edus.ping_presence(&sender_user)?;
|
|
||||||
|
|
||||||
// Setup watchers, so if there's no response, we can wait for them
|
|
||||||
let watcher = db.watch(&sender_user, &sender_device);
|
|
||||||
|
|
||||||
let next_batch = db.globals.current_count()?;
|
|
||||||
let next_batch_string = next_batch.to_string();
|
|
||||||
|
|
||||||
let mut joined_rooms = BTreeMap::new();
|
|
||||||
let since = since
|
|
||||||
.clone()
|
|
||||||
.and_then(|string| string.parse().ok())
|
|
||||||
.unwrap_or(0);
|
|
||||||
|
|
||||||
let mut presence_updates = HashMap::new();
|
|
||||||
let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in
|
|
||||||
let mut device_list_updates = HashSet::new();
|
|
||||||
let mut device_list_left = HashSet::new();
|
|
||||||
|
|
||||||
// Look for device list updates of this account
|
|
||||||
device_list_updates.extend(
|
|
||||||
db.users
|
|
||||||
.keys_changed(&sender_user.to_string(), since, None)
|
|
||||||
.filter_map(|r| r.ok()),
|
|
||||||
);
|
|
||||||
|
|
||||||
let all_joined_rooms = db.rooms.rooms_joined(&sender_user).collect::<Vec<_>>();
|
|
||||||
for room_id in all_joined_rooms {
|
|
||||||
let room_id = room_id?;
|
|
||||||
|
|
||||||
// Get and drop the lock to wait for remaining operations to finish
|
|
||||||
// This will make sure the we have all events until next_batch
|
|
||||||
let mutex_insert = Arc::clone(
|
|
||||||
db.globals
|
|
||||||
.roomid_mutex_insert
|
|
||||||
.write()
|
|
||||||
.unwrap()
|
|
||||||
.entry(room_id.clone())
|
|
||||||
.or_default(),
|
|
||||||
);
|
|
||||||
let insert_lock = mutex_insert.lock().unwrap();
|
|
||||||
drop(insert_lock);
|
|
||||||
|
|
||||||
let mut non_timeline_pdus = db
|
|
||||||
.rooms
|
|
||||||
.pdus_until(&sender_user, &room_id, u64::MAX)?
|
|
||||||
.filter_map(|r| {
|
|
||||||
// Filter out buggy events
|
|
||||||
if r.is_err() {
|
|
||||||
error!("Bad pdu in pdus_since: {:?}", r);
|
|
||||||
}
|
|
||||||
r.ok()
|
|
||||||
})
|
|
||||||
.take_while(|(pduid, _)| {
|
|
||||||
db.rooms
|
|
||||||
.pdu_count(pduid)
|
|
||||||
.map_or(false, |count| count > since)
|
|
||||||
});
|
|
||||||
|
|
||||||
// Take the last 10 events for the timeline
|
|
||||||
let timeline_pdus: Vec<_> = non_timeline_pdus
|
|
||||||
.by_ref()
|
|
||||||
.take(10)
|
|
||||||
.collect::<Vec<_>>()
|
|
||||||
.into_iter()
|
|
||||||
.rev()
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
let send_notification_counts = !timeline_pdus.is_empty()
|
|
||||||
|| db
|
|
||||||
.rooms
|
|
||||||
.edus
|
|
||||||
.last_privateread_update(&sender_user, &room_id)?
|
|
||||||
> since;
|
|
||||||
|
|
||||||
// They /sync response doesn't always return all messages, so we say the output is
|
|
||||||
// limited unless there are events in non_timeline_pdus
|
|
||||||
let limited = non_timeline_pdus.next().is_some();
|
|
||||||
|
|
||||||
// Database queries:
|
|
||||||
|
|
||||||
let current_shortstatehash = db
|
|
||||||
.rooms
|
|
||||||
.current_shortstatehash(&room_id)?
|
|
||||||
.expect("All rooms have state");
|
|
||||||
|
|
||||||
let since_shortstatehash = db.rooms.get_token_shortstatehash(&room_id, since)?;
|
|
||||||
|
|
||||||
// Calculates joined_member_count, invited_member_count and heroes
|
|
||||||
let calculate_counts = || {
|
|
||||||
let joined_member_count = db.rooms.room_joined_count(&room_id)?.unwrap_or(0);
|
|
||||||
let invited_member_count = db.rooms.room_invited_count(&room_id)?.unwrap_or(0);
|
|
||||||
|
|
||||||
// Recalculate heroes (first 5 members)
|
|
||||||
let mut heroes = Vec::new();
|
|
||||||
|
|
||||||
if joined_member_count + invited_member_count <= 5 {
|
|
||||||
// Go through all PDUs and for each member event, check if the user is still joined or
|
|
||||||
// invited until we have 5 or we reach the end
|
|
||||||
|
|
||||||
for hero in db
|
|
||||||
.rooms
|
|
||||||
.all_pdus(&sender_user, &room_id)?
|
|
||||||
.filter_map(|pdu| pdu.ok()) // Ignore all broken pdus
|
|
||||||
.filter(|(_, pdu)| pdu.kind == EventType::RoomMember)
|
|
||||||
.map(|(_, pdu)| {
|
|
||||||
let content: RoomMemberEventContent =
|
|
||||||
serde_json::from_str(pdu.content.get()).map_err(|_| {
|
|
||||||
Error::bad_database("Invalid member event in database.")
|
|
||||||
})?;
|
|
||||||
|
|
||||||
if let Some(state_key) = &pdu.state_key {
|
|
||||||
let user_id = UserId::try_from(state_key.clone()).map_err(|_| {
|
|
||||||
Error::bad_database("Invalid UserId in member PDU.")
|
|
||||||
})?;
|
|
||||||
|
|
||||||
// The membership was and still is invite or join
|
|
||||||
if matches!(
|
|
||||||
content.membership,
|
|
||||||
MembershipState::Join | MembershipState::Invite
|
|
||||||
) && (db.rooms.is_joined(&user_id, &room_id)?
|
|
||||||
|| db.rooms.is_invited(&user_id, &room_id)?)
|
|
||||||
{
|
|
||||||
Ok::<_, Error>(Some(state_key.clone()))
|
|
||||||
} else {
|
|
||||||
Ok(None)
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
Ok(None)
|
|
||||||
}
|
|
||||||
})
|
|
||||||
// Filter out buggy users
|
|
||||||
.filter_map(|u| u.ok())
|
|
||||||
// Filter for possible heroes
|
|
||||||
.flatten()
|
|
||||||
{
|
|
||||||
if heroes.contains(&hero) || hero == sender_user.as_str() {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
heroes.push(hero);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok::<_, Error>((
|
|
||||||
Some(joined_member_count),
|
|
||||||
Some(invited_member_count),
|
|
||||||
heroes,
|
|
||||||
))
|
|
||||||
};
|
|
||||||
|
|
||||||
let (
|
|
||||||
heroes,
|
|
||||||
joined_member_count,
|
|
||||||
invited_member_count,
|
|
||||||
joined_since_last_sync,
|
|
||||||
state_events,
|
|
||||||
) = if since_shortstatehash.is_none() {
|
|
||||||
// Probably since = 0, we will do an initial sync
|
|
||||||
let (joined_member_count, invited_member_count, heroes) = calculate_counts()?;
|
|
||||||
|
|
||||||
let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?;
|
|
||||||
let state_events: Vec<_> = current_state_ids
|
|
||||||
.iter()
|
|
||||||
.map(|(_, id)| db.rooms.get_pdu(id))
|
|
||||||
.filter_map(|r| r.ok().flatten())
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
(
|
|
||||||
heroes,
|
|
||||||
joined_member_count,
|
|
||||||
invited_member_count,
|
|
||||||
true,
|
|
||||||
state_events,
|
|
||||||
)
|
|
||||||
} else if timeline_pdus.is_empty() && since_shortstatehash == Some(current_shortstatehash) {
|
|
||||||
// No state changes
|
|
||||||
(Vec::new(), None, None, false, Vec::new())
|
|
||||||
} else {
|
|
||||||
// Incremental /sync
|
|
||||||
let since_shortstatehash = since_shortstatehash.unwrap();
|
|
||||||
|
|
||||||
let since_sender_member: Option<RoomMemberEventContent> = db
|
|
||||||
.rooms
|
|
||||||
.state_get(
|
|
||||||
since_shortstatehash,
|
|
||||||
&EventType::RoomMember,
|
|
||||||
sender_user.as_str(),
|
|
||||||
)?
|
|
||||||
.and_then(|pdu| {
|
|
||||||
serde_json::from_str(pdu.content.get())
|
|
||||||
.map_err(|_| Error::bad_database("Invalid PDU in database."))
|
|
||||||
.ok()
|
|
||||||
});
|
|
||||||
|
|
||||||
let joined_since_last_sync = since_sender_member
|
|
||||||
.map_or(true, |member| member.membership != MembershipState::Join);
|
|
||||||
|
|
||||||
let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?;
|
|
||||||
|
|
||||||
let since_state_ids = db.rooms.state_full_ids(since_shortstatehash)?;
|
|
||||||
|
|
||||||
let state_events = if joined_since_last_sync {
|
|
||||||
current_state_ids
|
|
||||||
.iter()
|
|
||||||
.map(|(_, id)| db.rooms.get_pdu(id))
|
|
||||||
.filter_map(|r| r.ok().flatten())
|
|
||||||
.collect::<Vec<_>>()
|
|
||||||
} else {
|
|
||||||
current_state_ids
|
|
||||||
.iter()
|
|
||||||
.filter(|(key, id)| since_state_ids.get(key) != Some(id))
|
|
||||||
.map(|(_, id)| db.rooms.get_pdu(id))
|
|
||||||
.filter_map(|r| r.ok().flatten())
|
|
||||||
.collect()
|
|
||||||
};
|
|
||||||
|
|
||||||
let encrypted_room = db
|
|
||||||
.rooms
|
|
||||||
.state_get(current_shortstatehash, &EventType::RoomEncryption, "")?
|
|
||||||
.is_some();
|
|
||||||
|
|
||||||
let since_encryption =
|
|
||||||
db.rooms
|
|
||||||
.state_get(since_shortstatehash, &EventType::RoomEncryption, "")?;
|
|
||||||
|
|
||||||
// Calculations:
|
|
||||||
let new_encrypted_room = encrypted_room && since_encryption.is_none();
|
|
||||||
|
|
||||||
let send_member_count = state_events
|
|
||||||
.iter()
|
|
||||||
.any(|event| event.kind == EventType::RoomMember);
|
|
||||||
|
|
||||||
if encrypted_room {
|
|
||||||
for state_event in &state_events {
|
|
||||||
if state_event.kind != EventType::RoomMember {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(state_key) = &state_event.state_key {
|
|
||||||
let user_id = UserId::try_from(state_key.clone())
|
|
||||||
.map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?;
|
|
||||||
|
|
||||||
if user_id == sender_user {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
let new_membership = serde_json::from_str::<RoomMemberEventContent>(
|
|
||||||
state_event.content.get(),
|
|
||||||
)
|
|
||||||
.map_err(|_| Error::bad_database("Invalid PDU in database."))?
|
|
||||||
.membership;
|
|
||||||
|
|
||||||
match new_membership {
|
|
||||||
MembershipState::Join => {
|
|
||||||
// A new user joined an encrypted room
|
|
||||||
if !share_encrypted_room(&db, &sender_user, &user_id, &room_id)? {
|
|
||||||
device_list_updates.insert(user_id);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
MembershipState::Leave => {
|
|
||||||
// Write down users that have left encrypted rooms we are in
|
|
||||||
left_encrypted_users.insert(user_id);
|
|
||||||
}
|
|
||||||
_ => {}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if joined_since_last_sync && encrypted_room || new_encrypted_room {
|
|
||||||
// If the user is in a new encrypted room, give them all joined users
|
|
||||||
device_list_updates.extend(
|
|
||||||
db.rooms
|
|
||||||
.room_members(&room_id)
|
|
||||||
.flatten()
|
|
||||||
.filter(|user_id| {
|
|
||||||
// Don't send key updates from the sender to the sender
|
|
||||||
&sender_user != user_id
|
|
||||||
})
|
|
||||||
.filter(|user_id| {
|
|
||||||
// Only send keys if the sender doesn't share an encrypted room with the target already
|
|
||||||
!share_encrypted_room(&db, &sender_user, user_id, &room_id)
|
|
||||||
.unwrap_or(false)
|
|
||||||
}),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
let (joined_member_count, invited_member_count, heroes) = if send_member_count {
|
|
||||||
calculate_counts()?
|
|
||||||
} else {
|
|
||||||
(None, None, Vec::new())
|
|
||||||
};
|
|
||||||
|
|
||||||
(
|
|
||||||
heroes,
|
|
||||||
joined_member_count,
|
|
||||||
invited_member_count,
|
|
||||||
joined_since_last_sync,
|
|
||||||
state_events,
|
|
||||||
)
|
|
||||||
};
|
|
||||||
|
|
||||||
// Look for device list updates in this room
|
|
||||||
device_list_updates.extend(
|
|
||||||
db.users
|
|
||||||
.keys_changed(&room_id.to_string(), since, None)
|
|
||||||
.filter_map(|r| r.ok()),
|
|
||||||
);
|
|
||||||
|
|
||||||
let notification_count = if send_notification_counts {
|
|
||||||
Some(
|
|
||||||
db.rooms
|
|
||||||
.notification_count(&sender_user, &room_id)?
|
|
||||||
.try_into()
|
|
||||||
.expect("notification count can't go that high"),
|
|
||||||
)
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
};
|
|
||||||
|
|
||||||
let highlight_count = if send_notification_counts {
|
|
||||||
Some(
|
|
||||||
db.rooms
|
|
||||||
.highlight_count(&sender_user, &room_id)?
|
|
||||||
.try_into()
|
|
||||||
.expect("highlight count can't go that high"),
|
|
||||||
)
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
};
|
|
||||||
|
|
||||||
let prev_batch = timeline_pdus
|
|
||||||
.first()
|
|
||||||
.map_or(Ok::<_, Error>(None), |(pdu_id, _)| {
|
|
||||||
Ok(Some(db.rooms.pdu_count(pdu_id)?.to_string()))
|
|
||||||
})?;
|
|
||||||
|
|
||||||
let room_events: Vec<_> = timeline_pdus
|
|
||||||
.iter()
|
|
||||||
.map(|(_, pdu)| pdu.to_sync_room_event())
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
let mut edus: Vec<_> = db
|
|
||||||
.rooms
|
|
||||||
.edus
|
|
||||||
.readreceipts_since(&room_id, since)
|
|
||||||
.filter_map(|r| r.ok()) // Filter out buggy events
|
|
||||||
.map(|(_, _, v)| v)
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
if db.rooms.edus.last_typing_update(&room_id, &db.globals)? > since {
|
|
||||||
edus.push(
|
|
||||||
serde_json::from_str(
|
|
||||||
&serde_json::to_string(&AnySyncEphemeralRoomEvent::Typing(
|
|
||||||
db.rooms.edus.typings_all(&room_id)?,
|
|
||||||
))
|
|
||||||
.expect("event is valid, we just created it"),
|
|
||||||
)
|
|
||||||
.expect("event is valid, we just created it"),
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Save the state after this sync so we can send the correct state diff next sync
|
|
||||||
db.rooms
|
|
||||||
.associate_token_shortstatehash(&room_id, next_batch, current_shortstatehash)?;
|
|
||||||
|
|
||||||
let joined_room = sync_events::JoinedRoom {
|
|
||||||
account_data: sync_events::RoomAccountData {
|
|
||||||
events: db
|
|
||||||
.account_data
|
|
||||||
.changes_since(Some(&room_id), &sender_user, since)?
|
|
||||||
.into_iter()
|
|
||||||
.filter_map(|(_, v)| {
|
|
||||||
serde_json::from_str(v.json().get())
|
|
||||||
.map_err(|_| Error::bad_database("Invalid account event in database."))
|
|
||||||
.ok()
|
|
||||||
})
|
|
||||||
.collect(),
|
|
||||||
},
|
|
||||||
summary: sync_events::RoomSummary {
|
|
||||||
heroes,
|
|
||||||
joined_member_count: joined_member_count.map(|n| (n as u32).into()),
|
|
||||||
invited_member_count: invited_member_count.map(|n| (n as u32).into()),
|
|
||||||
},
|
|
||||||
unread_notifications: sync_events::UnreadNotificationsCount {
|
|
||||||
highlight_count,
|
|
||||||
notification_count,
|
|
||||||
},
|
|
||||||
timeline: sync_events::Timeline {
|
|
||||||
limited: limited || joined_since_last_sync,
|
|
||||||
prev_batch,
|
|
||||||
events: room_events,
|
|
||||||
},
|
|
||||||
state: sync_events::State {
|
|
||||||
events: state_events
|
|
||||||
.iter()
|
|
||||||
.map(|pdu| pdu.to_sync_state_event())
|
|
||||||
.collect(),
|
|
||||||
},
|
|
||||||
ephemeral: sync_events::Ephemeral { events: edus },
|
|
||||||
};
|
|
||||||
|
|
||||||
if !joined_room.is_empty() {
|
|
||||||
joined_rooms.insert(room_id.clone(), joined_room);
|
|
||||||
}
|
|
||||||
|
|
||||||
// Take presence updates from this room
|
|
||||||
for (user_id, presence) in
|
|
||||||
db.rooms
|
|
||||||
.edus
|
|
||||||
.presence_since(&room_id, since, &db.rooms, &db.globals)?
|
|
||||||
{
|
|
||||||
match presence_updates.entry(user_id) {
|
|
||||||
Entry::Vacant(v) => {
|
|
||||||
v.insert(presence);
|
|
||||||
}
|
|
||||||
Entry::Occupied(mut o) => {
|
|
||||||
let p = o.get_mut();
|
|
||||||
|
|
||||||
// Update existing presence event with more info
|
|
||||||
p.content.presence = presence.content.presence;
|
|
||||||
if let Some(status_msg) = presence.content.status_msg {
|
|
||||||
p.content.status_msg = Some(status_msg);
|
|
||||||
}
|
|
||||||
if let Some(last_active_ago) = presence.content.last_active_ago {
|
|
||||||
p.content.last_active_ago = Some(last_active_ago);
|
|
||||||
}
|
|
||||||
if let Some(displayname) = presence.content.displayname {
|
|
||||||
p.content.displayname = Some(displayname);
|
|
||||||
}
|
|
||||||
if let Some(avatar_url) = presence.content.avatar_url {
|
|
||||||
p.content.avatar_url = Some(avatar_url);
|
|
||||||
}
|
|
||||||
if let Some(currently_active) = presence.content.currently_active {
|
|
||||||
p.content.currently_active = Some(currently_active);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut left_rooms = BTreeMap::new();
|
|
||||||
let all_left_rooms: Vec<_> = db.rooms.rooms_left(&sender_user).collect();
|
|
||||||
for result in all_left_rooms {
|
|
||||||
let (room_id, left_state_events) = result?;
|
|
||||||
|
|
||||||
// Get and drop the lock to wait for remaining operations to finish
|
|
||||||
let mutex_insert = Arc::clone(
|
|
||||||
db.globals
|
|
||||||
.roomid_mutex_insert
|
|
||||||
.write()
|
|
||||||
.unwrap()
|
|
||||||
.entry(room_id.clone())
|
|
||||||
.or_default(),
|
|
||||||
);
|
|
||||||
let insert_lock = mutex_insert.lock().unwrap();
|
|
||||||
drop(insert_lock);
|
|
||||||
|
|
||||||
let left_count = db.rooms.get_left_count(&room_id, &sender_user)?;
|
|
||||||
|
|
||||||
// Left before last sync
|
|
||||||
if Some(since) >= left_count {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
left_rooms.insert(
|
|
||||||
room_id.clone(),
|
|
||||||
sync_events::LeftRoom {
|
|
||||||
account_data: sync_events::RoomAccountData { events: Vec::new() },
|
|
||||||
timeline: sync_events::Timeline {
|
|
||||||
limited: false,
|
|
||||||
prev_batch: Some(next_batch_string.clone()),
|
|
||||||
events: Vec::new(),
|
|
||||||
},
|
|
||||||
state: sync_events::State {
|
|
||||||
events: left_state_events,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut invited_rooms = BTreeMap::new();
|
|
||||||
let all_invited_rooms: Vec<_> = db.rooms.rooms_invited(&sender_user).collect();
|
|
||||||
for result in all_invited_rooms {
|
|
||||||
let (room_id, invite_state_events) = result?;
|
|
||||||
|
|
||||||
// Get and drop the lock to wait for remaining operations to finish
|
|
||||||
let mutex_insert = Arc::clone(
|
|
||||||
db.globals
|
|
||||||
.roomid_mutex_insert
|
|
||||||
.write()
|
|
||||||
.unwrap()
|
|
||||||
.entry(room_id.clone())
|
|
||||||
.or_default(),
|
|
||||||
);
|
|
||||||
let insert_lock = mutex_insert.lock().unwrap();
|
|
||||||
drop(insert_lock);
|
|
||||||
|
|
||||||
let invite_count = db.rooms.get_invite_count(&room_id, &sender_user)?;
|
|
||||||
|
|
||||||
// Invited before last sync
|
|
||||||
if Some(since) >= invite_count {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
invited_rooms.insert(
|
|
||||||
room_id.clone(),
|
|
||||||
sync_events::InvitedRoom {
|
|
||||||
invite_state: sync_events::InviteState {
|
|
||||||
events: invite_state_events,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
);
|
|
||||||
}
|
|
||||||
|
|
||||||
for user_id in left_encrypted_users {
|
|
||||||
let still_share_encrypted_room = db
|
|
||||||
.rooms
|
|
||||||
.get_shared_rooms(vec![sender_user.clone(), user_id.clone()])?
|
|
||||||
.filter_map(|r| r.ok())
|
|
||||||
.filter_map(|other_room_id| {
|
|
||||||
Some(
|
|
||||||
db.rooms
|
|
||||||
.room_state_get(&other_room_id, &EventType::RoomEncryption, "")
|
|
||||||
.ok()?
|
|
||||||
.is_some(),
|
|
||||||
)
|
|
||||||
})
|
|
||||||
.all(|encrypted| !encrypted);
|
|
||||||
// If the user doesn't share an encrypted room with the target anymore, we need to tell
|
|
||||||
// them
|
|
||||||
if still_share_encrypted_room {
|
|
||||||
device_list_left.insert(user_id);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Remove all to-device events the device received *last time*
|
|
||||||
db.users
|
|
||||||
.remove_to_device_events(&sender_user, &sender_device, since)?;
|
|
||||||
|
|
||||||
let response = sync_events::Response {
|
|
||||||
next_batch: next_batch_string,
|
|
||||||
rooms: sync_events::Rooms {
|
|
||||||
leave: left_rooms,
|
|
||||||
join: joined_rooms,
|
|
||||||
invite: invited_rooms,
|
|
||||||
knock: BTreeMap::new(), // TODO
|
|
||||||
},
|
|
||||||
presence: sync_events::Presence {
|
|
||||||
events: presence_updates
|
|
||||||
.into_iter()
|
|
||||||
.map(|(_, v)| Raw::new(&v).expect("PresenceEvent always serializes successfully"))
|
|
||||||
.collect(),
|
|
||||||
},
|
|
||||||
account_data: sync_events::GlobalAccountData {
|
|
||||||
events: db
|
|
||||||
.account_data
|
|
||||||
.changes_since(None, &sender_user, since)?
|
|
||||||
.into_iter()
|
|
||||||
.filter_map(|(_, v)| {
|
|
||||||
serde_json::from_str(v.json().get())
|
|
||||||
.map_err(|_| Error::bad_database("Invalid account event in database."))
|
|
||||||
.ok()
|
|
||||||
})
|
|
||||||
.collect(),
|
|
||||||
},
|
|
||||||
device_lists: sync_events::DeviceLists {
|
|
||||||
changed: device_list_updates.into_iter().collect(),
|
|
||||||
left: device_list_left.into_iter().collect(),
|
|
||||||
},
|
|
||||||
device_one_time_keys_count: db.users.count_one_time_keys(&sender_user, &sender_device)?,
|
|
||||||
to_device: sync_events::ToDevice {
|
|
||||||
events: db
|
|
||||||
.users
|
|
||||||
.get_to_device_events(&sender_user, &sender_device)?,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
|
|
||||||
// TODO: Retry the endpoint instead of returning (waiting for #118)
|
|
||||||
if !full_state
|
|
||||||
&& response.rooms.is_empty()
|
|
||||||
&& response.presence.is_empty()
|
|
||||||
&& response.account_data.is_empty()
|
|
||||||
&& response.device_lists.is_empty()
|
|
||||||
&& response.to_device.is_empty()
|
|
||||||
{
|
|
||||||
// Hang a few seconds so requests are not spammed
|
|
||||||
// Stop hanging if new info arrives
|
|
||||||
let mut duration = timeout.unwrap_or_default();
|
|
||||||
if duration.as_secs() > 30 {
|
|
||||||
duration = Duration::from_secs(30);
|
|
||||||
}
|
|
||||||
let _ = tokio::time::timeout(duration, watcher).await;
|
|
||||||
Ok((response, false))
|
|
||||||
} else {
|
|
||||||
Ok((response, since != next_batch)) // Only cache if we made progress
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tracing::instrument(skip(db))]
|
|
||||||
fn share_encrypted_room(
|
|
||||||
db: &Database,
|
|
||||||
sender_user: &UserId,
|
|
||||||
user_id: &UserId,
|
|
||||||
ignore_room: &RoomId,
|
|
||||||
) -> Result<bool> {
|
|
||||||
Ok(db
|
|
||||||
.rooms
|
|
||||||
.get_shared_rooms(vec![sender_user.clone(), user_id.clone()])?
|
|
||||||
.filter_map(|r| r.ok())
|
|
||||||
.filter(|room_id| room_id != ignore_room)
|
|
||||||
.filter_map(|other_room_id| {
|
|
||||||
Some(
|
|
||||||
db.rooms
|
|
||||||
.room_state_get(&other_room_id, &EventType::RoomEncryption, "")
|
|
||||||
.ok()?
|
|
||||||
.is_some(),
|
|
||||||
)
|
|
||||||
})
|
|
||||||
.any(|encrypted| encrypted))
|
|
||||||
}
|
|
@ -1,124 +0,0 @@
|
|||||||
use crate::{database::DatabaseGuard, ConduitResult, Ruma};
|
|
||||||
use ruma::{
|
|
||||||
api::client::r0::tag::{create_tag, delete_tag, get_tags},
|
|
||||||
events::{
|
|
||||||
tag::{TagEvent, TagEventContent},
|
|
||||||
EventType,
|
|
||||||
},
|
|
||||||
};
|
|
||||||
use std::collections::BTreeMap;
|
|
||||||
|
|
||||||
#[cfg(feature = "conduit_bin")]
|
|
||||||
use rocket::{delete, get, put};
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}`
|
|
||||||
///
|
|
||||||
/// Adds a tag to the room.
|
|
||||||
///
|
|
||||||
/// - Inserts the tag into the tag event of the room account data.
|
|
||||||
#[cfg_attr(
|
|
||||||
feature = "conduit_bin",
|
|
||||||
put("/_matrix/client/r0/user/<_>/rooms/<_>/tags/<_>", data = "<body>")
|
|
||||||
)]
|
|
||||||
#[tracing::instrument(skip(db, body))]
|
|
||||||
pub async fn update_tag_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<create_tag::Request<'_>>,
|
|
||||||
) -> ConduitResult<create_tag::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let mut tags_event = db
|
|
||||||
.account_data
|
|
||||||
.get(Some(&body.room_id), sender_user, EventType::Tag)?
|
|
||||||
.unwrap_or_else(|| TagEvent {
|
|
||||||
content: TagEventContent {
|
|
||||||
tags: BTreeMap::new(),
|
|
||||||
},
|
|
||||||
});
|
|
||||||
tags_event
|
|
||||||
.content
|
|
||||||
.tags
|
|
||||||
.insert(body.tag.clone().into(), body.tag_info.clone());
|
|
||||||
|
|
||||||
db.account_data.update(
|
|
||||||
Some(&body.room_id),
|
|
||||||
sender_user,
|
|
||||||
EventType::Tag,
|
|
||||||
&tags_event,
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(create_tag::Response {}.into())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `DELETE /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}`
|
|
||||||
///
|
|
||||||
/// Deletes a tag from the room.
|
|
||||||
///
|
|
||||||
/// - Removes the tag from the tag event of the room account data.
|
|
||||||
#[cfg_attr(
|
|
||||||
feature = "conduit_bin",
|
|
||||||
delete("/_matrix/client/r0/user/<_>/rooms/<_>/tags/<_>", data = "<body>")
|
|
||||||
)]
|
|
||||||
#[tracing::instrument(skip(db, body))]
|
|
||||||
pub async fn delete_tag_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<delete_tag::Request<'_>>,
|
|
||||||
) -> ConduitResult<delete_tag::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
let mut tags_event = db
|
|
||||||
.account_data
|
|
||||||
.get(Some(&body.room_id), sender_user, EventType::Tag)?
|
|
||||||
.unwrap_or_else(|| TagEvent {
|
|
||||||
content: TagEventContent {
|
|
||||||
tags: BTreeMap::new(),
|
|
||||||
},
|
|
||||||
});
|
|
||||||
tags_event.content.tags.remove(&body.tag.clone().into());
|
|
||||||
|
|
||||||
db.account_data.update(
|
|
||||||
Some(&body.room_id),
|
|
||||||
sender_user,
|
|
||||||
EventType::Tag,
|
|
||||||
&tags_event,
|
|
||||||
&db.globals,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
db.flush()?;
|
|
||||||
|
|
||||||
Ok(delete_tag::Response {}.into())
|
|
||||||
}
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags`
|
|
||||||
///
|
|
||||||
/// Returns tags on the room.
|
|
||||||
///
|
|
||||||
/// - Gets the tag event of the room account data.
|
|
||||||
#[cfg_attr(
|
|
||||||
feature = "conduit_bin",
|
|
||||||
get("/_matrix/client/r0/user/<_>/rooms/<_>/tags", data = "<body>")
|
|
||||||
)]
|
|
||||||
#[tracing::instrument(skip(db, body))]
|
|
||||||
pub async fn get_tags_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<get_tags::Request<'_>>,
|
|
||||||
) -> ConduitResult<get_tags::Response> {
|
|
||||||
let sender_user = body.sender_user.as_ref().expect("user is authenticated");
|
|
||||||
|
|
||||||
Ok(get_tags::Response {
|
|
||||||
tags: db
|
|
||||||
.account_data
|
|
||||||
.get(Some(&body.room_id), sender_user, EventType::Tag)?
|
|
||||||
.unwrap_or_else(|| TagEvent {
|
|
||||||
content: TagEventContent {
|
|
||||||
tags: BTreeMap::new(),
|
|
||||||
},
|
|
||||||
})
|
|
||||||
.content
|
|
||||||
.tags,
|
|
||||||
}
|
|
||||||
.into())
|
|
||||||
}
|
|
@ -1,22 +0,0 @@
|
|||||||
use crate::ConduitResult;
|
|
||||||
use ruma::api::client::r0::thirdparty::get_protocols;
|
|
||||||
|
|
||||||
#[cfg(feature = "conduit_bin")]
|
|
||||||
use rocket::get;
|
|
||||||
use std::collections::BTreeMap;
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/thirdparty/protocols`
|
|
||||||
///
|
|
||||||
/// TODO: Fetches all metadata about protocols supported by the homeserver.
|
|
||||||
#[cfg_attr(
|
|
||||||
feature = "conduit_bin",
|
|
||||||
get("/_matrix/client/r0/thirdparty/protocols")
|
|
||||||
)]
|
|
||||||
#[tracing::instrument]
|
|
||||||
pub async fn get_protocols_route() -> ConduitResult<get_protocols::Response> {
|
|
||||||
// TODO
|
|
||||||
Ok(get_protocols::Response {
|
|
||||||
protocols: BTreeMap::new(),
|
|
||||||
}
|
|
||||||
.into())
|
|
||||||
}
|
|
@ -1,36 +0,0 @@
|
|||||||
use crate::{database::DatabaseGuard, utils, ConduitResult, Ruma};
|
|
||||||
use create_typing_event::Typing;
|
|
||||||
use ruma::api::client::r0::typing::create_typing_event;
|
|
||||||
|
|
||||||
#[cfg(feature = "conduit_bin")]
|
|
||||||
use rocket::put;
|
|
||||||
|
|
||||||
/// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}`
///
/// Sets the typing state of the sender user.
#[cfg_attr(
    feature = "conduit_bin",
    put("/_matrix/client/r0/rooms/<_>/typing/<_>", data = "<body>")
)]
#[tracing::instrument(skip(db, body))]
pub fn create_typing_event_route(
    db: DatabaseGuard,
    body: Ruma<create_typing_event::Request<'_>>,
) -> ConduitResult<create_typing_event::Response> {
    // Authentication runs before this handler, so sender_user is always present.
    let sender_user = body.sender_user.as_ref().expect("user is authenticated");

    if let Typing::Yes(duration) = body.state {
        // Store an absolute expiry timestamp: now + client-requested duration,
        // in milliseconds since the Unix epoch.
        // NOTE(review): `as u64` truncates the u128 from as_millis(); harmless
        // for sane client durations but unchecked — confirm upstream validation.
        db.rooms.edus.typing_add(
            sender_user,
            &body.room_id,
            duration.as_millis() as u64 + utils::millis_since_unix_epoch(),
            &db.globals,
        )?;
    } else {
        // Typing::No — clear any stored typing state for this user in this room.
        db.rooms
            .edus
            .typing_remove(sender_user, &body.room_id, &db.globals)?;
    }

    Ok(create_typing_event::Response {}.into())
}
|
|
@ -1,27 +0,0 @@
|
|||||||
use crate::ConduitResult;
|
|
||||||
use ruma::api::client::unversioned::get_supported_versions;
|
|
||||||
|
|
||||||
#[cfg(feature = "conduit_bin")]
|
|
||||||
use rocket::get;
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/versions`
|
|
||||||
///
|
|
||||||
/// Get the versions of the specification and unstable features supported by this server.
|
|
||||||
///
|
|
||||||
/// - Versions take the form MAJOR.MINOR.PATCH
|
|
||||||
/// - Only the latest PATCH release will be reported for each MAJOR.MINOR value
|
|
||||||
/// - Unstable features are namespaced and may include version information in their name
|
|
||||||
///
|
|
||||||
/// Note: Unstable features are used while developing new features. Clients should avoid using
|
|
||||||
/// unstable features in their stable releases
|
|
||||||
#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/versions"))]
|
|
||||||
#[tracing::instrument]
|
|
||||||
pub async fn get_supported_versions_route() -> ConduitResult<get_supported_versions::Response> {
|
|
||||||
let mut resp =
|
|
||||||
get_supported_versions::Response::new(vec!["r0.5.0".to_owned(), "r0.6.0".to_owned()]);
|
|
||||||
|
|
||||||
resp.unstable_features
|
|
||||||
.insert("org.matrix.e2e_cross_signing".to_owned(), true);
|
|
||||||
|
|
||||||
Ok(resp.into())
|
|
||||||
}
|
|
@ -1,59 +0,0 @@
|
|||||||
use crate::{database::DatabaseGuard, ConduitResult, Ruma};
|
|
||||||
use ruma::api::client::r0::user_directory::search_users;
|
|
||||||
|
|
||||||
#[cfg(feature = "conduit_bin")]
|
|
||||||
use rocket::post;
|
|
||||||
|
|
||||||
/// # `POST /_matrix/client/r0/user_directory/search`
|
|
||||||
///
|
|
||||||
/// Searches all known users for a match.
|
|
||||||
///
|
|
||||||
/// - TODO: Hide users that are not in any public rooms?
|
|
||||||
#[cfg_attr(
|
|
||||||
feature = "conduit_bin",
|
|
||||||
post("/_matrix/client/r0/user_directory/search", data = "<body>")
|
|
||||||
)]
|
|
||||||
#[tracing::instrument(skip(db, body))]
|
|
||||||
pub async fn search_users_route(
|
|
||||||
db: DatabaseGuard,
|
|
||||||
body: Ruma<search_users::Request<'_>>,
|
|
||||||
) -> ConduitResult<search_users::Response> {
|
|
||||||
let limit = u64::from(body.limit) as usize;
|
|
||||||
|
|
||||||
let mut users = db.users.iter().filter_map(|user_id| {
|
|
||||||
// Filter out buggy users (they should not exist, but you never know...)
|
|
||||||
let user_id = user_id.ok()?;
|
|
||||||
|
|
||||||
let user = search_users::User {
|
|
||||||
user_id: user_id.clone(),
|
|
||||||
display_name: db.users.displayname(&user_id).ok()?,
|
|
||||||
avatar_url: db.users.avatar_url(&user_id).ok()?,
|
|
||||||
};
|
|
||||||
|
|
||||||
let user_id_matches = user
|
|
||||||
.user_id
|
|
||||||
.to_string()
|
|
||||||
.to_lowercase()
|
|
||||||
.contains(&body.search_term.to_lowercase());
|
|
||||||
|
|
||||||
let user_displayname_matches = user
|
|
||||||
.display_name
|
|
||||||
.as_ref()
|
|
||||||
.filter(|name| {
|
|
||||||
name.to_lowercase()
|
|
||||||
.contains(&body.search_term.to_lowercase())
|
|
||||||
})
|
|
||||||
.is_some();
|
|
||||||
|
|
||||||
if !user_id_matches && !user_displayname_matches {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
|
|
||||||
Some(user)
|
|
||||||
});
|
|
||||||
|
|
||||||
let results = users.by_ref().take(limit).collect();
|
|
||||||
let limited = users.next().is_some();
|
|
||||||
|
|
||||||
Ok(search_users::Response { results, limited }.into())
|
|
||||||
}
|
|
@ -1,21 +0,0 @@
|
|||||||
use crate::ConduitResult;
|
|
||||||
use ruma::api::client::r0::voip::get_turn_server_info;
|
|
||||||
use std::time::Duration;
|
|
||||||
|
|
||||||
#[cfg(feature = "conduit_bin")]
|
|
||||||
use rocket::get;
|
|
||||||
|
|
||||||
/// # `GET /_matrix/client/r0/voip/turnServer`
|
|
||||||
///
|
|
||||||
/// TODO: Returns information about the recommended turn server.
|
|
||||||
#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/voip/turnServer"))]
|
|
||||||
#[tracing::instrument]
|
|
||||||
pub async fn turn_server_route() -> ConduitResult<get_turn_server_info::Response> {
|
|
||||||
Ok(get_turn_server_info::Response {
|
|
||||||
username: "".to_owned(),
|
|
||||||
password: "".to_owned(),
|
|
||||||
uris: Vec::new(),
|
|
||||||
ttl: Duration::from_secs(60 * 60 * 24),
|
|
||||||
}
|
|
||||||
.into())
|
|
||||||
}
|
|
@ -0,0 +1,263 @@
|
|||||||
|
use std::{
|
||||||
|
collections::BTreeMap,
|
||||||
|
fmt,
|
||||||
|
net::{IpAddr, Ipv4Addr},
|
||||||
|
};
|
||||||
|
|
||||||
|
use ruma::{OwnedServerName, RoomVersionId};
|
||||||
|
use serde::{de::IgnoredAny, Deserialize};
|
||||||
|
use tracing::warn;
|
||||||
|
|
||||||
|
mod proxy;
|
||||||
|
|
||||||
|
use self::proxy::ProxyConfig;
|
||||||
|
|
||||||
|
/// Runtime configuration, deserialized from the config file by serde.
/// Fields carrying a `#[serde(default…)]` attribute are optional in the file;
/// the rest (`server_name`, `database_path`) are required.
#[derive(Clone, Debug, Deserialize)]
pub struct Config {
    // Listen address; defaults to 127.0.0.1 (see default_address).
    #[serde(default = "default_address")]
    pub address: IpAddr,
    // Listen port; defaults to 8000 (see default_port).
    #[serde(default = "default_port")]
    pub port: u16,
    // Optional TLS certificate/key paths; None disables TLS here.
    pub tls: Option<TlsConfig>,

    // The server's own Matrix server name (required).
    pub server_name: OwnedServerName,
    // Storage backend selector; defaults to "sqlite".
    #[serde(default = "default_database_backend")]
    pub database_backend: String,
    // Filesystem path of the database directory (required).
    pub database_path: String,
    #[serde(default = "default_db_cache_capacity_mb")]
    pub db_cache_capacity_mb: f64,
    #[serde(default = "true_fn")]
    pub enable_lightning_bolt: bool,
    // Multiplier applied to internal cache sizes; 1.0 by default.
    #[serde(default = "default_conduit_cache_capacity_modifier")]
    pub conduit_cache_capacity_modifier: f64,
    #[serde(default = "default_rocksdb_max_open_files")]
    pub rocksdb_max_open_files: i32,
    #[serde(default = "default_pdu_cache_capacity")]
    pub pdu_cache_capacity: u32,
    #[serde(default = "default_cleanup_second_interval")]
    pub cleanup_second_interval: u32,
    // Upper bound on request body size, in bytes (default 20 MB).
    #[serde(default = "default_max_request_size")]
    pub max_request_size: u32,
    #[serde(default = "default_max_concurrent_requests")]
    pub max_concurrent_requests: u16,
    // Feature toggles; `false_fn`/`true_fn` give the defaults their names imply.
    #[serde(default = "false_fn")]
    pub allow_registration: bool,
    #[serde(default = "true_fn")]
    pub allow_encryption: bool,
    #[serde(default = "false_fn")]
    pub allow_federation: bool,
    #[serde(default = "true_fn")]
    pub allow_room_creation: bool,
    #[serde(default = "true_fn")]
    pub allow_unstable_room_versions: bool,
    // Room version used for newly created rooms (default: V9).
    #[serde(default = "default_default_room_version")]
    pub default_room_version: RoomVersionId,
    // Tracing/telemetry toggles.
    #[serde(default = "false_fn")]
    pub allow_jaeger: bool,
    #[serde(default = "false_fn")]
    pub tracing_flame: bool,
    // Outbound proxy settings; ProxyConfig's Default applies when absent.
    #[serde(default)]
    pub proxy: ProxyConfig,
    // Secret for JWT login support; None disables it.
    pub jwt_secret: Option<String>,
    // Servers trusted for key queries; empty by default.
    #[serde(default = "Vec::new")]
    pub trusted_servers: Vec<OwnedServerName>,
    // Log filter directive string (tracing EnvFilter syntax).
    #[serde(default = "default_log")]
    pub log: String,
    // TURN server credentials/URIs for VoIP; all empty by default.
    #[serde(default)]
    pub turn_username: String,
    #[serde(default)]
    pub turn_password: String,
    #[serde(default = "Vec::new")]
    pub turn_uris: Vec<String>,
    #[serde(default)]
    pub turn_secret: String,
    // TURN credential lifetime in seconds (default: 24h).
    #[serde(default = "default_turn_ttl")]
    pub turn_ttl: u64,

    // Password granting admin access to the emergency account; None disables it.
    pub emergency_password: Option<String>,

    // Collects all unrecognized keys so warn_deprecated() can report them.
    #[serde(flatten)]
    pub catchall: BTreeMap<String, IgnoredAny>,
}
|
||||||
|
|
||||||
|
/// TLS listener configuration.
#[derive(Clone, Debug, Deserialize)]
pub struct TlsConfig {
    // Path to the certificate chain file.
    pub certs: String,
    // Path to the private key file.
    pub key: String,
}
|
||||||
|
|
||||||
|
const DEPRECATED_KEYS: &[&str] = &["cache_capacity"];
|
||||||
|
|
||||||
|
impl Config {
|
||||||
|
pub fn warn_deprecated(&self) {
|
||||||
|
let mut was_deprecated = false;
|
||||||
|
for key in self
|
||||||
|
.catchall
|
||||||
|
.keys()
|
||||||
|
.filter(|key| DEPRECATED_KEYS.iter().any(|s| s == key))
|
||||||
|
{
|
||||||
|
warn!("Config parameter {} is deprecated", key);
|
||||||
|
was_deprecated = true;
|
||||||
|
}
|
||||||
|
|
||||||
|
if was_deprecated {
|
||||||
|
warn!("Read conduit documentation and check your configuration if any new configuration parameters should be adjusted");
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
impl fmt::Display for Config {
|
||||||
|
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||||
|
// Prepare a list of config values to show
|
||||||
|
let lines = [
|
||||||
|
("Server name", self.server_name.host()),
|
||||||
|
("Database backend", &self.database_backend),
|
||||||
|
("Database path", &self.database_path),
|
||||||
|
(
|
||||||
|
"Database cache capacity (MB)",
|
||||||
|
&self.db_cache_capacity_mb.to_string(),
|
||||||
|
),
|
||||||
|
(
|
||||||
|
"Cache capacity modifier",
|
||||||
|
&self.conduit_cache_capacity_modifier.to_string(),
|
||||||
|
),
|
||||||
|
#[cfg(feature = "rocksdb")]
|
||||||
|
(
|
||||||
|
"Maximum open files for RocksDB",
|
||||||
|
&self.rocksdb_max_open_files.to_string(),
|
||||||
|
),
|
||||||
|
("PDU cache capacity", &self.pdu_cache_capacity.to_string()),
|
||||||
|
(
|
||||||
|
"Cleanup interval in seconds",
|
||||||
|
&self.cleanup_second_interval.to_string(),
|
||||||
|
),
|
||||||
|
("Maximum request size", &self.max_request_size.to_string()),
|
||||||
|
(
|
||||||
|
"Maximum concurrent requests",
|
||||||
|
&self.max_concurrent_requests.to_string(),
|
||||||
|
),
|
||||||
|
("Allow registration", &self.allow_registration.to_string()),
|
||||||
|
(
|
||||||
|
"Enabled lightning bolt",
|
||||||
|
&self.enable_lightning_bolt.to_string(),
|
||||||
|
),
|
||||||
|
("Allow encryption", &self.allow_encryption.to_string()),
|
||||||
|
("Allow federation", &self.allow_federation.to_string()),
|
||||||
|
("Allow room creation", &self.allow_room_creation.to_string()),
|
||||||
|
(
|
||||||
|
"JWT secret",
|
||||||
|
match self.jwt_secret {
|
||||||
|
Some(_) => "set",
|
||||||
|
None => "not set",
|
||||||
|
},
|
||||||
|
),
|
||||||
|
("Trusted servers", {
|
||||||
|
let mut lst = vec![];
|
||||||
|
for server in &self.trusted_servers {
|
||||||
|
lst.push(server.host());
|
||||||
|
}
|
||||||
|
&lst.join(", ")
|
||||||
|
}),
|
||||||
|
(
|
||||||
|
"TURN username",
|
||||||
|
if self.turn_username.is_empty() {
|
||||||
|
"not set"
|
||||||
|
} else {
|
||||||
|
&self.turn_username
|
||||||
|
},
|
||||||
|
),
|
||||||
|
("TURN password", {
|
||||||
|
if self.turn_password.is_empty() {
|
||||||
|
"not set"
|
||||||
|
} else {
|
||||||
|
"set"
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
("TURN secret", {
|
||||||
|
if self.turn_secret.is_empty() {
|
||||||
|
"not set"
|
||||||
|
} else {
|
||||||
|
"set"
|
||||||
|
}
|
||||||
|
}),
|
||||||
|
("Turn TTL", &self.turn_ttl.to_string()),
|
||||||
|
("Turn URIs", {
|
||||||
|
let mut lst = vec![];
|
||||||
|
for item in self.turn_uris.to_vec().into_iter().enumerate() {
|
||||||
|
let (_, uri): (usize, String) = item;
|
||||||
|
lst.push(uri);
|
||||||
|
}
|
||||||
|
&lst.join(", ")
|
||||||
|
}),
|
||||||
|
];
|
||||||
|
|
||||||
|
let mut msg: String = "Active config values:\n\n".to_string();
|
||||||
|
|
||||||
|
for line in lines.into_iter().enumerate() {
|
||||||
|
msg += &format!("{}: {}\n", line.1 .0, line.1 .1);
|
||||||
|
}
|
||||||
|
|
||||||
|
write!(f, "{}", msg)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// The functions below are serde `default = "…"` hooks for Config fields.
// Each simply names a constant so the chosen default is visible in the
// struct's attribute.

// Default for opt-in boolean settings.
fn false_fn() -> bool {
    false
}

// Default for opt-out boolean settings.
fn true_fn() -> bool {
    true
}

// Bind to loopback only unless configured otherwise.
fn default_address() -> IpAddr {
    Ipv4Addr::LOCALHOST.into()
}

fn default_port() -> u16 {
    8000
}

fn default_database_backend() -> String {
    "sqlite".to_owned()
}

fn default_db_cache_capacity_mb() -> f64 {
    10.0
}

fn default_conduit_cache_capacity_modifier() -> f64 {
    1.0
}

fn default_rocksdb_max_open_files() -> i32 {
    20
}

fn default_pdu_cache_capacity() -> u32 {
    150_000
}

fn default_cleanup_second_interval() -> u32 {
    60 // every minute
}

fn default_max_request_size() -> u32 {
    20 * 1024 * 1024 // Default to 20 MB
}

fn default_max_concurrent_requests() -> u16 {
    100
}

// tracing EnvFilter directive string used when `log` is not configured.
fn default_log() -> String {
    "warn,state_res=warn,_=off,sled=off".to_owned()
}

// TURN credential lifetime: 24 hours, in seconds.
fn default_turn_ttl() -> u64 {
    60 * 60 * 24
}

// I know, it's a great name: serde default for `default_room_version`.
pub fn default_default_room_version() -> RoomVersionId {
    RoomVersionId::V9
}
|
@ -1,977 +0,0 @@
|
|||||||
pub mod abstraction;
|
|
||||||
|
|
||||||
pub mod account_data;
|
|
||||||
pub mod admin;
|
|
||||||
pub mod appservice;
|
|
||||||
pub mod globals;
|
|
||||||
pub mod key_backups;
|
|
||||||
pub mod media;
|
|
||||||
pub mod proxy;
|
|
||||||
pub mod pusher;
|
|
||||||
pub mod rooms;
|
|
||||||
pub mod sending;
|
|
||||||
pub mod transaction_ids;
|
|
||||||
pub mod uiaa;
|
|
||||||
pub mod users;
|
|
||||||
|
|
||||||
use crate::{utils, Error, Result};
|
|
||||||
use abstraction::DatabaseEngine;
|
|
||||||
use directories::ProjectDirs;
|
|
||||||
use lru_cache::LruCache;
|
|
||||||
use rocket::{
|
|
||||||
futures::{channel::mpsc, stream::FuturesUnordered, StreamExt},
|
|
||||||
outcome::{try_outcome, IntoOutcome},
|
|
||||||
request::{FromRequest, Request},
|
|
||||||
Shutdown, State,
|
|
||||||
};
|
|
||||||
use ruma::{DeviceId, EventId, RoomId, ServerName, UserId};
|
|
||||||
use serde::{de::IgnoredAny, Deserialize};
|
|
||||||
use std::{
|
|
||||||
collections::{BTreeMap, HashMap, HashSet},
|
|
||||||
convert::{TryFrom, TryInto},
|
|
||||||
fs::{self, remove_dir_all},
|
|
||||||
io::Write,
|
|
||||||
mem::size_of,
|
|
||||||
ops::Deref,
|
|
||||||
path::Path,
|
|
||||||
sync::{Arc, Mutex, RwLock},
|
|
||||||
};
|
|
||||||
use tokio::sync::{OwnedRwLockReadGuard, RwLock as TokioRwLock, Semaphore};
|
|
||||||
use tracing::{debug, error, warn};
|
|
||||||
|
|
||||||
use self::proxy::ProxyConfig;
|
|
||||||
|
|
||||||
/// Runtime configuration, deserialized from the config file by serde.
/// Fields without a `#[serde(default…)]` attribute (`server_name`,
/// `database_path`) are required.
#[derive(Clone, Debug, Deserialize)]
pub struct Config {
    // This server's Matrix server name (required).
    server_name: Box<ServerName>,
    // Filesystem path of the database directory (required).
    database_path: String,
    #[serde(default = "default_db_cache_capacity_mb")]
    db_cache_capacity_mb: f64,
    #[serde(default = "default_pdu_cache_capacity")]
    pdu_cache_capacity: u32,
    // How often the sqlite WAL is cleaned, in seconds.
    #[serde(default = "default_sqlite_wal_clean_second_interval")]
    sqlite_wal_clean_second_interval: u32,
    // Upper bound on request body size, in bytes (default 20 MB).
    #[serde(default = "default_max_request_size")]
    max_request_size: u32,
    #[serde(default = "default_max_concurrent_requests")]
    max_concurrent_requests: u16,
    // Feature toggles; `false_fn`/`true_fn` give the defaults their names imply.
    #[serde(default = "false_fn")]
    allow_registration: bool,
    #[serde(default = "true_fn")]
    allow_encryption: bool,
    #[serde(default = "false_fn")]
    allow_federation: bool,
    #[serde(default = "true_fn")]
    allow_room_creation: bool,
    // Tracing/telemetry toggles.
    #[serde(default = "false_fn")]
    pub allow_jaeger: bool,
    #[serde(default = "false_fn")]
    pub tracing_flame: bool,
    // Outbound proxy settings; ProxyConfig's Default applies when absent.
    #[serde(default)]
    proxy: ProxyConfig,
    // Secret for JWT login support; None disables it.
    jwt_secret: Option<String>,
    // Servers trusted for key queries; empty by default.
    #[serde(default = "Vec::new")]
    trusted_servers: Vec<Box<ServerName>>,
    // Log filter directive string.
    #[serde(default = "default_log")]
    pub log: String,

    // Collects all unrecognized keys so warn_deprecated() can report them.
    #[serde(flatten)]
    catchall: BTreeMap<String, IgnoredAny>,
}
|
|
||||||
|
|
||||||
/// Config keys that are still accepted but no longer have any effect.
const DEPRECATED_KEYS: &[&str] = &["cache_capacity"];

impl Config {
    /// Emits a warning for each deprecated key present in the parsed config,
    /// plus one summary hint if any were found.
    pub fn warn_deprecated(&self) {
        let mut was_deprecated = false;
        // `catchall` holds every key serde did not recognize (#[serde(flatten)]).
        for key in self
            .catchall
            .keys()
            .filter(|key| DEPRECATED_KEYS.iter().any(|s| s == key))
        {
            warn!("Config parameter {} is deprecated", key);
            was_deprecated = true;
        }

        if was_deprecated {
            warn!("Read conduit documentation and check your configuration if any new configuration parameters should be adjusted");
        }
    }
}
|
|
||||||
|
|
||||||
// Serde `default = "…"` hooks for the Config fields above.

// Default for opt-in boolean settings.
fn false_fn() -> bool {
    false
}

// Default for opt-out boolean settings.
fn true_fn() -> bool {
    true
}

fn default_db_cache_capacity_mb() -> f64 {
    200.0
}

fn default_pdu_cache_capacity() -> u32 {
    100_000
}

fn default_sqlite_wal_clean_second_interval() -> u32 {
    1 * 60 // every minute
}

fn default_max_request_size() -> u32 {
    20 * 1024 * 1024 // Default to 20 MB
}

fn default_max_concurrent_requests() -> u16 {
    100
}

// tracing EnvFilter directive string used when `log` is not configured.
fn default_log() -> String {
    "info,state_res=warn,rocket=off,_=off,sled=off".to_owned()
}
|
|
||||||
|
|
||||||
// `Engine` aliases whichever storage backend was selected at compile time.
// NOTE(review): enabling more than one of these features would produce
// conflicting `Engine` definitions — confirm the features are mutually
// exclusive in Cargo.toml.
#[cfg(feature = "sled")]
pub type Engine = abstraction::sled::Engine;

#[cfg(feature = "sqlite")]
pub type Engine = abstraction::sqlite::Engine;

#[cfg(feature = "heed")]
pub type Engine = abstraction::heed::Engine;
|
|
||||||
|
|
||||||
/// Top-level handle bundling every persistence subsystem of the homeserver.
/// All fields are built from the same underlying storage engine in
/// `load_or_create`.
pub struct Database {
    // Keeps the engine alive for as long as the Database exists; the
    // subsystems below hold their own tree handles.
    _db: Arc<Engine>,
    pub globals: globals::Globals,
    pub users: users::Users,
    pub uiaa: uiaa::Uiaa,
    pub rooms: rooms::Rooms,
    pub account_data: account_data::AccountData,
    pub media: media::Media,
    pub key_backups: key_backups::KeyBackups,
    pub transaction_ids: transaction_ids::TransactionIds,
    pub sending: sending::Sending,
    pub admin: admin::Admin,
    pub appservice: appservice::Appservice,
    pub pusher: pusher::PushData,
}
|
|
||||||
|
|
||||||
impl Database {
|
|
||||||
    /// Tries to remove the old database but ignores all errors.
    pub fn try_remove(server_name: &str) -> Result<()> {
        // Resolve the platform-specific per-user data directory
        // ("xyz.koesters.conduit" qualifier/org/app triple).
        let mut path = ProjectDirs::from("xyz", "koesters", "conduit")
            .ok_or_else(|| Error::bad_config("The OS didn't return a valid home directory path."))?
            .data_dir()
            .to_path_buf();
        path.push(server_name);
        // Best-effort cleanup: deletion failures are deliberately ignored.
        let _ = remove_dir_all(path);

        Ok(())
    }
|
|
||||||
|
|
||||||
    /// Detects a leftover sled database in the configured database directory.
    /// Warns when both sled and sqlite files coexist; errors out when only a
    /// sled database is present (it must be migrated to sqlite first).
    /// No-op unless built with the sqlite backend.
    fn check_sled_or_sqlite_db(config: &Config) -> Result<()> {
        #[cfg(feature = "backend_sqlite")]
        {
            let path = Path::new(&config.database_path);

            // A "db" subdirectory is what sled created; "conduit.db" is sqlite.
            let sled_exists = path.join("db").exists();
            let sqlite_exists = path.join("conduit.db").exists();
            if sled_exists {
                if sqlite_exists {
                    // most likely an in-place directory, only warn
                    warn!("Both sled and sqlite databases are detected in database directory");
                    warn!("Currently running from the sqlite database, but consider removing sled database files to free up space")
                } else {
                    error!(
                        "Sled database detected, conduit now uses sqlite for database operations"
                    );
                    error!("This database must be converted to sqlite, go to https://github.com/ShadowJonathan/conduit_toolbox#conduit_sled_to_sqlite");
                    return Err(Error::bad_config(
                        "sled database detected, migrate to sqlite",
                    ));
                }
            }
        }

        Ok(())
    }
|
|
||||||
|
|
||||||
/// Load an existing database or create a new one.
|
|
||||||
pub async fn load_or_create(config: &Config) -> Result<Arc<TokioRwLock<Self>>> {
|
|
||||||
Self::check_sled_or_sqlite_db(config)?;
|
|
||||||
|
|
||||||
if !Path::new(&config.database_path).exists() {
|
|
||||||
std::fs::create_dir_all(&config.database_path)
|
|
||||||
.map_err(|_| Error::BadConfig("Database folder doesn't exists and couldn't be created (e.g. due to missing permissions). Please create the database folder yourself."))?;
|
|
||||||
}
|
|
||||||
|
|
||||||
let builder = Engine::open(config)?;
|
|
||||||
|
|
||||||
if config.max_request_size < 1024 {
|
|
||||||
eprintln!("ERROR: Max request size is less than 1KB. Please increase it.");
|
|
||||||
}
|
|
||||||
|
|
||||||
let (admin_sender, admin_receiver) = mpsc::unbounded();
|
|
||||||
let (sending_sender, sending_receiver) = mpsc::unbounded();
|
|
||||||
|
|
||||||
let db = Arc::new(TokioRwLock::from(Self {
|
|
||||||
_db: builder.clone(),
|
|
||||||
users: users::Users {
|
|
||||||
userid_password: builder.open_tree("userid_password")?,
|
|
||||||
userid_displayname: builder.open_tree("userid_displayname")?,
|
|
||||||
userid_avatarurl: builder.open_tree("userid_avatarurl")?,
|
|
||||||
userid_blurhash: builder.open_tree("userid_blurhash")?,
|
|
||||||
userdeviceid_token: builder.open_tree("userdeviceid_token")?,
|
|
||||||
userdeviceid_metadata: builder.open_tree("userdeviceid_metadata")?,
|
|
||||||
userid_devicelistversion: builder.open_tree("userid_devicelistversion")?,
|
|
||||||
token_userdeviceid: builder.open_tree("token_userdeviceid")?,
|
|
||||||
onetimekeyid_onetimekeys: builder.open_tree("onetimekeyid_onetimekeys")?,
|
|
||||||
userid_lastonetimekeyupdate: builder.open_tree("userid_lastonetimekeyupdate")?,
|
|
||||||
keychangeid_userid: builder.open_tree("keychangeid_userid")?,
|
|
||||||
keyid_key: builder.open_tree("keyid_key")?,
|
|
||||||
userid_masterkeyid: builder.open_tree("userid_masterkeyid")?,
|
|
||||||
userid_selfsigningkeyid: builder.open_tree("userid_selfsigningkeyid")?,
|
|
||||||
userid_usersigningkeyid: builder.open_tree("userid_usersigningkeyid")?,
|
|
||||||
todeviceid_events: builder.open_tree("todeviceid_events")?,
|
|
||||||
},
|
|
||||||
uiaa: uiaa::Uiaa {
|
|
||||||
userdevicesessionid_uiaainfo: builder.open_tree("userdevicesessionid_uiaainfo")?,
|
|
||||||
userdevicesessionid_uiaarequest: builder
|
|
||||||
.open_tree("userdevicesessionid_uiaarequest")?,
|
|
||||||
},
|
|
||||||
rooms: rooms::Rooms {
|
|
||||||
edus: rooms::RoomEdus {
|
|
||||||
readreceiptid_readreceipt: builder.open_tree("readreceiptid_readreceipt")?,
|
|
||||||
roomuserid_privateread: builder.open_tree("roomuserid_privateread")?, // "Private" read receipt
|
|
||||||
roomuserid_lastprivatereadupdate: builder
|
|
||||||
.open_tree("roomuserid_lastprivatereadupdate")?,
|
|
||||||
typingid_userid: builder.open_tree("typingid_userid")?,
|
|
||||||
roomid_lasttypingupdate: builder.open_tree("roomid_lasttypingupdate")?,
|
|
||||||
presenceid_presence: builder.open_tree("presenceid_presence")?,
|
|
||||||
userid_lastpresenceupdate: builder.open_tree("userid_lastpresenceupdate")?,
|
|
||||||
},
|
|
||||||
pduid_pdu: builder.open_tree("pduid_pdu")?,
|
|
||||||
eventid_pduid: builder.open_tree("eventid_pduid")?,
|
|
||||||
roomid_pduleaves: builder.open_tree("roomid_pduleaves")?,
|
|
||||||
|
|
||||||
alias_roomid: builder.open_tree("alias_roomid")?,
|
|
||||||
aliasid_alias: builder.open_tree("aliasid_alias")?,
|
|
||||||
publicroomids: builder.open_tree("publicroomids")?,
|
|
||||||
|
|
||||||
tokenids: builder.open_tree("tokenids")?,
|
|
||||||
|
|
||||||
roomserverids: builder.open_tree("roomserverids")?,
|
|
||||||
serverroomids: builder.open_tree("serverroomids")?,
|
|
||||||
userroomid_joined: builder.open_tree("userroomid_joined")?,
|
|
||||||
roomuserid_joined: builder.open_tree("roomuserid_joined")?,
|
|
||||||
roomid_joinedcount: builder.open_tree("roomid_joinedcount")?,
|
|
||||||
roomid_invitedcount: builder.open_tree("roomid_invitedcount")?,
|
|
||||||
roomuseroncejoinedids: builder.open_tree("roomuseroncejoinedids")?,
|
|
||||||
userroomid_invitestate: builder.open_tree("userroomid_invitestate")?,
|
|
||||||
roomuserid_invitecount: builder.open_tree("roomuserid_invitecount")?,
|
|
||||||
userroomid_leftstate: builder.open_tree("userroomid_leftstate")?,
|
|
||||||
roomuserid_leftcount: builder.open_tree("roomuserid_leftcount")?,
|
|
||||||
|
|
||||||
userroomid_notificationcount: builder.open_tree("userroomid_notificationcount")?,
|
|
||||||
userroomid_highlightcount: builder.open_tree("userroomid_highlightcount")?,
|
|
||||||
|
|
||||||
statekey_shortstatekey: builder.open_tree("statekey_shortstatekey")?,
|
|
||||||
shortstatekey_statekey: builder.open_tree("shortstatekey_statekey")?,
|
|
||||||
|
|
||||||
shorteventid_authchain: builder.open_tree("shorteventid_authchain")?,
|
|
||||||
|
|
||||||
roomid_shortroomid: builder.open_tree("roomid_shortroomid")?,
|
|
||||||
|
|
||||||
shortstatehash_statediff: builder.open_tree("shortstatehash_statediff")?,
|
|
||||||
eventid_shorteventid: builder.open_tree("eventid_shorteventid")?,
|
|
||||||
shorteventid_eventid: builder.open_tree("shorteventid_eventid")?,
|
|
||||||
shorteventid_shortstatehash: builder.open_tree("shorteventid_shortstatehash")?,
|
|
||||||
roomid_shortstatehash: builder.open_tree("roomid_shortstatehash")?,
|
|
||||||
roomsynctoken_shortstatehash: builder.open_tree("roomsynctoken_shortstatehash")?,
|
|
||||||
statehash_shortstatehash: builder.open_tree("statehash_shortstatehash")?,
|
|
||||||
|
|
||||||
eventid_outlierpdu: builder.open_tree("eventid_outlierpdu")?,
|
|
||||||
softfailedeventids: builder.open_tree("softfailedeventids")?,
|
|
||||||
|
|
||||||
referencedevents: builder.open_tree("referencedevents")?,
|
|
||||||
pdu_cache: Mutex::new(LruCache::new(
|
|
||||||
config
|
|
||||||
.pdu_cache_capacity
|
|
||||||
.try_into()
|
|
||||||
.expect("pdu cache capacity fits into usize"),
|
|
||||||
)),
|
|
||||||
auth_chain_cache: Mutex::new(LruCache::new(1_000_000)),
|
|
||||||
shorteventid_cache: Mutex::new(LruCache::new(1_000_000)),
|
|
||||||
eventidshort_cache: Mutex::new(LruCache::new(1_000_000)),
|
|
||||||
shortstatekey_cache: Mutex::new(LruCache::new(1_000_000)),
|
|
||||||
statekeyshort_cache: Mutex::new(LruCache::new(1_000_000)),
|
|
||||||
our_real_users_cache: RwLock::new(HashMap::new()),
|
|
||||||
appservice_in_room_cache: RwLock::new(HashMap::new()),
|
|
||||||
stateinfo_cache: Mutex::new(LruCache::new(1000)),
|
|
||||||
},
|
|
||||||
account_data: account_data::AccountData {
|
|
||||||
roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?,
|
|
||||||
roomusertype_roomuserdataid: builder.open_tree("roomusertype_roomuserdataid")?,
|
|
||||||
},
|
|
||||||
media: media::Media {
|
|
||||||
mediaid_file: builder.open_tree("mediaid_file")?,
|
|
||||||
},
|
|
||||||
key_backups: key_backups::KeyBackups {
|
|
||||||
backupid_algorithm: builder.open_tree("backupid_algorithm")?,
|
|
||||||
backupid_etag: builder.open_tree("backupid_etag")?,
|
|
||||||
backupkeyid_backup: builder.open_tree("backupkeyid_backup")?,
|
|
||||||
},
|
|
||||||
transaction_ids: transaction_ids::TransactionIds {
|
|
||||||
userdevicetxnid_response: builder.open_tree("userdevicetxnid_response")?,
|
|
||||||
},
|
|
||||||
sending: sending::Sending {
|
|
||||||
servername_educount: builder.open_tree("servername_educount")?,
|
|
||||||
servernameevent_data: builder.open_tree("servernameevent_data")?,
|
|
||||||
servercurrentevent_data: builder.open_tree("servercurrentevent_data")?,
|
|
||||||
maximum_requests: Arc::new(Semaphore::new(config.max_concurrent_requests as usize)),
|
|
||||||
sender: sending_sender,
|
|
||||||
},
|
|
||||||
admin: admin::Admin {
|
|
||||||
sender: admin_sender,
|
|
||||||
},
|
|
||||||
appservice: appservice::Appservice {
|
|
||||||
cached_registrations: Arc::new(RwLock::new(HashMap::new())),
|
|
||||||
id_appserviceregistrations: builder.open_tree("id_appserviceregistrations")?,
|
|
||||||
},
|
|
||||||
pusher: pusher::PushData {
|
|
||||||
senderkey_pusher: builder.open_tree("senderkey_pusher")?,
|
|
||||||
},
|
|
||||||
globals: globals::Globals::load(
|
|
||||||
builder.open_tree("global")?,
|
|
||||||
builder.open_tree("server_signingkeys")?,
|
|
||||||
config.clone(),
|
|
||||||
)?,
|
|
||||||
}));
|
|
||||||
|
|
||||||
{
|
|
||||||
let db = db.read().await;
|
|
||||||
// MIGRATIONS
|
|
||||||
// TODO: database versions of new dbs should probably not be 0
|
|
||||||
if db.globals.database_version()? < 1 {
|
|
||||||
for (roomserverid, _) in db.rooms.roomserverids.iter() {
|
|
||||||
let mut parts = roomserverid.split(|&b| b == 0xff);
|
|
||||||
let room_id = parts.next().expect("split always returns one element");
|
|
||||||
let servername = match parts.next() {
|
|
||||||
Some(s) => s,
|
|
||||||
None => {
|
|
||||||
error!("Migration: Invalid roomserverid in db.");
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
};
|
|
||||||
let mut serverroomid = servername.to_vec();
|
|
||||||
serverroomid.push(0xff);
|
|
||||||
serverroomid.extend_from_slice(room_id);
|
|
||||||
|
|
||||||
db.rooms.serverroomids.insert(&serverroomid, &[])?;
|
|
||||||
}
|
|
||||||
|
|
||||||
db.globals.bump_database_version(1)?;
|
|
||||||
|
|
||||||
println!("Migration: 0 -> 1 finished");
|
|
||||||
}
|
|
||||||
|
|
||||||
if db.globals.database_version()? < 2 {
|
|
||||||
// We accidentally inserted hashed versions of "" into the db instead of just ""
|
|
||||||
for (userid, password) in db.users.userid_password.iter() {
|
|
||||||
let password = utils::string_from_bytes(&password);
|
|
||||||
|
|
||||||
let empty_hashed_password = password.map_or(false, |password| {
|
|
||||||
argon2::verify_encoded(&password, b"").unwrap_or(false)
|
|
||||||
});
|
|
||||||
|
|
||||||
if empty_hashed_password {
|
|
||||||
db.users.userid_password.insert(&userid, b"")?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
db.globals.bump_database_version(2)?;
|
|
||||||
|
|
||||||
println!("Migration: 1 -> 2 finished");
|
|
||||||
}
|
|
||||||
|
|
||||||
if db.globals.database_version()? < 3 {
|
|
||||||
// Move media to filesystem
|
|
||||||
for (key, content) in db.media.mediaid_file.iter() {
|
|
||||||
if content.is_empty() {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
|
|
||||||
let path = db.globals.get_media_file(&key);
|
|
||||||
let mut file = fs::File::create(path)?;
|
|
||||||
file.write_all(&content)?;
|
|
||||||
db.media.mediaid_file.insert(&key, &[])?;
|
|
||||||
}
|
|
||||||
|
|
||||||
db.globals.bump_database_version(3)?;
|
|
||||||
|
|
||||||
println!("Migration: 2 -> 3 finished");
|
|
||||||
}
|
|
||||||
|
|
||||||
if db.globals.database_version()? < 4 {
|
|
||||||
// Add federated users to db as deactivated
|
|
||||||
for our_user in db.users.iter() {
|
|
||||||
let our_user = our_user?;
|
|
||||||
if db.users.is_deactivated(&our_user)? {
|
|
||||||
continue;
|
|
||||||
}
|
|
||||||
for room in db.rooms.rooms_joined(&our_user) {
|
|
||||||
for user in db.rooms.room_members(&room?) {
|
|
||||||
let user = user?;
|
|
||||||
if user.server_name() != db.globals.server_name() {
|
|
||||||
println!("Migration: Creating user {}", user);
|
|
||||||
db.users.create(&user, None)?;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
db.globals.bump_database_version(4)?;
|
|
||||||
|
|
||||||
println!("Migration: 3 -> 4 finished");
|
|
||||||
}
|
|
||||||
|
|
||||||
if db.globals.database_version()? < 5 {
|
|
||||||
// Upgrade user data store
|
|
||||||
for (roomuserdataid, _) in db.account_data.roomuserdataid_accountdata.iter() {
|
|
||||||
let mut parts = roomuserdataid.split(|&b| b == 0xff);
|
|
||||||
let room_id = parts.next().unwrap();
|
|
||||||
let user_id = parts.next().unwrap();
|
|
||||||
let event_type = roomuserdataid.rsplit(|&b| b == 0xff).next().unwrap();
|
|
||||||
|
|
||||||
let mut key = room_id.to_vec();
|
|
||||||
key.push(0xff);
|
|
||||||
key.extend_from_slice(user_id);
|
|
||||||
key.push(0xff);
|
|
||||||
key.extend_from_slice(event_type);
|
|
||||||
|
|
||||||
db.account_data
|
|
||||||
.roomusertype_roomuserdataid
|
|
||||||
.insert(&key, &roomuserdataid)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
db.globals.bump_database_version(5)?;
|
|
||||||
|
|
||||||
println!("Migration: 4 -> 5 finished");
|
|
||||||
}
|
|
||||||
|
|
||||||
if db.globals.database_version()? < 6 {
|
|
||||||
// Set room member count
|
|
||||||
for (roomid, _) in db.rooms.roomid_shortstatehash.iter() {
|
|
||||||
let room_id =
|
|
||||||
RoomId::try_from(utils::string_from_bytes(&roomid).unwrap()).unwrap();
|
|
||||||
|
|
||||||
db.rooms.update_joined_count(&room_id, &db)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
db.globals.bump_database_version(6)?;
|
|
||||||
|
|
||||||
println!("Migration: 5 -> 6 finished");
|
|
||||||
}
|
|
||||||
|
|
||||||
if db.globals.database_version()? < 7 {
|
|
||||||
// Upgrade state store
|
|
||||||
let mut last_roomstates: HashMap<RoomId, u64> = HashMap::new();
|
|
||||||
let mut current_sstatehash: Option<u64> = None;
|
|
||||||
let mut current_room = None;
|
|
||||||
let mut current_state = HashSet::new();
|
|
||||||
let mut counter = 0;
|
|
||||||
|
|
||||||
let mut handle_state =
|
|
||||||
|current_sstatehash: u64,
|
|
||||||
current_room: &RoomId,
|
|
||||||
current_state: HashSet<_>,
|
|
||||||
last_roomstates: &mut HashMap<_, _>| {
|
|
||||||
counter += 1;
|
|
||||||
println!("counter: {}", counter);
|
|
||||||
let last_roomsstatehash = last_roomstates.get(current_room);
|
|
||||||
|
|
||||||
let states_parents = last_roomsstatehash.map_or_else(
|
|
||||||
|| Ok(Vec::new()),
|
|
||||||
|&last_roomsstatehash| {
|
|
||||||
db.rooms.load_shortstatehash_info(dbg!(last_roomsstatehash))
|
|
||||||
},
|
|
||||||
)?;
|
|
||||||
|
|
||||||
let (statediffnew, statediffremoved) =
|
|
||||||
if let Some(parent_stateinfo) = states_parents.last() {
|
|
||||||
let statediffnew = current_state
|
|
||||||
.difference(&parent_stateinfo.1)
|
|
||||||
.copied()
|
|
||||||
.collect::<HashSet<_>>();
|
|
||||||
|
|
||||||
let statediffremoved = parent_stateinfo
|
|
||||||
.1
|
|
||||||
.difference(¤t_state)
|
|
||||||
.copied()
|
|
||||||
.collect::<HashSet<_>>();
|
|
||||||
|
|
||||||
(statediffnew, statediffremoved)
|
|
||||||
} else {
|
|
||||||
(current_state, HashSet::new())
|
|
||||||
};
|
|
||||||
|
|
||||||
db.rooms.save_state_from_diff(
|
|
||||||
dbg!(current_sstatehash),
|
|
||||||
statediffnew,
|
|
||||||
statediffremoved,
|
|
||||||
2, // every state change is 2 event changes on average
|
|
||||||
states_parents,
|
|
||||||
)?;
|
|
||||||
|
|
||||||
/*
|
|
||||||
let mut tmp = db.rooms.load_shortstatehash_info(¤t_sstatehash, &db)?;
|
|
||||||
let state = tmp.pop().unwrap();
|
|
||||||
println!(
|
|
||||||
"{}\t{}{:?}: {:?} + {:?} - {:?}",
|
|
||||||
current_room,
|
|
||||||
" ".repeat(tmp.len()),
|
|
||||||
utils::u64_from_bytes(¤t_sstatehash).unwrap(),
|
|
||||||
tmp.last().map(|b| utils::u64_from_bytes(&b.0).unwrap()),
|
|
||||||
state
|
|
||||||
.2
|
|
||||||
.iter()
|
|
||||||
.map(|b| utils::u64_from_bytes(&b[size_of::<u64>()..]).unwrap())
|
|
||||||
.collect::<Vec<_>>(),
|
|
||||||
state
|
|
||||||
.3
|
|
||||||
.iter()
|
|
||||||
.map(|b| utils::u64_from_bytes(&b[size_of::<u64>()..]).unwrap())
|
|
||||||
.collect::<Vec<_>>()
|
|
||||||
);
|
|
||||||
*/
|
|
||||||
|
|
||||||
Ok::<_, Error>(())
|
|
||||||
};
|
|
||||||
|
|
||||||
for (k, seventid) in db._db.open_tree("stateid_shorteventid")?.iter() {
|
|
||||||
let sstatehash = utils::u64_from_bytes(&k[0..size_of::<u64>()])
|
|
||||||
.expect("number of bytes is correct");
|
|
||||||
let sstatekey = k[size_of::<u64>()..].to_vec();
|
|
||||||
if Some(sstatehash) != current_sstatehash {
|
|
||||||
if let Some(current_sstatehash) = current_sstatehash {
|
|
||||||
handle_state(
|
|
||||||
current_sstatehash,
|
|
||||||
current_room.as_ref().unwrap(),
|
|
||||||
current_state,
|
|
||||||
&mut last_roomstates,
|
|
||||||
)?;
|
|
||||||
last_roomstates
|
|
||||||
.insert(current_room.clone().unwrap(), current_sstatehash);
|
|
||||||
}
|
|
||||||
current_state = HashSet::new();
|
|
||||||
current_sstatehash = Some(sstatehash);
|
|
||||||
|
|
||||||
let event_id = db
|
|
||||||
.rooms
|
|
||||||
.shorteventid_eventid
|
|
||||||
.get(&seventid)
|
|
||||||
.unwrap()
|
|
||||||
.unwrap();
|
|
||||||
let event_id =
|
|
||||||
EventId::try_from(utils::string_from_bytes(&event_id).unwrap())
|
|
||||||
.unwrap();
|
|
||||||
let pdu = db.rooms.get_pdu(&event_id).unwrap().unwrap();
|
|
||||||
|
|
||||||
if Some(&pdu.room_id) != current_room.as_ref() {
|
|
||||||
current_room = Some(pdu.room_id.clone());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let mut val = sstatekey;
|
|
||||||
val.extend_from_slice(&seventid);
|
|
||||||
current_state.insert(val.try_into().expect("size is correct"));
|
|
||||||
}
|
|
||||||
|
|
||||||
if let Some(current_sstatehash) = current_sstatehash {
|
|
||||||
handle_state(
|
|
||||||
current_sstatehash,
|
|
||||||
current_room.as_ref().unwrap(),
|
|
||||||
current_state,
|
|
||||||
&mut last_roomstates,
|
|
||||||
)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
db.globals.bump_database_version(7)?;
|
|
||||||
|
|
||||||
println!("Migration: 6 -> 7 finished");
|
|
||||||
}
|
|
||||||
|
|
||||||
if db.globals.database_version()? < 8 {
|
|
||||||
// Generate short room ids for all rooms
|
|
||||||
for (room_id, _) in db.rooms.roomid_shortstatehash.iter() {
|
|
||||||
let shortroomid = db.globals.next_count()?.to_be_bytes();
|
|
||||||
db.rooms.roomid_shortroomid.insert(&room_id, &shortroomid)?;
|
|
||||||
println!("Migration: 8");
|
|
||||||
}
|
|
||||||
// Update pduids db layout
|
|
||||||
let mut batch = db.rooms.pduid_pdu.iter().filter_map(|(key, v)| {
|
|
||||||
if !key.starts_with(b"!") {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
let mut parts = key.splitn(2, |&b| b == 0xff);
|
|
||||||
let room_id = parts.next().unwrap();
|
|
||||||
let count = parts.next().unwrap();
|
|
||||||
|
|
||||||
let short_room_id = db
|
|
||||||
.rooms
|
|
||||||
.roomid_shortroomid
|
|
||||||
.get(room_id)
|
|
||||||
.unwrap()
|
|
||||||
.expect("shortroomid should exist");
|
|
||||||
|
|
||||||
let mut new_key = short_room_id;
|
|
||||||
new_key.extend_from_slice(count);
|
|
||||||
|
|
||||||
Some((new_key, v))
|
|
||||||
});
|
|
||||||
|
|
||||||
db.rooms.pduid_pdu.insert_batch(&mut batch)?;
|
|
||||||
|
|
||||||
let mut batch2 = db.rooms.eventid_pduid.iter().filter_map(|(k, value)| {
|
|
||||||
if !value.starts_with(b"!") {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
let mut parts = value.splitn(2, |&b| b == 0xff);
|
|
||||||
let room_id = parts.next().unwrap();
|
|
||||||
let count = parts.next().unwrap();
|
|
||||||
|
|
||||||
let short_room_id = db
|
|
||||||
.rooms
|
|
||||||
.roomid_shortroomid
|
|
||||||
.get(room_id)
|
|
||||||
.unwrap()
|
|
||||||
.expect("shortroomid should exist");
|
|
||||||
|
|
||||||
let mut new_value = short_room_id;
|
|
||||||
new_value.extend_from_slice(count);
|
|
||||||
|
|
||||||
Some((k, new_value))
|
|
||||||
});
|
|
||||||
|
|
||||||
db.rooms.eventid_pduid.insert_batch(&mut batch2)?;
|
|
||||||
|
|
||||||
db.globals.bump_database_version(8)?;
|
|
||||||
|
|
||||||
println!("Migration: 7 -> 8 finished");
|
|
||||||
}
|
|
||||||
|
|
||||||
if db.globals.database_version()? < 9 {
|
|
||||||
// Update tokenids db layout
|
|
||||||
let mut iter = db
|
|
||||||
.rooms
|
|
||||||
.tokenids
|
|
||||||
.iter()
|
|
||||||
.filter_map(|(key, _)| {
|
|
||||||
if !key.starts_with(b"!") {
|
|
||||||
return None;
|
|
||||||
}
|
|
||||||
let mut parts = key.splitn(4, |&b| b == 0xff);
|
|
||||||
let room_id = parts.next().unwrap();
|
|
||||||
let word = parts.next().unwrap();
|
|
||||||
let _pdu_id_room = parts.next().unwrap();
|
|
||||||
let pdu_id_count = parts.next().unwrap();
|
|
||||||
|
|
||||||
let short_room_id = db
|
|
||||||
.rooms
|
|
||||||
.roomid_shortroomid
|
|
||||||
.get(room_id)
|
|
||||||
.unwrap()
|
|
||||||
.expect("shortroomid should exist");
|
|
||||||
let mut new_key = short_room_id;
|
|
||||||
new_key.extend_from_slice(word);
|
|
||||||
new_key.push(0xff);
|
|
||||||
new_key.extend_from_slice(pdu_id_count);
|
|
||||||
println!("old {:?}", key);
|
|
||||||
println!("new {:?}", new_key);
|
|
||||||
Some((new_key, Vec::new()))
|
|
||||||
})
|
|
||||||
.peekable();
|
|
||||||
|
|
||||||
while iter.peek().is_some() {
|
|
||||||
db.rooms
|
|
||||||
.tokenids
|
|
||||||
.insert_batch(&mut iter.by_ref().take(1000))?;
|
|
||||||
println!("smaller batch done");
|
|
||||||
}
|
|
||||||
|
|
||||||
println!("Deleting starts");
|
|
||||||
|
|
||||||
let batch2: Vec<_> = db
|
|
||||||
.rooms
|
|
||||||
.tokenids
|
|
||||||
.iter()
|
|
||||||
.filter_map(|(key, _)| {
|
|
||||||
if key.starts_with(b"!") {
|
|
||||||
println!("del {:?}", key);
|
|
||||||
Some(key)
|
|
||||||
} else {
|
|
||||||
None
|
|
||||||
}
|
|
||||||
})
|
|
||||||
.collect();
|
|
||||||
|
|
||||||
for key in batch2 {
|
|
||||||
println!("del");
|
|
||||||
db.rooms.tokenids.remove(&key)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
db.globals.bump_database_version(9)?;
|
|
||||||
|
|
||||||
println!("Migration: 8 -> 9 finished");
|
|
||||||
}
|
|
||||||
|
|
||||||
if db.globals.database_version()? < 10 {
|
|
||||||
// Add other direction for shortstatekeys
|
|
||||||
for (statekey, shortstatekey) in db.rooms.statekey_shortstatekey.iter() {
|
|
||||||
db.rooms
|
|
||||||
.shortstatekey_statekey
|
|
||||||
.insert(&shortstatekey, &statekey)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
// Force E2EE device list updates so we can send them over federation
|
|
||||||
for user_id in db.users.iter().filter_map(|r| r.ok()) {
|
|
||||||
db.users
|
|
||||||
.mark_device_key_update(&user_id, &db.rooms, &db.globals)?;
|
|
||||||
}
|
|
||||||
|
|
||||||
db.globals.bump_database_version(10)?;
|
|
||||||
|
|
||||||
println!("Migration: 9 -> 10 finished");
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
let guard = db.read().await;
|
|
||||||
|
|
||||||
// This data is probably outdated
|
|
||||||
guard.rooms.edus.presenceid_presence.clear()?;
|
|
||||||
|
|
||||||
guard.admin.start_handler(Arc::clone(&db), admin_receiver);
|
|
||||||
guard
|
|
||||||
.sending
|
|
||||||
.start_handler(Arc::clone(&db), sending_receiver);
|
|
||||||
|
|
||||||
drop(guard);
|
|
||||||
|
|
||||||
#[cfg(feature = "sqlite")]
|
|
||||||
{
|
|
||||||
Self::start_wal_clean_task(Arc::clone(&db), config).await;
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(db)
|
|
||||||
}
|
|
||||||
|
|
||||||
    /// Spawns a background task that waits for the server-wide shutdown
    /// signal and then fires the global `rotate` watcher, waking every
    /// long-polling sync helper so it returns instead of hanging forever.
    #[cfg(feature = "conduit_bin")]
    pub async fn start_on_shutdown_tasks(db: Arc<TokioRwLock<Self>>, shutdown: Shutdown) {
        use tracing::info;

        tokio::spawn(async move {
            // Resolves when the server begins shutting down.
            shutdown.await;

            info!(target: "shutdown-sync", "Received shutdown notification, notifying sync helpers...");

            db.read().await.globals.rotate.fire();
        });
    }
|
|
||||||
|
|
||||||
    /// Waits until *anything* relevant to this user/device changes, i.e.
    /// until a `/sync` response would be non-empty.
    ///
    /// Registers prefix watchers on every tree that can contribute to a sync
    /// response (to-device events, membership state, unread counters, PDUs,
    /// EDUs, key changes, account data, one-time keys) and resolves as soon
    /// as the first of them fires. Keys in these trees use `0xff` as the
    /// component separator, which is why the prefixes below end in `0xff`.
    pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) {
        let userid_bytes = user_id.as_bytes().to_vec();
        let mut userid_prefix = userid_bytes.clone();
        userid_prefix.push(0xff);

        let mut userdeviceid_prefix = userid_prefix.clone();
        userdeviceid_prefix.extend_from_slice(device_id.as_bytes());
        userdeviceid_prefix.push(0xff);

        let mut futures = FuturesUnordered::new();

        // Return when *any* user changed his key
        // TODO: only send for user they share a room with
        futures.push(
            self.users
                .todeviceid_events
                .watch_prefix(&userdeviceid_prefix),
        );

        // Membership transitions and per-room unread counters for this user.
        futures.push(self.rooms.userroomid_joined.watch_prefix(&userid_prefix));
        futures.push(
            self.rooms
                .userroomid_invitestate
                .watch_prefix(&userid_prefix),
        );
        futures.push(self.rooms.userroomid_leftstate.watch_prefix(&userid_prefix));
        futures.push(
            self.rooms
                .userroomid_notificationcount
                .watch_prefix(&userid_prefix),
        );
        futures.push(
            self.rooms
                .userroomid_highlightcount
                .watch_prefix(&userid_prefix),
        );

        // Events for rooms we are in
        for room_id in self.rooms.rooms_joined(user_id).filter_map(|r| r.ok()) {
            // pduid_pdu is keyed by the numeric short room id (big-endian),
            // not the textual room id.
            let short_roomid = self
                .rooms
                .get_shortroomid(&room_id)
                .ok()
                .flatten()
                .expect("room exists")
                .to_be_bytes()
                .to_vec();

            let roomid_bytes = room_id.as_bytes().to_vec();
            let mut roomid_prefix = roomid_bytes.clone();
            roomid_prefix.push(0xff);

            // PDUs
            futures.push(self.rooms.pduid_pdu.watch_prefix(&short_roomid));

            // EDUs
            futures.push(
                self.rooms
                    .edus
                    .roomid_lasttypingupdate
                    .watch_prefix(&roomid_bytes),
            );

            futures.push(
                self.rooms
                    .edus
                    .readreceiptid_readreceipt
                    .watch_prefix(&roomid_prefix),
            );

            // Key changes
            futures.push(self.users.keychangeid_userid.watch_prefix(&roomid_prefix));

            // Room account data
            let mut roomuser_prefix = roomid_prefix.clone();
            roomuser_prefix.extend_from_slice(&userid_prefix);

            futures.push(
                self.account_data
                    .roomusertype_roomuserdataid
                    .watch_prefix(&roomuser_prefix),
            );
        }

        // Global (non-room) account data lives under an empty room-id
        // component, so its prefix starts with the 0xff separator.
        let mut globaluserdata_prefix = vec![0xff];
        globaluserdata_prefix.extend_from_slice(&userid_prefix);

        futures.push(
            self.account_data
                .roomusertype_roomuserdataid
                .watch_prefix(&globaluserdata_prefix),
        );

        // More key changes (used when user is not joined to any rooms)
        futures.push(self.users.keychangeid_userid.watch_prefix(&userid_prefix));

        // One time keys
        futures.push(
            self.users
                .userid_lastonetimekeyupdate
                .watch_prefix(&userid_bytes),
        );

        // Fires on server shutdown so sync requests don't outlive the server.
        futures.push(Box::pin(self.globals.rotate.watch()));

        // Wait until one of them finds something
        futures.next().await;
    }
|
|
||||||
|
|
||||||
#[tracing::instrument(skip(self))]
|
|
||||||
pub fn flush(&self) -> Result<()> {
|
|
||||||
let start = std::time::Instant::now();
|
|
||||||
|
|
||||||
let res = self._db.flush();
|
|
||||||
|
|
||||||
debug!("flush: took {:?}", start.elapsed());
|
|
||||||
|
|
||||||
res
|
|
||||||
}
|
|
||||||
|
|
||||||
    /// Delegates write-ahead-log flushing to the underlying SQLite engine.
    #[cfg(feature = "sqlite")]
    #[tracing::instrument(skip(self))]
    pub fn flush_wal(&self) -> Result<()> {
        self._db.flush_wal()
    }
|
|
||||||
|
|
||||||
    /// Spawns a background task that periodically calls `flush_wal` to keep
    /// the SQLite write-ahead log from growing unboundedly. The task wakes on
    /// a timer (`sqlite_wal_clean_second_interval`) and, on unix, also on
    /// SIGHUP so operators can trigger a flush manually.
    #[cfg(feature = "sqlite")]
    #[tracing::instrument(skip(db, config))]
    pub async fn start_wal_clean_task(db: Arc<TokioRwLock<Self>>, config: &Config) {
        use tokio::time::interval;

        #[cfg(unix)]
        use tokio::signal::unix::{signal, SignalKind};
        use tracing::info;

        use std::time::{Duration, Instant};

        let timer_interval = Duration::from_secs(config.sqlite_wal_clean_second_interval as u64);

        tokio::spawn(async move {
            let mut i = interval(timer_interval);
            #[cfg(unix)]
            let mut s = signal(SignalKind::hangup()).unwrap();

            loop {
                // Wake on whichever comes first: the timer tick or SIGHUP.
                #[cfg(unix)]
                tokio::select! {
                    _ = i.tick() => {
                        info!("wal-trunc: Timer ticked");
                    }
                    _ = s.recv() => {
                        info!("wal-trunc: Received SIGHUP");
                    }
                };
                // Non-unix targets have no SIGHUP; timer only.
                #[cfg(not(unix))]
                {
                    i.tick().await;
                    info!("wal-trunc: Timer ticked")
                }

                let start = Instant::now();
                if let Err(e) = db.read().await.flush_wal() {
                    error!("wal-trunc: Errored: {}", e);
                } else {
                    info!("wal-trunc: Flushed in {:?}", start.elapsed());
                }
            }
        });
    }
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Newtype around an owned read guard on the global `Database` lock,
/// acquired per request (see the `FromRequest` impl below) so route handlers
/// can deref straight through to the `Database`.
pub struct DatabaseGuard(OwnedRwLockReadGuard<Database>);
|
||||||
impl Deref for DatabaseGuard {
    type Target = OwnedRwLockReadGuard<Database>;

    /// Exposes the wrapped read guard (and, via the guard's own `Deref`,
    /// the `Database` itself) without an explicit accessor.
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
|
|
||||||
|
|
||||||
#[rocket::async_trait]
impl<'r> FromRequest<'r> for DatabaseGuard {
    type Error = ();

    /// Acquires an owned read lock on the rocket-managed database for the
    /// duration of the request. Forwards (rather than failing) when the
    /// managed state is unavailable.
    async fn from_request(req: &'r Request<'_>) -> rocket::request::Outcome<Self, ()> {
        let db = try_outcome!(req.guard::<&State<Arc<TokioRwLock<Database>>>>().await);

        Ok(DatabaseGuard(Arc::clone(db).read_owned().await)).or_forward(())
    }
}
|
|
||||||
|
|
||||||
/// Allows wrapping an already-acquired owned read guard directly.
impl From<OwnedRwLockReadGuard<Database>> for DatabaseGuard {
    fn from(val: OwnedRwLockReadGuard<Database>) -> Self {
        Self(val)
    }
}
|
|
@ -0,0 +1,197 @@
|
|||||||
|
use crate::{
|
||||||
|
database::{
|
||||||
|
abstraction::{watchers::Watchers, DatabaseEngine, Tree},
|
||||||
|
Config,
|
||||||
|
},
|
||||||
|
Result,
|
||||||
|
};
|
||||||
|
use persy::{ByteVec, OpenOptions, Persy, Transaction, TransactionConfig, ValueMode};
|
||||||
|
|
||||||
|
use std::{future::Future, pin::Pin, sync::Arc};
|
||||||
|
|
||||||
|
use tracing::warn;
|
||||||
|
|
||||||
|
/// Persy-backed storage engine: a single persy database file holding one
/// index per logical tree.
pub struct Engine {
    persy: Persy,
}
|
||||||
|
|
||||||
|
impl DatabaseEngine for Arc<Engine> {
    /// Opens (creating if necessary) the single-file persy database at
    /// `<database_path>/db.persy` with the configured cache size.
    fn open(config: &Config) -> Result<Self> {
        let mut cfg = persy::Config::new();
        // db_cache_capacity_mb is fractional megabytes; convert to bytes.
        cfg.change_cache_size((config.db_cache_capacity_mb * 1024.0 * 1024.0) as u64);

        let persy = OpenOptions::new()
            .create(true)
            .config(cfg)
            .open(&format!("{}/db.persy", config.database_path))?;
        Ok(Arc::new(Engine { persy }))
    }

    /// Returns a handle to the named tree, creating its backing persy index
    /// (one value per key, via `ValueMode::Replace`) on first use.
    fn open_tree(&self, name: &'static str) -> Result<Arc<dyn Tree>> {
        // Create if it doesn't exist
        if !self.persy.exists_index(name)? {
            let mut tx = self.persy.begin()?;
            tx.create_index::<ByteVec, ByteVec>(name, ValueMode::Replace)?;
            tx.prepare()?.commit()?;
        }

        Ok(Arc::new(PersyTree {
            persy: self.persy.clone(),
            name: name.to_owned(),
            watchers: Watchers::default(),
        }))
    }

    /// No-op: each write commits its own transaction eagerly, so there is
    /// nothing buffered to flush here.
    fn flush(&self) -> Result<()> {
        Ok(())
    }
}
|
||||||
|
|
||||||
|
/// One logical key-value tree, backed by a persy index of the same name.
pub struct PersyTree {
    persy: Persy,       // handle to the shared database file
    name: String,       // index name within the persy file
    watchers: Watchers, // prefix watchers woken on insert
}
|
||||||
|
|
||||||
|
impl PersyTree {
    /// Starts a write transaction with background sync enabled, so commits
    /// return before the data is durably fsynced (throughput over strict
    /// durability).
    fn begin(&self) -> Result<Transaction> {
        Ok(self
            .persy
            .begin_with(TransactionConfig::new().set_background_sync(true))?)
    }
}
|
||||||
|
|
||||||
|
impl Tree for PersyTree {
|
||||||
|
fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
|
||||||
|
let result = self
|
||||||
|
.persy
|
||||||
|
.get::<ByteVec, ByteVec>(&self.name, &ByteVec::from(key))?
|
||||||
|
.next()
|
||||||
|
.map(|v| (*v).to_owned());
|
||||||
|
Ok(result)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> {
|
||||||
|
self.insert_batch(&mut Some((key.to_owned(), value.to_owned())).into_iter())?;
|
||||||
|
self.watchers.wake(key);
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn insert_batch<'a>(&self, iter: &mut dyn Iterator<Item = (Vec<u8>, Vec<u8>)>) -> Result<()> {
|
||||||
|
let mut tx = self.begin()?;
|
||||||
|
for (key, value) in iter {
|
||||||
|
tx.put::<ByteVec, ByteVec>(
|
||||||
|
&self.name,
|
||||||
|
ByteVec::from(key.clone()),
|
||||||
|
ByteVec::from(value),
|
||||||
|
)?;
|
||||||
|
}
|
||||||
|
tx.prepare()?.commit()?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn increment_batch<'a>(&self, iter: &mut dyn Iterator<Item = Vec<u8>>) -> Result<()> {
|
||||||
|
let mut tx = self.begin()?;
|
||||||
|
for key in iter {
|
||||||
|
let old = tx
|
||||||
|
.get::<ByteVec, ByteVec>(&self.name, &ByteVec::from(key.clone()))?
|
||||||
|
.next()
|
||||||
|
.map(|v| (*v).to_owned());
|
||||||
|
let new = crate::utils::increment(old.as_deref()).unwrap();
|
||||||
|
tx.put::<ByteVec, ByteVec>(&self.name, ByteVec::from(key), ByteVec::from(new))?;
|
||||||
|
}
|
||||||
|
tx.prepare()?.commit()?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn remove(&self, key: &[u8]) -> Result<()> {
|
||||||
|
let mut tx = self.begin()?;
|
||||||
|
tx.remove::<ByteVec, ByteVec>(&self.name, ByteVec::from(key), None)?;
|
||||||
|
tx.prepare()?.commit()?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a> {
|
||||||
|
let iter = self.persy.range::<ByteVec, ByteVec, _>(&self.name, ..);
|
||||||
|
match iter {
|
||||||
|
Ok(iter) => Box::new(iter.filter_map(|(k, v)| {
|
||||||
|
v.into_iter()
|
||||||
|
.map(|val| ((*k).to_owned().into(), (*val).to_owned().into()))
|
||||||
|
.next()
|
||||||
|
})),
|
||||||
|
Err(e) => {
|
||||||
|
warn!("error iterating {:?}", e);
|
||||||
|
Box::new(std::iter::empty())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn iter_from<'a>(
|
||||||
|
&'a self,
|
||||||
|
from: &[u8],
|
||||||
|
backwards: bool,
|
||||||
|
) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a> {
|
||||||
|
let range = if backwards {
|
||||||
|
self.persy
|
||||||
|
.range::<ByteVec, ByteVec, _>(&self.name, ..=ByteVec::from(from))
|
||||||
|
} else {
|
||||||
|
self.persy
|
||||||
|
.range::<ByteVec, ByteVec, _>(&self.name, ByteVec::from(from)..)
|
||||||
|
};
|
||||||
|
match range {
|
||||||
|
Ok(iter) => {
|
||||||
|
let map = iter.filter_map(|(k, v)| {
|
||||||
|
v.into_iter()
|
||||||
|
.map(|val| ((*k).to_owned().into(), (*val).to_owned().into()))
|
||||||
|
.next()
|
||||||
|
});
|
||||||
|
if backwards {
|
||||||
|
Box::new(map.rev())
|
||||||
|
} else {
|
||||||
|
Box::new(map)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
warn!("error iterating with prefix {:?}", e);
|
||||||
|
Box::new(std::iter::empty())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn increment(&self, key: &[u8]) -> Result<Vec<u8>> {
|
||||||
|
self.increment_batch(&mut Some(key.to_owned()).into_iter())?;
|
||||||
|
Ok(self.get(key)?.unwrap())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn scan_prefix<'a>(
|
||||||
|
&'a self,
|
||||||
|
prefix: Vec<u8>,
|
||||||
|
) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a> {
|
||||||
|
let range_prefix = ByteVec::from(prefix.clone());
|
||||||
|
let range = self
|
||||||
|
.persy
|
||||||
|
.range::<ByteVec, ByteVec, _>(&self.name, range_prefix..);
|
||||||
|
|
||||||
|
match range {
|
||||||
|
Ok(iter) => {
|
||||||
|
let owned_prefix = prefix.clone();
|
||||||
|
Box::new(
|
||||||
|
iter.take_while(move |(k, _)| (*k).starts_with(&owned_prefix))
|
||||||
|
.filter_map(|(k, v)| {
|
||||||
|
v.into_iter()
|
||||||
|
.map(|val| ((*k).to_owned().into(), (*val).to_owned().into()))
|
||||||
|
.next()
|
||||||
|
}),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
Err(e) => {
|
||||||
|
warn!("error scanning prefix {:?}", e);
|
||||||
|
Box::new(std::iter::empty())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin<Box<dyn Future<Output = ()> + Send + 'a>> {
|
||||||
|
self.watchers.watch(prefix)
|
||||||
|
}
|
||||||
|
}
|
@ -0,0 +1,238 @@
|
|||||||
|
use super::{super::Config, watchers::Watchers, KeyValueDatabaseEngine, KvTree};
|
||||||
|
use crate::{utils, Result};
|
||||||
|
use std::{
|
||||||
|
future::Future,
|
||||||
|
pin::Pin,
|
||||||
|
sync::{Arc, RwLock},
|
||||||
|
};
|
||||||
|
|
||||||
|
/// RocksDB-backed storage engine: one column family per logical tree.
pub struct Engine {
    rocks: rocksdb::DBWithThreadMode<rocksdb::MultiThreaded>,
    max_open_files: i32,  // kept so late-created column families get the same options
    cache: rocksdb::Cache, // shared block cache; also reported in memory_usage
    old_cfs: Vec<String>, // column families present when the DB was opened
}
|
||||||
|
|
||||||
|
/// A single RocksDB column family exposed through the `KvTree` interface.
pub struct RocksDbEngineTree<'a> {
    db: Arc<Engine>,
    name: &'a str,
    watchers: Watchers,
    // Serializes read-modify-write cycles: `increment*` take the write half,
    // while plain `insert` takes the read half so writes can run in parallel
    // with each other but never interleave with an increment.
    write_lock: RwLock<()>,
}
|
||||||
|
|
||||||
|
fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> rocksdb::Options {
|
||||||
|
let mut block_based_options = rocksdb::BlockBasedOptions::default();
|
||||||
|
block_based_options.set_block_cache(rocksdb_cache);
|
||||||
|
|
||||||
|
// "Difference of spinning disk"
|
||||||
|
// https://zhangyuchi.gitbooks.io/rocksdbbook/content/RocksDB-Tuning-Guide.html
|
||||||
|
block_based_options.set_block_size(4 * 1024);
|
||||||
|
block_based_options.set_cache_index_and_filter_blocks(true);
|
||||||
|
|
||||||
|
let mut db_opts = rocksdb::Options::default();
|
||||||
|
db_opts.set_block_based_table_factory(&block_based_options);
|
||||||
|
db_opts.set_optimize_filters_for_hits(true);
|
||||||
|
db_opts.set_skip_stats_update_on_db_open(true);
|
||||||
|
db_opts.set_level_compaction_dynamic_level_bytes(true);
|
||||||
|
db_opts.set_target_file_size_base(256 * 1024 * 1024);
|
||||||
|
//db_opts.set_compaction_readahead_size(2 * 1024 * 1024);
|
||||||
|
//db_opts.set_use_direct_reads(true);
|
||||||
|
//db_opts.set_use_direct_io_for_flush_and_compaction(true);
|
||||||
|
db_opts.create_if_missing(true);
|
||||||
|
db_opts.increase_parallelism(num_cpus::get() as i32);
|
||||||
|
db_opts.set_max_open_files(max_open_files);
|
||||||
|
db_opts.set_compression_type(rocksdb::DBCompressionType::Zstd);
|
||||||
|
db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level);
|
||||||
|
db_opts.optimize_level_style_compaction(10 * 1024 * 1024);
|
||||||
|
|
||||||
|
let prefix_extractor = rocksdb::SliceTransform::create_fixed_prefix(1);
|
||||||
|
db_opts.set_prefix_extractor(prefix_extractor);
|
||||||
|
|
||||||
|
db_opts
|
||||||
|
}
|
||||||
|
|
||||||
|
impl KeyValueDatabaseEngine for Arc<Engine> {
    /// Opens the RocksDB database at `config.database_path`, re-opening every
    /// column family already on disk with freshly built options and a block
    /// cache sized from `db_cache_capacity_mb`.
    fn open(config: &Config) -> Result<Self> {
        let cache_capacity_bytes = (config.db_cache_capacity_mb * 1024.0 * 1024.0) as usize;
        let rocksdb_cache = rocksdb::Cache::new_lru_cache(cache_capacity_bytes).unwrap();

        let db_opts = db_options(config.rocksdb_max_open_files, &rocksdb_cache);

        // Listing fails on a fresh database; an empty CF list is fine then.
        let cfs = rocksdb::DBWithThreadMode::<rocksdb::MultiThreaded>::list_cf(
            &db_opts,
            &config.database_path,
        )
        .unwrap_or_default();

        let db = rocksdb::DBWithThreadMode::<rocksdb::MultiThreaded>::open_cf_descriptors(
            &db_opts,
            &config.database_path,
            cfs.iter().map(|name| {
                rocksdb::ColumnFamilyDescriptor::new(
                    name,
                    db_options(config.rocksdb_max_open_files, &rocksdb_cache),
                )
            }),
        )?;

        Ok(Arc::new(Engine {
            rocks: db,
            max_open_files: config.rocksdb_max_open_files,
            cache: rocksdb_cache,
            old_cfs: cfs,
        }))
    }

    /// Returns a handle for the named tree, creating its column family if it
    /// wasn't present when the database was opened.
    fn open_tree(&self, name: &'static str) -> Result<Arc<dyn KvTree>> {
        if !self.old_cfs.contains(&name.to_owned()) {
            // Create if it didn't exist
            let _ = self
                .rocks
                .create_cf(name, &db_options(self.max_open_files, &self.cache));
        }

        Ok(Arc::new(RocksDbEngineTree {
            name,
            db: Arc::clone(self),
            watchers: Watchers::default(),
            write_lock: RwLock::new(()),
        }))
    }

    fn flush(&self) -> Result<()> {
        // TODO?
        Ok(())
    }

    /// Human-readable summary of RocksDB memory use: mem-tables, table
    /// readers, and the shared block cache (total and pinned).
    fn memory_usage(&self) -> Result<String> {
        let stats =
            rocksdb::perf::get_memory_usage_stats(Some(&[&self.rocks]), Some(&[&self.cache]))?;
        Ok(format!(
            "Approximate memory usage of all the mem-tables: {:.3} MB\n\
            Approximate memory usage of un-flushed mem-tables: {:.3} MB\n\
            Approximate memory usage of all the table readers: {:.3} MB\n\
            Approximate memory usage by cache: {:.3} MB\n\
            Approximate memory usage by cache pinned: {:.3} MB\n\
            ",
            stats.mem_table_total as f64 / 1024.0 / 1024.0,
            stats.mem_table_unflushed as f64 / 1024.0 / 1024.0,
            stats.mem_table_readers_total as f64 / 1024.0 / 1024.0,
            stats.cache_total as f64 / 1024.0 / 1024.0,
            self.cache.get_pinned_usage() as f64 / 1024.0 / 1024.0,
        ))
    }
}
|
||||||
|
|
||||||
|
impl RocksDbEngineTree<'_> {
    /// Looks up this tree's column family handle.
    ///
    /// The unwrap is expected to hold because `open_tree` creates the column
    /// family before handing out a tree — TODO confirm no path drops a CF
    /// while a tree is alive.
    fn cf(&self) -> Arc<rocksdb::BoundColumnFamily<'_>> {
        self.db.rocks.cf_handle(self.name).unwrap()
    }
}
|
||||||
|
|
||||||
|
impl KvTree for RocksDbEngineTree<'_> {
|
||||||
|
fn get(&self, key: &[u8]) -> Result<Option<Vec<u8>>> {
|
||||||
|
Ok(self.db.rocks.get_cf(&self.cf(), key)?)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> {
|
||||||
|
let lock = self.write_lock.read().unwrap();
|
||||||
|
self.db.rocks.put_cf(&self.cf(), key, value)?;
|
||||||
|
drop(lock);
|
||||||
|
|
||||||
|
self.watchers.wake(key);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn insert_batch<'a>(&self, iter: &mut dyn Iterator<Item = (Vec<u8>, Vec<u8>)>) -> Result<()> {
|
||||||
|
for (key, value) in iter {
|
||||||
|
self.db.rocks.put_cf(&self.cf(), key, value)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn remove(&self, key: &[u8]) -> Result<()> {
|
||||||
|
Ok(self.db.rocks.delete_cf(&self.cf(), key)?)
|
||||||
|
}
|
||||||
|
|
||||||
|
    /// Full scan of the column family in ascending key order.
    fn iter<'a>(&'a self) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a> {
        Box::new(
            self.db
                .rocks
                .iterator_cf(&self.cf(), rocksdb::IteratorMode::Start)
                //.map(|r| r.unwrap())
                .map(|(k, v)| (Vec::from(k), Vec::from(v))),
        )
    }

    /// Scan starting at `from`: ascending, or descending when `backwards`.
    fn iter_from<'a>(
        &'a self,
        from: &[u8],
        backwards: bool,
    ) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a> {
        Box::new(
            self.db
                .rocks
                .iterator_cf(
                    &self.cf(),
                    rocksdb::IteratorMode::From(
                        from,
                        if backwards {
                            rocksdb::Direction::Reverse
                        } else {
                            rocksdb::Direction::Forward
                        },
                    ),
                )
                //.map(|r| r.unwrap())
                .map(|(k, v)| (Vec::from(k), Vec::from(v))),
        )
    }
|
||||||
|
|
||||||
|
fn increment(&self, key: &[u8]) -> Result<Vec<u8>> {
|
||||||
|
let lock = self.write_lock.write().unwrap();
|
||||||
|
|
||||||
|
let old = self.db.rocks.get_cf(&self.cf(), key)?;
|
||||||
|
let new = utils::increment(old.as_deref()).unwrap();
|
||||||
|
self.db.rocks.put_cf(&self.cf(), key, &new)?;
|
||||||
|
|
||||||
|
drop(lock);
|
||||||
|
Ok(new)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn increment_batch<'a>(&self, iter: &mut dyn Iterator<Item = Vec<u8>>) -> Result<()> {
|
||||||
|
let lock = self.write_lock.write().unwrap();
|
||||||
|
|
||||||
|
for key in iter {
|
||||||
|
let old = self.db.rocks.get_cf(&self.cf(), &key)?;
|
||||||
|
let new = utils::increment(old.as_deref()).unwrap();
|
||||||
|
self.db.rocks.put_cf(&self.cf(), key, new)?;
|
||||||
|
}
|
||||||
|
|
||||||
|
drop(lock);
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn scan_prefix<'a>(
|
||||||
|
&'a self,
|
||||||
|
prefix: Vec<u8>,
|
||||||
|
) -> Box<dyn Iterator<Item = (Vec<u8>, Vec<u8>)> + 'a> {
|
||||||
|
Box::new(
|
||||||
|
self.db
|
||||||
|
.rocks
|
||||||
|
.iterator_cf(
|
||||||
|
&self.cf(),
|
||||||
|
rocksdb::IteratorMode::From(&prefix, rocksdb::Direction::Forward),
|
||||||
|
)
|
||||||
|
//.map(|r| r.unwrap())
|
||||||
|
.map(|(k, v)| (Vec::from(k), Vec::from(v)))
|
||||||
|
.take_while(move |(k, _)| k.starts_with(&prefix)),
|
||||||
|
)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin<Box<dyn Future<Output = ()> + Send + 'a>> {
|
||||||
|
self.watchers.watch(prefix)
|
||||||
|
}
|
||||||
|
}
|
@ -0,0 +1,54 @@
|
|||||||
|
use std::{
|
||||||
|
collections::{hash_map, HashMap},
|
||||||
|
future::Future,
|
||||||
|
pin::Pin,
|
||||||
|
sync::RwLock,
|
||||||
|
};
|
||||||
|
use tokio::sync::watch;
|
||||||
|
|
||||||
|
/// Registry of pending prefix watches.
///
/// Maps a key prefix to one `watch` channel pair; every waiter on the same
/// prefix shares (clones) the same receiver.
#[derive(Default)]
pub(super) struct Watchers {
    watchers: RwLock<HashMap<Vec<u8>, (watch::Sender<()>, watch::Receiver<()>)>>,
}
|
||||||
|
|
||||||
|
impl Watchers {
    /// Returns a future that resolves the next time `wake` is called with a
    /// key that starts with `prefix`.
    pub(super) fn watch<'a>(
        &'a self,
        prefix: &[u8],
    ) -> Pin<Box<dyn Future<Output = ()> + Send + 'a>> {
        // Reuse the channel already registered for this prefix, or create
        // and register a fresh one.
        let mut rx = match self.watchers.write().unwrap().entry(prefix.to_vec()) {
            hash_map::Entry::Occupied(o) => o.get().1.clone(),
            hash_map::Entry::Vacant(v) => {
                let (tx, rx) = tokio::sync::watch::channel(());
                v.insert((tx, rx.clone()));
                rx
            }
        };

        Box::pin(async move {
            // Tx is never destroyed before it is fired: it stays in the map
            // until `wake` removes it, and `wake` sends before dropping it.
            rx.changed().await.unwrap();
        })
    }

    /// Fires and removes every registered watcher whose prefix is a prefix
    /// of `key` (including the empty prefix).
    pub(super) fn wake(&self, key: &[u8]) {
        // First pass under the cheap read lock: collect matching prefixes.
        let watchers = self.watchers.read().unwrap();
        let mut triggered = Vec::new();

        for length in 0..=key.len() {
            if watchers.contains_key(&key[..length]) {
                triggered.push(&key[..length]);
            }
        }

        drop(watchers);

        // Only take the write lock when there is actually something to wake.
        if !triggered.is_empty() {
            let mut watchers = self.watchers.write().unwrap();
            for prefix in triggered {
                if let Some(tx) = watchers.remove(prefix) {
                    // Ignore the error case of no remaining receivers.
                    let _ = tx.0.send(());
                }
            }
        };
    }
}
|
@ -1,130 +0,0 @@
|
|||||||
use std::{
|
|
||||||
convert::{TryFrom, TryInto},
|
|
||||||
sync::Arc,
|
|
||||||
};
|
|
||||||
|
|
||||||
use crate::{pdu::PduBuilder, Database};
|
|
||||||
use rocket::futures::{channel::mpsc, stream::StreamExt};
|
|
||||||
use ruma::{
|
|
||||||
events::{room::message::RoomMessageEventContent, EventType},
|
|
||||||
UserId,
|
|
||||||
};
|
|
||||||
use serde_json::value::to_raw_value;
|
|
||||||
use tokio::sync::{MutexGuard, RwLock, RwLockReadGuard};
|
|
||||||
use tracing::warn;
|
|
||||||
|
|
||||||
/// Commands the admin-room background handler can execute.
pub enum AdminCommand {
    /// Register an appservice from its parsed YAML registration file.
    RegisterAppservice(serde_yaml::Value),
    /// List the IDs of all registered appservices.
    ListAppservices,
    /// Post a message event into the server's #admins room.
    SendMessage(RoomMessageEventContent),
}
|
|
||||||
|
|
||||||
/// Cheap, cloneable handle for queueing commands to the admin handler task.
#[derive(Clone)]
pub struct Admin {
    pub sender: mpsc::UnboundedSender<AdminCommand>,
}
|
|
||||||
|
|
||||||
impl Admin {
    /// Spawns the background task that executes `AdminCommand`s received on
    /// `receiver`, posting its output into the server's #admins room as the
    /// `@conduit:server_name` user.
    ///
    /// Logs a warning and exits early if no #admins room exists yet.
    pub fn start_handler(
        &self,
        db: Arc<RwLock<Database>>,
        mut receiver: mpsc::UnboundedReceiver<AdminCommand>,
    ) {
        tokio::spawn(async move {
            // TODO: Use futures when we have long admin commands
            //let mut futures = FuturesUnordered::new();

            let guard = db.read().await;

            // The server's own user; sender of all admin-room messages.
            let conduit_user =
                UserId::try_from(format!("@conduit:{}", guard.globals.server_name()))
                    .expect("@conduit:server_name is valid");

            let conduit_room = guard
                .rooms
                .id_from_alias(
                    &format!("#admins:{}", guard.globals.server_name())
                        .try_into()
                        .expect("#admins:server_name is a valid room alias"),
                )
                .unwrap();

            let conduit_room = match conduit_room {
                None => {
                    warn!("Conduit instance does not have an #admins room. Logging to that room will not work. Restart Conduit after creating a user to fix this.");
                    return;
                }
                Some(r) => r,
            };

            // Release the database read lock before entering the loop.
            drop(guard);

            // Appends `message` to the admin room; the caller must already
            // hold the room's state mutex (`mutex_lock`) and passes the
            // database guard by value so it is dropped after the append.
            let send_message = |message: RoomMessageEventContent,
                                guard: RwLockReadGuard<'_, Database>,
                                mutex_lock: &MutexGuard<'_, ()>| {
                guard
                    .rooms
                    .build_and_append_pdu(
                        PduBuilder {
                            event_type: EventType::RoomMessage,
                            content: to_raw_value(&message)
                                .expect("event is valid, we just created it"),
                            unsigned: None,
                            state_key: None,
                            redacts: None,
                        },
                        &conduit_user,
                        &conduit_room,
                        &guard,
                        mutex_lock,
                    )
                    .unwrap();
            };

            loop {
                tokio::select! {
                    Some(event) = receiver.next() => {
                        let guard = db.read().await;
                        // Serialize state changes in the admin room while we
                        // append events.
                        let mutex_state = Arc::clone(
                            guard.globals
                                .roomid_mutex_state
                                .write()
                                .unwrap()
                                .entry(conduit_room.clone())
                                .or_default(),
                        );
                        let state_lock = mutex_state.lock().await;

                        match event {
                            AdminCommand::RegisterAppservice(yaml) => {
                                guard.appservice.register_appservice(yaml).unwrap(); // TODO handle error
                            }
                            AdminCommand::ListAppservices => {
                                if let Ok(appservices) = guard.appservice.iter_ids().map(|ids| ids.collect::<Vec<_>>()) {
                                    let count = appservices.len();
                                    let output = format!(
                                        "Appservices ({}): {}",
                                        count,
                                        appservices.into_iter().filter_map(|r| r.ok()).collect::<Vec<_>>().join(", ")
                                    );
                                    send_message(RoomMessageEventContent::text_plain(output), guard, &state_lock);
                                } else {
                                    send_message(RoomMessageEventContent::text_plain("Failed to get appservices."), guard, &state_lock);
                                }
                            }
                            AdminCommand::SendMessage(message) => {
                                send_message(message, guard, &state_lock);
                            }
                        }

                        drop(state_lock);
                    }
                }
            }
        });
    }

    /// Queues a command for the handler task.
    ///
    /// Panics if the receiving task has shut down.
    pub fn send(&self, command: AdminCommand) {
        self.sender.unbounded_send(command).unwrap();
    }
}
|
|
@ -1,322 +0,0 @@
|
|||||||
use crate::{database::Config, server_server::FedDest, utils, ConduitResult, Error, Result};
|
|
||||||
use ruma::{
|
|
||||||
api::{
|
|
||||||
client::r0::sync::sync_events,
|
|
||||||
federation::discovery::{ServerSigningKeys, VerifyKey},
|
|
||||||
},
|
|
||||||
DeviceId, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, ServerSigningKeyId, UserId,
|
|
||||||
};
|
|
||||||
use std::{
|
|
||||||
collections::{BTreeMap, HashMap},
|
|
||||||
fs,
|
|
||||||
future::Future,
|
|
||||||
net::IpAddr,
|
|
||||||
path::PathBuf,
|
|
||||||
sync::{Arc, Mutex, RwLock},
|
|
||||||
time::{Duration, Instant},
|
|
||||||
};
|
|
||||||
use tokio::sync::{broadcast, watch::Receiver, Mutex as TokioMutex, Semaphore};
|
|
||||||
use tracing::error;
|
|
||||||
use trust_dns_resolver::TokioAsyncResolver;
|
|
||||||
|
|
||||||
use super::abstraction::Tree;
|
|
||||||
|
|
||||||
/// Key of the global event counter in the `globals` tree.
pub const COUNTER: &[u8] = b"c";

/// Cache of resolved federation destinations per server name
/// (actual destination, Host header value).
type WellKnownMap = HashMap<Box<ServerName>, (FedDest, String)>;
/// Override addresses and port to use for a given TLS name.
type TlsNameMap = HashMap<String, (Vec<IpAddr>, u16)>;
type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries
/// Per-device handle onto an in-flight /sync response.
type SyncHandle = (
    Option<String>,                                         // since
    Receiver<Option<ConduitResult<sync_events::Response>>>, // rx
);
|
|
||||||
|
|
||||||
/// Server-wide shared state: configuration, signing keys, DNS resolver,
/// rate limiters, per-room mutexes and the long-poll rotation handler.
pub struct Globals {
    pub actual_destination_cache: Arc<RwLock<WellKnownMap>>, // actual_destination, host
    pub tls_name_override: Arc<RwLock<TlsNameMap>>,
    // Key-value tree holding misc global values (counter, keypair, version).
    pub(super) globals: Arc<dyn Tree>,
    config: Config,
    // This server's Ed25519 signing keypair (loaded/generated in `load`).
    keypair: Arc<ruma::signatures::Ed25519KeyPair>,
    dns_resolver: TokioAsyncResolver,
    // Present only when a JWT secret is configured.
    jwt_decoding_key: Option<jsonwebtoken::DecodingKey<'static>>,
    pub(super) server_signingkeys: Arc<dyn Tree>,
    pub bad_event_ratelimiter: Arc<RwLock<HashMap<EventId, RateLimitState>>>,
    pub bad_signature_ratelimiter: Arc<RwLock<HashMap<Vec<String>, RateLimitState>>>,
    pub servername_ratelimiter: Arc<RwLock<HashMap<Box<ServerName>, Arc<Semaphore>>>>,
    pub sync_receivers: RwLock<HashMap<(UserId, Box<DeviceId>), SyncHandle>>,
    pub roomid_mutex_insert: RwLock<HashMap<RoomId, Arc<Mutex<()>>>>,
    pub roomid_mutex_state: RwLock<HashMap<RoomId, Arc<TokioMutex<()>>>>,
    pub roomid_mutex_federation: RwLock<HashMap<RoomId, Arc<TokioMutex<()>>>>, // this lock will be held longer
    pub rotate: RotationHandler,
}
|
|
||||||
|
|
||||||
/// Handles "rotation" of long-polling requests. "Rotation" in this context is similar to "rotation" of log files and the like.
///
/// This is utilized to have sync workers return early and release read locks on the database.
///
/// Holds the broadcast sender plus one receiver that only keeps the channel
/// alive; waiters get their own receiver via `subscribe` in `watch`.
pub struct RotationHandler(broadcast::Sender<()>, broadcast::Receiver<()>);
|
|
||||||
|
|
||||||
impl RotationHandler {
|
|
||||||
pub fn new() -> Self {
|
|
||||||
let (s, r) = broadcast::channel(1);
|
|
||||||
Self(s, r)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn watch(&self) -> impl Future<Output = ()> {
|
|
||||||
let mut r = self.0.subscribe();
|
|
||||||
|
|
||||||
async move {
|
|
||||||
let _ = r.recv().await;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn fire(&self) {
|
|
||||||
let _ = self.0.send(());
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
impl Default for RotationHandler {
    /// Equivalent to `RotationHandler::new()`.
    fn default() -> Self {
        Self::new()
    }
}
|
|
||||||
|
|
||||||
impl Globals {
|
|
||||||
pub fn load(
|
|
||||||
globals: Arc<dyn Tree>,
|
|
||||||
server_signingkeys: Arc<dyn Tree>,
|
|
||||||
config: Config,
|
|
||||||
) -> Result<Self> {
|
|
||||||
let keypair_bytes = globals.get(b"keypair")?.map_or_else(
|
|
||||||
|| {
|
|
||||||
let keypair = utils::generate_keypair();
|
|
||||||
globals.insert(b"keypair", &keypair)?;
|
|
||||||
Ok::<_, Error>(keypair)
|
|
||||||
},
|
|
||||||
|s| Ok(s.to_vec()),
|
|
||||||
)?;
|
|
||||||
|
|
||||||
let mut parts = keypair_bytes.splitn(2, |&b| b == 0xff);
|
|
||||||
|
|
||||||
let keypair = utils::string_from_bytes(
|
|
||||||
// 1. version
|
|
||||||
parts
|
|
||||||
.next()
|
|
||||||
.expect("splitn always returns at least one element"),
|
|
||||||
)
|
|
||||||
.map_err(|_| Error::bad_database("Invalid version bytes in keypair."))
|
|
||||||
.and_then(|version| {
|
|
||||||
// 2. key
|
|
||||||
parts
|
|
||||||
.next()
|
|
||||||
.ok_or_else(|| Error::bad_database("Invalid keypair format in database."))
|
|
||||||
.map(|key| (version, key))
|
|
||||||
})
|
|
||||||
.and_then(|(version, key)| {
|
|
||||||
ruma::signatures::Ed25519KeyPair::from_der(key, version)
|
|
||||||
.map_err(|_| Error::bad_database("Private or public keys are invalid."))
|
|
||||||
});
|
|
||||||
|
|
||||||
let keypair = match keypair {
|
|
||||||
Ok(k) => k,
|
|
||||||
Err(e) => {
|
|
||||||
error!("Keypair invalid. Deleting...");
|
|
||||||
globals.remove(b"keypair")?;
|
|
||||||
return Err(e);
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
||||||
let tls_name_override = Arc::new(RwLock::new(TlsNameMap::new()));
|
|
||||||
|
|
||||||
let jwt_decoding_key = config
|
|
||||||
.jwt_secret
|
|
||||||
.as_ref()
|
|
||||||
.map(|secret| jsonwebtoken::DecodingKey::from_secret(secret.as_bytes()).into_static());
|
|
||||||
|
|
||||||
let s = Self {
|
|
||||||
globals,
|
|
||||||
config,
|
|
||||||
keypair: Arc::new(keypair),
|
|
||||||
dns_resolver: TokioAsyncResolver::tokio_from_system_conf().map_err(|_| {
|
|
||||||
Error::bad_config("Failed to set up trust dns resolver with system config.")
|
|
||||||
})?,
|
|
||||||
actual_destination_cache: Arc::new(RwLock::new(WellKnownMap::new())),
|
|
||||||
tls_name_override,
|
|
||||||
server_signingkeys,
|
|
||||||
jwt_decoding_key,
|
|
||||||
bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())),
|
|
||||||
bad_signature_ratelimiter: Arc::new(RwLock::new(HashMap::new())),
|
|
||||||
servername_ratelimiter: Arc::new(RwLock::new(HashMap::new())),
|
|
||||||
roomid_mutex_state: RwLock::new(HashMap::new()),
|
|
||||||
roomid_mutex_insert: RwLock::new(HashMap::new()),
|
|
||||||
roomid_mutex_federation: RwLock::new(HashMap::new()),
|
|
||||||
sync_receivers: RwLock::new(HashMap::new()),
|
|
||||||
rotate: RotationHandler::new(),
|
|
||||||
};
|
|
||||||
|
|
||||||
fs::create_dir_all(s.get_media_folder())?;
|
|
||||||
|
|
||||||
Ok(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns this server's keypair.
|
|
||||||
pub fn keypair(&self) -> &ruma::signatures::Ed25519KeyPair {
|
|
||||||
&self.keypair
|
|
||||||
}
|
|
||||||
|
|
||||||
/// Returns a reqwest client which can be used to send requests.
|
|
||||||
pub fn reqwest_client(&self) -> Result<reqwest::ClientBuilder> {
|
|
||||||
let mut reqwest_client_builder = reqwest::Client::builder()
|
|
||||||
.connect_timeout(Duration::from_secs(30))
|
|
||||||
.timeout(Duration::from_secs(60 * 3))
|
|
||||||
.pool_max_idle_per_host(1);
|
|
||||||
if let Some(proxy) = self.config.proxy.to_proxy()? {
|
|
||||||
reqwest_client_builder = reqwest_client_builder.proxy(proxy);
|
|
||||||
}
|
|
||||||
|
|
||||||
Ok(reqwest_client_builder)
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tracing::instrument(skip(self))]
|
|
||||||
pub fn next_count(&self) -> Result<u64> {
|
|
||||||
utils::u64_from_bytes(&self.globals.increment(COUNTER)?)
|
|
||||||
.map_err(|_| Error::bad_database("Count has invalid bytes."))
|
|
||||||
}
|
|
||||||
|
|
||||||
#[tracing::instrument(skip(self))]
|
|
||||||
pub fn current_count(&self) -> Result<u64> {
|
|
||||||
self.globals.get(COUNTER)?.map_or(Ok(0_u64), |bytes| {
|
|
||||||
utils::u64_from_bytes(&bytes)
|
|
||||||
.map_err(|_| Error::bad_database("Count has invalid bytes."))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn server_name(&self) -> &ServerName {
|
|
||||||
self.config.server_name.as_ref()
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn max_request_size(&self) -> u32 {
|
|
||||||
self.config.max_request_size
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn allow_registration(&self) -> bool {
|
|
||||||
self.config.allow_registration
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn allow_encryption(&self) -> bool {
|
|
||||||
self.config.allow_encryption
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn allow_federation(&self) -> bool {
|
|
||||||
self.config.allow_federation
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn allow_room_creation(&self) -> bool {
|
|
||||||
self.config.allow_room_creation
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn trusted_servers(&self) -> &[Box<ServerName>] {
|
|
||||||
&self.config.trusted_servers
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn dns_resolver(&self) -> &TokioAsyncResolver {
|
|
||||||
&self.dns_resolver
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn jwt_decoding_key(&self) -> Option<&jsonwebtoken::DecodingKey<'_>> {
|
|
||||||
self.jwt_decoding_key.as_ref()
|
|
||||||
}
|
|
||||||
|
|
||||||
/// TODO: the key valid until timestamp is only honored in room version > 4
|
|
||||||
/// Remove the outdated keys and insert the new ones.
|
|
||||||
///
|
|
||||||
/// This doesn't actually check that the keys provided are newer than the old set.
|
|
||||||
pub fn add_signing_key(
|
|
||||||
&self,
|
|
||||||
origin: &ServerName,
|
|
||||||
new_keys: ServerSigningKeys,
|
|
||||||
) -> Result<BTreeMap<ServerSigningKeyId, VerifyKey>> {
|
|
||||||
// Not atomic, but this is not critical
|
|
||||||
let signingkeys = self.server_signingkeys.get(origin.as_bytes())?;
|
|
||||||
|
|
||||||
let mut keys = signingkeys
|
|
||||||
.and_then(|keys| serde_json::from_slice(&keys).ok())
|
|
||||||
.unwrap_or_else(|| {
|
|
||||||
// Just insert "now", it doesn't matter
|
|
||||||
ServerSigningKeys::new(origin.to_owned(), MilliSecondsSinceUnixEpoch::now())
|
|
||||||
});
|
|
||||||
|
|
||||||
let ServerSigningKeys {
|
|
||||||
verify_keys,
|
|
||||||
old_verify_keys,
|
|
||||||
..
|
|
||||||
} = new_keys;
|
|
||||||
|
|
||||||
keys.verify_keys.extend(verify_keys.into_iter());
|
|
||||||
keys.old_verify_keys.extend(old_verify_keys.into_iter());
|
|
||||||
|
|
||||||
self.server_signingkeys.insert(
|
|
||||||
origin.as_bytes(),
|
|
||||||
&serde_json::to_vec(&keys).expect("serversigningkeys can be serialized"),
|
|
||||||
)?;
|
|
||||||
|
|
||||||
let mut tree = keys.verify_keys;
|
|
||||||
tree.extend(
|
|
||||||
keys.old_verify_keys
|
|
||||||
.into_iter()
|
|
||||||
.map(|old| (old.0, VerifyKey::new(old.1.key))),
|
|
||||||
);
|
|
||||||
|
|
||||||
Ok(tree)
|
|
||||||
}
|
|
||||||
|
|
||||||
/// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server.
|
|
||||||
pub fn signing_keys_for(
|
|
||||||
&self,
|
|
||||||
origin: &ServerName,
|
|
||||||
) -> Result<BTreeMap<ServerSigningKeyId, VerifyKey>> {
|
|
||||||
let signingkeys = self
|
|
||||||
.server_signingkeys
|
|
||||||
.get(origin.as_bytes())?
|
|
||||||
.and_then(|bytes| serde_json::from_slice(&bytes).ok())
|
|
||||||
.map(|keys: ServerSigningKeys| {
|
|
||||||
let mut tree = keys.verify_keys;
|
|
||||||
tree.extend(
|
|
||||||
keys.old_verify_keys
|
|
||||||
.into_iter()
|
|
||||||
.map(|old| (old.0, VerifyKey::new(old.1.key))),
|
|
||||||
);
|
|
||||||
tree
|
|
||||||
})
|
|
||||||
.unwrap_or_else(BTreeMap::new);
|
|
||||||
|
|
||||||
Ok(signingkeys)
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn database_version(&self) -> Result<u64> {
|
|
||||||
self.globals.get(b"version")?.map_or(Ok(0), |version| {
|
|
||||||
utils::u64_from_bytes(&version)
|
|
||||||
.map_err(|_| Error::bad_database("Database version id is invalid."))
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn bump_database_version(&self, new_version: u64) -> Result<()> {
|
|
||||||
self.globals
|
|
||||||
.insert(b"version", &new_version.to_be_bytes())?;
|
|
||||||
Ok(())
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn get_media_folder(&self) -> PathBuf {
|
|
||||||
let mut r = PathBuf::new();
|
|
||||||
r.push(self.config.database_path.clone());
|
|
||||||
r.push("media");
|
|
||||||
r
|
|
||||||
}
|
|
||||||
|
|
||||||
pub fn get_media_file(&self, key: &[u8]) -> PathBuf {
|
|
||||||
let mut r = PathBuf::new();
|
|
||||||
r.push(self.config.database_path.clone());
|
|
||||||
r.push("media");
|
|
||||||
r.push(base64::encode_config(key, base64::URL_SAFE_NO_PAD));
|
|
||||||
r
|
|
||||||
}
|
|
||||||
}
|
|
@ -0,0 +1,233 @@
|
|||||||
|
use std::collections::BTreeMap;
|
||||||
|
|
||||||
|
use async_trait::async_trait;
|
||||||
|
use futures_util::{stream::FuturesUnordered, StreamExt};
|
||||||
|
use ruma::{
|
||||||
|
api::federation::discovery::{ServerSigningKeys, VerifyKey},
|
||||||
|
signatures::Ed25519KeyPair,
|
||||||
|
DeviceId, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, ServerName, UserId,
|
||||||
|
};
|
||||||
|
|
||||||
|
use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};
|
||||||
|
|
||||||
|
/// Key of the global event counter in the `global` tree.
pub const COUNTER: &[u8] = b"c";

#[async_trait]
impl service::globals::Data for KeyValueDatabase {
    /// Atomically increments and returns the global event counter.
    fn next_count(&self) -> Result<u64> {
        utils::u64_from_bytes(&self.global.increment(COUNTER)?)
            .map_err(|_| Error::bad_database("Count has invalid bytes."))
    }

    /// Returns the current counter value without incrementing (0 if unset).
    fn current_count(&self) -> Result<u64> {
        self.global.get(COUNTER)?.map_or(Ok(0_u64), |bytes| {
            utils::u64_from_bytes(&bytes)
                .map_err(|_| Error::bad_database("Count has invalid bytes."))
        })
    }

    /// Waits until something relevant to this user/device changes: to-device
    /// events, membership state, notification counts, key changes, account
    /// data, PDUs/EDUs in joined rooms, one-time keys, or a sync rotation.
    async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> {
        let userid_bytes = user_id.as_bytes().to_vec();
        let mut userid_prefix = userid_bytes.clone();
        userid_prefix.push(0xff);

        let mut userdeviceid_prefix = userid_prefix.clone();
        userdeviceid_prefix.extend_from_slice(device_id.as_bytes());
        userdeviceid_prefix.push(0xff);

        let mut futures = FuturesUnordered::new();

        // Return when *any* user changed his key
        // TODO: only send for user they share a room with
        futures.push(self.todeviceid_events.watch_prefix(&userdeviceid_prefix));

        futures.push(self.userroomid_joined.watch_prefix(&userid_prefix));
        futures.push(self.userroomid_invitestate.watch_prefix(&userid_prefix));
        futures.push(self.userroomid_leftstate.watch_prefix(&userid_prefix));
        futures.push(
            self.userroomid_notificationcount
                .watch_prefix(&userid_prefix),
        );
        futures.push(self.userroomid_highlightcount.watch_prefix(&userid_prefix));

        // Events for rooms we are in
        for room_id in services()
            .rooms
            .state_cache
            .rooms_joined(user_id)
            .filter_map(|r| r.ok())
        {
            let short_roomid = services()
                .rooms
                .short
                .get_shortroomid(&room_id)
                .ok()
                .flatten()
                .expect("room exists")
                .to_be_bytes()
                .to_vec();

            let roomid_bytes = room_id.as_bytes().to_vec();
            let mut roomid_prefix = roomid_bytes.clone();
            roomid_prefix.push(0xff);

            // PDUs
            futures.push(self.pduid_pdu.watch_prefix(&short_roomid));

            // EDUs
            futures.push(self.roomid_lasttypingupdate.watch_prefix(&roomid_bytes));

            futures.push(self.readreceiptid_readreceipt.watch_prefix(&roomid_prefix));

            // Key changes
            futures.push(self.keychangeid_userid.watch_prefix(&roomid_prefix));

            // Room account data
            let mut roomuser_prefix = roomid_prefix.clone();
            roomuser_prefix.extend_from_slice(&userid_prefix);

            futures.push(
                self.roomusertype_roomuserdataid
                    .watch_prefix(&roomuser_prefix),
            );
        }

        // Global (non-room) account data is stored under a leading 0xff.
        let mut globaluserdata_prefix = vec![0xff];
        globaluserdata_prefix.extend_from_slice(&userid_prefix);

        futures.push(
            self.roomusertype_roomuserdataid
                .watch_prefix(&globaluserdata_prefix),
        );

        // More key changes (used when user is not joined to any rooms)
        futures.push(self.keychangeid_userid.watch_prefix(&userid_prefix));

        // One time keys
        futures.push(self.userid_lastonetimekeyupdate.watch_prefix(&userid_bytes));

        futures.push(Box::pin(services().globals.rotate.watch()));

        // Wait until one of them finds something
        futures.next().await;

        Ok(())
    }

    /// Delegates cleanup to the underlying database engine.
    fn cleanup(&self) -> Result<()> {
        self._db.cleanup()
    }

    /// Reports the engine's approximate memory usage as human-readable text.
    fn memory_usage(&self) -> Result<String> {
        self._db.memory_usage()
    }

    /// Loads the server keypair, generating and persisting one on first run.
    ///
    /// Stored format: `<version>\xff<DER-encoded key>`.
    fn load_keypair(&self) -> Result<Ed25519KeyPair> {
        let keypair_bytes = self.global.get(b"keypair")?.map_or_else(
            || {
                let keypair = utils::generate_keypair();
                self.global.insert(b"keypair", &keypair)?;
                Ok::<_, Error>(keypair)
            },
            |s| Ok(s.to_vec()),
        )?;

        let mut parts = keypair_bytes.splitn(2, |&b| b == 0xff);

        utils::string_from_bytes(
            // 1. version
            parts
                .next()
                .expect("splitn always returns at least one element"),
        )
        .map_err(|_| Error::bad_database("Invalid version bytes in keypair."))
        .and_then(|version| {
            // 2. key
            parts
                .next()
                .ok_or_else(|| Error::bad_database("Invalid keypair format in database."))
                .map(|key| (version, key))
        })
        .and_then(|(version, key)| {
            Ed25519KeyPair::from_der(key, version)
                .map_err(|_| Error::bad_database("Private or public keys are invalid."))
        })
    }
    /// Deletes the stored server keypair.
    fn remove_keypair(&self) -> Result<()> {
        self.global.remove(b"keypair")
    }

    /// Merges `new_keys` into the stored signing keys for `origin` and
    /// returns the combined map (old keys folded in as plain verify keys).
    ///
    /// This doesn't actually check that the provided keys are newer than the
    /// old set.
    fn add_signing_key(
        &self,
        origin: &ServerName,
        new_keys: ServerSigningKeys,
    ) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> {
        // Not atomic, but this is not critical
        let signingkeys = self.server_signingkeys.get(origin.as_bytes())?;

        let mut keys = signingkeys
            .and_then(|keys| serde_json::from_slice(&keys).ok())
            .unwrap_or_else(|| {
                // Just insert "now", it doesn't matter
                ServerSigningKeys::new(origin.to_owned(), MilliSecondsSinceUnixEpoch::now())
            });

        let ServerSigningKeys {
            verify_keys,
            old_verify_keys,
            ..
        } = new_keys;

        keys.verify_keys.extend(verify_keys.into_iter());
        keys.old_verify_keys.extend(old_verify_keys.into_iter());

        self.server_signingkeys.insert(
            origin.as_bytes(),
            &serde_json::to_vec(&keys).expect("serversigningkeys can be serialized"),
        )?;

        let mut tree = keys.verify_keys;
        tree.extend(
            keys.old_verify_keys
                .into_iter()
                .map(|old| (old.0, VerifyKey::new(old.1.key))),
        );

        Ok(tree)
    }

    /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server.
    fn signing_keys_for(
        &self,
        origin: &ServerName,
    ) -> Result<BTreeMap<OwnedServerSigningKeyId, VerifyKey>> {
        let signingkeys = self
            .server_signingkeys
            .get(origin.as_bytes())?
            .and_then(|bytes| serde_json::from_slice(&bytes).ok())
            .map(|keys: ServerSigningKeys| {
                // Old keys are still returned, downgraded to plain verify keys.
                let mut tree = keys.verify_keys;
                tree.extend(
                    keys.old_verify_keys
                        .into_iter()
                        .map(|old| (old.0, VerifyKey::new(old.1.key))),
                );
                tree
            })
            .unwrap_or_else(BTreeMap::new);

        Ok(signingkeys)
    }

    /// Returns the stored schema version (0 for a fresh database).
    fn database_version(&self) -> Result<u64> {
        self.global.get(b"version")?.map_or(Ok(0), |version| {
            utils::u64_from_bytes(&version)
                .map_err(|_| Error::bad_database("Database version id is invalid."))
        })
    }

    /// Persists `new_version` as the current schema version.
    fn bump_database_version(&self, new_version: u64) -> Result<()> {
        self.global.insert(b"version", &new_version.to_be_bytes())?;
        Ok(())
    }
}
|
@ -0,0 +1,82 @@
|
|||||||
|
use ruma::api::client::error::ErrorKind;
|
||||||
|
|
||||||
|
use crate::{database::KeyValueDatabase, service, utils, Error, Result};
|
||||||
|
|
||||||
|
impl service::media::Data for KeyValueDatabase {
|
||||||
|
fn create_file_metadata(
|
||||||
|
&self,
|
||||||
|
mxc: String,
|
||||||
|
width: u32,
|
||||||
|
height: u32,
|
||||||
|
content_disposition: Option<&str>,
|
||||||
|
content_type: Option<&str>,
|
||||||
|
) -> Result<Vec<u8>> {
|
||||||
|
let mut key = mxc.as_bytes().to_vec();
|
||||||
|
key.push(0xff);
|
||||||
|
key.extend_from_slice(&width.to_be_bytes());
|
||||||
|
key.extend_from_slice(&height.to_be_bytes());
|
||||||
|
key.push(0xff);
|
||||||
|
key.extend_from_slice(
|
||||||
|
content_disposition
|
||||||
|
.as_ref()
|
||||||
|
.map(|f| f.as_bytes())
|
||||||
|
.unwrap_or_default(),
|
||||||
|
);
|
||||||
|
key.push(0xff);
|
||||||
|
key.extend_from_slice(
|
||||||
|
content_type
|
||||||
|
.as_ref()
|
||||||
|
.map(|c| c.as_bytes())
|
||||||
|
.unwrap_or_default(),
|
||||||
|
);
|
||||||
|
|
||||||
|
self.mediaid_file.insert(&key, &[])?;
|
||||||
|
|
||||||
|
Ok(key)
|
||||||
|
}
|
||||||
|
|
||||||
|
fn search_file_metadata(
|
||||||
|
&self,
|
||||||
|
mxc: String,
|
||||||
|
width: u32,
|
||||||
|
height: u32,
|
||||||
|
) -> Result<(Option<String>, Option<String>, Vec<u8>)> {
|
||||||
|
let mut prefix = mxc.as_bytes().to_vec();
|
||||||
|
prefix.push(0xff);
|
||||||
|
prefix.extend_from_slice(&width.to_be_bytes());
|
||||||
|
prefix.extend_from_slice(&height.to_be_bytes());
|
||||||
|
prefix.push(0xff);
|
||||||
|
|
||||||
|
let (key, _) = self
|
||||||
|
.mediaid_file
|
||||||
|
.scan_prefix(prefix)
|
||||||
|
.next()
|
||||||
|
.ok_or(Error::BadRequest(ErrorKind::NotFound, "Media not found"))?;
|
||||||
|
|
||||||
|
let mut parts = key.rsplit(|&b| b == 0xff);
|
||||||
|
|
||||||
|
let content_type = parts
|
||||||
|
.next()
|
||||||
|
.map(|bytes| {
|
||||||
|
utils::string_from_bytes(bytes).map_err(|_| {
|
||||||
|
Error::bad_database("Content type in mediaid_file is invalid unicode.")
|
||||||
|
})
|
||||||
|
})
|
||||||
|
.transpose()?;
|
||||||
|
|
||||||
|
let content_disposition_bytes = parts
|
||||||
|
.next()
|
||||||
|
.ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?;
|
||||||
|
|
||||||
|
let content_disposition = if content_disposition_bytes.is_empty() {
|
||||||
|
None
|
||||||
|
} else {
|
||||||
|
Some(
|
||||||
|
utils::string_from_bytes(content_disposition_bytes).map_err(|_| {
|
||||||
|
Error::bad_database("Content Disposition in mediaid_file is invalid unicode.")
|
||||||
|
})?,
|
||||||
|
)
|
||||||
|
};
|
||||||
|
Ok((content_disposition, content_type, key))
|
||||||
|
}
|
||||||
|
}
|
@ -0,0 +1,13 @@
|
|||||||
|
//! Key-value store implementations of the service-layer `Data` traits,
//! one submodule per service area.

mod account_data;
//mod admin;
mod appservice;
mod globals;
mod key_backups;
mod media;
//mod pdu;
mod pusher;
mod rooms;
mod sending;
mod transaction_ids;
mod uiaa;
mod users;
|
@ -0,0 +1,81 @@
|
|||||||
|
use ruma::{
|
||||||
|
api::client::push::{get_pushers, set_pusher},
|
||||||
|
UserId,
|
||||||
|
};
|
||||||
|
|
||||||
|
use crate::{database::KeyValueDatabase, service, utils, Error, Result};
|
||||||
|
|
||||||
|
impl service::pusher::Data for KeyValueDatabase {
|
||||||
|
fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> {
|
||||||
|
let mut key = sender.as_bytes().to_vec();
|
||||||
|
key.push(0xff);
|
||||||
|
key.extend_from_slice(pusher.pushkey.as_bytes());
|
||||||
|
|
||||||
|
// There are 2 kinds of pushers but the spec says: null deletes the pusher.
|
||||||
|
if pusher.kind.is_none() {
|
||||||
|
return self
|
||||||
|
.senderkey_pusher
|
||||||
|
.remove(&key)
|
||||||
|
.map(|_| ())
|
||||||
|
.map_err(Into::into);
|
||||||
|
}
|
||||||
|
|
||||||
|
self.senderkey_pusher.insert(
|
||||||
|
&key,
|
||||||
|
&serde_json::to_vec(&pusher).expect("Pusher is valid JSON value"),
|
||||||
|
)?;
|
||||||
|
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_pusher(
|
||||||
|
&self,
|
||||||
|
sender: &UserId,
|
||||||
|
pushkey: &str,
|
||||||
|
) -> Result<Option<get_pushers::v3::Pusher>> {
|
||||||
|
let mut senderkey = sender.as_bytes().to_vec();
|
||||||
|
senderkey.push(0xff);
|
||||||
|
senderkey.extend_from_slice(pushkey.as_bytes());
|
||||||
|
|
||||||
|
self.senderkey_pusher
|
||||||
|
.get(&senderkey)?
|
||||||
|
.map(|push| {
|
||||||
|
serde_json::from_slice(&push)
|
||||||
|
.map_err(|_| Error::bad_database("Invalid Pusher in db."))
|
||||||
|
})
|
||||||
|
.transpose()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_pushers(&self, sender: &UserId) -> Result<Vec<get_pushers::v3::Pusher>> {
|
||||||
|
let mut prefix = sender.as_bytes().to_vec();
|
||||||
|
prefix.push(0xff);
|
||||||
|
|
||||||
|
self.senderkey_pusher
|
||||||
|
.scan_prefix(prefix)
|
||||||
|
.map(|(_, push)| {
|
||||||
|
serde_json::from_slice(&push)
|
||||||
|
.map_err(|_| Error::bad_database("Invalid Pusher in db."))
|
||||||
|
})
|
||||||
|
.collect()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn get_pushkeys<'a>(
|
||||||
|
&'a self,
|
||||||
|
sender: &UserId,
|
||||||
|
) -> Box<dyn Iterator<Item = Result<String>> + 'a> {
|
||||||
|
let mut prefix = sender.as_bytes().to_vec();
|
||||||
|
prefix.push(0xff);
|
||||||
|
|
||||||
|
Box::new(self.senderkey_pusher.scan_prefix(prefix).map(|(k, _)| {
|
||||||
|
let mut parts = k.splitn(2, |&b| b == 0xff);
|
||||||
|
let _senderkey = parts.next();
|
||||||
|
let push_key = parts
|
||||||
|
.next()
|
||||||
|
.ok_or_else(|| Error::bad_database("Invalid senderkey_pusher in db"))?;
|
||||||
|
let push_key_string = utils::string_from_bytes(push_key)
|
||||||
|
.map_err(|_| Error::bad_database("Invalid pusher bytes in senderkey_pusher"))?;
|
||||||
|
|
||||||
|
Ok(push_key_string)
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
}
|
@ -0,0 +1,60 @@
|
|||||||
|
use ruma::{api::client::error::ErrorKind, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId};
|
||||||
|
|
||||||
|
use crate::{database::KeyValueDatabase, service, services, utils, Error, Result};
|
||||||
|
|
||||||
|
impl service::rooms::alias::Data for KeyValueDatabase {
|
||||||
|
fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()> {
|
||||||
|
self.alias_roomid
|
||||||
|
.insert(alias.alias().as_bytes(), room_id.as_bytes())?;
|
||||||
|
let mut aliasid = room_id.as_bytes().to_vec();
|
||||||
|
aliasid.push(0xff);
|
||||||
|
aliasid.extend_from_slice(&services().globals.next_count()?.to_be_bytes());
|
||||||
|
self.aliasid_alias.insert(&aliasid, alias.as_bytes())?;
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn remove_alias(&self, alias: &RoomAliasId) -> Result<()> {
|
||||||
|
if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? {
|
||||||
|
let mut prefix = room_id.to_vec();
|
||||||
|
prefix.push(0xff);
|
||||||
|
|
||||||
|
for (key, _) in self.aliasid_alias.scan_prefix(prefix) {
|
||||||
|
self.aliasid_alias.remove(&key)?;
|
||||||
|
}
|
||||||
|
self.alias_roomid.remove(alias.alias().as_bytes())?;
|
||||||
|
} else {
|
||||||
|
return Err(Error::BadRequest(
|
||||||
|
ErrorKind::NotFound,
|
||||||
|
"Alias does not exist.",
|
||||||
|
));
|
||||||
|
}
|
||||||
|
Ok(())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result<Option<OwnedRoomId>> {
|
||||||
|
self.alias_roomid
|
||||||
|
.get(alias.alias().as_bytes())?
|
||||||
|
.map(|bytes| {
|
||||||
|
RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| {
|
||||||
|
Error::bad_database("Room ID in alias_roomid is invalid unicode.")
|
||||||
|
})?)
|
||||||
|
.map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid."))
|
||||||
|
})
|
||||||
|
.transpose()
|
||||||
|
}
|
||||||
|
|
||||||
|
fn local_aliases_for_room<'a>(
|
||||||
|
&'a self,
|
||||||
|
room_id: &RoomId,
|
||||||
|
) -> Box<dyn Iterator<Item = Result<OwnedRoomAliasId>> + 'a> {
|
||||||
|
let mut prefix = room_id.as_bytes().to_vec();
|
||||||
|
prefix.push(0xff);
|
||||||
|
|
||||||
|
Box::new(self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| {
|
||||||
|
utils::string_from_bytes(&bytes)
|
||||||
|
.map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))?
|
||||||
|
.try_into()
|
||||||
|
.map_err(|_| Error::bad_database("Invalid alias in aliasid_alias."))
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
}
|
@ -0,0 +1,61 @@
|
|||||||
|
use std::{collections::HashSet, mem::size_of, sync::Arc};
|
||||||
|
|
||||||
|
use crate::{database::KeyValueDatabase, service, utils, Result};
|
||||||
|
|
||||||
|
impl service::rooms::auth_chain::Data for KeyValueDatabase {
    /// Looks up a cached auth chain for `key` (a list of shorteventids):
    /// first in the in-memory cache, then — for single-event keys only — in
    /// the `shorteventid_authchain` tree. A DB hit is promoted into the
    /// in-memory cache before returning.
    fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result<Option<Arc<HashSet<u64>>>> {
        // Check RAM cache
        // NOTE(review): get_mut (rather than get) presumably also refreshes the
        // entry's recency in the cache — confirm against the cache type used.
        if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) {
            return Ok(Some(Arc::clone(result)));
        }

        // We only save auth chains for single events in the db
        if key.len() == 1 {
            // Check DB cache
            // The stored value is a flat concatenation of big-endian u64s.
            let chain = self
                .shorteventid_authchain
                .get(&key[0].to_be_bytes())?
                .map(|chain| {
                    chain
                        .chunks_exact(size_of::<u64>())
                        .map(|chunk| utils::u64_from_bytes(chunk).expect("byte length is correct"))
                        .collect()
                });

            if let Some(chain) = chain {
                let chain = Arc::new(chain);

                // Cache in RAM
                self.auth_chain_cache
                    .lock()
                    .unwrap()
                    .insert(vec![key[0]], Arc::clone(&chain));

                return Ok(Some(chain));
            }
        }

        Ok(None)
    }

    /// Stores an auth chain in the in-memory cache; for single-event keys it
    /// is additionally persisted to `shorteventid_authchain` as a
    /// concatenation of big-endian u64 bytes.
    fn cache_auth_chain(&self, key: Vec<u64>, auth_chain: Arc<HashSet<u64>>) -> Result<()> {
        // Only persist single events in db
        if key.len() == 1 {
            self.shorteventid_authchain.insert(
                &key[0].to_be_bytes(),
                &auth_chain
                    .iter()
                    .flat_map(|s| s.to_be_bytes().to_vec())
                    .collect::<Vec<u8>>(),
            )?;
        }

        // Cache in RAM
        self.auth_chain_cache
            .lock()
            .unwrap()
            .insert(key, auth_chain);

        Ok(())
    }
}
|
@ -0,0 +1,28 @@
|
|||||||
|
use ruma::{OwnedRoomId, RoomId};
|
||||||
|
|
||||||
|
use crate::{database::KeyValueDatabase, service, utils, Error, Result};
|
||||||
|
|
||||||
|
impl service::rooms::directory::Data for KeyValueDatabase {
|
||||||
|
fn set_public(&self, room_id: &RoomId) -> Result<()> {
|
||||||
|
self.publicroomids.insert(room_id.as_bytes(), &[])
|
||||||
|
}
|
||||||
|
|
||||||
|
fn set_not_public(&self, room_id: &RoomId) -> Result<()> {
|
||||||
|
self.publicroomids.remove(room_id.as_bytes())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn is_public_room(&self, room_id: &RoomId) -> Result<bool> {
|
||||||
|
Ok(self.publicroomids.get(room_id.as_bytes())?.is_some())
|
||||||
|
}
|
||||||
|
|
||||||
|
fn public_rooms<'a>(&'a self) -> Box<dyn Iterator<Item = Result<OwnedRoomId>> + 'a> {
|
||||||
|
Box::new(self.publicroomids.iter().map(|(bytes, _)| {
|
||||||
|
RoomId::parse(
|
||||||
|
utils::string_from_bytes(&bytes).map_err(|_| {
|
||||||
|
Error::bad_database("Room ID in publicroomids is invalid unicode.")
|
||||||
|
})?,
|
||||||
|
)
|
||||||
|
.map_err(|_| Error::bad_database("Room ID in publicroomids is invalid."))
|
||||||
|
}))
|
||||||
|
}
|
||||||
|
}
|
Some files were not shown because too many files have changed in this diff Show More
Loading…
Reference in New Issue