diff --git a/.dockerignore b/.dockerignore index 80b30721..933b380f 100644 --- a/.dockerignore +++ b/.dockerignore @@ -14,6 +14,8 @@ docker-compose* # Git folder .git .gitea +.gitlab +.github # Dot files .env diff --git a/.gitignore b/.gitignore index 1f5f395f..f5e9505b 100644 --- a/.gitignore +++ b/.gitignore @@ -57,7 +57,6 @@ $RECYCLE.BIN/ *.lnk # Conduit -Rocket.toml conduit.toml conduit.db diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 386986fd..eb7a96fd 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -8,7 +8,10 @@ variables: GIT_SUBMODULE_STRATEGY: recursive FF_USE_FASTZIP: 1 CACHE_COMPRESSION_LEVEL: fastest - + # Docker in Docker + DOCKER_HOST: tcp://docker:2375/ + DOCKER_TLS_CERTDIR: "" + DOCKER_DRIVER: overlay2 # --------------------------------------------------------------------- # # Cargo: Compiling for different architectures # @@ -20,294 +23,276 @@ variables: rules: - if: '$CI_COMMIT_BRANCH == "master"' - if: '$CI_COMMIT_BRANCH == "next"' - - if: '$CI_COMMIT_TAG' + - if: "$CI_COMMIT_TAG" + - if: '($CI_MERGE_REQUEST_APPROVED == "true") || $BUILD_EVERYTHING' # Once MR is approved, test all builds. Or if BUILD_EVERYTHING is set. interruptible: true - image: "rust:latest" + image: "registry.gitlab.com/jfowl/conduit-containers/rust-with-tools@sha256:69ab327974aef4cc0daf4273579253bf7ae5e379a6c52729b83137e4caa9d093" tags: ["docker"] - cache: - paths: - - cargohome - - target/ - key: "build_cache-$TARGET-release" + services: ["docker:dind"] variables: - CARGO_PROFILE_RELEASE_LTO=true - CARGO_PROFILE_RELEASE_CODEGEN_UNITS=1 + SHARED_PATH: $CI_PROJECT_DIR/shared + CARGO_PROFILE_RELEASE_LTO: "true" + CARGO_PROFILE_RELEASE_CODEGEN_UNITS: "1" + CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow before_script: - 'echo "Building for target $TARGET"' - - 'mkdir -p cargohome && CARGOHOME="cargohome"' - - "cat /etc/*-release && rustc --version && cargo --version" # Print version info for debugging - - 'apt-get update -yqq' - - 'echo "Installing packages: $NEEDED_PACKAGES"' - - "apt-get install -yqq --no-install-recommends $NEEDED_PACKAGES" - - "rustup target add $TARGET" + - "rustup show && rustc --version && cargo --version" # Print version info for debugging + # fix cargo and rustup mounts from this container (https://gitlab.com/gitlab-org/gitlab-foss/-/issues/41227) + - "mkdir -p $SHARED_PATH/cargo" + - "cp -r $CARGO_HOME/bin $SHARED_PATH/cargo" + - "cp -r $RUSTUP_HOME $SHARED_PATH" + - "export CARGO_HOME=$SHARED_PATH/cargo RUSTUP_HOME=$SHARED_PATH/rustup" + # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results. 
+ - if [ -n "${SCCACHE_ENDPOINT}" ]; then export RUSTC_WRAPPER=/sccache; fi script: - - time cargo build --target $TARGET --release + # cross-compile conduit for target + - 'time cross build --target="$TARGET" --locked --release' - 'mv "target/$TARGET/release/conduit" "conduit-$TARGET"' + # print information about linking for debugging + - "file conduit-$TARGET" # print file information + - 'readelf --dynamic conduit-$TARGET | sed -e "/NEEDED/q1"' # ensure statically linked + cache: + # https://doc.rust-lang.org/cargo/guide/cargo-home.html#caching-the-cargo-home-in-ci + key: "cargo-cache-$TARGET" + paths: + - $SHARED_PATH/cargo/registry/index + - $SHARED_PATH/cargo/registry/cache + - $SHARED_PATH/cargo/git/db artifacts: expire_in: never - -build:release:cargo:x86_64-unknown-linux-gnu: +build:release:cargo:x86_64-unknown-linux-musl-with-debug: extends: .build-cargo-shared-settings variables: - TARGET: "x86_64-unknown-linux-gnu" + CARGO_PROFILE_RELEASE_DEBUG: 2 # Enable debug info for flamegraph profiling + TARGET: "x86_64-unknown-linux-musl" + after_script: + - "mv ./conduit-x86_64-unknown-linux-musl ./conduit-x86_64-unknown-linux-musl-with-debug" artifacts: - name: "conduit-x86_64-unknown-linux-gnu" + name: "conduit-x86_64-unknown-linux-musl-with-debug" paths: - - "conduit-x86_64-unknown-linux-gnu" - expose_as: "Conduit for x86_64-unknown-linux-gnu" + - "conduit-x86_64-unknown-linux-musl-with-debug" + expose_as: "Conduit for x86_64-unknown-linux-musl-with-debug" -build:release:cargo:armv7-unknown-linux-gnueabihf: +build:release:cargo:x86_64-unknown-linux-musl: extends: .build-cargo-shared-settings variables: - TARGET: "armv7-unknown-linux-gnueabihf" - NEEDED_PACKAGES: "build-essential gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf libc6-dev-armhf-cross" - CARGO_TARGET_ARMV7_UNKNOWN_LINUX_GNUEABIHF_LINKER: arm-linux-gnueabihf-gcc - CC_armv7_unknown_linux_gnueabihf: arm-linux-gnueabihf-gcc - CXX_armv7_unknown_linux_gnueabihf: arm-linux-gnueabihf-g++ + TARGET: "x86_64-unknown-linux-musl" artifacts: - name: "conduit-armv7-unknown-linux-gnueabihf" + name: "conduit-x86_64-unknown-linux-musl" paths: - - "conduit-armv7-unknown-linux-gnueabihf" - expose_as: "Conduit for armv7-unknown-linux-gnueabihf" + - "conduit-x86_64-unknown-linux-musl" + expose_as: "Conduit for x86_64-unknown-linux-musl" -build:release:cargo:aarch64-unknown-linux-gnu: +build:release:cargo:arm-unknown-linux-musleabihf: extends: .build-cargo-shared-settings variables: - TARGET: "aarch64-unknown-linux-gnu" - NEEDED_PACKAGES: "build-essential gcc-10-aarch64-linux-gnu g++-aarch64-linux-gnu libc6-dev-arm64-cross" - CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER: aarch64-linux-gnu-gcc - CC_aarch64_unknown_linux_gnu: aarch64-linux-gnu-gcc - CXX_aarch64_unknown_linux_gnu: aarch64-linux-gnu-g++ - TARGET_CC: "/usr/bin/aarch64-linux-gnu-gcc-10" - TARGET_AR: "/usr/bin/aarch64-linux-gnu-gcc-ar-10" + TARGET: "arm-unknown-linux-musleabihf" artifacts: - name: "conduit-aarch64-unknown-linux-gnu" + name: "conduit-arm-unknown-linux-musleabihf" paths: - - "conduit-aarch64-unknown-linux-gnu" - expose_as: "Conduit for aarch64-unknown-linux-gnu" + - "conduit-arm-unknown-linux-musleabihf" + expose_as: "Conduit for arm-unknown-linux-musleabihf" -build:release:cargo:x86_64-unknown-linux-musl: +build:release:cargo:armv7-unknown-linux-musleabihf: extends: .build-cargo-shared-settings - image: "rust:alpine" variables: - TARGET: "x86_64-unknown-linux-musl" - before_script: - - 'echo "Building for target $TARGET"' - - 'mkdir -p cargohome && 
CARGOHOME="cargohome"' - - "cat /etc/*-release && rustc --version && cargo --version" # Print version info for debugging - - "rustup target add $TARGET" - - "apk add libc-dev" + TARGET: "armv7-unknown-linux-musleabihf" artifacts: - name: "conduit-x86_64-unknown-linux-musl" + name: "conduit-armv7-unknown-linux-musleabihf" paths: - - "conduit-x86_64-unknown-linux-musl" - expose_as: "Conduit for x86_64-unknown-linux-musl" - + - "conduit-armv7-unknown-linux-musleabihf" + expose_as: "Conduit for armv7-unknown-linux-musleabihf" +build:release:cargo:aarch64-unknown-linux-musl: + extends: .build-cargo-shared-settings + variables: + TARGET: "aarch64-unknown-linux-musl" + artifacts: + name: "conduit-aarch64-unknown-linux-musl" + paths: + - "conduit-aarch64-unknown-linux-musl" + expose_as: "Conduit for aarch64-unknown-linux-musl" .cargo-debug-shared-settings: extends: ".build-cargo-shared-settings" rules: - - if: '$CI_COMMIT_BRANCH' - - if: '$CI_COMMIT_TAG' + - when: "always" cache: - key: "build_cache-$TARGET-debug" + key: "build_cache--$TARGET--$CI_COMMIT_BRANCH--debug" script: - - "time cargo build --target $TARGET" + # cross-compile conduit for target + - 'time time cross build --target="$TARGET" --locked' - 'mv "target/$TARGET/debug/conduit" "conduit-debug-$TARGET"' + # print information about linking for debugging + - "file conduit-debug-$TARGET" # print file information + - 'readelf --dynamic conduit-debug-$TARGET | sed -e "/NEEDED/q1"' # ensure statically linked artifacts: expire_in: 4 weeks -build:debug:cargo:x86_64-unknown-linux-gnu: - extends: ".cargo-debug-shared-settings" - variables: - TARGET: "x86_64-unknown-linux-gnu" - artifacts: - name: "conduit-debug-x86_64-unknown-linux-gnu" - paths: - - "conduit-debug-x86_64-unknown-linux-gnu" - expose_as: "Conduit DEBUG for x86_64-unknown-linux-gnu" - build:debug:cargo:x86_64-unknown-linux-musl: extends: ".cargo-debug-shared-settings" - image: "rust:alpine" variables: TARGET: "x86_64-unknown-linux-musl" - before_script: - - 'echo "Building for target $TARGET"' - - 'mkdir -p cargohome && CARGOHOME="cargohome"' - - "cat /etc/*-release && rustc --version && cargo --version" # Print version info for debugging - - "rustup target add $TARGET" - - "apk add libc-dev" artifacts: name: "conduit-debug-x86_64-unknown-linux-musl" paths: - "conduit-debug-x86_64-unknown-linux-musl" expose_as: "Conduit DEBUG for x86_64-unknown-linux-musl" - - -# --------------------------------------------------------------------- # -# Cargo: Compiling deb packages for different architectures # -# --------------------------------------------------------------------- # - - -.build-cargo-deb-shared-settings: - stage: "build" - needs: [ ] - rules: - - if: '$CI_COMMIT_BRANCH == "master"' - - if: '$CI_COMMIT_BRANCH == "next"' - - if: '$CI_COMMIT_TAG' - interruptible: true - image: "rust:latest" - tags: ["docker"] - cache: - paths: - - cargohome - - target/ - key: "build_cache-deb-$TARGET" - before_script: - - 'echo "Building debian package for target $TARGET"' - - 'mkdir -p cargohome && CARGOHOME="cargohome"' - - "cat /etc/*-release && rustc --version && cargo --version" # Print version info for debugging - - 'apt-get update -yqq' - - 'echo "Installing packages: $NEEDED_PACKAGES"' - - "apt-get install -yqq --no-install-recommends $NEEDED_PACKAGES" - - "rustup target add $TARGET" - - "cargo install cargo-deb" - script: - - time cargo deb --target $TARGET - - 'mv target/$TARGET/debian/*.deb "conduit-$TARGET.deb"' - -build:cargo-deb:x86_64-unknown-linux-gnu: - extends: 
.build-cargo-deb-shared-settings - variables: - TARGET: "x86_64-unknown-linux-gnu" - NEEDED_PACKAGES: "" - artifacts: - name: "conduit-x86_64-unknown-linux-gnu.deb" - paths: - - "conduit-x86_64-unknown-linux-gnu.deb" - expose_as: "Debian Package x86_64" - - # --------------------------------------------------------------------- # # Create and publish docker image # # --------------------------------------------------------------------- # -# Build a docker image by packaging up the x86_64-unknown-linux-musl binary into an alpine image .docker-shared-settings: stage: "build docker image" - needs: [] - interruptible: true - image: - name: "gcr.io/kaniko-project/executor:debug" - entrypoint: [""] + image: jdrouet/docker-with-buildx:stable tags: ["docker"] - variables: - # Configure Kaniko Caching: https://cloud.google.com/build/docs/kaniko-cache - KANIKO_CACHE_ARGS: "--cache=true --cache-copy-layers=true --cache-ttl=120h --cache-repo $CI_REGISTRY_IMAGE/kaniko-ci-cache" - before_script: - - "mkdir -p /kaniko/.docker" - - 'echo "{\"auths\":{\"$CI_REGISTRY\":{\"username\":\"$CI_REGISTRY_USER\",\"password\":\"$CI_REGISTRY_PASSWORD\"},\"$DOCKER_HUB\":{\"username\":\"$DOCKER_HUB_USER\",\"password\":\"$DOCKER_HUB_PASSWORD\"}}}" > /kaniko/.docker/config.json' - - -build:docker:next: - extends: .docker-shared-settings + services: + - docker:dind needs: - "build:release:cargo:x86_64-unknown-linux-musl" + - "build:release:cargo:arm-unknown-linux-musleabihf" + - "build:release:cargo:armv7-unknown-linux-musleabihf" + - "build:release:cargo:aarch64-unknown-linux-musl" + variables: + PLATFORMS: "linux/arm/v6,linux/arm/v7,linux/arm64,linux/amd64" + DOCKER_FILE: "docker/ci-binaries-packaging.Dockerfile" + cache: + paths: + - docker_cache + key: "$CI_JOB_NAME" + before_script: + - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY + # Only log in to Dockerhub if the credentials are given: + - if [ -n "${DOCKER_HUB}" ]; then docker login -u "$DOCKER_HUB_USER" -p "$DOCKER_HUB_PASSWORD" "$DOCKER_HUB"; fi script: + # Prepare buildx to build multiarch stuff: + - docker context create 'ci-context' + - docker buildx create --name 'multiarch-builder' --use 'ci-context' + # Copy binaries to their docker arch path + - mkdir -p linux/ && mv ./conduit-x86_64-unknown-linux-musl linux/amd64 + - mkdir -p linux/arm/ && mv ./conduit-arm-unknown-linux-musleabihf linux/arm/v6 + - mkdir -p linux/arm/ && mv ./conduit-armv7-unknown-linux-musleabihf linux/arm/v7 + - mv ./conduit-aarch64-unknown-linux-musl linux/arm64 + - 'export CREATED=$(date -u +''%Y-%m-%dT%H:%M:%SZ'') && echo "Docker image creation date: $CREATED"' + # Build and push image: - > - /kaniko/executor - $KANIKO_CACHE_ARGS - --force - --context $CI_PROJECT_DIR - --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') + docker buildx build + --pull + --push + --cache-from=type=local,src=$CI_PROJECT_DIR/docker_cache + --cache-to=type=local,dest=$CI_PROJECT_DIR/docker_cache + --build-arg CREATED=$CREATED --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) --build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA" - --dockerfile "$CI_PROJECT_DIR/docker/ci-binaries-packaging.Dockerfile" - --destination "$CI_REGISTRY_IMAGE/conduit:next" - --destination "$CI_REGISTRY_IMAGE/conduit:next-alpine" - --destination "$CI_REGISTRY_IMAGE/conduit:commit-$CI_COMMIT_SHORT_SHA" - --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next" - --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next-alpine" - --destination 
"$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:commit-$CI_COMMIT_SHORT_SHA" + --platform "$PLATFORMS" + --tag "$TAG" + --tag "$TAG-alpine" + --tag "$TAG-commit-$CI_COMMIT_SHORT_SHA" + --file "$DOCKER_FILE" . + +docker:next:gitlab: + extends: .docker-shared-settings rules: - if: '$CI_COMMIT_BRANCH == "next"' + variables: + TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:next" +docker:next:dockerhub: + extends: .docker-shared-settings + rules: + - if: '$CI_COMMIT_BRANCH == "next" && $DOCKER_HUB' + variables: + TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:next" -build:docker:master: +docker:master:gitlab: extends: .docker-shared-settings - needs: - - "build:release:cargo:x86_64-unknown-linux-musl" - script: - - > - /kaniko/executor - $KANIKO_CACHE_ARGS - --context $CI_PROJECT_DIR - --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') - --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) - --build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA" - --dockerfile "$CI_PROJECT_DIR/docker/ci-binaries-packaging.Dockerfile" - --destination "$CI_REGISTRY_IMAGE/conduit:latest" - --destination "$CI_REGISTRY_IMAGE/conduit:latest-alpine" - --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:latest" - --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:latest-alpine" rules: - if: '$CI_COMMIT_BRANCH == "master"' + variables: + TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:latest" - -build:docker:tags: +docker:master:dockerhub: extends: .docker-shared-settings - needs: - - "build:release:cargo:x86_64-unknown-linux-musl" - script: - - > - /kaniko/executor - $KANIKO_CACHE_ARGS - --context $CI_PROJECT_DIR - --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') - --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) - --build-arg "GIT_REF=$CI_COMMIT_SHORT_SHA" - --dockerfile "$CI_PROJECT_DIR/docker/ci-binaries-packaging.Dockerfile" - --destination "$CI_REGISTRY_IMAGE/conduit:$CI_COMMIT_TAG" - --destination "$CI_REGISTRY_IMAGE/conduit:$CI_COMMIT_TAG-alpine" - --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:$CI_COMMIT_TAG" - --destination "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:$CI_COMMIT_TAG-alpine" rules: - - if: '$CI_COMMIT_TAG' + - if: '$CI_COMMIT_BRANCH == "master" && $DOCKER_HUB' + variables: + TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:latest" +docker:tags:gitlab: + extends: .docker-shared-settings + rules: + - if: "$CI_COMMIT_TAG" + variables: + TAG: "$CI_REGISTRY_IMAGE/matrix-conduit:$CI_COMMIT_TAG" +docker:tags:dockerhub: + extends: .docker-shared-settings + rules: + - if: "$CI_COMMIT_TAG && $DOCKER_HUB" + variables: + TAG: "$DOCKER_HUB_IMAGE/matrixconduit/matrix-conduit:$CI_COMMIT_TAG" # --------------------------------------------------------------------- # # Run tests # # --------------------------------------------------------------------- # -test:cargo: +.test-shared-settings: stage: "test" - needs: [ ] - image: "rust:latest" - tags: [ "docker" ] + needs: [] + image: "registry.gitlab.com/jfowl/conduit-containers/rust-with-tools:latest" + tags: ["docker"] variables: - CARGO_HOME: "cargohome" - cache: - paths: - - target - - cargohome - key: test_cache + CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow interruptible: true + +test:cargo: + extends: .test-shared-settings + before_script: + # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: + - if [ -n "${SCCACHE_ENDPOINT}" ]; then export RUSTC_WRAPPER=/usr/local/cargo/bin/sccache; fi 
+ script: + - rustc --version && cargo --version # Print version info for debugging + - "cargo test --color always --workspace --verbose --locked --no-fail-fast -- -Z unstable-options --format json | gitlab-report -p test > $CI_PROJECT_DIR/report.xml" + artifacts: + when: always + reports: + junit: report.xml + + +test:clippy: + extends: .test-shared-settings + allow_failure: true before_script: - - mkdir -p $CARGO_HOME && echo "using $CARGO_HOME to cache cargo deps" - - apt-get update -yqq - - apt-get install -yqq --no-install-recommends build-essential libssl-dev pkg-config - - rustup component add clippy rustfmt + - rustup component add clippy + # If provided, bring in caching through sccache, which uses an external S3 endpoint to store compilation results: + - if [ -n "${SCCACHE_ENDPOINT}" ]; then export RUSTC_WRAPPER=/usr/local/cargo/bin/sccache; fi + script: + - rustc --version && cargo --version # Print version info for debugging + - "cargo clippy --color always --verbose --message-format=json | gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json" + artifacts: + when: always + reports: + codequality: gl-code-quality-report.json + +test:format: + extends: .test-shared-settings + before_script: + - rustup component add rustfmt script: - - rustc --version && cargo --version # Print version info for debugging - cargo fmt --all -- --check - - cargo test --workspace --verbose --locked - - cargo clippy + +test:audit: + extends: .test-shared-settings + allow_failure: true + script: + - cargo audit --color always || true + - cargo audit --stale --json | gitlab-report -p audit > gl-sast-report.json + artifacts: + when: always + reports: + sast: gl-sast-report.json test:sytest: stage: "test" @@ -316,10 +301,11 @@ test:sytest: - "build:debug:cargo:x86_64-unknown-linux-musl" image: name: "valkum/sytest-conduit:latest" - entrypoint: [ "" ] - tags: [ "docker" ] + entrypoint: [""] + tags: ["docker"] variables: PLUGINS: "https://github.com/valkum/sytest_conduit/archive/master.tar.gz" + interruptible: true before_script: - "mkdir -p /app" - "cp ./conduit-debug-x86_64-unknown-linux-musl /app/conduit" @@ -330,7 +316,7 @@ test:sytest: script: - "SYTEST_EXIT_CODE=0" - "/bootstrap.sh conduit || SYTEST_EXIT_CODE=1" - - "perl /sytest/tap-to-junit-xml.pl --puretap --input /logs/results.tap --output $CI_PROJECT_DIR/sytest.xml \"Sytest\" && cp /logs/results.tap $CI_PROJECT_DIR/results.tap" + - 'perl /sytest/tap-to-junit-xml.pl --puretap --input /logs/results.tap --output $CI_PROJECT_DIR/sytest.xml "Sytest" && cp /logs/results.tap $CI_PROJECT_DIR/results.tap' - "exit $SYTEST_EXIT_CODE" artifacts: when: always @@ -340,6 +326,40 @@ test:sytest: reports: junit: "$CI_PROJECT_DIR/sytest.xml" +test:dockerlint: + stage: "test" + needs: [] + image: "ghcr.io/hadolint/hadolint@sha256:6c4b7c23f96339489dd35f21a711996d7ce63047467a9a562287748a03ad5242" # 2.8.0-alpine + interruptible: true + script: + - hadolint --version + # First pass: Print for CI log: + - > + hadolint + --no-fail --verbose + ./Dockerfile + ./docker/ci-binaries-packaging.Dockerfile + # Then output the results into a json for GitLab to pretty-print this in the MR: + - > + hadolint + --format gitlab_codeclimate + --failure-threshold error + ./Dockerfile + ./docker/ci-binaries-packaging.Dockerfile > dockerlint.json + artifacts: + when: always + reports: + codequality: dockerlint.json + paths: + - dockerlint.json + rules: + - if: '$CI_COMMIT_REF_NAME != "master"' + changes: + - docker/*Dockerfile + - Dockerfile + - .gitlab-ci.yml + - if: 
'$CI_COMMIT_REF_NAME == "master"' + - if: '$CI_COMMIT_REF_NAME == "next"' # --------------------------------------------------------------------- # # Store binaries as package so they have download urls # @@ -348,25 +368,32 @@ test:sytest: publish:package: stage: "upload artifacts" needs: - - "build:release:cargo:x86_64-unknown-linux-gnu" - - "build:release:cargo:armv7-unknown-linux-gnueabihf" - - "build:release:cargo:aarch64-unknown-linux-gnu" - "build:release:cargo:x86_64-unknown-linux-musl" - - "build:cargo-deb:x86_64-unknown-linux-gnu" + - "build:release:cargo:arm-unknown-linux-musleabihf" + - "build:release:cargo:armv7-unknown-linux-musleabihf" + - "build:release:cargo:aarch64-unknown-linux-musl" + # - "build:cargo-deb:x86_64-unknown-linux-gnu" rules: - if: '$CI_COMMIT_BRANCH == "master"' - if: '$CI_COMMIT_BRANCH == "next"' - - if: '$CI_COMMIT_TAG' + - if: "$CI_COMMIT_TAG" image: curlimages/curl:latest tags: ["docker"] variables: GIT_STRATEGY: "none" # Don't need a clean copy of the code, we just operate on artifacts script: - 'BASE_URL="${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_PIPELINE_ID}"' - - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-gnu "${BASE_URL}/conduit-x86_64-unknown-linux-gnu"' - - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-armv7-unknown-linux-gnueabihf "${BASE_URL}/conduit-armv7-unknown-linux-gnueabihf"' - - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-aarch64-unknown-linux-gnu "${BASE_URL}/conduit-aarch64-unknown-linux-gnu"' - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-musl "${BASE_URL}/conduit-x86_64-unknown-linux-musl"' - - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-x86_64-unknown-linux-gnu.deb "${BASE_URL}/conduit-x86_64-unknown-linux-gnu.deb"' - + - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-arm-unknown-linux-musleabihf "${BASE_URL}/conduit-arm-unknown-linux-musleabihf"' + - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-armv7-unknown-linux-musleabihf "${BASE_URL}/conduit-armv7-unknown-linux-musleabihf"' + - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file conduit-aarch64-unknown-linux-musl "${BASE_URL}/conduit-aarch64-unknown-linux-musl"' +# Avoid duplicate pipelines +# See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines +workflow: + rules: + - if: '$CI_PIPELINE_SOURCE == "merge_request_event"' + - if: "$CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS" + when: never + - if: "$CI_COMMIT_BRANCH" + - if: "$CI_COMMIT_TAG" diff --git a/.vscode/extensions.json b/.vscode/extensions.json new file mode 100644 index 00000000..037f20d7 --- /dev/null +++ b/.vscode/extensions.json @@ -0,0 +1,11 @@ +{ + "recommendations": [ + "rust-lang.rust-analyzer", + "bungcip.better-toml", + "ms-azuretools.vscode-docker", + "eamodio.gitlens", + "serayuzgur.crates", + "vadimcn.vscode-lldb", + "timonwong.shellcheck" + ] +} \ No newline at end of file diff --git a/.vscode/launch.json b/.vscode/launch.json new file mode 100644 index 00000000..da521604 --- /dev/null +++ b/.vscode/launch.json @@ -0,0 +1,35 @@ +{ + // Use IntelliSense to learn about possible attributes. + // Hover to view descriptions of existing attributes. 
+ // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387 + "version": "0.2.0", + "configurations": [ + { + "type": "lldb", + "request": "launch", + "name": "Debug conduit", + "sourceLanguages": ["rust"], + "cargo": { + "args": [ + "build", + "--bin=conduit", + "--package=conduit" + ], + "filter": { + "name": "conduit", + "kind": "bin" + } + }, + "args": [], + "env": { + "RUST_BACKTRACE": "1", + "CONDUIT_CONFIG": "", + "CONDUIT_SERVER_NAME": "localhost", + "CONDUIT_DATABASE_PATH": "/tmp", + "CONDUIT_ADDRESS": "0.0.0.0", + "CONDUIT_PORT": "6167" + }, + "cwd": "${workspaceFolder}" + } + ] +} \ No newline at end of file diff --git a/.vscode/settings.json b/.vscode/settings.json index c3f66054..95294d48 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -1,3 +1,3 @@ { - "rust-analyzer.procMacro.enable": true + "rust-analyzer.procMacro.enable": true, } \ No newline at end of file diff --git a/APPSERVICES.md b/APPSERVICES.md index 26c34cc4..8ca015a0 100644 --- a/APPSERVICES.md +++ b/APPSERVICES.md @@ -2,7 +2,7 @@ ## Getting help -If you run into any problems while setting up an Appservice, write an email to `timo@koesters.xyz`, ask us in `#conduit:matrix.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new). +If you run into any problems while setting up an Appservice, write an email to `timo@koesters.xyz`, ask us in [#conduit:fachschaften.org](https://matrix.to/#/#conduit:fachschaften.org) or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new). ## Set up the appservice - general instructions @@ -18,7 +18,7 @@ First, go into the #admins room of your homeserver. The first person that registered on the homeserver automatically joins it. Then send a message into the room like this: - @conduit:your.server.name: register_appservice + @conduit:your.server.name: register-appservice ``` paste the @@ -31,7 +31,7 @@ the room like this: ``` You can confirm it worked by sending a message like this: -`@conduit:your.server.name: list_appservices` +`@conduit:your.server.name: list-appservices` The @conduit bot should answer with `Appservices (1): your-bridge` @@ -42,6 +42,14 @@ could help. ## Appservice-specific instructions +### Remove an appservice + +To remove an appservice, go to your admin room and execute + +`@conduit:your.server.name: unregister-appservice <name>` + +where `<name>` is one of the names shown in the output of `list-appservices`. + ### Tested appservices These appservices have been tested and work with Conduit without any extra steps: @@ -49,38 +57,5 @@ These appservices have been tested and work with Conduit without any extra steps - [matrix-appservice-discord](https://github.com/Half-Shot/matrix-appservice-discord) - [mautrix-hangouts](https://github.com/mautrix/hangouts/) - [mautrix-telegram](https://github.com/mautrix/telegram/) - -### [mautrix-signal](https://github.com/mautrix/signal) - -There are a few things you need to do, in order for the Signal bridge (at least -up to version `0.2.0`) to work. How you do this depends on whether you use -Docker or `virtualenv` to run it. In either case you need to modify -[portal.py](https://github.com/mautrix/signal/blob/master/mautrix_signal/portal.py). -Do this **before** following the bridge installation guide. - -1. **Create a copy of `portal.py`**. 
Go to - [portal.py](https://github.com/mautrix/signal/blob/master/mautrix_signal/portal.py) -at [mautrix-signal](https://github.com/mautrix/signal) (make sure you change to -the correct commit/version of mautrix-signal you're using) and copy its -content. Create a new `portal.py` on your system and paste the content in. -2. **Patch the copy**. Exact line numbers may be slightly different, look nearby if they don't match: - - [Line 1020](https://github.com/mautrix/signal/blob/4ea831536f154aba6419d13292479eb383ea3308/mautrix_signal/portal.py#L1020) - ```diff - --- levels.users[self.main_intent.mxid] = 9001 if is_initial else 100 - +++ levels.users[self.main_intent.mxid] = 100 if is_initial else 100 - ``` - - [Between lines 1041 and 1042](https://github.com/mautrix/signal/blob/4ea831536f154aba6419d13292479eb383ea3308/mautrix_signal/portal.py#L1041-L1042) add a new line: - ```diff - "type": str(EventType.ROOM_POWER_LEVELS), - +++ "state_key": "", - "content": power_levels.serialize(), - ``` -3. **Deploy the patch**. This is different depending on how you have `mautrix-signal` deployed: - - [*If using virtualenv*] Copy your patched `portal.py` to `./lib/python3.7/site-packages/mautrix_signal/portal.py` (the exact version of Python may be different on your system). - - [*If using Docker*] Map the patched `portal.py` into the `mautrix-signal` container: - - ```yaml - volumes: - - ./your/path/on/host/portal.py:/usr/lib/python3.9/site-packages/mautrix_signal/portal.py - ``` -4. Now continue with the [bridge installation instructions ](https://docs.mau.fi/bridges/index.html) and the general bridge notes above. +- [mautrix-signal](https://github.com/mautrix/signal/) from version `0.2.2` forward. +- [heisenbridge](https://github.com/hifi/heisenbridge/) diff --git a/CROSS_COMPILE.md b/CROSS_COMPILE.md deleted file mode 100644 index e38a6ad7..00000000 --- a/CROSS_COMPILE.md +++ /dev/null @@ -1,11 +0,0 @@ -Install docker: - -``` -$ sudo apt install docker -$ sudo usermod -aG docker $USER -$ exec sudo su -l $USER -$ sudo systemctl start docker -$ cargo install cross -$ cross build --release --target armv7-unknown-linux-musleabihf -``` -The cross-compiled binary is at target/armv7-unknown-linux-musleabihf/release/conduit diff --git a/Cargo.lock b/Cargo.lock index 293bcff7..48ce6c04 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3,40 +3,52 @@ version = 3 [[package]] -name = "adler32" -version = "1.2.0" +name = "adler" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aae1277d39aeec15cb388266ecc24b11c80469deae6067e17a1a7aa9e5c1f234" +checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe" [[package]] name = "ahash" -version = "0.7.4" +version = "0.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "43bb833f0bf979d8475d38fbf09ed3b8a55e1885fe93ad3f93239fc6a4f17b98" +checksum = "fcb51a0695d8f838b1ee009b3fbf66bda078cd64590202a864a8f3e8c4315c47" dependencies = [ - "getrandom 0.2.3", + "getrandom 0.2.7", "once_cell", "version_check", ] [[package]] name = "aho-corasick" -version = "0.7.18" +version = "0.7.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e37cfd5e7657ada45f742d6e99ca5788580b5c529dc78faf11ece6dc702656f" +checksum = "b4f55bd91a0978cbfd91c457a164bab8b4001c833b7f323132c0a4e1922dd44e" dependencies = [ "memchr", ] [[package]] -name = "ansi_term" -version = "0.12.1" +name = "alloc-no-stdlib" +version = "2.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3" + +[[package]] +name = "alloc-stdlib" +version = "0.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d52a9bb7ec0cf484c551830a7ce27bd20d67eac647e1befb56b0be4ee39a55d2" +checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece" dependencies = [ - "winapi", + "alloc-no-stdlib", ] +[[package]] +name = "arc-swap" +version = "1.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "983cd8b9d4b02a6dc6ffa557262eb5858a27a0038ffffe21a0f133eaa819a164" + [[package]] name = "arrayref" version = "0.3.6" @@ -45,9 +57,9 @@ checksum = "a4c527152e37cf757a3f78aae5a06fbeefdb07ccc535c980a3208ee3060dd544" [[package]] name = "arrayvec" -version = "0.5.2" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23b62fc65de8e4e7f52534fb52b0f3ed04746ae267519eef2a83941e8085068b" +checksum = "8da52d66c7071e2e3fa2a1e5c6d088fec47b593032b254f5e980de8ea54454d6" [[package]] name = "assign" @@ -56,31 +68,24 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f093eed78becd229346bf859eec0aa4dd7ddde0757287b2b4107a1f09c80002" [[package]] -name = "async-stream" -version = "0.3.2" +name = "async-compression" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "171374e7e3b2504e0e5236e3b59260560f9fe94bfe9ac39ba5e4e929c5590625" +checksum = "942c7cd7ae39e91bde4820d74132e9862e62c2f386c3aa90ccf55949f5bad63a" dependencies = [ - "async-stream-impl", + "brotli", + "flate2", "futures-core", -] - -[[package]] -name = "async-stream-impl" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "648ed8c8d2ce5409ccd57453d9d1b214b342a0d69376a6feda1fd6cae3299308" -dependencies = [ - "proc-macro2", - "quote", - "syn", + "memchr", + "pin-project-lite", + "tokio", ] [[package]] name = "async-trait" -version = "0.1.51" +version = "0.1.57" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44318e776df68115a881de9a8fd1b9e53368d7a4a5ce4cc48517da3393233a5e" +checksum = "76464446b8bc32758d7e88ee1a804d9914cd9b1cb264c029899680b0be29826f" dependencies = [ "proc-macro2", "quote", @@ -89,41 +94,86 @@ dependencies = [ [[package]] name = "atomic" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3410529e8288c463bedb5930f82833bc0c90e5d2fe639a56582a4d09220b281" +checksum = "b88d82667eca772c4aa12f0f1348b3ae643424c8876448f3f7bd5787032e234c" dependencies = [ "autocfg", ] [[package]] -name = "atty" -version = "0.2.14" +name = "autocfg" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d9b39be18770d11421cdb1b9947a45dd3f37e93092cbf377614828a319d5fee8" -dependencies = [ - "hermit-abi", - "libc", - "winapi", -] +checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" [[package]] -name = "autocfg" -version = "1.0.1" +name = "axum" +version = "0.5.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cdb031dd78e28731d87d56cc8ffef4a8f36ca26c38fe2de700543e627f8a464a" +checksum = "c9e3356844c4d6a6d6467b8da2cffb4a2820be256f50a3a386c9d152bab31043" +dependencies = [ + "async-trait", + "axum-core", + "bitflags", + "bytes", + "futures-util", + "headers", + "http", + "http-body", + "hyper", + "itoa", + "matchit", + "memchr", + "mime", + "percent-encoding", + "pin-project-lite", + 
"serde", + "serde_json", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tower", + "tower-http", + "tower-layer", + "tower-service", +] [[package]] -name = "base-x" +name = "axum-core" version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4521f3e3d031370679b3b140beb36dfe4801b09ac77e30c61941f97df3ef28b" +checksum = "d9f0c0a60006f2a293d82d571f635042a72edf927539b7685bd62d361963839b" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http", + "http-body", + "mime", + "tower-layer", + "tower-service", +] [[package]] -name = "base64" -version = "0.12.3" +name = "axum-server" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3441f0f7b02788e948e47f457ca01f1d7e6d92c693bc132c22b087d3141c03ff" +checksum = "87ba6170b61f7b086609dabcae68d2e07352539c6ef04a7c82980bdfa01a159d" +dependencies = [ + "arc-swap", + "bytes", + "futures-util", + "http", + "http-body", + "hyper", + "pin-project-lite", + "rustls", + "rustls-pemfile 1.0.1", + "tokio", + "tokio-rustls", + "tower-service", +] [[package]] name = "base64" @@ -132,10 +182,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "904dfeac50f3cdaba28fc6f57fdcddb75f49ed61346676a78c4ffe55877802fd" [[package]] -name = "binascii" -version = "0.1.4" +name = "base64ct" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "383d29d513d8764dcdc42ea295d979eb99c3c9f00607b3692cf68a431f7dca72" +checksum = "ea2b2456fd614d856680dcd9fcc660a51a820fa09daef2e49772b56a193c8474" [[package]] name = "bincode" @@ -146,6 +196,25 @@ dependencies = [ "serde", ] +[[package]] +name = "bindgen" +version = "0.59.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bd2a9a458e8f4304c52c43ebb0cfbd520289f8379a52e329a38afda99bf8eb8" +dependencies = [ + "bitflags", + "cexpr", + "clang-sys", + "lazy_static", + "lazycell", + "peeking_take_while", + "proc-macro2", + "quote", + "regex", + "rustc-hash", + "shlex", +] + [[package]] name = "bitflags" version = "1.3.2" @@ -154,9 +223,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "blake2b_simd" -version = "0.5.11" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afa748e348ad3be8263be728124b24a24f268266f6f5d58af9d75f6a40b5c587" +checksum = "72936ee4afc7f8f736d1c38383b56480b5497b4617b4a77bdbf1d2ababc76127" dependencies = [ "arrayref", "arrayvec", @@ -172,17 +241,47 @@ dependencies = [ "generic-array", ] +[[package]] +name = "block-buffer" +version = "0.10.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cce20737498f97b993470a6e536b8523f0af7892a4f928cceb1ac5e52ebe7e" +dependencies = [ + "generic-array", +] + +[[package]] +name = "brotli" +version = "3.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a1a0b1dbcc8ae29329621f8d4f0d835787c1c38bb1401979b49d13b0b305ff68" +dependencies = [ + "alloc-no-stdlib", + "alloc-stdlib", + "brotli-decompressor", +] + +[[package]] +name = "brotli-decompressor" +version = "2.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "59ad2d4653bf5ca36ae797b1f4bb4dbddb60ce49ca4aed8a2ce4829f60425b80" +dependencies = [ + "alloc-no-stdlib", + "alloc-stdlib", +] + [[package]] name = "bumpalo" -version = "3.7.0" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"9c59e7af012c713f529e7a3ee57ce9b31ddd858d4b512923602f74608b009631" +checksum = "c1ad822118d20d2c234f427000d5acc36eabe1e29a348c89b63dd60b13f28e5d" [[package]] name = "bytemuck" -version = "1.7.2" +version = "1.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "72957246c41db82b8ef88a5486143830adeb8227ef9837740bdec67724cf2c5b" +checksum = "2f5715e491b5a1598fc2bef5a606847b5dc1d48ea625bd3c02c00de8285591da" [[package]] name = "byteorder" @@ -192,24 +291,27 @@ checksum = "14c189c53d098945499cdfa7ecc63567cf3886b3332b312a5b4585d8d3a6a610" [[package]] name = "bytes" -version = "1.1.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4872d67bab6358e59559027aa3b9157c53d9358c51423c17554809a8858e0f8" +checksum = "ec8a7b6a70fde80372154c65702f00a0f56f3e1c36abbc6c440484be248856db" [[package]] name = "cc" -version = "1.0.70" +version = "1.0.73" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d26a6ce4b6a484fa3edb70f7efa6fc430fd2b87285fe8b84304fd0936faa0dc0" +checksum = "2fff2a6927b3bb87f9595d67196a70493f627687a71d87a0d692242c33f58c11" dependencies = [ "jobserver", ] [[package]] -name = "cfg-if" -version = "0.1.10" +name = "cexpr" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4785bdd1c96b2a846b2bd7cc02e86b6b3dbf14e7e53446c4f54c92a361040822" +checksum = "6fac387a98bb7c37292057cffc56d62ecb629900026402633ae9160df93a8766" +dependencies = [ + "nom", +] [[package]] name = "cfg-if" @@ -218,16 +320,48 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" [[package]] -name = "chrono" -version = "0.4.19" +name = "clang-sys" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "670ad68c9088c2a963aaa298cb369688cf3f9465ce5e2d4ca10e6e0098a1ce73" +checksum = "fa2e27ae6ab525c3d369ded447057bca5438d86dc3a68f6faafb8269ba82ebf3" dependencies = [ + "glob", "libc", - "num-integer", - "num-traits", - "time 0.1.43", - "winapi", + "libloading", +] + +[[package]] +name = "clap" +version = "4.0.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4ed45cc2c62a3eff523e718d8576ba762c83a3146151093283ac62ae11933a73" +dependencies = [ + "bitflags", + "clap_derive", + "clap_lex", + "once_cell", +] + +[[package]] +name = "clap_derive" +version = "4.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db342ce9fda24fb191e2ed4e102055a4d381c1086a06630174cd8da8d5d917ce" +dependencies = [ + "heck", + "proc-macro-error", + "proc-macro2", + "quote", + "syn", +] + +[[package]] +name = "clap_lex" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0d4198f73e42b4936b35b5bb248d81d2b595ecb170da0bac7655c54eedfa8da8" +dependencies = [ + "os_str_bytes", ] [[package]] @@ -238,57 +372,61 @@ checksum = "3d7b894f5411737b7867f4827955924d7c254fc9f4d91a6aad6b097804b1018b" [[package]] name = "conduit" -version = "0.2.0" +version = "0.4.0-next" dependencies = [ - "base64 0.13.0", + "async-trait", + "axum", + "axum-server", + "base64", "bytes", + "clap", "crossbeam", "directories", + "figment", + "futures-util", "heed", + "hmac", "http", "image", "jsonwebtoken", + "lazy_static", "lru-cache", "num_cpus", "opentelemetry", "opentelemetry-jaeger", "parking_lot", - "rand 0.8.4", + "persy", + "rand 0.8.5", "regex", "reqwest", "ring", - "rocket", + "rocksdb", "ruma", 
"rusqlite", "rust-argon2", - "rustls", - "rustls-native-certs", "serde", "serde_json", "serde_yaml", - "sled", + "sha-1", "thiserror", "thread_local", "threadpool", + "tikv-jemalloc-ctl", + "tikv-jemallocator", "tokio", + "tower", + "tower-http", "tracing", "tracing-flame", "tracing-subscriber", "trust-dns-resolver", - "webpki 0.22.0", ] [[package]] name = "const-oid" -version = "0.6.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44c32f031ea41b4291d695026c023b95d59db2d8a2c7640800ed56bc8f510f22" - -[[package]] -name = "const_fn" -version = "0.4.8" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f92cfa0fd5690b3cf8c1ef2cabbd9b7ef22fa53cf5e1f92b05103f6d5d1cf6e7" +checksum = "722e23542a15cea1f65d4a1419c4cfd7a26706c70871a13a04238ca3f40f1661" [[package]] name = "constant_time_eq" @@ -296,22 +434,11 @@ version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "245097e9a4535ee1e3e3931fcfcd55a796a44c643e8596ff6566d68f09b87bbc" -[[package]] -name = "cookie" -version = "0.15.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5f1c7727e460397e56abc4bddc1d49e07a1ad78fc98eb2e1c8f032a58a2f80d" -dependencies = [ - "percent-encoding", - "time 0.2.27", - "version_check", -] - [[package]] name = "core-foundation" -version = "0.9.1" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a89e2ae426ea83155dccf10c0fa6b1463ef6d5fcb44cee0b224a408fa640a62" +checksum = "194a7a9e6de53fa55116934067c844d9d749312f75c6f6d0980e8c252f8c2146" dependencies = [ "core-foundation-sys", "libc", @@ -319,113 +446,118 @@ dependencies = [ [[package]] name = "core-foundation-sys" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea221b5284a47e40033bf9b66f35f984ec0ea2931eb03505246cd27a963f981b" +checksum = "5827cebf4670468b8772dd191856768aedcb1b0278a04f989f7766351917b9dc" [[package]] name = "cpufeatures" -version = "0.2.1" +version = "0.2.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "95059428f66df56b63431fdb4e1947ed2190586af5c5a8a8b71122bdf5a7f469" +checksum = "28d997bd5e24a5928dd43e46dc529867e207907fe0b239c3477d924f7f2ca320" dependencies = [ "libc", ] +[[package]] +name = "crc" +version = "2.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "49fc9a695bca7f35f5f4c15cddc84415f66a74ea78eef08e90c5024f2b540e23" +dependencies = [ + "crc-catalog", +] + +[[package]] +name = "crc-catalog" +version = "1.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ccaeedb56da03b09f598226e25e80088cb4cd25f316e6e4df7d695f0feeb1403" + [[package]] name = "crc32fast" -version = "1.2.1" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81156fece84ab6a9f2afdb109ce3ae577e42b1228441eded99bd77f627953b1a" +checksum = "b540bd8bc810d3885c6ea91e2018302f68baba2129ab3e88f32389ee9370880d" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] name = "crossbeam" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ae5588f6b3c3cb05239e90bd110f257254aecd01e4635400391aeae07497845" +checksum = "2801af0d36612ae591caa9568261fddce32ce6e08a7275ea334a06a4ad021a2c" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crossbeam-channel", "crossbeam-deque", "crossbeam-epoch", - "crossbeam-queue 0.3.2", - "crossbeam-utils 
0.8.5", + "crossbeam-queue", + "crossbeam-utils", ] [[package]] name = "crossbeam-channel" -version = "0.5.1" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06ed27e177f16d65f0f0c22a213e17c696ace5dd64b14258b52f9417ccb52db4" +checksum = "c2dd04ddaf88237dc3b8d8f9a3c1004b506b54b3313403944054d23c0870c521" dependencies = [ - "cfg-if 1.0.0", - "crossbeam-utils 0.8.5", + "cfg-if", + "crossbeam-utils", ] [[package]] name = "crossbeam-deque" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6455c0ca19f0d2fbf751b908d5c55c1f5cbc65e03c4225427254b46890bdde1e" +checksum = "715e8152b692bba2d374b53d4875445368fdf21a94751410af607a5ac677d1fc" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "crossbeam-epoch", - "crossbeam-utils 0.8.5", + "crossbeam-utils", ] [[package]] name = "crossbeam-epoch" -version = "0.9.5" +version = "0.9.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4ec02e091aa634e2c3ada4a392989e7c3116673ef0ac5b72232439094d73b7fd" +checksum = "f916dfc5d356b0ed9dae65f1db9fc9770aa2851d2662b988ccf4fe3516e86348" dependencies = [ - "cfg-if 1.0.0", - "crossbeam-utils 0.8.5", - "lazy_static", + "autocfg", + "cfg-if", + "crossbeam-utils", "memoffset", "scopeguard", ] [[package]] name = "crossbeam-queue" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c979cd6cfe72335896575c6b5688da489e420d36a27a0b9eb0c73db574b4a4b" -dependencies = [ - "crossbeam-utils 0.6.6", -] - -[[package]] -name = "crossbeam-queue" -version = "0.3.2" +version = "0.3.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b10ddc024425c88c2ad148c1b0fd53f4c6d38db9697c9f1588381212fa657c9" +checksum = "1cd42583b04998a5363558e5f9291ee5a5ff6b49944332103f251e7479a82aa7" dependencies = [ - "cfg-if 1.0.0", - "crossbeam-utils 0.8.5", + "cfg-if", + "crossbeam-utils", ] [[package]] name = "crossbeam-utils" -version = "0.6.6" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04973fa96e96579258a5091af6003abde64af786b860f18622b82e026cca60e6" +checksum = "edbafec5fa1f196ca66527c1b12c2ec4745ca14b50f1ad8f9f6f720b55d11fac" dependencies = [ - "cfg-if 0.1.10", - "lazy_static", + "cfg-if", ] [[package]] -name = "crossbeam-utils" -version = "0.8.5" +name = "crypto-common" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d82cfc11ce7f2c3faef78d8a684447b40d503d9681acebed6cb728d45940c4db" +checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3" dependencies = [ - "cfg-if 1.0.0", - "lazy_static", + "generic-array", + "typenum", ] [[package]] @@ -435,7 +567,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0b9fdf9972b2bd6af2d913799d9ebc165ea4d2e65878e329d9c6b372c4491b61" dependencies = [ "byteorder", - "digest", + "digest 0.9.0", "rand_core 0.5.1", "subtle", "zeroize", @@ -447,104 +579,61 @@ version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3ee2393c4a91429dffb4bedf19f4d6abf27d8a732c8ce4980305d782e5426d57" -[[package]] -name = "deflate" -version = "0.8.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73770f8e1fe7d64df17ca66ad28994a0a623ea497fa69486e14984e715c5d174" -dependencies = [ - "adler32", - "byteorder", -] - [[package]] name = "der" -version = "0.4.1" +version = "0.6.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "31e21d2d0f22cde6e88694108429775c0219760a07779bf96503b434a03d7412" +checksum = "13dd2ae565c0a381dde7fade45fce95984c568bdcb4700a4fdbe3175e0380b2f" dependencies = [ "const-oid", + "zeroize", ] [[package]] -name = "devise" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "50c7580b072f1c8476148f16e0a0d5dedddab787da98d86c5082c5e9ed8ab595" -dependencies = [ - "devise_codegen", - "devise_core", -] - -[[package]] -name = "devise_codegen" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "123c73e7a6e51b05c75fe1a1b2f4e241399ea5740ed810b0e3e6cacd9db5e7b2" -dependencies = [ - "devise_core", - "quote", -] - -[[package]] -name = "devise_core" -version = "0.3.1" +name = "digest" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "841ef46f4787d9097405cac4e70fb8644fc037b526e8c14054247c0263c400d0" +checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" dependencies = [ - "bitflags", - "proc-macro2", - "proc-macro2-diagnostics", - "quote", - "syn", + "generic-array", ] [[package]] name = "digest" -version = "0.9.0" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d3dd60d1080a57a05ab032377049e0591415d2b31afd7028356dbf3cc6dcb066" +checksum = "adfbc57365a37acbd2ebf2b64d7e69bb766e2fea813521ed536f5d0520dcf86c" dependencies = [ - "generic-array", + "block-buffer 0.10.3", + "crypto-common", + "subtle", ] [[package]] name = "directories" -version = "3.0.2" +version = "4.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e69600ff1703123957937708eb27f7a564e48885c537782722ed0ba3189ce1d7" +checksum = "f51c5d4ddabd36886dd3e1438cb358cdcb0d7c499cb99cb4ac2e38e18b5cb210" dependencies = [ "dirs-sys", ] [[package]] name = "dirs-sys" -version = "0.3.6" +version = "0.3.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "03d86534ed367a67548dc68113a0f5db55432fdfbb6e6f9d77704397d95d5780" +checksum = "1b1d1d91c932ef41c0f2663aa8b0ca0342d444d842c06914aa0a7e352d0bada6" dependencies = [ "libc", "redox_users", "winapi", ] -[[package]] -name = "discard" -version = "1.0.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "212d0f5754cb6769937f4501cc0e67f4f4483c8d2c3e1e922ee9edbe4ab4c7c0" - -[[package]] -name = "dtoa" -version = "0.4.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56899898ce76aaf4a0f24d914c97ea6ed976d42fec6ad33fcbb0a1103e07b2b0" - [[package]] name = "ed25519" -version = "1.2.0" +version = "1.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4620d40f6d2601794401d6dd95a5cf69b6c157852539470eeda433a99b3c0efc" +checksum = "1e9c280362032ea4203659fc489832d0204ef09f247a0506f170dafcac08c369" dependencies = [ "signature", ] @@ -565,24 +654,24 @@ dependencies = [ [[package]] name = "either" -version = "1.6.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e78d4f1cc4ae33bbfc157ed5d5a5ef3bc29227303d595861deb238fcec4e9457" +checksum = "90e5c1c8368803113bf0c9584fc495a58b86dc8a29edbf8fe877d21d9507e797" [[package]] name = "encoding_rs" -version = "0.8.28" +version = "0.8.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "80df024fbc5ac80f87dfef0d9f5209a252f2a497f7f42944cff24d8253cac065" +checksum = 
"9852635589dc9f9ea1b6fe9f05b50ef208c85c834a562f0c6abb1c475736ec2b" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", ] [[package]] name = "enum-as-inner" -version = "0.3.3" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7c5f0096a91d210159eceb2ff5e1c4da18388a170e1e3ce948aac9c8fdbbf595" +checksum = "c9720bba047d567ffc8a3cba48bf19126600e249ab7f128e9233e6376976a116" dependencies = [ "heck", "proc-macro2", @@ -604,9 +693,9 @@ checksum = "7360491ce676a36bf9bb3c56c1aa791658183a54d2744120f27285738d90465a" [[package]] name = "figment" -version = "0.10.6" +version = "0.10.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "790b4292c72618abbab50f787a477014fe15634f96291de45672ce46afe122df" +checksum = "4e56602b469b2201400dec66a66aec5a9b8761ee97cd1b8c96ab2483fcc16cc9" dependencies = [ "atomic", "pear", @@ -617,18 +706,27 @@ dependencies = [ ] [[package]] -name = "fnv" -version = "1.0.7" +name = "flate2" +version = "1.0.24" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f82b0f4c27ad9f8bfd1f3208d882da2b09c301bc1c828fd3a00d0216d2fbbff6" +dependencies = [ + "crc32fast", + "miniz_oxide", +] + +[[package]] +name = "fnv" +version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" [[package]] name = "form_urlencoded" -version = "1.0.1" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fc25a87fa4fd2094bffb06925852034d90a17f0d1e05197d4956d3555752191" +checksum = "a9c384f161156f5260c24a097c56119f9be8c798586aecc13afbcbe7b7e26bf8" dependencies = [ - "matches", "percent-encoding", ] @@ -642,11 +740,17 @@ dependencies = [ "winapi", ] +[[package]] +name = "fs_extra" +version = "1.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2022715d62ab30faffd124d40b76f4134a550a87792276512b18d63272333394" + [[package]] name = "futures" -version = "0.3.17" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a12aa0eb539080d55c3f2d45a67c3b58b6b0773c1a3ca2dfec66d58c97fd66ca" +checksum = "7f21eda599937fba36daeb58a22e8f5cee2d14c4a17b5b7739c7c8e5e3b8230c" dependencies = [ "futures-channel", "futures-core", @@ -659,9 +763,9 @@ dependencies = [ [[package]] name = "futures-channel" -version = "0.3.17" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5da6ba8c3bb3c165d3c7319fc1cc8304facf1fb8db99c5de877183c08a273888" +checksum = "30bdd20c28fadd505d0fd6712cdfcb0d4b5648baf45faef7f852afb2399bb050" dependencies = [ "futures-core", "futures-sink", @@ -669,15 +773,15 @@ dependencies = [ [[package]] name = "futures-core" -version = "0.3.17" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d1c26957f23603395cd326b0ffe64124b818f4449552f960d815cfba83a53d" +checksum = "4e5aa3de05362c3fb88de6531e6296e85cde7739cccad4b9dfeeb7f6ebce56bf" [[package]] name = "futures-executor" -version = "0.3.17" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "45025be030969d763025784f7f355043dc6bc74093e4ecc5000ca4dc50d8745c" +checksum = "9ff63c23854bee61b6e9cd331d523909f238fc7636290b96826e9cfa5faa00ab" dependencies = [ "futures-core", "futures-task", @@ -686,18 +790,16 @@ dependencies = [ [[package]] name = "futures-io" -version = "0.3.17" +version = "0.3.24" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "522de2a0fe3e380f1bc577ba0474108faf3f6b18321dbf60b3b9c39a75073377" +checksum = "bbf4d2a7a308fd4578637c0b17c7e1c7ba127b8f6ba00b29f717e9655d85eb68" [[package]] name = "futures-macro" -version = "0.3.17" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "18e4a4b95cea4b4ccbcf1c5675ca7c4ee4e9e75eb79944d07defde18068f79bb" +checksum = "42cd15d1c7456c04dbdf7e88bcd69760d74f3a798d6444e16974b505b0e62f17" dependencies = [ - "autocfg", - "proc-macro-hack", "proc-macro2", "quote", "syn", @@ -705,23 +807,22 @@ dependencies = [ [[package]] name = "futures-sink" -version = "0.3.17" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36ea153c13024fe480590b3e3d4cad89a0cfacecc24577b68f86c6ced9c2bc11" +checksum = "21b20ba5a92e727ba30e72834706623d94ac93a725410b6a6b6fbc1b07f7ba56" [[package]] name = "futures-task" -version = "0.3.17" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d3d00f4eddb73e498a54394f228cd55853bdf059259e8e7bc6e69d408892e99" +checksum = "a6508c467c73851293f390476d4491cf4d227dbabcd4170f3bb6044959b294f1" [[package]] name = "futures-util" -version = "0.3.17" +version = "0.3.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "36568465210a3a6ee45e1f165136d68671471a501e632e9a98d96872222b5481" +checksum = "44fb6cb1be61cc1d2e43b262516aafcf63b241cffdb1d3fa115f91d9c7b09c90" dependencies = [ - "autocfg", "futures-channel", "futures-core", "futures-io", @@ -731,38 +832,14 @@ dependencies = [ "memchr", "pin-project-lite", "pin-utils", - "proc-macro-hack", - "proc-macro-nested", "slab", ] -[[package]] -name = "fxhash" -version = "0.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c31b6d751ae2c7f11320402d34e41349dd1016f8d5d45e48c4312bc8625af50c" -dependencies = [ - "byteorder", -] - -[[package]] -name = "generator" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1d9279ca822891c1a4dae06d185612cf8fc6acfe5dff37781b41297811b12ee" -dependencies = [ - "cc", - "libc", - "log", - "rustversion", - "winapi", -] - [[package]] name = "generic-array" -version = "0.14.4" +version = "0.14.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501466ecc8a30d1d3b7fc9229b122b2ce8ed6e9d9223f1138d4babb253e51817" +checksum = "bff49e947297f3312447abdca79f45f4738097cc82b06e72054d2223f601f1b9" dependencies = [ "typenum", "version_check", @@ -774,27 +851,27 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8fc3cb4d91f53b50155bdcfd23f6a4c39ae1969c2ae85982b135750cccaf5fce" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", "wasi 0.9.0+wasi-snapshot-preview1", ] [[package]] name = "getrandom" -version = "0.2.3" +version = "0.2.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fcd999463524c52659517fe2cea98493cfe485d10565e7b0fb07dbba7ad2753" +checksum = "4eb1a864a501629691edf6c15a593b7a51eebaa1e8468e9ddc623de7c9b58ec6" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "libc", - "wasi 0.10.2+wasi-snapshot-preview1", + "wasi 0.11.0+wasi-snapshot-preview1", ] [[package]] name = "gif" -version = "0.11.2" +version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a668f699973d0f573d15749b7002a9ac9e1f9c6b220e7b165601334c173d8de" +checksum = 
"3edd93c6756b4dfaf2709eafcc345ba2636565295c198a9cfbf75fa5e3e00b06" dependencies = [ "color_quant", "weezl", @@ -808,9 +885,9 @@ checksum = "9b919933a397b79c37e33b77bb2aa3dc8eb6e165ad809e58ff75bc7db2e34574" [[package]] name = "h2" -version = "0.3.4" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7f3675cfef6a30c8031cf9e6493ebdc3bb3272a3fea3923c4210d1830e6a472" +checksum = "5ca32592cf21ac7ccab1825cd87f6c9b3d9022c44d086172ed0966bec8af30be" dependencies = [ "bytes", "fnv", @@ -827,31 +904,53 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.11.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ab5ef0d4909ef3724cc8cce6ccc8572c5c817592e9285f5464f8e86f8bd3726e" +checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" dependencies = [ "ahash", ] [[package]] name = "hashlink" -version = "0.7.0" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7249a3129cbc1ffccd74857f81464a323a152173cdb134e0fd81bc803b29facf" +checksum = "69fe1fcf8b4278d860ad0548329f892a3631fb63f82574df68275f34cdbe0ffa" dependencies = [ "hashbrown", ] [[package]] -name = "heck" -version = "0.3.3" +name = "headers" +version = "0.3.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f3e372db8e5c0d213e0cd0b9be18be2aca3d44cf2fe30a9d46a65581cd454584" +dependencies = [ + "base64", + "bitflags", + "bytes", + "headers-core", + "http", + "httpdate", + "mime", + "sha1", +] + +[[package]] +name = "headers-core" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6d621efb26863f0e9924c6ac577e8275e5e6b77455db64ffa6c65c904e9e132c" +checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" dependencies = [ - "unicode-segmentation", + "http", ] +[[package]] +name = "heck" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2540771e65fc8cb83cd6e8a237f70c319bd5c29f78ed1084ba5d50eeac86f7f9" + [[package]] name = "heed" version = "0.10.6" @@ -897,6 +996,15 @@ dependencies = [ "libc", ] +[[package]] +name = "hmac" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6c49c37c09c17a53d937dfbb742eb3a961d65a994e6bcdcf37e7399d0cc8ab5e" +dependencies = [ + "digest 0.10.5", +] + [[package]] name = "hostname" version = "0.3.1" @@ -910,9 +1018,9 @@ dependencies = [ [[package]] name = "http" -version = "0.2.4" +version = "0.2.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "527e8c9ac747e28542699a951517aa9a6945af506cd1f2e1b53a576c17b6cc11" +checksum = "75f43d41e26995c17e71ee126451dd3941010b0514a81a9d11f3b341debc2399" dependencies = [ "bytes", "fnv", @@ -921,32 +1029,38 @@ dependencies = [ [[package]] name = "http-body" -version = "0.4.3" +version = "0.4.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "399c583b2979440c60be0821a6199eca73bc3c8dcd9d070d75ac726e2c6186e5" +checksum = "d5f38f16d184e36f2408a55281cd658ecbd3ca05cce6d6510a176eca393e26d1" dependencies = [ "bytes", "http", "pin-project-lite", ] +[[package]] +name = "http-range-header" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bfe8eed0a9285ef776bb792479ea3834e8b94e13d615c2f66d03dd50a435a29" + [[package]] name = "httparse" -version = "1.5.1" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" 
-checksum = "acd94fdbe1d4ff688b67b04eee2e17bd50995534a61539e45adfefb45e5e5503" +checksum = "d897f394bad6a705d5f4104762e116a75639e470d80901eed05a860a95cb1904" [[package]] name = "httpdate" -version = "1.0.1" +version = "1.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6456b8a6c8f33fee7d958fcd1b60d55b11940a79e63ae87013e6d22e26034440" +checksum = "c4a1e36c821dbe04574f602848a19f742f4fb3c98d40449f11bcad18d6b17421" [[package]] name = "hyper" -version = "0.14.12" +version = "0.14.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13f67199e765030fa08fe0bd581af683f0d5bc04ea09c2b1102012c5fb90e7fd" +checksum = "02c929dc5c39e335a03c405292728118860721b10190d98c2a0f0efd5baafbac" dependencies = [ "bytes", "futures-channel", @@ -959,7 +1073,7 @@ dependencies = [ "httpdate", "itoa", "pin-project-lite", - "socket2 0.4.1", + "socket2", "tokio", "tower-service", "tracing", @@ -968,17 +1082,15 @@ dependencies = [ [[package]] name = "hyper-rustls" -version = "0.22.1" +version = "0.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f9f7a97316d44c0af9b0301e65010573a853a9fc97046d7331d7f6bc0fd5a64" +checksum = "d87c48c02e0dc5e3b849a2041db3029fd066650f8f717c07bf8ed78ccb895cac" dependencies = [ - "futures-util", + "http", "hyper", - "log", "rustls", "tokio", "tokio-rustls", - "webpki 0.21.4", ] [[package]] @@ -992,18 +1104,27 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "idna" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e14ddfc70884202db2244c223200c204c2bda1bc6e0998d11b5e024d657209e6" +dependencies = [ + "unicode-bidi", + "unicode-normalization", +] + [[package]] name = "image" -version = "0.23.14" +version = "0.24.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24ffcb7e7244a9bf19d35bf2883b9c080c4ced3c07a9895572178cdb8f13f6a1" +checksum = "bd8e4fb07cf672b1642304e731ef8a6a4c7891d67bb4fd4f5ce58cd6ed86803c" dependencies = [ "bytemuck", "byteorder", "color_quant", "gif", "jpeg-decoder", - "num-iter", "num-rational", "num-traits", "png", @@ -1011,127 +1132,109 @@ dependencies = [ [[package]] name = "indexmap" -version = "1.7.0" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc633605454125dec4b66843673f01c7df2b89479b32e0ed634e43a91cff62a5" +checksum = "10a35a97730320ffe8e2d410b5d3b69279b98d2c14bdb8b70ea89ecf7888d41e" dependencies = [ "autocfg", "hashbrown", "serde", ] -[[package]] -name = "indoc" -version = "1.0.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5a75aeaaef0ce18b58056d306c27b07436fbb34b8816c53094b76dd81803136" -dependencies = [ - "unindent", -] - [[package]] name = "inlinable_string" -version = "0.1.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3094308123a0e9fd59659ce45e22de9f53fc1d2ac6e1feb9fef988e4f76cad77" - -[[package]] -name = "instant" -version = "0.1.10" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bee0328b1209d157ef001c94dd85b4f8f64139adb0eac2659f4b08382b2f474d" -dependencies = [ - "cfg-if 1.0.0", -] +checksum = "c8fae54786f62fb2918dcfae3d568594e50eb9b5c25bf04371af6fe7516452fb" [[package]] name = "integer-encoding" -version = "1.1.7" +version = "3.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48dc51180a9b377fd75814d0cc02199c20f8e99433d6762f650d39cdbbd3b56f" +checksum = 
"8bb03732005da905c88227371639bf1ad885cc712789c011c31c5fb3ab3ccf02" [[package]] name = "ipconfig" -version = "0.2.2" +version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f7e2f18aece9709094573a9f24f483c4f65caa4298e2f7ae1b71cc65d853fad7" +checksum = "723519edce41262b05d4143ceb95050e4c614f483e78e9fd9e39a8275a84ad98" dependencies = [ - "socket2 0.3.19", + "socket2", "widestring", "winapi", - "winreg 0.6.2", + "winreg", ] [[package]] name = "ipnet" -version = "2.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "68f2d64f2edebec4ce84ad108148e67e1064789bee435edc5b60ad398714a3a9" - -[[package]] -name = "itertools" -version = "0.9.0" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "284f18f85651fe11e8a991b2adb42cb078325c996ed026d994719efcfca1d54b" -dependencies = [ - "either", -] +checksum = "879d54834c8c76457ef4293a689b2a8c59b076067ad77b15efafbb05f92a592b" [[package]] name = "itertools" -version = "0.10.1" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69ddb889f9d0d08a67338271fa9b62996bc788c7796a5c18cf057420aaed5eaf" +checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" dependencies = [ "either", ] [[package]] name = "itoa" -version = "0.4.8" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" +checksum = "4217ad341ebadf8d8e724e264f13e593e0648f5b3e94b3896a5df283be015ecc" [[package]] name = "jobserver" -version = "0.1.24" +version = "0.1.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "af25a77299a7f711a01975c35a6a424eb6862092cc2d6c72c4ed6cbc56dfc1fa" +checksum = "068b1ee6743e4d11fb9c6a1e6064b3693a1b600e7f5f5988047d98b3dc9fb90b" dependencies = [ "libc", ] [[package]] name = "jpeg-decoder" -version = "0.1.22" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "229d53d58899083193af11e15917b5640cd40b29ff475a1fe4ef725deb02d0f2" +checksum = "9478aa10f73e7528198d75109c8be5cd7d15fb530238040148d5f9a22d4c5b3b" [[package]] name = "js-sys" -version = "0.3.53" +version = "0.3.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4bf49d50e2961077d9c99f4b7997d770a1114f087c3c2e0069b36c13fc2979d" +checksum = "49409df3e3bf0856b916e2ceaca09ee28e6871cf7d9ce97a692cacfdb2a25a47" dependencies = [ "wasm-bindgen", ] [[package]] name = "js_int" -version = "0.2.1" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d937f95470b270ce8b8950207715d71aa8e153c0d44c6684d59397ed4949160a" +dependencies = [ + "serde", +] + +[[package]] +name = "js_option" +version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "defaba9bcd19568a4b4b3736b23e368e5b75e3ea126fd4cb3e4ad2ea5af274fd" +checksum = "68421373957a1593a767013698dbf206e2b221eefe97a44d98d18672ff38423c" dependencies = [ "serde", ] [[package]] name = "jsonwebtoken" -version = "7.2.0" +version = "8.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "afabcc15e437a6484fc4f12d0fd63068fe457bf93f1c148d3d9649c60b103f32" +checksum = "1aa4b4af834c6cfd35d8763d359661b90f2e45d8f750a0849156c7f4671af09c" dependencies = [ - "base64 0.12.3", + "base64", "pem", "ring", "serde", @@ -1145,17 +1248,45 @@ version = "1.4.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646" +[[package]] +name = "lazycell" +version = "1.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" + [[package]] name = "libc" -version = "0.2.101" +version = "0.2.134" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "329c933548736bc49fd575ee68c89e8be4d260064184389a5b77517cddd99ffb" + +[[package]] +name = "libloading" +version = "0.7.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3cb00336871be5ed2c8ed44b60ae9959dc5b9f08539422ed43f09e34ecaeba21" +checksum = "efbc0f03f9a775e9f6aed295c6a1ba2253c5757a9e03d55c6caa46a681abcddd" +dependencies = [ + "cfg-if", + "winapi", +] + +[[package]] +name = "librocksdb-sys" +version = "6.20.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c309a9d2470844aceb9a4a098cf5286154d20596868b75a6b36357d2bb9ca25d" +dependencies = [ + "bindgen", + "cc", + "glob", + "libc", +] [[package]] name = "libsqlite3-sys" -version = "0.22.2" +version = "0.25.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "290b64917f8b0cb885d9de0f9959fe1f775d7fa12f1da2db9001c1c8ab60f89d" +checksum = "9f0455f2c1bc9a7caa792907026e469c1d91761fb0ea37cbb16427c77280cf35" dependencies = [ "cc", "pkg-config", @@ -1164,15 +1295,15 @@ dependencies = [ [[package]] name = "linked-hash-map" -version = "0.5.4" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fb9b38af92608140b86b693604b9ffcc5824240a484d1ecd4795bacb2fe88f3" +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "lmdb-rkv-sys" -version = "0.11.0" +version = "0.11.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b27470ac25167b3afdfb6af8fcd3bc1be67de50ffbdaf4073378cfded6ae24a5" +checksum = "61b9ce6b3be08acefa3003c57b7565377432a89ec24476bbe72e11d101f852fe" dependencies = [ "cc", "libc", @@ -1181,33 +1312,21 @@ dependencies = [ [[package]] name = "lock_api" -version = "0.4.5" +version = "0.4.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "712a4d093c9976e24e7dbca41db895dabcbac38eb5f4045393d17a95bdfb1109" +checksum = "435011366fe56583b16cf956f9df0095b405b82d76425bc8981c0e22e60ec4df" dependencies = [ + "autocfg", "scopeguard", ] [[package]] name = "log" -version = "0.4.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "51b9bbe6c47d51fc3e1a9b945965946b4c44142ab8792c50835a980d362c2710" -dependencies = [ - "cfg-if 1.0.0", -] - -[[package]] -name = "loom" -version = "0.5.1" +version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2111607c723d7857e0d8299d5ce7a0bf4b844d3e44f8de136b13da513eaf8fc4" +checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e" dependencies = [ - "cfg-if 1.0.0", - "generator", - "scoped-tls", - "serde", - "serde_json", + "cfg-if", ] [[package]] @@ -1233,9 +1352,9 @@ checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" [[package]] name = "matchers" -version = "0.0.1" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f099785f7595cc4b4553a174ce30dd7589ef93391ff414dbb67f62392b9e0ce1" +checksum = "8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" 
dependencies = [ "regex-automata", ] @@ -1246,17 +1365,23 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a3e378b66a060d48947b590737b30a1be76706c8dd7b8ba0f2fe3989c68a853f" +[[package]] +name = "matchit" +version = "0.5.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "73cbba799671b762df5a175adf59ce145165747bb891505c43d09aefbbf38beb" + [[package]] name = "memchr" -version = "2.4.1" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "308cc39be01b73d0d18f82a0e7b2a3df85245f84af96fdddc5d202d27e47b86a" +checksum = "2dffe52ecf27772e601905b7522cb4ef790d2cc203488bbd0e2fe85fcb74566d" [[package]] name = "memoffset" -version = "0.6.4" +version = "0.6.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59accc507f1338036a0477ef61afdae33cde60840f4dfe481319ce3ad116ddf9" +checksum = "5aa361d4faea93603064a027415f07bd8e1d5c88c9fbf68bf56a285428fd79ce" dependencies = [ "autocfg", ] @@ -1267,71 +1392,58 @@ version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2a60c7ce501c71e03a9c9c0d35b861413ae925bd979cc7a4e30d060069aaac8d" +[[package]] +name = "minimal-lexical" +version = "0.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" + [[package]] name = "miniz_oxide" -version = "0.3.7" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "791daaae1ed6889560f8c4359194f56648355540573244a5448a83ba1ecc7435" +checksum = "96590ba8f175222643a85693f33d26e9c8a015f599c216509b1a6894af675d34" dependencies = [ - "adler32", + "adler", ] [[package]] name = "mio" -version = "0.7.13" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8c2bdb6314ec10835cd3293dd268473a835c02b7b352e788be788b3c6ca6bb16" +checksum = "57ee1c23c7c63b0c9250c339ffdc69255f110b298b901b9f6c82547b7b87caaf" dependencies = [ "libc", "log", - "miow", - "ntapi", - "winapi", -] - -[[package]] -name = "miow" -version = "0.3.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9f1c5b025cda876f66ef43a113f91ebc9f4ccef34843000e0adf6ebbab84e21" -dependencies = [ - "winapi", + "wasi 0.11.0+wasi-snapshot-preview1", + "windows-sys", ] [[package]] -name = "multer" -version = "2.0.1" +name = "nom" +version = "7.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "408327e2999b839cd1af003fc01b2019a6c10a1361769542203f6fedc5179680" +checksum = "a8903e5a29a317527874d0402f867152a3d21c908bb0b933e416c65e301d4c36" dependencies = [ - "bytes", - "encoding_rs", - "futures-util", - "http", - "httparse", - "log", - "mime", - "spin 0.9.2", - "tokio", - "tokio-util", - "twoway", - "version_check", + "memchr", + "minimal-lexical", ] [[package]] -name = "ntapi" -version = "0.3.6" +name = "nu-ansi-term" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3f6bb902e437b6d86e03cce10a7e2af662292c5dfef23b65899ea3ac9354ad44" +checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" dependencies = [ + "overload", "winapi", ] [[package]] name = "num-bigint" -version = "0.2.6" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "090c7f9998ee0ff65aa5b723e4009f7b217707f1fb5ea551329cc4d6231fb304" +checksum = 
"f93ab6289c7b344a8a9f60f88d80aa20032336fe78da341afc91c8a2341fc75f" dependencies = [ "autocfg", "num-integer", @@ -1340,19 +1452,19 @@ dependencies = [ [[package]] name = "num-integer" -version = "0.1.44" +version = "0.1.45" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2cc698a63b549a70bc047073d2949cce27cd1c7b0a4a862d08a8031bc2801db" +checksum = "225d3389fb3509a24c93f5c29eb6bde2586b98d9f016636dff58d7c6f7569cd9" dependencies = [ "autocfg", "num-traits", ] [[package]] -name = "num-iter" -version = "0.1.42" +name = "num-rational" +version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2021c8337a54d21aca0d59a92577a029af9431cb59b909b03252b9c164fad59" +checksum = "0638a1c9d0a3c0914158145bc76cff373a75a627e6ecbfb71cbe6f453a5a19b0" dependencies = [ "autocfg", "num-integer", @@ -1360,40 +1472,38 @@ dependencies = [ ] [[package]] -name = "num-rational" -version = "0.3.2" +name = "num-traits" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "12ac428b1cb17fce6f731001d307d351ec70a6d202fc2e60f7d4c5e42d8f4f07" +checksum = "578ede34cf02f8924ab9447f50c28075b4d3e5b269972345e7e0372b38c6cdcd" dependencies = [ "autocfg", - "num-integer", - "num-traits", ] [[package]] -name = "num-traits" -version = "0.2.14" +name = "num_cpus" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a64b1ec5cda2586e284722486d802acf1f7dbdc623e2bfc57e65ca1cd099290" +checksum = "19e64526ebdee182341572e50e9ad03965aa510cd94427a4549448f285e957a1" dependencies = [ - "autocfg", + "hermit-abi", + "libc", ] [[package]] -name = "num_cpus" -version = "1.13.0" +name = "num_threads" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05499f3756671c15885fee9034446956fff3f243d6077b91e5767df161f766b3" +checksum = "2819ce041d2ee131036f4fc9d6ae7ae125a3a40e97ba64d04fe799ad9dabbb44" dependencies = [ - "hermit-abi", "libc", ] [[package]] name = "once_cell" -version = "1.8.0" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "692fcb63b64b1758029e0a96ee63e049ce8c5948587f2f7208df04625e5f6b56" +checksum = "e82dad04139b71a90c080c8463fe0dc7902db5192d939bd0950f074d014339e1" [[package]] name = "opaque-debug" @@ -1403,37 +1513,30 @@ checksum = "624a8340c38c1b80fd549087862da4ba43e08858af025b236e509b6649fc13d5" [[package]] name = "openssl-probe" -version = "0.1.4" +version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "28988d872ab76095a6e6ac88d99b54fd267702734fd7ffe610ca27f533ddb95a" +checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "opentelemetry" -version = "0.16.0" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e1cf9b1c4e9a6c4de793c632496fa490bdc0e1eea73f0c91394f7b6990935d22" +checksum = "69d6c3d7288a106c0a363e4b0e8d308058d56902adefb16f4936f417ffef086e" dependencies = [ - "async-trait", - "crossbeam-channel", - "futures", - "js-sys", - "lazy_static", - "percent-encoding", - "pin-project", - "rand 0.8.4", - "thiserror", - "tokio", - "tokio-stream", + "opentelemetry_api", + "opentelemetry_sdk", ] [[package]] name = "opentelemetry-jaeger" -version = "0.15.0" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db22f492873ea037bc267b35a0e8e4fb846340058cb7c864efe3d0bf23684593" +checksum = 
"1e785d273968748578931e4dc3b4f5ec86b26e09d9e0d66b55adda7fce742f7a" dependencies = [ "async-trait", - "lazy_static", + "futures", + "futures-executor", + "once_cell", "opentelemetry", "opentelemetry-semantic-conventions", "thiserror", @@ -1443,62 +1546,107 @@ dependencies = [ [[package]] name = "opentelemetry-semantic-conventions" -version = "0.8.0" +version = "0.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffeac823339e8b0f27b961f4385057bf9f97f2863bc745bd015fd6091f2270e9" +checksum = "9b02e0230abb0ab6636d18e2ba8fa02903ea63772281340ccac18e0af3ec9eeb" dependencies = [ "opentelemetry", ] [[package]] -name = "ordered-float" -version = "1.1.1" +name = "opentelemetry_api" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3305af35278dd29f46fcdd139e0b1fbfae2153f0e5928b39b035542dd31e37b7" +checksum = "c24f96e21e7acc813c7a8394ee94978929db2bcc46cf6b5014fc612bf7760c22" dependencies = [ - "num-traits", + "futures-channel", + "futures-util", + "indexmap", + "js-sys", + "once_cell", + "pin-project-lite", + "thiserror", ] [[package]] -name = "page_size" -version = "0.4.2" +name = "opentelemetry_sdk" +version = "0.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eebde548fbbf1ea81a99b128872779c437752fb99f217c45245e1a61dcd9edcd" +checksum = "1ca41c4933371b61c2a2f214bf16931499af4ec90543604ec828f7a625c09113" dependencies = [ - "libc", - "winapi", + "async-trait", + "crossbeam-channel", + "futures-channel", + "futures-executor", + "futures-util", + "once_cell", + "opentelemetry_api", + "percent-encoding", + "rand 0.8.5", + "thiserror", + "tokio", + "tokio-stream", ] [[package]] -name = "parking_lot" -version = "0.11.2" +name = "ordered-float" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" +checksum = "3305af35278dd29f46fcdd139e0b1fbfae2153f0e5928b39b035542dd31e37b7" +dependencies = [ + "num-traits", +] + +[[package]] +name = "os_str_bytes" +version = "6.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ff7415e9ae3fff1225851df9e0d9e4e5479f947619774677a63572e55e80eff" + +[[package]] +name = "overload" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" + +[[package]] +name = "page_size" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eebde548fbbf1ea81a99b128872779c437752fb99f217c45245e1a61dcd9edcd" +dependencies = [ + "libc", + "winapi", +] + +[[package]] +name = "parking_lot" +version = "0.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ - "instant", "lock_api", "parking_lot_core", ] [[package]] name = "parking_lot_core" -version = "0.8.5" +version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d76e8e1493bcac0d2766c42737f34458f1c8c50c0d23bcb24ea953affb273216" +checksum = "09a279cbf25cb0757810394fbc1e359949b59e348145c643a939a525692e6929" dependencies = [ - "cfg-if 1.0.0", - "instant", + "cfg-if", "libc", "redox_syscall", "smallvec", - "winapi", + "windows-sys", ] [[package]] name = "paste" -version = "1.0.5" +version = "1.0.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"acbf547ad0c65e31259204bd90935776d1c693cec2f4ff7abb7a1bbbd40dfe58" +checksum = "b1de2e551fb905ac83f73f7aedf2f0cb4a0da7e35efa24a202a936269f1f18e1" [[package]] name = "pear" @@ -1523,37 +1671,57 @@ dependencies = [ "syn", ] +[[package]] +name = "peeking_take_while" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099" + [[package]] name = "pem" -version = "0.8.3" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd56cbd21fea48d0c440b41cd69c589faacade08c992d9a54e471b79d0fd13eb" +checksum = "03c64931a1a212348ec4f3b4362585eca7159d0d09cbdf4a7f74f02173596fd4" dependencies = [ - "base64 0.13.0", - "once_cell", - "regex", + "base64", ] [[package]] name = "percent-encoding" -version = "2.1.0" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d4fd5641d01c8f18a23da7b6fe29298ff4b55afcccdf78973b24cf3175fee32e" +checksum = "478c572c3d73181ff3c2539045f6eb99e5491218eae919370993b890cdbdd98e" + +[[package]] +name = "persy" +version = "1.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5511189f4dbd737283b0dd2ff6715f2e35fd0d3e1ddf953ed6a772e439e1f73f" +dependencies = [ + "crc", + "data-encoding", + "fs2", + "linked-hash-map", + "rand 0.8.5", + "thiserror", + "unsigned-varint", + "zigzag", +] [[package]] name = "pin-project" -version = "1.0.8" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "576bc800220cc65dac09e99e97b08b358cfab6e17078de8dc5fee223bd2d0c08" +checksum = "ad29a609b6bcd67fee905812e544992d216af9d755757c05ed2d0e15a74c6ecc" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.0.8" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e8fe8163d14ce7f0cdac2e040116f22eac817edabff0be91e8aff7e9accf389" +checksum = "069bdb1e05adc7a8990dce9cc75370895fbe4e3d58b9b73bf1aee56359344a55" dependencies = [ "proc-macro2", "quote", @@ -1562,9 +1730,9 @@ dependencies = [ [[package]] name = "pin-project-lite" -version = "0.2.7" +version = "0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d31d11c69a6b52a174b42bdc0c30e5e11670f90788b2c471c31c1d17d449443" +checksum = "e0a7ae3ac2f1173085d398531c705756c94a4c56843785df85a60c1a0afac116" [[package]] name = "pin-utils" @@ -1574,68 +1742,80 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pkcs8" -version = "0.7.5" +version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fbee84ed13e44dd82689fa18348a49934fa79cc774a344c42fc9b301c71b140a" +checksum = "9eca2c590a5f85da82668fa685c09ce2888b9430e83299debf1f34b65fd4a4ba" dependencies = [ "der", "spki", - "zeroize", ] [[package]] name = "pkg-config" -version = "0.3.19" +version = "0.3.25" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3831453b3449ceb48b6d9c7ad7c96d5ea673e9b470a1dc578c2ce6521230884c" +checksum = "1df8c4ec4b0627e53bdf214615ad287367e482558cf84b109250b37464dc03ae" [[package]] name = "png" -version = "0.16.8" +version = "0.17.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3c3287920cb847dee3de33d301c463fba14dda99db24214ddf93f83d3021f4c6" +checksum = "8f0e7f4c94ec26ff209cee506314212639d6c91b80afb82984819fafce9df01c" dependencies = [ "bitflags", "crc32fast", - 
"deflate", + "flate2", "miniz_oxide", ] [[package]] name = "ppv-lite86" -version = "0.2.10" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac74c624d6b2d21f425f752262f42188365d7b8ff1aff74c82e45136510a4857" +checksum = "eb9f9e6e233e5c4a35559a617bf40a4ec447db2e84c20b55a6f83167b7e57872" [[package]] name = "proc-macro-crate" -version = "1.0.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "41fdbd1df62156fbc5945f4762632564d7d038153091c3fcf1067f6aef7cff92" +checksum = "eda0fc3b0fb7c975631757e14d9049da17374063edb6ebbcbc54d880d4fe94e9" dependencies = [ + "once_cell", "thiserror", "toml", ] [[package]] -name = "proc-macro-hack" -version = "0.5.19" +name = "proc-macro-error" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dbf0c48bc1d91375ae5c3cd81e3722dff1abcf81a30960240640d223f59fe0e5" +checksum = "da25490ff9892aab3fcf7c36f08cfb902dd3e71ca0f9f9517bea02a73a5ce38c" +dependencies = [ + "proc-macro-error-attr", + "proc-macro2", + "quote", + "syn", + "version_check", +] [[package]] -name = "proc-macro-nested" -version = "0.1.7" +name = "proc-macro-error-attr" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc881b2c22681370c6a780e47af9840ef841837bc98118431d4e1868bd0c1086" +checksum = "a1be40180e52ecc98ad80b184934baf3d0d29f979574e439af5a55274b35f869" +dependencies = [ + "proc-macro2", + "quote", + "version_check", +] [[package]] name = "proc-macro2" -version = "1.0.29" +version = "1.0.46" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9f5105d4fdaab20335ca9565e106a5d9b82b6219b5ba735731124ac6711d23d" +checksum = "94e2ef8dbfc347b10c094890f778ee2e36ca9bb4262e86dc99cd217e35f3470b" dependencies = [ - "unicode-xid", + "unicode-ident", ] [[package]] @@ -1659,9 +1839,9 @@ checksum = "a1d01941d82fa2ab50be1e79e6714289dd7cde78eba4c074bc5a4374f650dfe0" [[package]] name = "quote" -version = "1.0.9" +version = "1.0.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3d0b9745dc2debf507c8422de05d7226cc1f0644216dfdfead988f9b1ab32a7" +checksum = "bbe448f377a7d6961e30f5955f9b8d106c3f5e449d493ee1b125c1d43c2b5179" dependencies = [ "proc-macro2", ] @@ -1676,19 +1856,18 @@ dependencies = [ "libc", "rand_chacha 0.2.2", "rand_core 0.5.1", - "rand_hc 0.2.0", + "rand_hc", ] [[package]] name = "rand" -version = "0.8.4" +version = "0.8.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e7573632e6454cf6b99d7aac4ccca54be06da05aca2ef7423d22d27d4d4bcd8" +checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha 0.3.1", - "rand_core 0.6.3", - "rand_hc 0.3.1", + "rand_core 0.6.4", ] [[package]] @@ -1708,7 +1887,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core 0.6.3", + "rand_core 0.6.4", ] [[package]] @@ -1722,11 +1901,11 @@ dependencies = [ [[package]] name = "rand_core" -version = "0.6.3" +version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d34f1408f55294453790c48b2f1ebbb1c5b4b7563eb1f418bcfcfdbb06ebb4e7" +checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.3", + "getrandom 0.2.7", ] [[package]] @@ -1738,59 +1917,31 @@ dependencies = [ "rand_core 
0.5.1", ] -[[package]] -name = "rand_hc" -version = "0.3.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d51e9f596de227fda2ea6c84607f5558e196eeaf43c986b724ba4fb8fdf497e7" -dependencies = [ - "rand_core 0.6.3", -] - [[package]] name = "redox_syscall" -version = "0.2.10" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8383f39639269cde97d255a32bdb68c047337295414940c68bdd30c2e13203ff" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" dependencies = [ "bitflags", ] [[package]] name = "redox_users" -version = "0.4.0" +version = "0.4.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "528532f3d801c87aec9def2add9ca802fe569e44a544afe633765267840abe64" +checksum = "b033d837a7cf162d7993aded9304e30a83213c648b6e389db233191f891e5c2b" dependencies = [ - "getrandom 0.2.3", + "getrandom 0.2.7", "redox_syscall", -] - -[[package]] -name = "ref-cast" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "300f2a835d808734ee295d45007adacb9ebb29dd3ae2424acfa17930cae541da" -dependencies = [ - "ref-cast-impl", -] - -[[package]] -name = "ref-cast-impl" -version = "1.0.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c38e3aecd2b21cb3959637b883bb3714bc7e43f0268b9a29d3743ee3e55cdd2" -dependencies = [ - "proc-macro2", - "quote", - "syn", + "thiserror", ] [[package]] name = "regex" -version = "1.5.4" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d07a8629359eb56f1e2fb1652bb04212c072a87ba68546a04065d525673ac461" +checksum = "4c4eb3267174b8c6c2f654116623910a0fef09c4753f8dd83db29c48a0df988b" dependencies = [ "aho-corasick", "memchr", @@ -1808,30 +1959,21 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.6.25" +version = "0.6.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f497285884f3fcff424ffc933e56d7cbca511def0c9831a7f9b5f6153e3cc89b" - -[[package]] -name = "remove_dir_all" -version = "0.5.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3acd125665422973a33ac9d3dd2df85edad0f4ae9b00dafb1a05e43a9f5ef8e7" -dependencies = [ - "winapi", -] +checksum = "a3f87b73ce11b1619a3c6332f45341e0047173771e8b8b73f87bfeefb7b56244" [[package]] name = "reqwest" -version = "0.11.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "246e9f61b9bb77df069a947682be06e31ac43ea37862e244a69f177694ea6d22" +version = "0.11.9" +source = "git+https://github.com/timokoesters/reqwest?rev=57b7cf4feb921573dfafad7d34b9ac6e44ead0bd#57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" dependencies = [ - "base64 0.13.0", + "base64", "bytes", "encoding_rs", "futures-core", "futures-util", + "h2", "http", "http-body", "hyper", @@ -1845,7 +1987,9 @@ dependencies = [ "pin-project-lite", "rustls", "rustls-native-certs", + "rustls-pemfile 0.2.1", "serde", + "serde_json", "serde_urlencoded", "tokio", "tokio-rustls", @@ -1854,7 +1998,7 @@ dependencies = [ "wasm-bindgen", "wasm-bindgen-futures", "web-sys", - "winreg 0.7.0", + "winreg", ] [[package]] @@ -1876,161 +2020,54 @@ dependencies = [ "cc", "libc", "once_cell", - "spin 0.5.2", + "spin", "untrusted", "web-sys", "winapi", ] [[package]] -name = "rocket" -version = "0.5.0-rc.1" +name = "rocksdb" +version = "0.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"0a71c18c42a0eb15bf3816831caf0dad11e7966f2a41aaf486a701979c4dd1f2" +checksum = "7a62eca5cacf2c8261128631bed9f045598d40bfbe4b29f5163f0f802f8f44a7" dependencies = [ - "async-stream", - "async-trait", - "atomic", - "atty", - "binascii", - "bytes", - "either", - "figment", - "futures", - "indexmap", - "log", - "memchr", - "multer", - "num_cpus", - "parking_lot", - "pin-project-lite", - "rand 0.8.4", - "ref-cast", - "rocket_codegen", - "rocket_http", - "serde", - "state", - "tempfile", - "time 0.2.27", - "tokio", - "tokio-stream", - "tokio-util", - "ubyte", - "version_check", - "yansi", -] - -[[package]] -name = "rocket_codegen" -version = "0.5.0-rc.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "66f5fa462f7eb958bba8710c17c5d774bbbd59809fa76fb1957af7e545aea8bb" -dependencies = [ - "devise", - "glob", - "indexmap", - "proc-macro2", - "quote", - "rocket_http", - "syn", - "unicode-xid", -] - -[[package]] -name = "rocket_http" -version = "0.5.0-rc.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23c8b7d512d2fcac2316ebe590cde67573844b99e6cc9ee0f53375fa16e25ebd" -dependencies = [ - "cookie", - "either", - "http", - "hyper", - "indexmap", - "log", - "memchr", - "mime", - "parking_lot", - "pear", - "percent-encoding", - "pin-project-lite", - "ref-cast", - "serde", - "smallvec", - "stable-pattern", - "state", - "time 0.2.27", - "tokio", - "tokio-rustls", - "uncased", + "libc", + "librocksdb-sys", ] [[package]] name = "ruma" -version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +version = "0.7.4" +source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" dependencies = [ "assign", "js_int", - "ruma-api", + "js_option", "ruma-appservice-api", "ruma-client-api", "ruma-common", - "ruma-events", "ruma-federation-api", - "ruma-identifiers", "ruma-identity-service-api", "ruma-push-gateway-api", - "ruma-serde", "ruma-signatures", "ruma-state-res", ] -[[package]] -name = "ruma-api" -version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" -dependencies = [ - "bytes", - "http", - "percent-encoding", - "ruma-api-macros", - "ruma-identifiers", - "ruma-serde", - "serde", - "serde_json", - "thiserror", -] - -[[package]] -name = "ruma-api-macros" -version = "0.18.5" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "ruma-appservice-api" -version = "0.4.0" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +version = "0.7.0" +source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" dependencies = [ - "ruma-api", "ruma-common", - "ruma-events", - "ruma-identifiers", - "ruma-serde", "serde", "serde_json", ] [[package]] name = "ruma-client-api" -version = "0.12.3" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +version = "0.15.1" +source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" dependencies = [ "assign", 
"bytes", @@ -2038,186 +2075,118 @@ dependencies = [ "js_int", "maplit", "percent-encoding", - "ruma-api", "ruma-common", - "ruma-events", - "ruma-identifiers", - "ruma-serde", "serde", "serde_json", ] [[package]] name = "ruma-common" -version = "0.6.0" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +version = "0.10.3" +source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" dependencies = [ + "base64", + "bytes", + "form_urlencoded", + "http", "indexmap", + "itoa", "js_int", - "ruma-identifiers", - "ruma-serde", + "js_option", + "percent-encoding", + "rand 0.8.5", + "regex", + "ruma-identifiers-validation", + "ruma-macros", "serde", "serde_json", + "thiserror", "tracing", + "url", + "uuid", "wildmatch", ] -[[package]] -name = "ruma-events" -version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" -dependencies = [ - "indoc", - "js_int", - "ruma-common", - "ruma-events-macros", - "ruma-identifiers", - "ruma-serde", - "serde", - "serde_json", - "thiserror", -] - -[[package]] -name = "ruma-events-macros" -version = "0.24.6" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "ruma-federation-api" -version = "0.3.1" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +version = "0.6.0" +source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" dependencies = [ "js_int", - "ruma-api", "ruma-common", - "ruma-events", - "ruma-identifiers", - "ruma-serde", "serde", "serde_json", ] -[[package]] -name = "ruma-identifiers" -version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" -dependencies = [ - "paste", - "percent-encoding", - "rand 0.8.4", - "ruma-identifiers-macros", - "ruma-identifiers-validation", - "ruma-serde", - "ruma-serde-macros", - "serde", -] - -[[package]] -name = "ruma-identifiers-macros" -version = "0.20.0" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" -dependencies = [ - "quote", - "ruma-identifiers-validation", - "syn", -] - [[package]] name = "ruma-identifiers-validation" -version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +version = "0.9.0" +source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" dependencies = [ + "js_int", "thiserror", ] [[package]] name = "ruma-identity-service-api" -version = "0.3.0" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +version = "0.6.0" +source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" dependencies = [ "js_int", - "ruma-api", "ruma-common", - "ruma-identifiers", - "ruma-serde", "serde", ] [[package]] -name = "ruma-push-gateway-api" -version = "0.3.0" -source = 
"git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +name = "ruma-macros" +version = "0.10.3" +source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" dependencies = [ - "js_int", - "ruma-api", - "ruma-common", - "ruma-events", - "ruma-identifiers", - "ruma-serde", + "once_cell", + "proc-macro-crate", + "proc-macro2", + "quote", + "ruma-identifiers-validation", "serde", - "serde_json", + "syn", + "toml", ] [[package]] -name = "ruma-serde" -version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +name = "ruma-push-gateway-api" +version = "0.6.0" +source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" dependencies = [ - "bytes", - "form_urlencoded", - "itoa", "js_int", - "ruma-serde-macros", + "ruma-common", "serde", "serde_json", ] -[[package]] -name = "ruma-serde-macros" -version = "0.5.0" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" -dependencies = [ - "proc-macro-crate", - "proc-macro2", - "quote", - "syn", -] - [[package]] name = "ruma-signatures" -version = "0.9.0" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +version = "0.12.0" +source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" dependencies = [ - "base64 0.13.0", + "base64", "ed25519-dalek", "pkcs8", "rand 0.7.3", - "ruma-identifiers", - "ruma-serde", + "ruma-common", "serde_json", "sha2", + "subslice", "thiserror", - "tracing", ] [[package]] name = "ruma-state-res" -version = "0.4.1" -source = "git+https://github.com/ruma/ruma?rev=44cfd0adbc83303c19aef590ad0d71647e19f197#44cfd0adbc83303c19aef590ad0d71647e19f197" +version = "0.8.0" +source = "git+https://github.com/ruma/ruma?rev=fba6f70c2df8294f96567f56464a46e3d237a8e9#fba6f70c2df8294f96567f56464a46e3d237a8e9" dependencies = [ - "itertools 0.10.1", + "itertools", "js_int", "ruma-common", - "ruma-events", - "ruma-identifiers", - "ruma-serde", "serde", "serde_json", "thiserror", @@ -2226,93 +2195,94 @@ dependencies = [ [[package]] name = "rusqlite" -version = "0.25.3" +version = "0.28.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "57adcf67c8faaf96f3248c2a7b419a0dbc52ebe36ba83dd57fe83827c1ea4eb3" +checksum = "01e213bc3ecb39ac32e81e51ebe31fd888a940515173e3a18a35f8c6e896422a" dependencies = [ "bitflags", "fallible-iterator", "fallible-streaming-iterator", "hashlink", "libsqlite3-sys", - "memchr", "smallvec", ] [[package]] name = "rust-argon2" -version = "0.8.3" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4b18820d944b33caa75a71378964ac46f58517c92b6ae5f762636247c09e78fb" +checksum = "b50162d19404029c1ceca6f6980fe40d45c8b369f6f44446fa14bb39573b5bb9" dependencies = [ - "base64 0.13.0", + "base64", "blake2b_simd", "constant_time_eq", - "crossbeam-utils 0.8.5", + "crossbeam-utils", ] [[package]] -name = "rustc_version" -version = "0.2.3" +name = "rustc-hash" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a" -dependencies = [ - "semver", -] 
+checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" [[package]] name = "rustls" -version = "0.19.1" +version = "0.20.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35edb675feee39aec9c99fa5ff985081995a06d594114ae14cbe797ad7b7a6d7" +checksum = "5aab8ee6c7097ed6057f43c187a62418d0c05a4bd5f18b3571db50ee0f9ce033" dependencies = [ - "base64 0.13.0", "log", "ring", "sct", - "webpki 0.21.4", + "webpki", ] [[package]] name = "rustls-native-certs" -version = "0.5.0" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a07b7c1885bd8ed3831c289b7870b13ef46fe0e856d288c30d9cc17d75a2092" +checksum = "0167bac7a9f490495f3c33013e7722b53cb087ecbe082fb0c6387c96f634ea50" dependencies = [ "openssl-probe", - "rustls", + "rustls-pemfile 1.0.1", "schannel", "security-framework", ] [[package]] -name = "rustversion" -version = "1.0.5" +name = "rustls-pemfile" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61b3909d758bb75c79f23d4736fac9433868679d3ad2ea7a61e3c25cfda9a088" +checksum = "5eebeaeb360c87bfb72e84abdb3447159c0eaececf1bef2aecd65a8be949d1c9" +dependencies = [ + "base64", +] + +[[package]] +name = "rustls-pemfile" +version = "1.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0864aeff53f8c05aa08d86e5ef839d3dfcf07aeba2db32f12db0ef716e87bd55" +dependencies = [ + "base64", +] [[package]] name = "ryu" -version = "1.0.5" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "71d301d4193d031abdd79ff7e3dd721168a9572ef3fe51a1517aba235bd8f86e" +checksum = "4501abdff3ae82a1c1b477a17252eb69cee9e66eb915c1abaa4f44d873df9f09" [[package]] name = "schannel" -version = "0.1.19" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f05ba609c234e60bee0d547fe94a4c7e9da733d1c962cf6e59efa4cd9c8bc75" +checksum = "88d6731146462ea25d9244b2ed5fd1d716d25c52e4d54aa4fb0f3c4e9854dbe2" dependencies = [ "lazy_static", - "winapi", + "windows-sys", ] -[[package]] -name = "scoped-tls" -version = "1.0.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ea6a9290e3c9cf0f18145ef7ffa62d68ee0bf5fcd651017e586dc7fd5da448c2" - [[package]] name = "scopeguard" version = "1.1.0" @@ -2321,9 +2291,9 @@ checksum = "d29ab0c6d3fc0ee92fe66e2d99f700eab17a8d57d1c1d3b748380fb20baa78cd" [[package]] name = "sct" -version = "0.6.1" +version = "0.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b362b83898e0e69f38515b82ee15aa80636befe47c3b6d3d89a911e78fc228ce" +checksum = "d53dcdb7c9f8158937a7981b48accfd39a43af418591a5d008c7b22b5e1b7ca4" dependencies = [ "ring", "untrusted", @@ -2331,9 +2301,9 @@ dependencies = [ [[package]] name = "security-framework" -version = "2.4.2" +version = "2.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "525bc1abfda2e1998d152c45cf13e696f76d0a4972310b22fac1658b05df7c87" +checksum = "2bc1bb97804af6631813c55739f771071e0f2ed33ee20b68c86ec505d906356c" dependencies = [ "bitflags", "core-foundation", @@ -2344,43 +2314,28 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.4.2" +version = "2.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9dd14d83160b528b7bfd66439110573efcfbe281b17fc2ca9f39f550d619c7e" +checksum = "0160a13a177a45bfb43ce71c01580998474f556ad854dcbca936dd2841a5c556" dependencies = [ "core-foundation-sys", 
"libc", ] -[[package]] -name = "semver" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d7eb9ef2c18661902cc47e535f9bc51b78acd254da71d375c2f6720d9a40403" -dependencies = [ - "semver-parser", -] - -[[package]] -name = "semver-parser" -version = "0.7.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "388a1df253eca08550bef6c72392cfe7c30914bf41df5269b68cbd6ff8f570a3" - [[package]] name = "serde" -version = "1.0.130" +version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f12d06de37cf59146fbdecab66aa99f9fe4f78722e3607577a5375d66bd0c913" +checksum = "728eb6351430bccb993660dfffc5a72f91ccc1295abaa8ce19b27ebe4f75568b" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.130" +version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d7bc1a1ab1961464eae040d96713baa5a724a8152c1222492465b54322ec508b" +checksum = "81fa1584d3d1bcacd84c277a0dfe21f5b0f6accf4a23d04d4c6d61f1af522b4c" dependencies = [ "proc-macro2", "quote", @@ -2389,9 +2344,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.67" +version = "1.0.85" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a7f9e390c27c3c0ce8bc5d725f6e4d30a29d26659494aa4b17535f7522c5c950" +checksum = "e55a28e3aaef9d5ce0506d0a14dbba8054ddc7e499ef522dd8b26859ec9d4a44" dependencies = [ "itoa", "ryu", @@ -2400,9 +2355,9 @@ dependencies = [ [[package]] name = "serde_urlencoded" -version = "0.7.0" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edfa57a7f8d9c1d260a549e7224100f6c43d43f9103e06dd8b4095a9b2b43ce9" +checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", "itoa", @@ -2412,44 +2367,67 @@ dependencies = [ [[package]] name = "serde_yaml" -version = "0.8.20" +version = "0.9.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad104641f3c958dab30eb3010e834c2622d1f3f4c530fef1dee20ad9485f3c09" +checksum = "8613d593412a0deb7bbd8de9d908efff5a0cb9ccd8f62c641e7b2ed2f57291d1" dependencies = [ - "dtoa", "indexmap", + "itoa", + "ryu", "serde", - "yaml-rust", + "unsafe-libyaml", +] + +[[package]] +name = "sha-1" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "028f48d513f9678cda28f6e4064755b3fbb2af6acd672f2c209b62323f7aea0f" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.5", ] [[package]] name = "sha1" -version = "0.6.0" +version = "0.10.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2579985fda508104f7587689507983eadd6a6e84dd35d6d115361f530916fa0d" +checksum = "f04293dc80c3993519f2d7f6f511707ee7094fe0c6d3406feb330cdb3540eba3" +dependencies = [ + "cfg-if", + "cpufeatures", + "digest 0.10.5", +] [[package]] name = "sha2" -version = "0.9.6" +version = "0.9.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9204c41a1597a8c5af23c82d1c921cb01ec0a4c59e07a9c7306062829a3903f3" +checksum = "4d58a1e1bf39749807d89cf2d98ac2dfa0ff1cb3faa38fbb64dd88ac8013d800" dependencies = [ - "block-buffer", - "cfg-if 1.0.0", + "block-buffer 0.9.0", + "cfg-if", "cpufeatures", - "digest", + "digest 0.9.0", "opaque-debug", ] [[package]] name = "sharded-slab" -version = "0.1.3" +version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"740223c51853f3145fe7c90360d2d4232f2b62e3449489c207eccde818979982" +checksum = "900fba806f70c630b0a382d0d825e17a0f19fcd059a2ade1ff237bcddf446b31" dependencies = [ "lazy_static", ] +[[package]] +name = "shlex" +version = "1.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "43b2853a4d09f215c24cc5489c992ce46052d359b5109343cbafbf26bc62f8a3" + [[package]] name = "signal-hook-registry" version = "1.4.0" @@ -2461,167 +2439,71 @@ dependencies = [ [[package]] name = "signature" -version = "1.3.1" +version = "1.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c19772be3c4dd2ceaacf03cb41d5885f2a02c4d8804884918e3a258480803335" +checksum = "74233d3b3b2f6d4b006dc19dee745e73e2a6bfb6f93607cd3b02bd5b00797d7c" [[package]] name = "simple_asn1" -version = "0.4.1" +version = "0.6.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "692ca13de57ce0613a363c8c2f1de925adebc81b04c923ac60c5488bb44abe4b" +checksum = "adc4e5204eb1910f40f9cfa375f6f05b68c3abac4b6fd879c8ff5e7ae8a0a085" dependencies = [ - "chrono", "num-bigint", "num-traits", + "thiserror", + "time", ] [[package]] name = "slab" -version = "0.4.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c307a32c1c5c437f38c7fd45d753050587732ba8628319fbdf12a7e289ccc590" - -[[package]] -name = "sled" -version = "0.34.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d0132f3e393bcb7390c60bb45769498cf4550bcb7a21d7f95c02b69f6362cdc" -dependencies = [ - "crc32fast", - "crossbeam-epoch", - "crossbeam-utils 0.8.5", - "fs2", - "fxhash", - "libc", - "log", - "parking_lot", - "zstd", -] - -[[package]] -name = "smallvec" -version = "1.6.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe0f37c9e8f3c5a4a66ad655a93c74daac4ad00c441533bf5c6e7990bb42604e" - -[[package]] -name = "socket2" -version = "0.3.19" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "122e570113d28d773067fab24266b66753f6ea915758651696b6e35e49f88d6e" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "winapi", -] - -[[package]] -name = "socket2" -version = "0.4.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "765f090f0e423d2b55843402a07915add955e7d60657db13707a159727326cad" -dependencies = [ - "libc", - "winapi", -] - -[[package]] -name = "spin" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" - -[[package]] -name = "spin" -version = "0.9.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "511254be0c5bcf062b019a6c89c01a664aa359ded62f78aa72c6fc137c0590e5" - -[[package]] -name = "spki" -version = "0.4.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "987637c5ae6b3121aba9d513f869bd2bff11c4cc086c22473befd6649c0bd521" -dependencies = [ - "der", -] - -[[package]] -name = "stable-pattern" -version = "0.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4564168c00635f88eaed410d5efa8131afa8d8699a612c80c455a0ba05c21045" -dependencies = [ - "memchr", -] - -[[package]] -name = "standback" -version = "0.2.17" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e113fb6f3de07a243d434a56ec6f186dfd51cb08448239fe7bcae73f87ff28ff" +checksum = "4614a76b2a8be0058caa9dbbaf66d988527d86d003c11a94fbd335d7661edcef" dependencies = [ - 
"version_check", + "autocfg", ] [[package]] -name = "state" -version = "0.5.2" +name = "smallvec" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87cf4f5369e6d3044b5e365c9690f451516ac8f0954084622b49ea3fde2f6de5" -dependencies = [ - "loom", -] +checksum = "a507befe795404456341dfab10cef66ead4c041f62b8b11bbb92bffe5d0953e0" [[package]] -name = "stdweb" -version = "0.4.20" +name = "socket2" +version = "0.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d022496b16281348b52d0e30ae99e01a73d737b2f45d38fed4edf79f9325a1d5" +checksum = "02e2d2db9033d13a1567121ddd7a095ee144db4e1ca1b1bda3419bc0da294ebd" dependencies = [ - "discard", - "rustc_version", - "stdweb-derive", - "stdweb-internal-macros", - "stdweb-internal-runtime", - "wasm-bindgen", + "libc", + "winapi", ] [[package]] -name = "stdweb-derive" -version = "0.5.3" +name = "spin" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c87a60a40fccc84bef0652345bbbbbe20a605bf5d0ce81719fc476f5c03b50ef" -dependencies = [ - "proc-macro2", - "quote", - "serde", - "serde_derive", - "syn", -] +checksum = "6e63cff320ae2c57904679ba7cb63280a3dc4613885beafb148ee7bf9aa9042d" [[package]] -name = "stdweb-internal-macros" -version = "0.2.9" +name = "spki" +version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "58fa5ff6ad0d98d1ffa8cb115892b6e69d67799f6763e162a1c9db421dc22e11" +checksum = "67cf02bbac7a337dc36e4f5a693db6c21e7863f45070f7064577eb4367a3212b" dependencies = [ - "base-x", - "proc-macro2", - "quote", - "serde", - "serde_derive", - "serde_json", - "sha1", - "syn", + "base64ct", + "der", ] [[package]] -name = "stdweb-internal-runtime" -version = "0.1.5" +name = "subslice" +version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "213701ba3370744dcd1a12960caa4843b3d68b4d1c0a5d575e0d65b2ee9d16c0" +checksum = "e0a8e4809a3bb02de01f1f7faf1ba01a83af9e8eabcd4d31dd6e413d14d56aae" +dependencies = [ + "memchr", +] [[package]] name = "subtle" @@ -2631,29 +2513,35 @@ checksum = "6bdef32e8150c2a081110b42772ffe7d7c9032b606bc226c8260fd97e0976601" [[package]] name = "syn" -version = "1.0.75" +version = "1.0.102" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7f58f7e8eaa0009c5fec437aabf511bd9933e4b2d7407bd05273c01a8906ea7" +checksum = "3fcd952facd492f9be3ef0d0b7032a6e442ee9b361d4acc2b1d0c4aaa5f613a1" dependencies = [ "proc-macro2", "quote", - "unicode-xid", + "unicode-ident", ] +[[package]] +name = "sync_wrapper" +version = "0.1.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "20518fe4a4c9acf048008599e464deb21beeae3d3578418951a189c235a7a9a8" + [[package]] name = "synchronoise" -version = "1.0.0" +version = "1.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d717ed0efc9d39ab3b642a096bc369a3e02a38a51c41845d7fe31bdad1d6eaeb" +checksum = "3dbc01390fc626ce8d1cffe3376ded2b72a11bb70e1c75f404a210e4daa4def2" dependencies = [ - "crossbeam-queue 0.1.2", + "crossbeam-queue", ] [[package]] name = "synstructure" -version = "0.12.5" +version = "0.12.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "474aaa926faa1603c40b7885a9eaea29b444d1cb2850cb7c0e37bb1a4182f4fa" +checksum = "f36bdaa60a83aca3921b5259d5400cbf5e90fc51931376a9bd4a0eb79aa7210f" dependencies = [ "proc-macro2", "quote", @@ -2661,34 +2549,20 @@ dependencies = [ "unicode-xid", ] -[[package]] -name = 
"tempfile" -version = "3.2.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dac1c663cfc93810f88aed9b8941d48cabf856a1b111c29a40439018d870eb22" -dependencies = [ - "cfg-if 1.0.0", - "libc", - "rand 0.8.4", - "redox_syscall", - "remove_dir_all", - "winapi", -] - [[package]] name = "thiserror" -version = "1.0.28" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "283d5230e63df9608ac7d9691adc1dfb6e701225436eb64d0b9a7f0a5a04f6ec" +checksum = "10deb33631e3c9018b9baf9dcbbc4f737320d2b576bac10f6aefa048fa407e3e" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.28" +version = "1.0.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa3884228611f5cd3608e2d409bf7dce832e4eb3135e3f11addbd7e41bd68e71" +checksum = "982d17546b47146b28f7c22e3d08465f6b8903d0ea13c1660d9d84a6e7adcdbb" dependencies = [ "proc-macro2", "quote", @@ -2697,9 +2571,9 @@ dependencies = [ [[package]] name = "thread_local" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8018d24e04c95ac8790716a5987d0fec4f8b27249ffa0f7d33f1369bdfb88cbd" +checksum = "5516c27b78311c50bf42c071425c560ac799b11c30b31f87e3081965fe5e0180" dependencies = [ "once_cell", ] @@ -2715,9 +2589,9 @@ dependencies = [ [[package]] name = "thrift" -version = "0.13.0" +version = "0.16.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c6d965454947cc7266d22716ebfd07b18d84ebaf35eec558586bbb2a8cb6b5b" +checksum = "09678c4cdbb4eed72e18b7c2af1329c69825ed16fcbac62d083fc3e2b0590ff0" dependencies = [ "byteorder", "integer-encoding", @@ -2727,58 +2601,60 @@ dependencies = [ ] [[package]] -name = "time" -version = "0.1.43" +name = "tikv-jemalloc-ctl" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ca8a50ef2360fbd1eeb0ecd46795a87a19024eb4b53c5dc916ca1fd95fe62438" +checksum = "e37706572f4b151dff7a0146e040804e9c26fe3a3118591112f05cf12a4216c1" dependencies = [ "libc", - "winapi", + "paste", + "tikv-jemalloc-sys", ] [[package]] -name = "time" -version = "0.2.27" +name = "tikv-jemalloc-sys" +version = "0.5.2+5.3.0-patched" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4752a97f8eebd6854ff91f1c1824cd6160626ac4bd44287f7f4ea2035a02a242" +checksum = "ec45c14da997d0925c7835883e4d5c181f196fa142f8c19d7643d1e9af2592c3" dependencies = [ - "const_fn", + "cc", + "fs_extra", "libc", - "standback", - "stdweb", - "time-macros", - "version_check", - "winapi", ] [[package]] -name = "time-macros" -version = "0.1.1" +name = "tikv-jemallocator" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "957e9c6e26f12cb6d0dd7fc776bb67a706312e7299aed74c8dd5b17ebb27e2f1" +checksum = "20612db8a13a6c06d57ec83953694185a367e16945f66565e8028d2c0bd76979" dependencies = [ - "proc-macro-hack", - "time-macros-impl", + "libc", + "tikv-jemalloc-sys", ] [[package]] -name = "time-macros-impl" -version = "0.1.2" +name = "time" +version = "0.3.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd3c141a1b43194f3f56a1411225df8646c55781d5f26db825b3d98507eb482f" +checksum = "d634a985c4d4238ec39cacaed2e7ae552fbd3c476b552c1deac3021b7d7eaf0c" dependencies = [ - "proc-macro-hack", - "proc-macro2", - "quote", - "standback", - "syn", + "itoa", + "libc", + "num_threads", + "time-macros", ] +[[package]] +name = "time-macros" +version = "0.2.4" +source 
= "registry+https://github.com/rust-lang/crates.io-index" +checksum = "42657b1a6f4d817cda8e7a0ace261fe0cc946cf3a80314390b22cc61ae080792" + [[package]] name = "tinyvec" -version = "1.3.1" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "848a1e1181b9f6753b5e96a092749e29b11d19ede67dfbbd6c7dc7e0f49b5338" +checksum = "87cc5ceb3875bb20c2890005a4e226a4651264a5c75edb2421b52861a0a0cb50" dependencies = [ "tinyvec_macros", ] @@ -2791,9 +2667,9 @@ checksum = "cda74da7e1a664f795bb1f8a87ec406fb89a02522cf6e50620d016add6dbbf5c" [[package]] name = "tokio" -version = "1.11.0" +version = "1.21.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4efe6fc2395938c8155973d7be49fe8d03a843726e285e100a8a383cc0154ce" +checksum = "a9e03c497dc955702ba729190dc4aac6f2a0ce97f913e5b1b5912fc5039d9099" dependencies = [ "autocfg", "bytes", @@ -2801,18 +2677,18 @@ dependencies = [ "memchr", "mio", "num_cpus", - "once_cell", "pin-project-lite", "signal-hook-registry", + "socket2", "tokio-macros", "winapi", ] [[package]] name = "tokio-macros" -version = "1.3.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "54473be61f4ebe4efd09cec9bd5d16fa51d70ea0192213d754d2d500457db110" +checksum = "9724f9a975fb987ef7a3cd9be0350edcbe130698af5b8f7a631e23d42d052484" dependencies = [ "proc-macro2", "quote", @@ -2821,13 +2697,13 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.22.0" +version = "0.23.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bc6844de72e57df1980054b38be3a9f4702aba4858be64dd700181a8a6d0e1b6" +checksum = "c43ee83903113e03984cb9e5cebe6c04a5116269e900e3ddba8f068a62adda59" dependencies = [ "rustls", "tokio", - "webpki 0.21.4", + "webpki", ] [[package]] @@ -2844,9 +2720,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.7" +version = "0.1.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b2f3f698253f03119ac0102beaa64f67a67e08074d03a22d18784104543727f" +checksum = "f6edf2d6bc038a43d31353570e27270603f4648d18f5ed10c0e179abe43255af" dependencies = [ "futures-core", "pin-project-lite", @@ -2855,40 +2731,86 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.6.7" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1caa0b0c8d94a049db56b5acf8cba99dc0623aab1b26d5b5f5e2d945846b3592" +checksum = "0bb2e075f03b3d66d8d8785356224ba688d2906a371015e225beeb65ca92c740" dependencies = [ "bytes", "futures-core", "futures-sink", - "log", "pin-project-lite", "tokio", + "tracing", ] [[package]] name = "toml" -version = "0.5.8" +version = "0.5.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a31142970826733df8241ef35dc040ef98c679ab14d7c3e54d827099b3acecaa" +checksum = "8d82e1a7758622a465f8cee077614c73484dac5b836c02ff6a40d5d1010324d7" dependencies = [ "serde", ] [[package]] -name = "tower-service" +name = "tower" +version = "0.4.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +dependencies = [ + "futures-core", + "futures-util", + "pin-project", + "pin-project-lite", + "tokio", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-http" +version = "0.3.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3c530c8675c1dbf98facee631536fa116b5fb6382d7dd6dc1b118d970eafe3ba" +dependencies = 
[ + "async-compression", + "bitflags", + "bytes", + "futures-core", + "futures-util", + "http", + "http-body", + "http-range-header", + "pin-project-lite", + "tokio", + "tokio-util", + "tower", + "tower-layer", + "tower-service", + "tracing", +] + +[[package]] +name = "tower-layer" version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "360dfd1d6d30e05fda32ace2c8c70e9c0a9da713275777f5a4dbb8a1893930c6" +checksum = "343bc9466d3fe6b0f960ef45960509f84480bf4fd96f92901afe7ff3df9d3a62" + +[[package]] +name = "tower-service" +version = "0.3.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b6bc1c9ce2b5135ac7f93c72918fc37feb872bdc6a5533a8b85eb4b86bfdae52" [[package]] name = "tracing" -version = "0.1.26" +version = "0.1.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09adeb8c97449311ccd28a427f96fb563e7fd31aabf994189879d9da2394b89d" +checksum = "8ce8c33a8d48bd45d624a6e523445fd21ec13d3653cd51f681abf67418f54eb8" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", + "log", "pin-project-lite", "tracing-attributes", "tracing-core", @@ -2896,9 +2818,9 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.15" +version = "0.1.23" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c42e6fa53307c8a17e4ccd4dc81cf5ec38db9209f59b222210375b54ee40d1e2" +checksum = "4017f8f45139870ca7e672686113917c71c7a6e02d4924eda67186083c03081a" dependencies = [ "proc-macro2", "quote", @@ -2907,18 +2829,19 @@ dependencies = [ [[package]] name = "tracing-core" -version = "0.1.19" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ca517f43f0fb96e0c3072ed5c275fe5eece87e8cb52f4a77b69226d3b1c9df8" +checksum = "24eb03ba0eab1fd845050058ce5e616558e8f8d8fca633e6b163fe25c797213a" dependencies = [ - "lazy_static", + "once_cell", + "valuable", ] [[package]] name = "tracing-flame" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd520fe41c667b437952383f3a1ec14f1fa45d653f719a77eedd6e6a02d8fa54" +checksum = "0bae117ee14789185e129aaee5d93750abe67fdc5a9a62650452bfe4e122a3a9" dependencies = [ "lazy_static", "tracing", @@ -2927,89 +2850,75 @@ dependencies = [ [[package]] name = "tracing-log" -version = "0.1.2" +version = "0.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a6923477a48e41c1951f1999ef8bb5a3023eb723ceadafe78ffb65dc366761e3" +checksum = "78ddad33d2d10b1ed7eb9d1f518a5674713876e97e5bb9b7345a7984fbb4f922" dependencies = [ "lazy_static", "log", "tracing-core", ] -[[package]] -name = "tracing-serde" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fb65ea441fbb84f9f6748fd496cf7f63ec9af5bca94dd86456978d055e8eb28b" -dependencies = [ - "serde", - "tracing-core", -] - [[package]] name = "tracing-subscriber" -version = "0.2.20" +version = "0.3.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9cbe87a2fa7e35900ce5de20220a582a9483a7063811defce79d7cbd59d4cfe" +checksum = "a6176eae26dd70d0c919749377897b54a9276bd7061339665dd68777926b5a70" dependencies = [ - "ansi_term", - "chrono", - "lazy_static", "matchers", + "nu-ansi-term", + "once_cell", "regex", - "serde", - "serde_json", "sharded-slab", "smallvec", "thread_local", "tracing", "tracing-core", "tracing-log", - "tracing-serde", ] [[package]] name = "trust-dns-proto" -version = "0.20.3" +version = "0.22.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "ad0d7f5db438199a6e2609debe3f69f808d074e0a2888ee0bccb45fe234d03f4" +checksum = "4f7f83d1e4a0e4358ac54c5c3681e5d7da5efc5a7a632c90bb6d6669ddd9bc26" dependencies = [ "async-trait", - "cfg-if 1.0.0", + "cfg-if", "data-encoding", "enum-as-inner", "futures-channel", "futures-io", "futures-util", - "idna", + "idna 0.2.3", "ipnet", "lazy_static", - "log", - "rand 0.8.4", + "rand 0.8.5", "smallvec", "thiserror", "tinyvec", "tokio", + "tracing", "url", ] [[package]] name = "trust-dns-resolver" -version = "0.20.3" +version = "0.22.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6ad17b608a64bd0735e67bde16b0636f8aa8591f831a25d18443ed00a699770" +checksum = "aff21aa4dcefb0a1afbfac26deb0adc93888c7d295fb63ab273ef276ba2b7cfe" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "futures-util", "ipconfig", "lazy_static", - "log", "lru-cache", "parking_lot", "resolv-conf", "smallvec", "thiserror", "tokio", + "tracing", "trust-dns-proto", ] @@ -3019,79 +2928,59 @@ version = "0.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "59547bce71d9c38b83d9c0e92b6066c4253371f15005def0c30d9657f50c7642" -[[package]] -name = "twoway" -version = "0.2.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c57ffb460d7c24cd6eda43694110189030a3d1dfe418416d9468fd1c1d290b47" -dependencies = [ - "memchr", - "unchecked-index", -] - [[package]] name = "typenum" -version = "1.14.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b63708a265f51345575b27fe43f9500ad611579e764c79edbc2037b1121959ec" - -[[package]] -name = "ubyte" -version = "0.10.1" +version = "1.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "42756bb9e708855de2f8a98195643dff31a97f0485d90d8467b39dc24be9e8fe" -dependencies = [ - "serde", -] +checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987" [[package]] name = "uncased" -version = "0.9.6" +version = "0.9.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5baeed7327e25054889b9bd4f975f32e5f4c5d434042d59ab6cd4142c0a76ed0" +checksum = "09b01702b0fd0b3fadcf98e098780badda8742d4f4a7676615cad90e8ac73622" dependencies = [ - "serde", "version_check", ] [[package]] -name = "unchecked-index" -version = "0.2.2" +name = "unicode-bidi" +version = "0.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eeba86d422ce181a719445e51872fa30f1f7413b62becb52e95ec91aa262d85c" +checksum = "099b7128301d285f79ddd55b9a83d5e6b9e97c92e0ea0daebee7263e932de992" [[package]] -name = "unicode-bidi" -version = "0.3.6" +name = "unicode-ident" +version = "1.0.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "246f4c42e67e7a4e3c6106ff716a5d067d4132a642840b242e357e468a2a0085" +checksum = "6ceab39d59e4c9499d4e5a8ee0e2735b891bb7308ac83dfb4e80cad195c9f6f3" [[package]] name = "unicode-normalization" -version = "0.1.19" +version = "0.1.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d54590932941a9e9266f0832deed84ebe1bf2e4c9e4a3554d393d18f5e854bf9" +checksum = "5c5713f0fc4b5db668a2ac63cdb7bb4469d8c9fed047b1d0292cc7b0ce2ba921" dependencies = [ "tinyvec", ] [[package]] -name = "unicode-segmentation" -version = "1.8.0" +name = "unicode-xid" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8895849a949e7845e06bd6dc1aa51731a103c42707010a5b591c0038fb73385b" 
+checksum = "f962df74c8c05a667b5ee8bcf162993134c104e96440b663c8daa176dc772d8c" [[package]] -name = "unicode-xid" -version = "0.2.2" +name = "unsafe-libyaml" +version = "0.2.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ccb82d61f80a663efe1f787a51b16b5a51e3314d6ac365b08639f52387b33f3" +checksum = "c1e5fa573d8ac5f1a856f8d7be41d390ee973daf97c806b2c1a465e4e1406e68" [[package]] -name = "unindent" -version = "0.1.7" +name = "unsigned-varint" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f14ee04d9415b52b3aeab06258a3f07093182b88ba0f9b8d203f211a7a7d41c7" +checksum = "d86a8dc7f45e4c1b0d30e43038c38f274e77af056aa5f74b93c2cf9eb3c1c836" [[package]] name = "untrusted" @@ -3101,16 +2990,30 @@ checksum = "a156c684c91ea7d62626509bce3cb4e1d9ed5c4d978f7b4352658f96a4c26b4a" [[package]] name = "url" -version = "2.2.2" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a507c383b2d33b5fc35d1861e77e6b383d158b2da5e14fe51b83dfedf6fd578c" +checksum = "0d68c799ae75762b8c3fe375feb6600ef5602c883c5d21eb51c09f22b83c4643" dependencies = [ "form_urlencoded", - "idna", - "matches", + "idna 0.3.0", "percent-encoding", ] +[[package]] +name = "uuid" +version = "1.2.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "feb41e78f93363bb2df8b0e86a2ca30eed7806ea16ea0c790d757cf93f79be83" +dependencies = [ + "getrandom 0.2.7", +] + +[[package]] +name = "valuable" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d" + [[package]] name = "vcpkg" version = "0.2.15" @@ -3119,9 +3022,9 @@ checksum = "accd4ea62f7bb7a82fe23066fb0957d48ef677f6eeb8215f372f52e48bb32426" [[package]] name = "version_check" -version = "0.9.3" +version = "0.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5fecdca9a5291cc2b8dcf7dc02453fee791a280f3743cb0905f8822ae463b3fe" +checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f" [[package]] name = "want" @@ -3141,31 +3044,29 @@ checksum = "cccddf32554fecc6acb585f82a32a72e28b48f8c4c1883ddfeeeaa96f7d8e519" [[package]] name = "wasi" -version = "0.10.2+wasi-snapshot-preview1" +version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd6fbd9a79829dd1ad0cc20627bf1ed606756a7f77edff7b66b7064f9cb327c6" +checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" [[package]] name = "wasm-bindgen" -version = "0.2.76" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ce9b1b516211d33767048e5d47fa2a381ed8b76fc48d2ce4aa39877f9f183e0" +checksum = "eaf9f5aceeec8be17c128b2e93e031fb8a4d469bb9c4ae2d7dc1888b26887268" dependencies = [ - "cfg-if 1.0.0", - "serde", - "serde_json", + "cfg-if", "wasm-bindgen-macro", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.76" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cfe8dc78e2326ba5f845f4b5bf548401604fa20b1dd1d365fb73b6c1d6364041" +checksum = "4c8ffb332579b0557b52d268b91feab8df3615f265d5270fec2a8c95b17c1142" dependencies = [ "bumpalo", - "lazy_static", "log", + "once_cell", "proc-macro2", "quote", "syn", @@ -3174,11 +3075,11 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.26" +version = "0.4.33" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "95fded345a6559c2cfee778d562300c581f7d4ff3edb9b0d230d69800d213972" +checksum = "23639446165ca5a5de86ae1d8896b737ae80319560fbaa4c2887b7da6e7ebd7d" dependencies = [ - "cfg-if 1.0.0", + "cfg-if", "js-sys", "wasm-bindgen", "web-sys", @@ -3186,9 +3087,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.76" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "44468aa53335841d9d6b6c023eaab07c0cd4bddbcfdee3e2bb1e8d2cb8069fef" +checksum = "052be0f94026e6cbc75cdefc9bae13fd6052cdcaf532fa6c45e7ae33a1e6c810" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -3196,9 +3097,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.76" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0195807922713af1e67dc66132c7328206ed9766af3858164fb583eedc25fbad" +checksum = "07bc0c051dc5f23e307b13285f9d75df86bfdf816c5721e573dec1f9b8aa193c" dependencies = [ "proc-macro2", "quote", @@ -3209,30 +3110,20 @@ dependencies = [ [[package]] name = "wasm-bindgen-shared" -version = "0.2.76" +version = "0.2.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "acdb075a845574a1fa5f09fd77e43f7747599301ea3417a9fbffdeedfc1f4a29" +checksum = "1c38c045535d93ec4f0b4defec448e4291638ee608530863b1e2ba115d4fff7f" [[package]] name = "web-sys" -version = "0.3.53" +version = "0.3.60" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "224b2f6b67919060055ef1a67807367c2066ed520c3862cc013d26cf893a783c" +checksum = "bcda906d8be16e728fd5adc5b729afad4e444e106ab28cd1c7256e54fa61510f" dependencies = [ "js-sys", "wasm-bindgen", ] -[[package]] -name = "webpki" -version = "0.21.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8e38c0608262c46d4a56202ebabdeb094cef7e560ca7a226c6bf055188aa4ea" -dependencies = [ - "ring", - "untrusted", -] - [[package]] name = "webpki" version = "0.22.0" @@ -3245,21 +3136,21 @@ dependencies = [ [[package]] name = "weezl" -version = "0.1.5" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8b77fdfd5a253be4ab714e4ffa3c49caf146b4de743e97510c0656cf90f1e8e" +checksum = "9193164d4de03a926d909d3bc7c30543cecb35400c02114792c2cae20d5e2dbb" [[package]] name = "widestring" -version = "0.4.3" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c168940144dd21fd8046987c16a46a33d5fc84eec29ef9dcddc2ac9e31526b7c" +checksum = "17882f045410753661207383517a6f62ec3dbeb6a4ed2acce01f0728238d1983" [[package]] name = "wildmatch" -version = "2.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6c48bd20df7e4ced539c12f570f937c6b4884928a87fee70a479d72f031d4e0" +checksum = "ee583bdc5ff1cf9db20e9db5bb3ff4c3089a8f6b8b31aff265c9aba85812db86" [[package]] name = "winapi" @@ -3284,14 +3175,48 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] -name = "winreg" -version = "0.6.2" +name = "windows-sys" +version = "0.36.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2986deb581c4fe11b621998a5e53361efe6b48a151178d0cd9eeffa4dc6acc9" +checksum = "ea04155a16a59f9eab786fe12a4a450e75cdb175f9e0d80da1e17db09f55b8d2" dependencies = [ - "winapi", + "windows_aarch64_msvc", + 
"windows_i686_gnu", + "windows_i686_msvc", + "windows_x86_64_gnu", + "windows_x86_64_msvc", ] +[[package]] +name = "windows_aarch64_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9bb8c3fd39ade2d67e9874ac4f3db21f0d710bee00fe7cab16949ec184eeaa47" + +[[package]] +name = "windows_i686_gnu" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "180e6ccf01daf4c426b846dfc66db1fc518f074baa793aa7d9b9aaeffad6a3b6" + +[[package]] +name = "windows_i686_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e2e7917148b2812d1eeafaeb22a97e4813dfa60a3f8f78ebe204bcc88f12f024" + +[[package]] +name = "windows_x86_64_gnu" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4dcd171b8776c41b97521e5da127a2d86ad280114807d0b2ab1e462bc764d9e1" + +[[package]] +name = "windows_x86_64_msvc" +version = "0.36.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c811ca4a8c853ef420abd8592ba53ddbbac90410fab6903b3e79972a631f7680" + [[package]] name = "winreg" version = "0.7.0" @@ -3301,35 +3226,26 @@ dependencies = [ "winapi", ] -[[package]] -name = "yaml-rust" -version = "0.4.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56c1936c4cc7a1c9ab21a1ebb602eb942ba868cbd44a99cb7cdc5892335e1c85" -dependencies = [ - "linked-hash-map", -] - [[package]] name = "yansi" -version = "0.5.0" +version = "0.5.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fc79f4a1e39857fc00c3f662cbf2651c771f00e9c15fe2abc341806bd46bd71" +checksum = "09041cd90cf85f7f8b2df60c646f853b7f535ce68f85244eb6731cf89fa498ec" [[package]] name = "zeroize" -version = "1.4.1" +version = "1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "377db0846015f7ae377174787dd452e1c5f5a9050bc6f954911d01f116daa0cd" +checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f" dependencies = [ "zeroize_derive", ] [[package]] name = "zeroize_derive" -version = "1.1.0" +version = "1.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2c1e130bebaeab2f23886bf9acbaca14b092408c452543c857f66399cd6dab1" +checksum = "3f8f187641dad4f680d25c4bfc4225b418165984179f26ca76ec4fb6441d3a17" dependencies = [ "proc-macro2", "quote", @@ -3338,32 +3254,10 @@ dependencies = [ ] [[package]] -name = "zstd" -version = "0.5.4+zstd.1.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69996ebdb1ba8b1517f61387a883857818a66c8a295f487b1ffd8fd9d2c82910" -dependencies = [ - "zstd-safe", -] - -[[package]] -name = "zstd-safe" -version = "2.0.6+zstd.1.4.7" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "98aa931fb69ecee256d44589d19754e61851ae4769bf963b385119b1cc37a49e" -dependencies = [ - "libc", - "zstd-sys", -] - -[[package]] -name = "zstd-sys" -version = "1.4.18+zstd.1.4.7" +name = "zigzag" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a1e6e8778706838f43f771d80d37787cb2fe06dafe89dd3aebaf6721b9eaec81" +checksum = "70b40401a28d86ce16a330b863b86fd7dbee4d7c940587ab09ab8c019f9e3fdf" dependencies = [ - "cc", - "glob", - "itertools 0.9.0", - "libc", + "num-traits", ] diff --git a/Cargo.toml b/Cargo.toml index dae68bf1..e7e48c83 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,87 +6,103 @@ authors = ["timokoesters "] homepage = 
"https://conduit.rs" repository = "https://gitlab.com/famedly/conduit" readme = "README.md" -version = "0.2.0" -edition = "2018" +version = "0.4.0-next" +rust-version = "1.63" +edition = "2021" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html [dependencies] -# Used to handle requests -# TODO: This can become optional as soon as proper configs are supported -# rocket = { git = "https://github.com/SergioBenitez/Rocket.git", rev = "801e04bd5369eb39e126c75f6d11e1e9597304d8", features = ["tls"] } # Used to handle requests -rocket = { version = "0.5.0-rc.1", features = ["tls"] } # Used to handle requests +# Web framework +axum = { version = "0.5.8", default-features = false, features = ["form", "headers", "http1", "http2", "json", "matched-path"], optional = true } +axum-server = { version = "0.4.0", features = ["tls-rustls"] } +tower = { version = "0.4.8", features = ["util"] } +tower-http = { version = "0.3.4", features = ["add-extension", "cors", "compression-full", "sensitive-headers", "trace", "util"] } # Used for matrix spec type definitions and helpers #ruma = { version = "0.4.0", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -ruma = { git = "https://github.com/ruma/ruma", rev = "58cdcae1f9a8f4824bcbec1de1bb13e659c66804", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } +ruma = { git = "https://github.com/ruma/ruma", rev = "fba6f70c2df8294f96567f56464a46e3d237a8e9", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-msc2448", "unstable-exhaustive-types", "ring-compat", "unstable-unspecified" ] } #ruma = { git = "https://github.com/timokoesters/ruma", rev = "50c1db7e0a3a21fc794b0cce3b64285a4c750c71", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } #ruma = { path = "../ruma/crates/ruma", features = ["compat", "rand", "appservice-api-c", "client-api", "federation-api", "push-gateway-api-c", "state-res", "unstable-pre-spec", "unstable-exhaustive-types"] } -# Used for long polling and federation sender, should be the same as rocket::tokio -tokio = "1.11.0" +# Async runtime and utilities +tokio = { version = "1.11.0", features = ["fs", "macros", "signal", "sync"] } # Used for storing data permanently -sled = { version = "0.34.6", features = ["compression", "no_metrics"], optional = true } +#sled = { version = "0.34.7", features = ["compression", "no_metrics"], optional = true } #sled = { git = "https://github.com/spacejam/sled.git", rev = "e4640e0773595229f398438886f19bca6f7326a2", features = ["compression"] } +persy = { version = "1.0.0", optional = true, features = ["background_ops"] } # Used for the http request / response body type for Ruma endpoints used with reqwest bytes = "1.1.0" -# Used for rocket<->ruma conversions http = "0.2.4" # Used to find data directory for default db path -directories = "3.0.2" +directories = "4.0.0" # Used for ruma wrapper -serde_json = { version = "1.0.67", features = ["raw_value"] } +serde_json = { version = "1.0.68", features = ["raw_value"] } # Used for appservice registration files -serde_yaml = "0.8.20" +serde_yaml = "0.9.13" # Used for pdu definition -serde = "1.0.130" 
+serde = { version = "1.0.130", features = ["rc"] } # Used for secure identifiers rand = "0.8.4" # Used to hash passwords -rust-argon2 = "0.8.3" +rust-argon2 = "1.0.0" # Used to send requests -reqwest = { version = "0.11.4", default-features = false, features = ["rustls-tls-native-roots", "socks"] } -# Custom TLS verifier -rustls = { version = "0.19.1", features = ["dangerous_configuration"] } -rustls-native-certs = "0.5.0" -webpki = "0.22.0" +reqwest = { default-features = false, features = ["rustls-tls-native-roots", "socks"], git = "https://github.com/timokoesters/reqwest", rev = "57b7cf4feb921573dfafad7d34b9ac6e44ead0bd" } # Used for conduit::Error type -thiserror = "1.0.28" +thiserror = "1.0.29" # Used to generate thumbnails for images -image = { version = "0.23.14", default-features = false, features = ["jpeg", "png", "gif"] } +image = { version = "0.24.4", default-features = false, features = ["jpeg", "png", "gif"] } # Used to encode server public key base64 = "0.13.0" # Used when hashing the state ring = "0.16.20" # Used when querying the SRV record of other servers -trust-dns-resolver = "0.20.3" +trust-dns-resolver = "0.22.0" # Used to find matching events for appservices regex = "1.5.4" # jwt jsonwebtokens -jsonwebtoken = "7.2.0" +jsonwebtoken = "8.1.1" # Performance measurements -tracing = { version = "0.1.26", features = ["release_max_level_warn"] } -tracing-subscriber = "0.2.20" -tracing-flame = "0.1.0" -opentelemetry = { version = "0.16.0", features = ["rt-tokio"] } -opentelemetry-jaeger = { version = "0.15.0", features = ["rt-tokio"] } +tracing = { version = "0.1.27", features = [] } +tracing-subscriber = { version = "0.3.16", features = ["env-filter"] } +tracing-flame = "0.2.0" +opentelemetry = { version = "0.18.0", features = ["rt-tokio"] } +opentelemetry-jaeger = { version = "0.17.0", features = ["rt-tokio"] } lru-cache = "0.1.2" -rusqlite = { version = "0.25.3", optional = true, features = ["bundled"] } -parking_lot = { version = "0.11.2", optional = true } +rusqlite = { version = "0.28.0", optional = true, features = ["bundled"] } +parking_lot = { version = "0.12.1", optional = true } crossbeam = { version = "0.8.1", optional = true } num_cpus = "1.13.0" threadpool = "1.8.1" heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true } +rocksdb = { version = "0.17.0", default-features = true, features = ["multi-threaded-cf", "zstd"], optional = true } + thread_local = "1.1.3" +# used for TURN server authentication +hmac = "0.12.1" +sha-1 = "0.10.0" +# used for conduit's CLI and admin room command parsing +clap = { version = "4.0.11", default-features = false, features = ["std", "derive", "help", "usage", "error-context"] } +futures-util = { version = "0.3.17", default-features = false } +# Used for reading the configuration from conduit.toml & environment variables +figment = { version = "0.10.6", features = ["env", "toml"] } + +tikv-jemalloc-ctl = { version = "0.5.0", features = ["use_std"], optional = true } +tikv-jemallocator = { version = "0.5.0", features = ["unprefixed_malloc_on_supported_platforms"], optional = true } +lazy_static = "1.4.0" +async-trait = "0.1.57" [features] -default = ["conduit_bin", "backend_sqlite"] -backend_sled = ["sled"] +default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "jemalloc"] +#backend_sled = ["sled"] +backend_persy = ["persy", "parking_lot"] backend_sqlite = ["sqlite"] backend_heed = ["heed", "crossbeam"] -sqlite = ["rusqlite", "parking_lot", "crossbeam", 
"tokio/signal"] -conduit_bin = [] # TODO: add rocket to this when it is optional +backend_rocksdb = ["rocksdb"] +jemalloc = ["tikv-jemalloc-ctl", "tikv-jemallocator"] +sqlite = ["rusqlite", "parking_lot", "tokio/signal"] +conduit_bin = ["axum"] [[bin]] name = "conduit" @@ -120,13 +136,12 @@ maintainer-scripts = "debian/" systemd-units = { unit-name = "matrix-conduit" } [profile.dev] -lto = 'thin' +lto = 'off' incremental = true [profile.release] lto = 'thin' incremental = true - codegen-units=32 # If you want to make flamegraphs, enable debug info: # debug = true diff --git a/Cross.toml b/Cross.toml new file mode 100644 index 00000000..5d99a358 --- /dev/null +++ b/Cross.toml @@ -0,0 +1,23 @@ +[build.env] +# CI uses an S3 endpoint to store sccache artifacts, so their config needs to +# be available in the cross container as well +passthrough = [ + "RUSTC_WRAPPER", + "AWS_ACCESS_KEY_ID", + "AWS_SECRET_ACCESS_KEY", + "SCCACHE_BUCKET", + "SCCACHE_ENDPOINT", + "SCCACHE_S3_USE_SSL", +] + +[target.aarch64-unknown-linux-musl] +image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-aarch64-unknown-linux-musl:latest" + +[target.arm-unknown-linux-musleabihf] +image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-arm-unknown-linux-musleabihf:latest" + +[target.armv7-unknown-linux-musleabihf] +image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-armv7-unknown-linux-musleabihf:latest" + +[target.x86_64-unknown-linux-musl] +image = "registry.gitlab.com/jfowl/conduit-containers/rust-cross-x86_64-unknown-linux-musl@sha256:b6d689e42f0236c8a38b961bca2a12086018b85ed20e0826310421daf182e2bb" diff --git a/DEPLOY.md b/DEPLOY.md index 84dd2beb..1c7d1af5 100644 --- a/DEPLOY.md +++ b/DEPLOY.md @@ -1,46 +1,55 @@ # Deploying Conduit -## Getting help - -If you run into any problems while setting up Conduit, write an email to `timo@koesters.xyz`, ask us in `#conduit:matrix.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new). +> ## Getting help +> +> If you run into any problems while setting up Conduit, write an email to `timo@koesters.xyz`, ask us +> in `#conduit:fachschaften.org` or [open an issue on GitLab](https://gitlab.com/famedly/conduit/-/issues/new). ## Installing Conduit -You may simply download the binary that fits your machine. Run `uname -m` to see what you need. Now copy the right url: - -| CPU Architecture | GNU (Ubuntu, Debian, ArchLinux, ...) | MUSL (Alpine, ... ) | -| -------------------- | ------------------------------------- | ----------------------- | -| x84_64 / amd64 | [Download][x84_64-gnu] | [Download][x84_64-musl] | -| armv7 (Raspberry Pi) | [Download][armv7-gnu] | - | -| armv8 / aarch64 | [Download][armv8-gnu] | - | - -[x84_64-gnu]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-gnu?job=build:release:cargo:x86_64-unknown-linux-gnu +Although you might be able to compile Conduit for Windows, we do recommend running it on a linux server. We therefore +only offer Linux binaries. -[x84_64-musl]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-musl?job=build:release:cargo:x86_64-unknown-linux-musl - -[armv7-gnu]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-armv7-unknown-linux-gnueabihf?job=build:release:cargo:armv7-unknown-linux-gnueabihf +You may simply download the binary that fits your machine. Run `uname -m` to see what you need. 
Now copy the right url: -[armv8-gnu]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-aarch64-unknown-linux-gnu?job=build:release:cargo:aarch64-unknown-linux-gnu +| CPU Architecture | Download stable version | Download development version | +| ------------------------------------------- | ------------------------------ | ---------------------------- | +| x86_64 / amd64 (Most servers and computers) | [Download][x84_64-musl-master] | [Download][x84_64-musl-next] | +| armv6 | [Download][armv6-musl-master] | [Download][armv6-musl-next] | +| armv7 (e.g. Raspberry Pi by default) | [Download][armv7-musl-master] | [Download][armv7-musl-next] | +| armv8 / aarch64 | [Download][armv8-musl-master] | [Download][armv8-musl-next] | + +[x84_64-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-x86_64-unknown-linux-musl?job=build:release:cargo:x86_64-unknown-linux-musl +[armv6-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-arm-unknown-linux-musleabihf?job=build:release:cargo:arm-unknown-linux-musleabihf +[armv7-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-armv7-unknown-linux-musleabihf?job=build:release:cargo:armv7-unknown-linux-musleabihf +[armv8-musl-master]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/master/raw/conduit-aarch64-unknown-linux-musl?job=build:release:cargo:aarch64-unknown-linux-musl +[x84_64-musl-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/conduit-x86_64-unknown-linux-musl?job=build:release:cargo:x86_64-unknown-linux-musl +[armv6-musl-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/conduit-arm-unknown-linux-musleabihf?job=build:release:cargo:arm-unknown-linux-musleabihf +[armv7-musl-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/conduit-armv7-unknown-linux-musleabihf?job=build:release:cargo:armv7-unknown-linux-musleabihf +[armv8-musl-next]: https://gitlab.com/famedly/conduit/-/jobs/artifacts/next/raw/conduit-aarch64-unknown-linux-musl?job=build:release:cargo:aarch64-unknown-linux-musl ```bash $ sudo wget -O /usr/local/bin/matrix-conduit $ sudo chmod +x /usr/local/bin/matrix-conduit ``` -Alternatively, you may compile the binary yourself using +Alternatively, you may compile the binary yourself + +```bash +$ sudo apt install libclang-dev build-essential +``` ```bash $ cargo build --release ``` -Note that this currently requires Rust 1.50. -If you want to cross compile Conduit to another architecture, read the [Cross-Compile Guide](CROSS_COMPILE.md). +If you want to cross compile Conduit to another architecture, read the [Cross-Compile Guide](cross/README.md). ## Adding a Conduit user -While Conduit can run as any user it is usually better to use dedicated users for different services. -This also allows you to make sure that the file permissions are correctly set up. +While Conduit can run as any user it is usually better to use dedicated users for different services. This also allows +you to make sure that the file permissions are correctly set up. In Debian you can use this command to create a Conduit user: @@ -48,11 +57,16 @@ In Debian you can use this command to create a Conduit user: sudo adduser --system conduit --no-create-home ``` +## Forwarding ports in the firewall or the router + +Conduit uses the ports 443 and 8448, both of which need to be open in the firewall. 
+ +If Conduit runs behind a router or in a container and has a different public IP address than the host system, these public ports need to be forwarded directly or indirectly to the port mentioned in the config. + ## Setting up a systemd service -Now we'll set up a systemd service for Conduit, so it's easy to start/stop -Conduit and set it to autostart when your server reboots. Simply paste the -default systemd service you can find below into +Now we'll set up a systemd service for Conduit, so it's easy to start/stop Conduit and set it to autostart when your +server reboots. Simply paste the default systemd service you can find below into `/etc/systemd/system/conduit.service`. ```systemd @@ -77,31 +91,38 @@ Finally, run ``` $ sudo systemctl daemon-reload ``` - ## Creating the Conduit configuration file -Now we need to create the Conduit's config file in `/etc/matrix-conduit/conduit.toml`. Paste this in **and take a moment to read it. You need to change at least the server name.** +Now we need to create Conduit's config file in `/etc/matrix-conduit/conduit.toml`. Paste this in **and take a moment +to read it. You need to change at least the server name.** +You can also choose to use a different database backend, but right now only `rocksdb` and `sqlite` are recommended. ```toml [global] -# The server_name is the name of this server. It is used as a suffix for user +# The server_name is the pretty name of this server. It is used as a suffix for user # and room ids. Examples: matrix.org, conduit.rs -# The Conduit server needs to be reachable at https://your.server.name/ on port -# 443 (client-server) and 8448 (federation) OR you can create /.well-known -# files to redirect requests. See + +# The Conduit server needs all /_matrix/ requests to be reachable at +# https://your.server.name/ on port 443 (client-server) and 8448 (federation). + +# If that's not possible for you, you can create /.well-known files to redirect +# requests. See # https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client -# and https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server +# and +# https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server # for more information # YOU NEED TO EDIT THIS #server_name = "your.server.name" # This is the only directory where Conduit will save its data -database_path = "/var/lib/matrix-conduit/conduit_db" +database_path = "/var/lib/matrix-conduit/" +database_backend = "rocksdb" # The port Conduit will be running on. You need to set up a reverse proxy in # your web server (e.g. apache or nginx), so all requests to /_matrix on port # 443 and 8448 will be forwarded to the Conduit instance running on this port +# Docker users: Don't change this, you'll need to map an external port to this. port = 6167 # Max size for uploads @@ -110,42 +131,38 @@ max_request_size = 20_000_000 # in bytes # Enables registration. If set to false, no users can register on this server. allow_registration = true -# Disable encryption, so no new encrypted rooms can be created -# Note: existing rooms will continue to work -allow_encryption = true allow_federation = true trusted_servers = ["matrix.org"] #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time -#workers = 4 # default: cpu core count * 2 +#log = "warn,state_res=warn,rocket=off,_=off,sled=off" address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy - -# The total amount of memory that the database will use. 
-#db_cache_capacity_mb = 200 +#address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it. ``` ## Setting the correct file permissions -As we are using a Conduit specific user we need to allow it to read the config. -To do that you can run this command on Debian: +As we are using a Conduit specific user we need to allow it to read the config. To do that you can run this command on +Debian: ```bash -sudo chown -R conduit:nogroup /etc/matrix-conduit +sudo chown -R root:root /etc/matrix-conduit +sudo chmod 755 /etc/matrix-conduit ``` If you use the default database path you also need to run this: ```bash -sudo mkdir -p /var/lib/matrix-conduit/conduit_db -sudo chown -R conduit:nogroup /var/lib/matrix-conduit/conduit_db +sudo mkdir -p /var/lib/matrix-conduit/ +sudo chown -R conduit:nogroup /var/lib/matrix-conduit/ +sudo chmod 700 /var/lib/matrix-conduit/ ``` - ## Setting up the Reverse Proxy -This depends on whether you use Apache, Nginx or another web server. +This depends on whether you use Apache, Caddy, Nginx or another web server. ### Apache @@ -171,11 +188,22 @@ ProxyPassReverse /_matrix/ http://127.0.0.1:6167/_matrix/ $ sudo systemctl reload apache2 ``` +### Caddy +Create `/etc/caddy/conf.d/conduit_caddyfile` and enter this (substitute for your server name). +```caddy +your.server.name, your.server.name:8448 { + reverse_proxy /_matrix/* 127.0.0.1:6167 +} +``` +That's it! Just start or enable the service and you're set. +```bash +$ sudo systemctl enable caddy +``` + ### Nginx -If you use Nginx and not Apache, add the following server section inside the -http section of `/etc/nginx/nginx.conf` +If you use Nginx and not Apache, add the following server section inside the http section of `/etc/nginx/nginx.conf` ```nginx server { @@ -198,22 +226,23 @@ server { include /etc/letsencrypt/options-ssl-nginx.conf; } ``` + **You need to make some edits again.** When you are done, run ```bash $ sudo systemctl reload nginx ``` - ## SSL Certificate +If you chose Caddy as your web proxy SSL certificates are handled automatically and you can skip this step. + The easiest way to get an SSL certificate, if you don't have one already, is to install `certbot` and run this: ```bash $ sudo certbot -d your.server.name ``` - ## You're done! Now you can start Conduit with: @@ -239,4 +268,15 @@ $ curl https://your.server.name/_matrix/client/versions $ curl https://your.server.name:8448/_matrix/client/versions ``` +- To check if your server can talk with other homeservers, you can use the [Matrix Federation Tester](https://federationtester.matrix.org/). + If you can register but cannot join federated rooms check your config again and also check if the port 8448 is open and forwarded correctly. + +# What's next? + +## Audio/Video calls + +For Audio/Video call functionality see the [TURN Guide](TURN.md). + +## Appservices + If you want to set up an appservice, take a look at the [Appservice Guide](APPSERVICES.md). 
diff --git a/Dockerfile b/Dockerfile index f4b176f5..3154ebb6 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,97 +1,84 @@ -# Using multistage build: -# https://docs.docker.com/develop/develop-images/multistage-build/ -# https://whitfin.io/speeding-up-rust-docker-builds/ - - -########################## BUILD IMAGE ########################## -# Alpine build image to build Conduit's statically compiled binary -FROM alpine:3.14 as builder - -# Install packages needed for building all crates -RUN apk add --no-cache \ - cargo \ - openssl-dev - -# Specifies if the local project is build or if Conduit gets build -# from the official git repository. Defaults to the git repo. -ARG LOCAL=false -# Specifies which revision/commit is build. Defaults to HEAD -ARG GIT_REF=origin/master - -# Copy project files from current folder -COPY . . -# Build it from the copied local files or from the official git repository -RUN if [[ $LOCAL == "true" ]]; then \ - mv ./docker/healthcheck.sh . ; \ - echo "Building from local source..." ; \ - cargo install --path . ; \ - else \ - echo "Building revision '${GIT_REF}' from online source..." ; \ - cargo install --git "https://gitlab.com/famedly/conduit.git" --rev ${GIT_REF} ; \ - echo "Loadings healthcheck script from online source..." ; \ - wget "https://gitlab.com/famedly/conduit/-/raw/${GIT_REF#origin/}/docker/healthcheck.sh" ; \ - fi - -########################## RUNTIME IMAGE ########################## -# Create new stage with a minimal image for the actual -# runtime image/container -FROM alpine:3.14 - -ARG CREATED -ARG VERSION -ARG GIT_REF=origin/master - -ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" - -# Labels according to https://github.com/opencontainers/image-spec/blob/master/annotations.md -# including a custom label specifying the build command -LABEL org.opencontainers.image.created=${CREATED} \ - org.opencontainers.image.authors="Conduit Contributors" \ - org.opencontainers.image.title="Conduit" \ - org.opencontainers.image.version=${VERSION} \ - org.opencontainers.image.vendor="Conduit Contributors" \ - org.opencontainers.image.description="A Matrix homeserver written in Rust" \ - org.opencontainers.image.url="https://conduit.rs/" \ - org.opencontainers.image.revision=${GIT_REF} \ - org.opencontainers.image.source="https://gitlab.com/famedly/conduit.git" \ - org.opencontainers.image.licenses="Apache-2.0" \ - org.opencontainers.image.documentation="" \ - org.opencontainers.image.ref.name="" \ - org.label-schema.docker.build="docker build . -t matrixconduit/matrix-conduit:latest --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)" \ - maintainer="Weasy666" - -# Standard port on which Conduit launches. You still need to map the port when using the docker command or docker-compose. +# syntax=docker/dockerfile:1 +FROM docker.io/rust:1.63-bullseye AS builder +WORKDIR /usr/src/conduit + +# Install required packages to build Conduit and it's dependencies +RUN apt-get update && \ + apt-get -y --no-install-recommends install libclang-dev=1:11.0-51+nmu5 + +# == Build dependencies without our own code separately for caching == +# +# Need a fake main.rs since Cargo refuses to build anything otherwise. +# +# See https://github.com/rust-lang/cargo/issues/2644 for a Cargo feature +# request that would allow just dependencies to be compiled, presumably +# regardless of whether source files are available. 
+RUN mkdir src && touch src/lib.rs && echo 'fn main() {}' > src/main.rs +COPY Cargo.toml Cargo.lock ./ +RUN cargo build --release && rm -r src + +# Copy over actual Conduit sources +COPY src src + +# main.rs and lib.rs need their timestamp updated for this to work correctly since +# otherwise the build with the fake main.rs from above is newer than the +# source files (COPY preserves timestamps). +# +# Builds conduit and places the binary at /usr/src/conduit/target/release/conduit +RUN touch src/main.rs && touch src/lib.rs && cargo build --release + +# --------------------------------------------------------------------------------------------------------------- +# Stuff below this line actually ends up in the resulting docker image +# --------------------------------------------------------------------------------------------------------------- +FROM docker.io/debian:bullseye-slim AS runner + +# Standard port on which Conduit launches. +# You still need to map the port when using the docker command or docker-compose. EXPOSE 6167 -# Copy config files from context and the binary from -# the "builder" stage to the current stage into folder -# /srv/conduit and create data folder for database -RUN mkdir -p /srv/conduit/.local/share/conduit -COPY --from=builder /root/.cargo/bin/conduit /srv/conduit/ -COPY --from=builder ./healthcheck.sh /srv/conduit/ - -# Add www-data user and group with UID 82, as used by alpine -# https://git.alpinelinux.org/aports/tree/main/nginx/nginx.pre-install -RUN set -x ; \ - addgroup -Sg 82 www-data 2>/dev/null ; \ - adduser -S -D -H -h /srv/conduit -G www-data -g www-data www-data 2>/dev/null ; \ - addgroup www-data www-data 2>/dev/null && exit 0 ; exit 1 +ARG DEFAULT_DB_PATH=/var/lib/matrix-conduit -# Change ownership of Conduit files to www-data user and group -RUN chown -cR www-data:www-data /srv/conduit +ENV CONDUIT_PORT=6167 \ + CONDUIT_ADDRESS="0.0.0.0" \ + CONDUIT_DATABASE_PATH=${DEFAULT_DB_PATH} \ + CONDUIT_CONFIG='' +# └─> Set no config file to do all configuration with env vars -# Install packages needed to run Conduit -RUN apk add --no-cache \ - ca-certificates \ - curl \ - libgcc +# Conduit needs: +# ca-certificates: for https +# iproute2 & wget: for the healthcheck script +RUN apt-get update && apt-get -y --no-install-recommends install \ + ca-certificates \ + iproute2 \ + wget \ + && rm -rf /var/lib/apt/lists/* # Test if Conduit is still alive, uses the same endpoint as Element -HEALTHCHECK --start-period=5s --interval=60s CMD ./healthcheck.sh +COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh +HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh -# Set user to www-data -USER www-data +# Copy over the actual Conduit binary from the builder stage +COPY --from=builder /usr/src/conduit/target/release/conduit /srv/conduit/conduit + +# Improve security: Don't run stuff as root, that does not need to run as root +# Most distros also use 1000:1000 for the first real user, so this should resolve volume mounting problems. 
+ARG USER_ID=1000 +ARG GROUP_ID=1000 +RUN set -x ; \ + groupadd -r -g ${GROUP_ID} conduit ; \ + useradd -l -r -M -d /srv/conduit -o -u ${USER_ID} -g conduit conduit && exit 0 ; exit 1 + +# Create database directory, change ownership of Conduit files to conduit user and group and make the healthcheck executable: +RUN chown -cR conduit:conduit /srv/conduit && \ + chmod +x /srv/conduit/healthcheck.sh && \ + mkdir -p ${DEFAULT_DB_PATH} && \ + chown -cR conduit:conduit ${DEFAULT_DB_PATH} + +# Change user to conduit, no root permissions afterwards: +USER conduit # Set container home directory WORKDIR /srv/conduit -# Run Conduit + +# Run Conduit and print backtraces on panics +ENV RUST_BACKTRACE=1 ENTRYPOINT [ "/srv/conduit/conduit" ] diff --git a/README.md b/README.md index e667d18d..730b2512 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,5 @@ # Conduit + ### A Matrix homeserver written in Rust #### What is the goal? @@ -7,7 +8,6 @@ An efficient Matrix homeserver that's easy to set up and just works. You can ins it on a mini-computer like the Raspberry Pi to host Matrix for your family, friends or company. - #### Can I try it out? Yes! You can test our Conduit instance by opening a Matrix client ( or Element Android for @@ -17,10 +17,9 @@ It is hosted on a ODROID HC 2 with 2GB RAM and a SAMSUNG Exynos 5422 CPU, which was used in the Samsung Galaxy S5. It joined many big rooms including Matrix HQ. - #### What is the current status? -As of 2021-09-01, Conduit is Beta, meaning you can join and participate in most +Conduit is Beta, meaning you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time. @@ -31,26 +30,23 @@ There are still a few important features missing: Check out the [Conduit 1.0 Release Milestone](https://gitlab.com/famedly/conduit/-/milestones/3). - #### How can I deploy my own? -Simple install (this was tested the most): [DEPLOY.md](DEPLOY.md)\ -Debian package: [debian/README.Debian](debian/README.Debian)\ -Docker: [docker/README.md](docker/README.md) +- Simple install (this was tested the most): [DEPLOY.md](DEPLOY.md) +- Debian package: [debian/README.Debian](debian/README.Debian) +- Docker: [docker/README.md](docker/README.md) If you want to connect an Appservice to Conduit, take a look at [APPSERVICES.md](APPSERVICES.md). - #### How can I contribute? 1. Look for an issue you would like to work on and make sure it's not assigned to other users 2. Ask someone to assign the issue to you (comment on the issue or chat in - #conduit:nordgedanken.dev) -3. Fork the repo and work on the issue. #conduit:nordgedanken.dev is happy to help :) + [#conduit:fachschaften.org](https://matrix.to/#/#conduit:fachschaften.org)) +3. Fork the repo and work on the issue.[#conduit:fachschaften.org](https://matrix.to/#/#conduit:fachschaften.org) is happy to help :) 4. Submit a MR - #### Thanks to Thanks to Famedly, Prototype Fund (DLR and German BMBF) and all other individuals for financially supporting this project. 
@@ -58,15 +54,13 @@ Thanks to Famedly, Prototype Fund (DLR and German BMBF) and all other individual Thanks to the contributors to Conduit and all libraries we use, for example: - Ruma: A clean library for the Matrix Spec in Rust -- Rocket: A flexible web framework - +- axum: A modular web framework #### Donate Liberapay: \ Bitcoin: `bc1qnnykf986tw49ur7wx9rpw2tevpsztvar5x8w4n` - #### Logo Lightning Bolt Logo: https://github.com/mozilla/fxemoji/blob/gh-pages/svgs/nature/u26A1-bolt.svg \ diff --git a/TURN.md b/TURN.md new file mode 100644 index 00000000..63c1e99f --- /dev/null +++ b/TURN.md @@ -0,0 +1,25 @@ +# Setting up TURN/STUN + +## General instructions + +* It is assumed you have a [Coturn server](https://github.com/coturn/coturn) up and running. See [Synapse reference implementation](https://github.com/matrix-org/synapse/blob/develop/docs/turn-howto.md). + +## Edit/Add a few settings to your existing conduit.toml + +``` +# Refer to your Coturn settings. +# `your.turn.url` has to match the REALM setting of your Coturn as well as `transport`. +turn_uris = ["turn:your.turn.url?transport=udp", "turn:your.turn.url?transport=tcp"] + +# static-auth-secret of your turnserver +turn_secret = "ADD SECRET HERE" + +# If you have your TURN server configured to use a username and password +# you can provide this information too. In this case comment out `turn_secret` above! +#turn_username = "" +#turn_password = "" +``` + +## Apply settings + +Restart Conduit. \ No newline at end of file diff --git a/conduit-example.toml b/conduit-example.toml index 4275f528..0549030e 100644 --- a/conduit-example.toml +++ b/conduit-example.toml @@ -1,22 +1,35 @@ +# ============================================================================= +# This is the official example config for Conduit. +# If you use it for your server, you will need to adjust it to your own needs. +# At the very least, change the server_name field! +# ============================================================================= + + [global] -# The server_name is the name of this server. It is used as a suffix for user +# The server_name is the pretty name of this server. It is used as a suffix for user # and room ids. Examples: matrix.org, conduit.rs -# The Conduit server needs to be reachable at https://your.server.name/ on port -# 443 (client-server) and 8448 (federation) OR you can create /.well-known -# files to redirect requests. See + +# The Conduit server needs all /_matrix/ requests to be reachable at +# https://your.server.name/ on port 443 (client-server) and 8448 (federation). + +# If that's not possible for you, you can create /.well-known files to redirect +# requests. See # https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client -# and https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server +# and +# https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server # for more information # YOU NEED TO EDIT THIS #server_name = "your.server.name" # This is the only directory where Conduit will save its data -database_path = "/var/lib/conduit/" +database_path = "/var/lib/matrix-conduit/" +database_backend = "rocksdb" # The port Conduit will be running on. You need to set up a reverse proxy in # your web server (e.g. apache or nginx), so all requests to /_matrix on port # 443 and 8448 will be forwarded to the Conduit instance running on this port +# Docker users: Don't change this, you'll need to map an external port to this.
port = 6167 # Max size for uploads @@ -25,24 +38,15 @@ max_request_size = 20_000_000 # in bytes # Enables registration. If set to false, no users can register on this server. allow_registration = true -# Disable encryption, so no new encrypted rooms can be created -# Note: existing rooms will continue to work -#allow_encryption = false -#allow_federation = false +allow_federation = true -# Enable jaeger to support monitoring and troubleshooting through jaeger -#allow_jaeger = false +# Enable the display name lightning bolt on registration. +enable_lightning_bolt = true trusted_servers = ["matrix.org"] #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time -#log = "info,state_res=warn,rocket=off,_=off,sled=off" -#workers = 4 # default: cpu core count * 2 +#log = "warn,state_res=warn,rocket=off,_=off,sled=off" address = "127.0.0.1" # This makes sure Conduit can only be reached using the reverse proxy #address = "0.0.0.0" # If Conduit is running in a container, make sure the reverse proxy (ie. Traefik) can reach it. - -proxy = "none" # more examples can be found at src/database/proxy.rs:6 - -# The total amount of memory that the database will use. -#db_cache_capacity_mb = 200 diff --git a/debian/matrix-conduit.service b/debian/matrix-conduit.service index 7c12d1a7..299f2680 100644 --- a/debian/matrix-conduit.service +++ b/debian/matrix-conduit.service @@ -3,6 +3,7 @@ Description=Conduit Matrix homeserver After=network.target [Service] +DynamicUser=yes User=_matrix-conduit Group=_matrix-conduit Type=simple diff --git a/debian/postinst b/debian/postinst index 6bd1a3a7..73e554b7 100644 --- a/debian/postinst +++ b/debian/postinst @@ -5,7 +5,7 @@ set -e CONDUIT_CONFIG_PATH=/etc/matrix-conduit CONDUIT_CONFIG_FILE="${CONDUIT_CONFIG_PATH}/conduit.toml" -CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit/conduit_db +CONDUIT_DATABASE_PATH=/var/lib/matrix-conduit/ case "$1" in configure) @@ -36,18 +36,24 @@ case "$1" in mkdir -p "$CONDUIT_CONFIG_PATH" cat > "$CONDUIT_CONFIG_FILE" << EOF [global] -# The server_name is the name of this server. It is used as a suffix for user -# and room ids. Examples: matrix.org, conduit.rs -# The Conduit server needs to be reachable at https://your.server.name/ on port -# 443 (client-server) and 8448 (federation) OR you can create /.well-known -# files to redirect requests. See +# The server_name is the pretty name of this server. It is used as a suffix for +# user and room ids. Examples: matrix.org, conduit.rs + +# The Conduit server needs all /_matrix/ requests to be reachable at +# https://your.server.name/ on port 443 (client-server) and 8448 (federation). + +# If that's not possible for you, you can create /.well-known files to redirect +# requests. See # https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client -# and https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server -# for more information. +# and +# https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server +# for more information + server_name = "${CONDUIT_SERVER_NAME}" # This is the only directory where Conduit will save its data. database_path = "${CONDUIT_DATABASE_PATH}" +database_backend = "rocksdb" # The address Conduit will be listening on. # By default the server listens on address 0.0.0.0. Change this to 127.0.0.1 to @@ -56,7 +62,8 @@ address = "${CONDUIT_ADDRESS}" # The port Conduit will be running on. You need to set up a reverse proxy in # your web server (e.g. 
apache or nginx), so all requests to /_matrix on port -# 443 and 8448 will be forwarded to the Conduit instance running on this port. +# 443 and 8448 will be forwarded to the Conduit instance running on this port +# Docker users: Don't change this, you'll need to map an external port to this. port = ${CONDUIT_PORT} # Max size for uploads @@ -65,20 +72,12 @@ max_request_size = 20_000_000 # in bytes # Enables registration. If set to false, no users can register on this server. allow_registration = true -# Disable encryption, so no new encrypted rooms can be created. -# Note: Existing rooms will continue to work. -#allow_encryption = false -#allow_federation = false +allow_federation = true -# Enable jaeger to support monitoring and troubleshooting through jaeger. -#allow_jaeger = false +trusted_servers = ["matrix.org"] #max_concurrent_requests = 100 # How many requests Conduit sends to other servers at the same time -#log = "info,state_res=warn,rocket=off,_=off,sled=off" -#workers = 4 # default: cpu core count * 2 - -# The total amount of memory that the database will use. -#db_cache_capacity_mb = 200 +#log = "warn,state_res=warn,rocket=off,_=off,sled=off" EOF fi ;; diff --git a/docker-compose.yml b/docker-compose.yml index 530fc198..d9c32b51 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -20,27 +20,21 @@ services: ports: - 8448:6167 volumes: - - db:/srv/conduit/.local/share/conduit - ### Uncomment if you want to use conduit.toml to configure Conduit - ### Note: Set env vars will override conduit.toml values - # - ./conduit.toml:/srv/conduit/conduit.toml + - db:/var/lib/matrix-conduit/ environment: - CONDUIT_SERVER_NAME: localhost:6167 # replace with your own name - CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' + CONDUIT_SERVER_NAME: your.server.name # EDIT THIS + CONDUIT_DATABASE_PATH: /var/lib/matrix-conduit/ + CONDUIT_DATABASE_BACKEND: rocksdb + CONDUIT_PORT: 6167 + CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB CONDUIT_ALLOW_REGISTRATION: 'true' - ### Uncomment and change values as desired - # CONDUIT_ADDRESS: 0.0.0.0 - # CONDUIT_PORT: 6167 - # CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string '' - # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging - # CONDUIT_LOG: info # default is: "info,rocket=off,_=off,sled=off" - # CONDUIT_ALLOW_JAEGER: 'false' - # CONDUIT_ALLOW_ENCRYPTION: 'false' - # CONDUIT_ALLOW_FEDERATION: 'false' - # CONDUIT_DATABASE_PATH: /srv/conduit/.local/share/conduit - # CONDUIT_WORKERS: 10 - # CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB - + CONDUIT_ALLOW_FEDERATION: 'true' + CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' + #CONDUIT_MAX_CONCURRENT_REQUESTS: 100 + #CONDUIT_LOG: warn,rocket=off,_=off,sled=off + CONDUIT_ADDRESS: 0.0.0.0 + CONDUIT_CONFIG: '' # Ignore this + # ### Uncomment if you want to use your own Element-Web App. ### Note: You need to provide a config.json for Element and you also need a second ### Domain or Subdomain for the communication between Element and Conduit @@ -56,4 +50,4 @@ services: # - homeserver volumes: - db: + db: diff --git a/docker/README.md b/docker/README.md index 0e834820..36717c4f 100644 --- a/docker/README.md +++ b/docker/README.md @@ -2,53 +2,51 @@ > **Note:** To run and use Conduit you should probably use it with a Domain or Subdomain behind a reverse proxy (like Nginx, Traefik, Apache, ...) with a Lets Encrypt certificate. 
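Once your reverse proxy is in place, a quick sanity check is to query the client versions endpoint from any machine; it is the same endpoint the bundled `healthcheck.sh` uses, and `your.server.name` is just a placeholder for your own domain:

```bash
# Client-Server API, forwarded by the reverse proxy on port 443
curl -fsS https://your.server.name/_matrix/client/versions

# Federation traffic on port 8448 is forwarded to the same Conduit instance,
# so the same path should answer there as well
curl -fsS https://your.server.name:8448/_matrix/client/versions
```

Both calls should return a small JSON object listing the supported Matrix spec versions.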
- ## Docker ### Build & Dockerfile The Dockerfile provided by Conduit has two stages, each of which creates an image. -1. **Builder:** Builds the binary from local context or by cloning a git revision from the official repository. -2. **Runtime:** Copies the built binary from **Builder** and sets up the runtime environment, like creating a volume to persist the database and applying the correct permissions. - -The Dockerfile includes a few build arguments that should be supplied when building it. - -``` Dockerfile -ARG LOCAL=false -ARG CREATED -ARG VERSION -ARG GIT_REF=origin/master -``` -- **CREATED:** Date and time as string (date-time as defined by RFC 3339). Will be used to create the Open Container Initiative compliant label `org.opencontainers.image.created`. Supply by it like this `$(date -u +'%Y-%m-%dT%H:%M:%SZ')` -- **VERSION:** The SemVer version of Conduit, which is in the image. Will be used to create the Open Container Initiative compliant label `org.opencontainers.image.version`. If you have a `Cargo.toml` in your build context, you can get it with `$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml)` -- **LOCAL:** *(Optional)* A boolean value, specifies if the local build context should be used, or if the official repository will be cloned. If not supplied with the build command, it will default to `false`. -- **GIT_REF:** *(Optional)* A git ref, like `HEAD` or a commit ID. The supplied ref will be used to create the Open Container Initiative compliant label `org.opencontainers.image.revision` and will be the ref that is cloned from the repository when not building from the local context. If not supplied with the build command, it will default to `origin/master`. +1. **Builder:** Builds the binary from local context or by cloning a git revision from the official repository. +2. **Runner:** Copies the built binary from **Builder** and sets up the runtime environment, like creating a volume to persist the database and applying the correct permissions. To build the image you can use the following command -``` bash -docker build . -t matrixconduit/matrix-conduit:latest --build-arg CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') --build-arg VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) +```bash +docker build --tag matrixconduit/matrix-conduit:latest . ``` which also will tag the resulting image as `matrixconduit/matrix-conduit:latest`. -**Note:** it ommits the two optional `build-arg`s. 
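If you want to double-check what the build produced, `docker images` and `docker image inspect` can help; this is purely an optional sketch, and the OCI labels are only populated if the corresponding build arguments were supplied:

```bash
# Confirm the tag exists and check the image size
docker images matrixconduit/matrix-conduit:latest

# Print the OCI labels (may be empty, depending on the build args you passed)
docker image inspect --format '{{json .Config.Labels}}' matrixconduit/matrix-conduit:latest
```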
- ### Run After building the image you can simply run it with -``` bash -docker run -d -p 8448:6167 -v ~/conduit.toml:/srv/conduit/conduit.toml -v db:/srv/conduit/.local/share/conduit matrixconduit/matrix-conduit:latest +```bash +docker run -d -p 8448:6167 \ + -v db:/var/lib/matrix-conduit/ \ + -e CONDUIT_SERVER_NAME="your.server.name" \ + -e CONDUIT_DATABASE_BACKEND="rocksdb" \ + -e CONDUIT_ALLOW_REGISTRATION=true \ + -e CONDUIT_ALLOW_FEDERATION=true \ + -e CONDUIT_MAX_REQUEST_SIZE="20_000_000" \ + -e CONDUIT_TRUSTED_SERVERS="[\"matrix.org\"]" \ + -e CONDUIT_MAX_CONCURRENT_REQUESTS="100" \ + -e CONDUIT_LOG="warn,rocket=off,_=off,sled=off" \ + --name conduit matrixconduit/matrix-conduit:latest ``` or you can skip the build step and pull the image from one of the following registries: -| Registry | Image | Size | -| --------------- | ------------------------------------------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------- | -| Docker Hub | [matrixconduit/matrix-conduit:latest](https://hub.docker.com/r/matrixconduit/matrix-conduit) | ![Image Size](https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest) | -| GitLab Registry | [registry.gitlab.com/famedly/conduit/conduit:latest](https://gitlab.com/famedly/conduit/container_registry/2134341) | ![Image Size](https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest) | +| Registry | Image | Size | +| --------------- | --------------------------------------------------------------- | --------------------- | +| Docker Hub | [matrixconduit/matrix-conduit:latest][dh] | ![Image Size][shield] | +| GitLab Registry | [registry.gitlab.com/famedly/conduit/matrix-conduit:latest][gl] | ![Image Size][shield] | + +[dh]: https://hub.docker.com/r/matrixconduit/matrix-conduit +[gl]: https://gitlab.com/famedly/conduit/container_registry/2497937 +[shield]: https://img.shields.io/docker/image-size/matrixconduit/matrix-conduit/latest The `-d` flag lets the container run in detached mode. You now need to supply a `conduit.toml` config file, an example can be found [here](../conduit-example.toml). You can pass in different env vars to change config values on the fly. You can even configure Conduit completely by using env vars, but for that you need @@ -56,29 +54,36 @@ to pass `-e CONDUIT_CONFIG=""` into your container. For an overview of possible If you just want to test Conduit for a short time, you can use the `--rm` flag, which will clean up everything related to your container after you stop it. - ## Docker-compose -If the docker command is not for you or your setup, you can also use one of the provided `docker-compose` files. Depending on your proxy setup, use the [`docker-compose.traefik.yml`](docker-compose.traefik.yml) and [`docker-compose.override.traefik.yml`](docker-compose.override.traefik.yml) for Traefik (don't forget to remove `.traefik` from the filenames) or the normal [`docker-compose.yml`](../docker-compose.yml) for every other reverse proxy. Additional info about deploying -Conduit can be found [here](../DEPLOY.md). +If the `docker run` command is not for you or your setup, you can also use one of the provided `docker-compose` files. 
+Depending on your proxy setup, you can use one of the following files: +- If you already have a `traefik` instance set up, use [`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) +- If you don't have a `traefik` instance set up (or any other reverse proxy), use [`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml) +- For any other reverse proxy, use [`docker-compose.yml`](docker-compose.yml) + +When picking the traefik-related compose file, rename it so it matches `docker-compose.yml`, and +rename the override file to `docker-compose.override.yml`. Edit the latter with the values you want +for your server. + +Additional info about deploying Conduit can be found [here](../DEPLOY.md). ### Build To build the Conduit image with docker-compose, you first need to open and modify the `docker-compose.yml` file. There you need to comment the `image:` option and uncomment the `build:` option. Then call docker-compose with: -``` bash -CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up +```bash +docker-compose up ``` -This will also start the container right afterwards, so if want it to run in detached mode, you also should use the `-d` flag. For possible `build-args`, please take a look at the above `Build & Dockerfile` section. - +This will also start the container right afterwards, so if you want it to run in detached mode, you should also use the `-d` flag. ### Run If you already have built the image or want to use one from the registries, you can just start the container and everything else in the compose file in detached mode with: -``` bash +```bash docker-compose up -d ``` @@ -86,11 +91,16 @@ docker-compose up -d ### Use Traefik as Proxy -As a container user, you probably know about Traefik. It is a easy to use reverse proxy for making containerized app and services available through the web. With the -two provided files, [`docker-compose.traefik.yml`](docker-compose.traefik.yml) and [`docker-compose.override.traefik.yml`](docker-compose.override.traefik.yml), it is -equally easy to deploy and use Conduit, with a little caveat. If you already took a look at the files, then you should have seen the `well-known` service, and that is -the little caveat. Traefik is simply a proxy and loadbalancer and is not able to serve any kind of content, but for Conduit to federate, we need to either expose ports -`443` and `8448` or serve two endpoints `.well-known/matrix/client` and `.well-known/matrix/server`. +As a container user, you probably know about Traefik. It is an easy to use reverse proxy for making +containerized apps and services available through the web. With the two provided files, +[`docker-compose.for-traefik.yml`](docker-compose.for-traefik.yml) (or +[`docker-compose.with-traefik.yml`](docker-compose.with-traefik.yml)) and +[`docker-compose.override.yml`](docker-compose.override.traefik.yml), it is equally easy to deploy +and use Conduit, with a little caveat. If you already took a look at the files, then you should have +seen the `well-known` service, and that is the little caveat. Traefik is simply a proxy and +loadbalancer and is not able to serve any kind of content, but for Conduit to federate, we need to +either expose ports `443` and `8448` or serve two endpoints `.well-known/matrix/client` and +`.well-known/matrix/server`. With the service `well-known` we use a single `nginx` container that will serve those two files. @@ -101,32 +111,30 @@ So...step by step: 3.
Create the `conduit.toml` config file, an example can be found [here](../conduit-example.toml), or set `CONDUIT_CONFIG=""` and configure Conduit per env vars. 4. Uncomment the `element-web` service if you want to host your own Element Web Client and create a `element_config.json`. 5. Create the files needed by the `well-known` service. - - `./nginx/matrix.conf` (relative to the compose file, you can change this, but then also need to change the volume mapping) - ```nginx - server { - server_name .; - listen 80 default_server; - - location /.well-known/matrix/ { - root /var/www; - default_type application/json; - add_header Access-Control-Allow-Origin *; - } - } - ``` - - `./nginx/www/.well-known/matrix/client` (relative to the compose file, you can change this, but then also need to change the volume mapping) - ```json - { - "m.homeserver": { - "base_url": "https://." - } - } - ``` - - `./nginx/www/.well-known/matrix/server` (relative to the compose file, you can change this, but then also need to change the volume mapping) - ```json - { - "m.server": ".:443" - } - ``` + + - `./nginx/matrix.conf` (relative to the compose file, you can change this, but then also need to change the volume mapping) + + ```nginx + server { + server_name .; + listen 80 default_server; + + location /.well-known/matrix/server { + return 200 '{"m.server": ".:443"}'; + add_header Content-Type application/json; + } + + location /.well-known/matrix/client { + return 200 '{"m.homeserver": {"base_url": "https://."}}'; + add_header Content-Type application/json; + add_header "Access-Control-Allow-Origin" *; + } + + location / { + return 404; + } + } + ``` + 6. Run `docker-compose up -d` -7. Connect to your homeserver with your preferred client and create a user. You should do this immediatly after starting Conduit, because the first created user is the admin. +7. Connect to your homeserver with your preferred client and create a user. You should do this immediately after starting Conduit, because the first created user is the admin. diff --git a/docker/ci-binaries-packaging.Dockerfile b/docker/ci-binaries-packaging.Dockerfile index fb674396..4c1199ed 100644 --- a/docker/ci-binaries-packaging.Dockerfile +++ b/docker/ci-binaries-packaging.Dockerfile @@ -1,69 +1,84 @@ +# syntax=docker/dockerfile:1 # --------------------------------------------------------------------------------------------------------- # This Dockerfile is intended to be built as part of Conduit's CI pipeline. -# It does not build Conduit in Docker, but just copies the matching build artifact from the build job. -# As a consequence, this is not a multiarch capable image. It always expects and packages a x86_64 binary. +# It does not build Conduit in Docker, but just copies the matching build artifact from the build jobs. # # It is mostly based on the normal Conduit Dockerfile, but adjusted in a few places to maximise caching. # Credit's for the original Dockerfile: Weasy666. # --------------------------------------------------------------------------------------------------------- -FROM alpine:3.14 +FROM docker.io/alpine:3.16.0@sha256:4ff3ca91275773af45cb4b0834e12b7eb47d1c18f770a0b151381cd227f4c253 AS runner -# Install packages needed to run Conduit + +# Standard port on which Conduit launches. +# You still need to map the port when using the docker command or docker-compose. 
+EXPOSE 6167 + +# Users are expected to mount a volume to this directory: +ARG DEFAULT_DB_PATH=/var/lib/matrix-conduit + +ENV CONDUIT_PORT=6167 \ + CONDUIT_ADDRESS="0.0.0.0" \ + CONDUIT_DATABASE_PATH=${DEFAULT_DB_PATH} \ + CONDUIT_CONFIG='' +# └─> Set no config file to do all configuration with env vars + +# Conduit needs: +# ca-certificates: for https +# iproute2: for `ss` for the healthcheck script RUN apk add --no-cache \ - ca-certificates \ - curl \ - libgcc + ca-certificates \ + iproute2 ARG CREATED ARG VERSION ARG GIT_REF - -ENV CONDUIT_CONFIG="/srv/conduit/conduit.toml" - # Labels according to https://github.com/opencontainers/image-spec/blob/master/annotations.md # including a custom label specifying the build command LABEL org.opencontainers.image.created=${CREATED} \ - org.opencontainers.image.authors="Conduit Contributors" \ - org.opencontainers.image.title="Conduit" \ - org.opencontainers.image.version=${VERSION} \ - org.opencontainers.image.vendor="Conduit Contributors" \ - org.opencontainers.image.description="A Matrix homeserver written in Rust" \ - org.opencontainers.image.url="https://conduit.rs/" \ - org.opencontainers.image.revision=${GIT_REF} \ - org.opencontainers.image.source="https://gitlab.com/famedly/conduit.git" \ - org.opencontainers.image.licenses="Apache-2.0" \ - org.opencontainers.image.documentation="" \ - org.opencontainers.image.ref.name="" + org.opencontainers.image.authors="Conduit Contributors" \ + org.opencontainers.image.title="Conduit" \ + org.opencontainers.image.version=${VERSION} \ + org.opencontainers.image.vendor="Conduit Contributors" \ + org.opencontainers.image.description="A Matrix homeserver written in Rust" \ + org.opencontainers.image.url="https://conduit.rs/" \ + org.opencontainers.image.revision=${GIT_REF} \ + org.opencontainers.image.source="https://gitlab.com/famedly/conduit.git" \ + org.opencontainers.image.licenses="Apache-2.0" \ + org.opencontainers.image.documentation="https://gitlab.com/famedly/conduit" \ + org.opencontainers.image.ref.name="" -# Standard port on which Conduit launches. You still need to map the port when using the docker command or docker-compose. -EXPOSE 6167 -# create data folder for database -RUN mkdir -p /srv/conduit/.local/share/conduit - -# Copy the Conduit binary into the image at the latest possible moment to maximise caching: -COPY ./conduit-x86_64-unknown-linux-musl /srv/conduit/conduit -COPY ./docker/healthcheck.sh /srv/conduit/ +# Test if Conduit is still alive, uses the same endpoint as Element +COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh +HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh -# Add www-data user and group with UID 82, as used by alpine -# https://git.alpinelinux.org/aports/tree/main/nginx/nginx.pre-install +# Improve security: Don't run stuff as root, that does not need to run as root: +# Most distros also use 1000:1000 for the first real user, so this should resolve volume mounting problems. 
+ARG USER_ID=1000 +ARG GROUP_ID=1000 RUN set -x ; \ - addgroup -Sg 82 www-data 2>/dev/null ; \ - adduser -S -D -H -h /srv/conduit -G www-data -g www-data www-data 2>/dev/null ; \ - addgroup www-data www-data 2>/dev/null && exit 0 ; exit 1 + deluser --remove-home www-data ; \ + addgroup -S -g ${GROUP_ID} conduit 2>/dev/null ; \ + adduser -S -u ${USER_ID} -D -H -h /srv/conduit -G conduit -g conduit conduit 2>/dev/null ; \ + addgroup conduit conduit 2>/dev/null && exit 0 ; exit 1 -# Change ownership of Conduit files to www-data user and group -RUN chown -cR www-data:www-data /srv/conduit -RUN chmod +x /srv/conduit/healthcheck.sh +# Change ownership of Conduit files to conduit user and group +RUN chown -cR conduit:conduit /srv/conduit && \ + chmod +x /srv/conduit/healthcheck.sh && \ + mkdir -p ${DEFAULT_DB_PATH} && \ + chown -cR conduit:conduit ${DEFAULT_DB_PATH} - -# Test if Conduit is still alive, uses the same endpoint as Element -HEALTHCHECK --start-period=5s --interval=60s CMD ./healthcheck.sh - -# Set user to www-data -USER www-data +# Change user to conduit +USER conduit # Set container home directory WORKDIR /srv/conduit -# Run Conduit + +# Run Conduit and print backtraces on panics +ENV RUST_BACKTRACE=1 ENTRYPOINT [ "/srv/conduit/conduit" ] + +# Depending on the target platform (e.g. "linux/arm/v7", "linux/arm64/v8", or "linux/amd64") +# copy the matching binary into this docker image +ARG TARGETPLATFORM +COPY --chown=conduit:conduit ./$TARGETPLATFORM /srv/conduit/conduit diff --git a/docker/docker-compose.for-traefik.yml b/docker/docker-compose.for-traefik.yml new file mode 100644 index 00000000..474299f6 --- /dev/null +++ b/docker/docker-compose.for-traefik.yml @@ -0,0 +1,68 @@ +# Conduit - Behind Traefik Reverse Proxy +version: '3' + +services: + homeserver: + ### If you already built the Conduit image with 'docker build' or want to use the Docker Hub image, + ### then you are ready to go. + image: matrixconduit/matrix-conduit:latest + ### If you want to build a fresh image from the sources, then comment the image line and uncomment the + ### build lines. If you want meaningful labels in your built Conduit image, you should run docker-compose like this: + ### CREATED=$(date -u +'%Y-%m-%dT%H:%M:%SZ') VERSION=$(grep -m1 -o '[0-9].[0-9].[0-9]' Cargo.toml) docker-compose up -d + # build: + # context: . + # args: + # CREATED: '2021-03-16T08:18:27Z' + # VERSION: '0.1.0' + # LOCAL: 'false' + # GIT_REF: origin/master + restart: unless-stopped + volumes: + - db:/var/lib/matrix-conduit/ + networks: + - proxy + environment: + CONDUIT_SERVER_NAME: your.server.name # EDIT THIS + CONDUIT_DATABASE_PATH: /var/lib/matrix-conduit/ + CONDUIT_DATABASE_BACKEND: rocksdb + CONDUIT_PORT: 6167 + CONDUIT_MAX_REQUEST_SIZE: 20_000_000 # in bytes, ~20 MB + CONDUIT_ALLOW_REGISTRATION: 'true' + CONDUIT_ALLOW_FEDERATION: 'true' + CONDUIT_TRUSTED_SERVERS: '["matrix.org"]' + #CONDUIT_MAX_CONCURRENT_REQUESTS: 100 + #CONDUIT_LOG: warn,rocket=off,_=off,sled=off + CONDUIT_ADDRESS: 0.0.0.0 + CONDUIT_CONFIG: '' # Ignore this + + # We need some way to server the client and server .well-known json. The simplest way is to use a nginx container + # to serve those two as static files. If you want to use a different way, delete or comment the below service, here + # and in the docker-compose override file. 
+ well-known: + image: nginx:latest + restart: unless-stopped + volumes: + - ./nginx/matrix.conf:/etc/nginx/conf.d/matrix.conf # the config to serve the .well-known/matrix files + - ./nginx/www:/var/www/ # location of the client and server .well-known-files + ### Uncomment if you want to use your own Element-Web App. + ### Note: You need to provide a config.json for Element and you also need a second + ### Domain or Subdomain for the communication between Element and Conduit + ### Config-Docs: https://github.com/vector-im/element-web/blob/develop/docs/config.md + # element-web: + # image: vectorim/element-web:latest + # restart: unless-stopped + # volumes: + # - ./element_config.json:/app/config.json + # networks: + # - proxy + # depends_on: + # - homeserver + +volumes: + db: + +networks: + # This is the network Traefik listens to, if your network has a different + # name, don't forget to change it here and in the docker-compose.override.yml + proxy: + external: true diff --git a/docker/docker-compose.override.traefik.yml b/docker/docker-compose.override.yml similarity index 100% rename from docker/docker-compose.override.traefik.yml rename to docker/docker-compose.override.yml diff --git a/docker/docker-compose.traefik.yml b/docker/docker-compose.with-traefik.yml similarity index 78% rename from docker/docker-compose.traefik.yml rename to docker/docker-compose.with-traefik.yml index 392b3828..79ebef4b 100644 --- a/docker/docker-compose.traefik.yml +++ b/docker/docker-compose.with-traefik.yml @@ -33,7 +33,7 @@ services: # CONDUIT_PORT: 6167 # CONDUIT_CONFIG: '/srv/conduit/conduit.toml' # if you want to configure purely by env vars, set this to an empty string '' # Available levels are: error, warn, info, debug, trace - more info at: https://docs.rs/env_logger/*/env_logger/#enabling-logging - # CONDUIT_LOG: info # default is: "info,rocket=off,_=off,sled=off" + # CONDUIT_LOG: info # default is: "warn,_=off,sled=off" # CONDUIT_ALLOW_JAEGER: 'false' # CONDUIT_ALLOW_ENCRYPTION: 'false' # CONDUIT_ALLOW_FEDERATION: 'false' @@ -65,11 +65,33 @@ services: # depends_on: # - homeserver + traefik: + image: "traefik:latest" + container_name: "traefik" + restart: "unless-stopped" + ports: + - "80:80" + - "443:443" + volumes: + - "/var/run/docker.sock:/var/run/docker.sock" + # - "./traefik_config:/etc/traefik" + - "acme:/etc/traefik/acme" + labels: + - "traefik.enable=true" + + # middleware redirect + - "traefik.http.middlewares.redirect-to-https.redirectscheme.scheme=https" + # global redirect to https + - "traefik.http.routers.redirs.rule=hostregexp(`{host:.+}`)" + - "traefik.http.routers.redirs.entrypoints=http" + - "traefik.http.routers.redirs.middlewares=redirect-to-https" + + networks: + - proxy + volumes: db: + acme: networks: - # This is the network Traefik listens to, if your network has a different - # name, don't forget to change it here and in the docker-compose.override.yml - proxy: - external: true + proxy: \ No newline at end of file diff --git a/docker/healthcheck.sh b/docker/healthcheck.sh index 568838ec..42b2e103 100644 --- a/docker/healthcheck.sh +++ b/docker/healthcheck.sh @@ -1,13 +1,14 @@ #!/bin/sh -# If the port is not specified as env var, take it from the config file -if [ -z ${CONDUIT_PORT} ]; then - CONDUIT_PORT=$(grep -m1 -o 'port\s=\s[0-9]*' conduit.toml | grep -m1 -o '[0-9]*') +# If the config file does not contain a default port and the CONDUIT_PORT env is not set, create +# try to get port from process list +if [ -z "${CONDUIT_PORT}" ]; then + CONDUIT_PORT=$(ss -tlpn | grep 
conduit | grep -m1 -o ':[0-9]*' | grep -m1 -o '[0-9]*') fi # The actual health check. # We try to first get a response on HTTP and when that fails on HTTPS and when that fails, we exit with code 1. -# TODO: Change this to a single curl call. Do we have a config value that we can check for that? -curl --fail -s "http://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \ - curl -k --fail -s "https://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \ +# TODO: Change this to a single wget call. Do we have a config value that we can check for that? +wget --no-verbose --tries=1 --spider "http://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \ + wget --no-verbose --tries=1 --spider "https://localhost:${CONDUIT_PORT}/_matrix/client/versions" || \ exit 1 diff --git a/rust-toolchain b/rust-toolchain deleted file mode 100644 index 74df8b16..00000000 --- a/rust-toolchain +++ /dev/null @@ -1 +0,0 @@ -1.53 diff --git a/src/appservice_server.rs b/src/api/appservice_server.rs similarity index 75% rename from src/appservice_server.rs rename to src/api/appservice_server.rs index ed886d6c..dc319e2c 100644 --- a/src/appservice_server.rs +++ b/src/api/appservice_server.rs @@ -1,16 +1,11 @@ -use crate::{utils, Error, Result}; +use crate::{services, utils, Error, Result}; use bytes::BytesMut; -use ruma::api::{IncomingResponse, OutgoingRequest, SendAccessToken}; -use std::{ - convert::{TryFrom, TryInto}, - fmt::Debug, - mem, - time::Duration, -}; +use ruma::api::{IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken}; +use std::{fmt::Debug, mem, time::Duration}; use tracing::warn; +#[tracing::instrument(skip(request))] pub(crate) async fn send_request( - globals: &crate::database::globals::Globals, registration: serde_yaml::Value, request: T, ) -> Result @@ -21,7 +16,11 @@ where let hs_token = registration.get("hs_token").unwrap().as_str().unwrap(); let mut http_request = request - .try_into_http_request::(destination, SendAccessToken::IfRequired("")) + .try_into_http_request::( + destination, + SendAccessToken::IfRequired(""), + &[MatrixVersion::V1_0], + ) .unwrap() .map(|body| body.freeze()); @@ -46,11 +45,23 @@ where *reqwest_request.timeout_mut() = Some(Duration::from_secs(30)); let url = reqwest_request.url().clone(); - let mut response = globals - .reqwest_client()? - .build()? 
+ let mut response = match services() + .globals + .default_client() .execute(reqwest_request) - .await?; + .await + { + Ok(r) => r, + Err(e) => { + warn!( + "Could not send request to appservice {:?} at {}: {}", + registration.get("id"), + destination, + e + ); + return Err(e.into()); + } + }; // reqwest::Response -> http::Response conversion let status = response.status(); diff --git a/src/api/client_server/account.rs b/src/api/client_server/account.rs new file mode 100644 index 00000000..ce4dadda --- /dev/null +++ b/src/api/client_server/account.rs @@ -0,0 +1,420 @@ +use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; +use crate::{api::client_server, services, utils, Error, Result, Ruma}; +use ruma::{ + api::client::{ + account::{ + change_password, deactivate, get_3pids, get_username_availability, register, whoami, + ThirdPartyIdRemovalStatus, + }, + error::ErrorKind, + uiaa::{AuthFlow, AuthType, UiaaInfo}, + }, + events::{room::message::RoomMessageEventContent, GlobalAccountDataEventType}, + push, UserId, +}; +use tracing::{info, warn}; + +use register::RegistrationKind; + +const RANDOM_USER_ID_LENGTH: usize = 10; + +/// # `GET /_matrix/client/r0/register/available` +/// +/// Checks if a username is valid and available on this server. +/// +/// Conditions for returning true: +/// - The user id is not historical +/// - The server name of the user id matches this server +/// - No user or appservice on this server already claimed this username +/// +/// Note: This will not reserve the username, so the username might become invalid when trying to register +pub async fn get_register_available_route( + body: Ruma, +) -> Result { + // Validate user id + let user_id = UserId::parse_with_server_name( + body.username.to_lowercase(), + services().globals.server_name(), + ) + .ok() + .filter(|user_id| { + !user_id.is_historical() && user_id.server_name() == services().globals.server_name() + }) + .ok_or(Error::BadRequest( + ErrorKind::InvalidUsername, + "Username is invalid.", + ))?; + + // Check if username is creative enough + if services().users.exists(&user_id)? { + return Err(Error::BadRequest( + ErrorKind::UserInUse, + "Desired user ID is already taken.", + )); + } + + // TODO add check for appservice namespaces + + // If no if check is true we have an username that's available to be used. + Ok(get_username_availability::v3::Response { available: true }) +} + +/// # `POST /_matrix/client/r0/register` +/// +/// Register an account on this homeserver. +/// +/// You can use [`GET /_matrix/client/r0/register/available`](fn.get_register_available_route.html) +/// to check if the user id is valid and available. 
+/// +/// - Only works if registration is enabled +/// - If type is guest: ignores all parameters except initial_device_display_name +/// - If sender is not appservice: Requires UIAA (but we only use a dummy stage) +/// - If type is not guest and no username is given: Always fails after UIAA check +/// - Creates a new account and populates it with default account data +/// - If `inhibit_login` is false: Creates a device and returns device id and access_token +pub async fn register_route( + body: Ruma, +) -> Result { + if !services().globals.allow_registration() && !body.from_appservice { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Registration has been disabled.", + )); + } + + let is_guest = body.kind == RegistrationKind::Guest; + + let user_id = match (&body.username, is_guest) { + (Some(username), false) => { + let proposed_user_id = UserId::parse_with_server_name( + username.to_lowercase(), + services().globals.server_name(), + ) + .ok() + .filter(|user_id| { + !user_id.is_historical() + && user_id.server_name() == services().globals.server_name() + }) + .ok_or(Error::BadRequest( + ErrorKind::InvalidUsername, + "Username is invalid.", + ))?; + if services().users.exists(&proposed_user_id)? { + return Err(Error::BadRequest( + ErrorKind::UserInUse, + "Desired user ID is already taken.", + )); + } + proposed_user_id + } + _ => loop { + let proposed_user_id = UserId::parse_with_server_name( + utils::random_string(RANDOM_USER_ID_LENGTH).to_lowercase(), + services().globals.server_name(), + ) + .unwrap(); + if !services().users.exists(&proposed_user_id)? { + break proposed_user_id; + } + }, + }; + + // UIAA + let mut uiaainfo = UiaaInfo { + flows: vec![AuthFlow { + stages: vec![AuthType::Dummy], + }], + completed: Vec::new(), + params: Default::default(), + session: None, + auth_error: None, + }; + + if !body.from_appservice { + if let Some(auth) = &body.auth { + let (worked, uiaainfo) = services().uiaa.try_auth( + &UserId::parse_with_server_name("", services().globals.server_name()) + .expect("we know this is valid"), + "".into(), + auth, + &uiaainfo, + )?; + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + // Success! 
+ } else if let Some(json) = body.json_body { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services().uiaa.create( + &UserId::parse_with_server_name("", services().globals.server_name()) + .expect("we know this is valid"), + "".into(), + &uiaainfo, + &json, + )?; + return Err(Error::Uiaa(uiaainfo)); + } else { + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + } + } + + let password = if is_guest { + None + } else { + body.password.as_deref() + }; + + // Create user + services().users.create(&user_id, password)?; + + // Default to pretty displayname + let mut displayname = user_id.localpart().to_owned(); + + // If enabled append lightning bolt to display name (default true) + if services().globals.enable_lightning_bolt() { + displayname.push_str(" ⚡️"); + } + + services() + .users + .set_displayname(&user_id, Some(displayname.clone()))?; + + // Initial account data + services().account_data.update( + None, + &user_id, + GlobalAccountDataEventType::PushRules.to_string().into(), + &serde_json::to_value(ruma::events::push_rules::PushRulesEvent { + content: ruma::events::push_rules::PushRulesEventContent { + global: push::Ruleset::server_default(&user_id), + }, + }) + .expect("to json always works"), + )?; + + // Inhibit login does not work for guests + if !is_guest && body.inhibit_login { + return Ok(register::v3::Response { + access_token: None, + user_id, + device_id: None, + refresh_token: None, + expires_in: None, + }); + } + + // Generate new device id if the user didn't specify one + let device_id = if is_guest { + None + } else { + body.device_id.clone() + } + .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into()); + + // Generate new token for the device + let token = utils::random_string(TOKEN_LENGTH); + + // Create device for this account + services().users.create_device( + &user_id, + &device_id, + &token, + body.initial_device_display_name.clone(), + )?; + + info!("New user {} registered on this server.", user_id); + services() + .admin + .send_message(RoomMessageEventContent::notice_plain(format!( + "New user {} registered on this server.", + user_id + ))); + + // If this is the first real user, grant them admin privileges + // Note: the server user, @conduit:servername, is generated first + if services().users.count()? == 2 { + services() + .admin + .make_user_admin(&user_id, displayname) + .await?; + + warn!("Granting {} admin privileges as the first user", user_id); + } + + Ok(register::v3::Response { + access_token: Some(token), + user_id, + device_id: Some(device_id), + refresh_token: None, + expires_in: None, + }) +} + +/// # `POST /_matrix/client/r0/account/password` +/// +/// Changes the password of this account. 
+/// +/// - Requires UIAA to verify user password +/// - Changes the password of the sender user +/// - The password hash is calculated using argon2 with 32 character salt, the plain password is +/// not saved +/// +/// If logout_devices is true it does the following for each device except the sender device: +/// - Invalidates access token +/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts) +/// - Forgets to-device events +/// - Triggers device list updates +pub async fn change_password_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + + let mut uiaainfo = UiaaInfo { + flows: vec![AuthFlow { + stages: vec![AuthType::Password], + }], + completed: Vec::new(), + params: Default::default(), + session: None, + auth_error: None, + }; + + if let Some(auth) = &body.auth { + let (worked, uiaainfo) = + services() + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo)?; + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + // Success! + } else if let Some(json) = body.json_body { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services() + .uiaa + .create(sender_user, sender_device, &uiaainfo, &json)?; + return Err(Error::Uiaa(uiaainfo)); + } else { + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + } + + services() + .users + .set_password(sender_user, Some(&body.new_password))?; + + if body.logout_devices { + // Logout all devices except the current one + for id in services() + .users + .all_device_ids(sender_user) + .filter_map(|id| id.ok()) + .filter(|id| id != sender_device) + { + services().users.remove_device(sender_user, &id)?; + } + } + + info!("User {} changed their password.", sender_user); + services() + .admin + .send_message(RoomMessageEventContent::notice_plain(format!( + "User {} changed their password.", + sender_user + ))); + + Ok(change_password::v3::Response {}) +} + +/// # `GET _matrix/client/r0/account/whoami` +/// +/// Get user_id of the sender user. +/// +/// Note: Also works for Application Services +pub async fn whoami_route(body: Ruma) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let device_id = body.sender_device.as_ref().cloned(); + + Ok(whoami::v3::Response { + user_id: sender_user.clone(), + device_id, + is_guest: services().users.is_deactivated(sender_user)? && !body.from_appservice, + }) +} + +/// # `POST /_matrix/client/r0/account/deactivate` +/// +/// Deactivate sender user account. 
+/// +/// - Leaves all rooms and rejects all invitations +/// - Invalidates all access tokens +/// - Deletes all device metadata (device id, device display name, last seen ip, last seen ts) +/// - Forgets all to-device events +/// - Triggers device list updates +/// - Removes ability to log in again +pub async fn deactivate_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + + let mut uiaainfo = UiaaInfo { + flows: vec![AuthFlow { + stages: vec![AuthType::Password], + }], + completed: Vec::new(), + params: Default::default(), + session: None, + auth_error: None, + }; + + if let Some(auth) = &body.auth { + let (worked, uiaainfo) = + services() + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo)?; + if !worked { + return Err(Error::Uiaa(uiaainfo)); + } + // Success! + } else if let Some(json) = body.json_body { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + services() + .uiaa + .create(sender_user, sender_device, &uiaainfo, &json)?; + return Err(Error::Uiaa(uiaainfo)); + } else { + return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); + } + + // Make the user leave all rooms before deactivation + client_server::leave_all_rooms(sender_user).await?; + + // Remove devices and mark account as deactivated + services().users.deactivate_account(sender_user)?; + + info!("User {} deactivated their account.", sender_user); + services() + .admin + .send_message(RoomMessageEventContent::notice_plain(format!( + "User {} deactivated their account.", + sender_user + ))); + + Ok(deactivate::v3::Response { + id_server_unbind_result: ThirdPartyIdRemovalStatus::NoSupport, + }) +} + +/// # `GET _matrix/client/r0/account/3pid` +/// +/// Get a list of third party identifiers associated with this account. +/// +/// - Currently always returns empty list +pub async fn third_party_route( + body: Ruma, +) -> Result { + let _sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + Ok(get_3pids::v3::Response::new(Vec::new())) +} diff --git a/src/client_server/alias.rs b/src/api/client_server/alias.rs similarity index 54% rename from src/client_server/alias.rs rename to src/api/client_server/alias.rs index 129ac166..b28606c1 100644 --- a/src/client_server/alias.rs +++ b/src/api/client_server/alias.rs @@ -1,49 +1,45 @@ -use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Ruma}; +use crate::{services, Error, Result, Ruma}; use regex::Regex; use ruma::{ api::{ appservice, client::{ + alias::{create_alias, delete_alias, get_alias}, error::ErrorKind, - r0::alias::{create_alias, delete_alias, get_alias}, }, federation, }, RoomAliasId, }; -#[cfg(feature = "conduit_bin")] -use rocket::{delete, get, put}; - /// # `PUT /_matrix/client/r0/directory/room/{roomAlias}` /// /// Creates a new room alias on this server. 
-#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/directory/room/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn create_alias_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - if body.room_alias.server_name() != db.globals.server_name() { + body: Ruma, +) -> Result { + if body.room_alias.server_name() != services().globals.server_name() { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Alias is from another server.", )); } - if db.rooms.id_from_alias(&body.room_alias)?.is_some() { + if services() + .rooms + .alias + .resolve_local_alias(&body.room_alias)? + .is_some() + { return Err(Error::Conflict("Alias already exists.")); } - db.rooms - .set_alias(&body.room_alias, Some(&body.room_id), &db.globals)?; - - db.flush()?; + services() + .rooms + .alias + .set_alias(&body.room_alias, &body.room_id)?; - Ok(create_alias::Response::new().into()) + Ok(create_alias::v3::Response::new()) } /// # `DELETE /_matrix/client/r0/directory/room/{roomAlias}` @@ -52,29 +48,21 @@ pub async fn create_alias_route( /// /// - TODO: additional access control checks /// - TODO: Update canonical alias event -#[cfg_attr( - feature = "conduit_bin", - delete("/_matrix/client/r0/directory/room/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn delete_alias_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - if body.room_alias.server_name() != db.globals.server_name() { + body: Ruma, +) -> Result { + if body.room_alias.server_name() != services().globals.server_name() { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Alias is from another server.", )); } - db.rooms.set_alias(&body.room_alias, None, &db.globals)?; + services().rooms.alias.remove_alias(&body.room_alias)?; // TODO: update alt_aliases? - db.flush()?; - - Ok(delete_alias::Response::new().into()) + Ok(delete_alias::v3::Response::new()) } /// # `GET /_matrix/client/r0/directory/room/{roomAlias}` @@ -82,40 +70,33 @@ pub async fn delete_alias_route( /// Resolve an alias locally or over federation. /// /// - TODO: Suggest more servers to join via -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/directory/room/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn get_alias_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - get_alias_helper(&db, &body.room_alias).await + body: Ruma, +) -> Result { + get_alias_helper(&body.room_alias).await } -pub(crate) async fn get_alias_helper( - db: &Database, - room_alias: &RoomAliasId, -) -> ConduitResult { - if room_alias.server_name() != db.globals.server_name() { - let response = db +pub(crate) async fn get_alias_helper(room_alias: &RoomAliasId) -> Result { + if room_alias.server_name() != services().globals.server_name() { + let response = services() .sending .send_federation_request( - &db.globals, room_alias.server_name(), federation::query::get_room_information::v1::Request { room_alias }, ) .await?; - return Ok(get_alias::Response::new(response.room_id, response.servers).into()); + return Ok(get_alias::v3::Response::new( + response.room_id, + response.servers, + )); } let mut room_id = None; - match db.rooms.id_from_alias(room_alias)? { + match services().rooms.alias.resolve_local_alias(room_alias)? { Some(r) => room_id = Some(r), None => { - for (_id, registration) in db.appservice.all()? { + for (_id, registration) in services().appservice.all()? 
{ let aliases = registration .get("namespaces") .and_then(|ns| ns.get("aliases")) @@ -130,19 +111,24 @@ pub(crate) async fn get_alias_helper( if aliases .iter() .any(|aliases| aliases.is_match(room_alias.as_str())) - && db + && services() .sending .send_appservice_request( - &db.globals, registration, appservice::query::query_room_alias::v1::Request { room_alias }, ) .await .is_ok() { - room_id = Some(db.rooms.id_from_alias(room_alias)?.ok_or_else(|| { - Error::bad_config("Appservice lied to us. Room does not exist.") - })?); + room_id = Some( + services() + .rooms + .alias + .resolve_local_alias(room_alias)? + .ok_or_else(|| { + Error::bad_config("Appservice lied to us. Room does not exist.") + })?, + ); break; } } @@ -159,5 +145,8 @@ pub(crate) async fn get_alias_helper( } }; - Ok(get_alias::Response::new(room_id, vec![db.globals.server_name().to_owned()]).into()) + Ok(get_alias::v3::Response::new( + room_id, + vec![services().globals.server_name().to_owned()], + )) } diff --git a/src/api/client_server/backup.rs b/src/api/client_server/backup.rs new file mode 100644 index 00000000..f3d5ddc5 --- /dev/null +++ b/src/api/client_server/backup.rs @@ -0,0 +1,362 @@ +use crate::{services, Error, Result, Ruma}; +use ruma::api::client::{ + backup::{ + add_backup_keys, add_backup_keys_for_room, add_backup_keys_for_session, + create_backup_version, delete_backup_keys, delete_backup_keys_for_room, + delete_backup_keys_for_session, delete_backup_version, get_backup_info, get_backup_keys, + get_backup_keys_for_room, get_backup_keys_for_session, get_latest_backup_info, + update_backup_version, + }, + error::ErrorKind, +}; + +/// # `POST /_matrix/client/r0/room_keys/version` +/// +/// Creates a new backup. +pub async fn create_backup_version_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let version = services() + .key_backups + .create_backup(sender_user, &body.algorithm)?; + + Ok(create_backup_version::v3::Response { version }) +} + +/// # `PUT /_matrix/client/r0/room_keys/version/{version}` +/// +/// Update information about an existing backup. Only `auth_data` can be modified. +pub async fn update_backup_version_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + services() + .key_backups + .update_backup(sender_user, &body.version, &body.algorithm)?; + + Ok(update_backup_version::v3::Response {}) +} + +/// # `GET /_matrix/client/r0/room_keys/version` +/// +/// Get information about the latest backup version. +pub async fn get_latest_backup_info_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let (version, algorithm) = services() + .key_backups + .get_latest_backup(sender_user)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Key backup does not exist.", + ))?; + + Ok(get_latest_backup_info::v3::Response { + algorithm, + count: (services().key_backups.count_keys(sender_user, &version)? as u32).into(), + etag: services().key_backups.get_etag(sender_user, &version)?, + version, + }) +} + +/// # `GET /_matrix/client/r0/room_keys/version` +/// +/// Get information about an existing backup. +pub async fn get_backup_info_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let algorithm = services() + .key_backups + .get_backup(sender_user, &body.version)? 
+ .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Key backup does not exist.", + ))?; + + Ok(get_backup_info::v3::Response { + algorithm, + count: (services() + .key_backups + .count_keys(sender_user, &body.version)? as u32) + .into(), + etag: services() + .key_backups + .get_etag(sender_user, &body.version)?, + version: body.version.to_owned(), + }) +} + +/// # `DELETE /_matrix/client/r0/room_keys/version/{version}` +/// +/// Delete an existing key backup. +/// +/// - Deletes both information about the backup, as well as all key data related to the backup +pub async fn delete_backup_version_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + services() + .key_backups + .delete_backup(sender_user, &body.version)?; + + Ok(delete_backup_version::v3::Response {}) +} + +/// # `PUT /_matrix/client/r0/room_keys/keys` +/// +/// Add the received backup keys to the database. +/// +/// - Only manipulating the most recently created version of the backup is allowed +/// - Adds the keys to the backup +/// - Returns the new number of keys in this backup and the etag +pub async fn add_backup_keys_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if Some(&body.version) + != services() + .key_backups + .get_latest_backup_version(sender_user)? + .as_ref() + { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "You may only manipulate the most recently created version of the backup.", + )); + } + + for (room_id, room) in &body.rooms { + for (session_id, key_data) in &room.sessions { + services().key_backups.add_key( + sender_user, + &body.version, + room_id, + session_id, + key_data, + )? + } + } + + Ok(add_backup_keys::v3::Response { + count: (services() + .key_backups + .count_keys(sender_user, &body.version)? as u32) + .into(), + etag: services() + .key_backups + .get_etag(sender_user, &body.version)?, + }) +} + +/// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}` +/// +/// Add the received backup keys to the database. +/// +/// - Only manipulating the most recently created version of the backup is allowed +/// - Adds the keys to the backup +/// - Returns the new number of keys in this backup and the etag +pub async fn add_backup_keys_for_room_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if Some(&body.version) + != services() + .key_backups + .get_latest_backup_version(sender_user)? + .as_ref() + { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "You may only manipulate the most recently created version of the backup.", + )); + } + + for (session_id, key_data) in &body.sessions { + services().key_backups.add_key( + sender_user, + &body.version, + &body.room_id, + session_id, + key_data, + )? + } + + Ok(add_backup_keys_for_room::v3::Response { + count: (services() + .key_backups + .count_keys(sender_user, &body.version)? as u32) + .into(), + etag: services() + .key_backups + .get_etag(sender_user, &body.version)?, + }) +} + +/// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` +/// +/// Add the received backup key to the database. 
+/// +/// - Only manipulating the most recently created version of the backup is allowed +/// - Adds the keys to the backup +/// - Returns the new number of keys in this backup and the etag +pub async fn add_backup_keys_for_session_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if Some(&body.version) + != services() + .key_backups + .get_latest_backup_version(sender_user)? + .as_ref() + { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "You may only manipulate the most recently created version of the backup.", + )); + } + + services().key_backups.add_key( + sender_user, + &body.version, + &body.room_id, + &body.session_id, + &body.session_data, + )?; + + Ok(add_backup_keys_for_session::v3::Response { + count: (services() + .key_backups + .count_keys(sender_user, &body.version)? as u32) + .into(), + etag: services() + .key_backups + .get_etag(sender_user, &body.version)?, + }) +} + +/// # `GET /_matrix/client/r0/room_keys/keys` +/// +/// Retrieves all keys from the backup. +pub async fn get_backup_keys_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let rooms = services().key_backups.get_all(sender_user, &body.version)?; + + Ok(get_backup_keys::v3::Response { rooms }) +} + +/// # `GET /_matrix/client/r0/room_keys/keys/{roomId}` +/// +/// Retrieves all keys from the backup for a given room. +pub async fn get_backup_keys_for_room_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let sessions = services() + .key_backups + .get_room(sender_user, &body.version, &body.room_id)?; + + Ok(get_backup_keys_for_room::v3::Response { sessions }) +} + +/// # `GET /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` +/// +/// Retrieves a key from the backup. +pub async fn get_backup_keys_for_session_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let key_data = services() + .key_backups + .get_session(sender_user, &body.version, &body.room_id, &body.session_id)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Backup key not found for this user's session.", + ))?; + + Ok(get_backup_keys_for_session::v3::Response { key_data }) +} + +/// # `DELETE /_matrix/client/r0/room_keys/keys` +/// +/// Delete the keys from the backup. +pub async fn delete_backup_keys_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + services() + .key_backups + .delete_all_keys(sender_user, &body.version)?; + + Ok(delete_backup_keys::v3::Response { + count: (services() + .key_backups + .count_keys(sender_user, &body.version)? as u32) + .into(), + etag: services() + .key_backups + .get_etag(sender_user, &body.version)?, + }) +} + +/// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}` +/// +/// Delete the keys from the backup for a given room. +pub async fn delete_backup_keys_for_room_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + services() + .key_backups + .delete_room_keys(sender_user, &body.version, &body.room_id)?; + + Ok(delete_backup_keys_for_room::v3::Response { + count: (services() + .key_backups + .count_keys(sender_user, &body.version)? 
as u32)
+            .into(),
+        etag: services()
+            .key_backups
+            .get_etag(sender_user, &body.version)?,
+    })
+}
+
+/// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}`
+///
+/// Delete a key from the backup.
+pub async fn delete_backup_keys_for_session_route(
+    body: Ruma,
+) -> Result {
+    let sender_user = body.sender_user.as_ref().expect("user is authenticated");
+
+    services().key_backups.delete_room_key(
+        sender_user,
+        &body.version,
+        &body.room_id,
+        &body.session_id,
+    )?;
+
+    Ok(delete_backup_keys_for_session::v3::Response {
+        count: (services()
+            .key_backups
+            .count_keys(sender_user, &body.version)? as u32)
+            .into(),
+        etag: services()
+            .key_backups
+            .get_etag(sender_user, &body.version)?,
+    })
+}
diff --git a/src/api/client_server/capabilities.rs b/src/api/client_server/capabilities.rs
new file mode 100644
index 00000000..31d42d2f
--- /dev/null
+++ b/src/api/client_server/capabilities.rs
@@ -0,0 +1,28 @@
+use crate::{services, Result, Ruma};
+use ruma::api::client::discovery::get_capabilities::{
+    self, Capabilities, RoomVersionStability, RoomVersionsCapability,
+};
+use std::collections::BTreeMap;
+
+/// # `GET /_matrix/client/r0/capabilities`
+///
+/// Get information on the supported feature set and other relevant capabilities of this server.
+pub async fn get_capabilities_route(
+    _body: Ruma,
+) -> Result {
+    let mut available = BTreeMap::new();
+    for room_version in &services().globals.unstable_room_versions {
+        available.insert(room_version.clone(), RoomVersionStability::Unstable);
+    }
+    for room_version in &services().globals.stable_room_versions {
+        available.insert(room_version.clone(), RoomVersionStability::Stable);
+    }
+
+    let mut capabilities = Capabilities::new();
+    capabilities.room_versions = RoomVersionsCapability {
+        default: services().globals.default_room_version(),
+        available,
+    };
+
+    Ok(get_capabilities::v3::Response { capabilities })
+}
diff --git a/src/client_server/config.rs b/src/api/client_server/config.rs
similarity index 63%
rename from src/client_server/config.rs
rename to src/api/client_server/config.rs
index 0c668ff1..dbd2b2cc 100644
--- a/src/client_server/config.rs
+++ b/src/api/client_server/config.rs
@@ -1,11 +1,11 @@
-use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma};
+use crate::{services, Error, Result, Ruma};
 use ruma::{
     api::client::{
-        error::ErrorKind,
-        r0::config::{
+        config::{
             get_global_account_data, get_room_account_data, set_global_account_data,
             set_room_account_data,
         },
+        error::ErrorKind,
     },
     events::{AnyGlobalAccountDataEventContent, AnyRoomAccountDataEventContent},
     serde::Raw,
@@ -13,29 +13,20 @@ use ruma::{
 use serde::Deserialize;
 use serde_json::{json, value::RawValue as RawJsonValue};
 
-#[cfg(feature = "conduit_bin")]
-use rocket::{get, put};
-
 /// # `PUT /_matrix/client/r0/user/{userId}/account_data/{type}`
 ///
 /// Sets some account data for the sender user.
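// Illustrative sketch (not part of the patch): the account-data routes in this file store
// what the client sends by wrapping the raw payload in an event object via
// json!({ "type": ..., "content": ... }), as the handler that follows does. The helper name
// and sample data here are made up for illustration; only serde_json is assumed.
use serde_json::{json, Value};

fn wrap_account_data(event_type: &str, data: Value) -> Value {
    // Stored form: { "type": "<event type>", "content": <client-supplied data> }
    json!({
        "type": event_type,
        "content": data,
    })
}

fn main() {
    let stored = wrap_account_data("m.push_rules", json!({ "global": { "override": [] } }));
    assert_eq!(stored["type"], "m.push_rules");
    assert!(stored["content"]["global"]["override"].is_array());
}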
-#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/user/<_>/account_data/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn set_global_account_data_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let data: serde_json::Value = serde_json::from_str(body.data.get()) + let data: serde_json::Value = serde_json::from_str(body.data.json().get()) .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?; let event_type = body.event_type.to_string(); - db.account_data.update( + services().account_data.update( None, sender_user, event_type.clone().into(), @@ -43,37 +34,25 @@ pub async fn set_global_account_data_route( "type": event_type, "content": data, }), - &db.globals, )?; - db.flush()?; - - Ok(set_global_account_data::Response {}.into()) + Ok(set_global_account_data::v3::Response {}) } /// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}` /// /// Sets some room account data for the sender user. -#[cfg_attr( - feature = "conduit_bin", - put( - "/_matrix/client/r0/user/<_>/rooms/<_>/account_data/<_>", - data = "" - ) -)] -#[tracing::instrument(skip(db, body))] pub async fn set_room_account_data_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let data: serde_json::Value = serde_json::from_str(body.data.get()) + let data: serde_json::Value = serde_json::from_str(body.data.json().get()) .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Data is invalid."))?; let event_type = body.event_type.to_string(); - db.account_data.update( + services().account_data.update( Some(&body.room_id), sender_user, event_type.clone().into(), @@ -81,29 +60,20 @@ pub async fn set_room_account_data_route( "type": event_type, "content": data, }), - &db.globals, )?; - db.flush()?; - - Ok(set_room_account_data::Response {}.into()) + Ok(set_room_account_data::v3::Response {}) } /// # `GET /_matrix/client/r0/user/{userId}/account_data/{type}` /// /// Gets some account data for the sender user. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/user/<_>/account_data/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn get_global_account_data_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event: Box = db + let event: Box = services() .account_data .get(None, sender_user, body.event_type.clone().into())? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Data not found."))?; @@ -112,27 +82,18 @@ pub async fn get_global_account_data_route( .map_err(|_| Error::bad_database("Invalid account data event in db."))? .content; - Ok(get_global_account_data::Response { account_data }.into()) + Ok(get_global_account_data::v3::Response { account_data }) } /// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/account_data/{type}` /// /// Gets some room account data for the sender user. 
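// Illustrative sketch (not part of the patch): reading account data back out. The
// get_*_account_data handlers in this file deserialize the stored wrapper and return only
// its "content" field through a small #[derive(Deserialize)] helper whose definition is
// truncated in this diff, so the struct name and field layout below are assumptions.
use serde::Deserialize;
use serde_json::value::RawValue;

#[derive(Deserialize)]
struct ExtractContent {
    // Keep the payload as raw JSON, as the handlers do.
    content: Box<RawValue>,
}

fn main() -> serde_json::Result<()> {
    let stored = r#"{"type":"m.push_rules","content":{"global":{}}}"#;
    let extracted: ExtractContent = serde_json::from_str(stored)?;
    assert_eq!(extracted.content.get(), r#"{"global":{}}"#);
    Ok(())
}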
-#[cfg_attr( - feature = "conduit_bin", - get( - "/_matrix/client/r0/user/<_>/rooms/<_>/account_data/<_>", - data = "" - ) -)] -#[tracing::instrument(skip(db, body))] pub async fn get_room_account_data_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event: Box = db + let event: Box = services() .account_data .get( Some(&body.room_id), @@ -145,7 +106,7 @@ pub async fn get_room_account_data_route( .map_err(|_| Error::bad_database("Invalid account data event in db."))? .content; - Ok(get_room_account_data::Response { account_data }.into()) + Ok(get_room_account_data::v3::Response { account_data }) } #[derive(Deserialize)] diff --git a/src/api/client_server/context.rs b/src/api/client_server/context.rs new file mode 100644 index 00000000..2e0f2576 --- /dev/null +++ b/src/api/client_server/context.rs @@ -0,0 +1,203 @@ +use crate::{services, Error, Result, Ruma}; +use ruma::{ + api::client::{context::get_context, error::ErrorKind, filter::LazyLoadOptions}, + events::StateEventType, +}; +use std::{collections::HashSet, convert::TryFrom}; +use tracing::error; + +/// # `GET /_matrix/client/r0/rooms/{roomId}/context` +/// +/// Allows loading room history around an event. +/// +/// - Only works if the user is joined (TODO: always allow, but only show events if the user was +/// joined, depending on history_visibility) +pub async fn get_context_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + + let (lazy_load_enabled, lazy_load_send_redundant) = match &body.filter.lazy_load_options { + LazyLoadOptions::Enabled { + include_redundant_members, + } => (true, *include_redundant_members), + _ => (false, false), + }; + + let mut lazy_loaded = HashSet::new(); + + let base_pdu_id = services() + .rooms + .timeline + .get_pdu_id(&body.event_id)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Base event id not found.", + ))?; + + let base_token = services().rooms.timeline.pdu_count(&base_pdu_id)?; + + let base_event = services() + .rooms + .timeline + .get_pdu_from_id(&base_pdu_id)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Base event not found.", + ))?; + + let room_id = base_event.room_id.clone(); + + if !services() + .rooms + .state_cache + .is_joined(sender_user, &room_id)? + { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You don't have permission to view this room.", + )); + } + + if !services().rooms.lazy_loading.lazy_load_was_sent_before( + sender_user, + sender_device, + &room_id, + &base_event.sender, + )? || lazy_load_send_redundant + { + lazy_loaded.insert(base_event.sender.as_str().to_owned()); + } + + let base_event = base_event.to_room_event(); + + let events_before: Vec<_> = services() + .rooms + .timeline + .pdus_until(sender_user, &room_id, base_token)? + .take( + u32::try_from(body.limit).map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.") + })? as usize + / 2, + ) + .filter_map(|r| r.ok()) // Remove buggy events + .collect(); + + for (_, event) in &events_before { + if !services().rooms.lazy_loading.lazy_load_was_sent_before( + sender_user, + sender_device, + &room_id, + &event.sender, + )? 
|| lazy_load_send_redundant + { + lazy_loaded.insert(event.sender.as_str().to_owned()); + } + } + + let start_token = events_before + .last() + .and_then(|(pdu_id, _)| services().rooms.timeline.pdu_count(pdu_id).ok()) + .map(|count| count.to_string()); + + let events_before: Vec<_> = events_before + .into_iter() + .map(|(_, pdu)| pdu.to_room_event()) + .collect(); + + let events_after: Vec<_> = services() + .rooms + .timeline + .pdus_after(sender_user, &room_id, base_token)? + .take( + u32::try_from(body.limit).map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.") + })? as usize + / 2, + ) + .filter_map(|r| r.ok()) // Remove buggy events + .collect(); + + for (_, event) in &events_after { + if !services().rooms.lazy_loading.lazy_load_was_sent_before( + sender_user, + sender_device, + &room_id, + &event.sender, + )? || lazy_load_send_redundant + { + lazy_loaded.insert(event.sender.as_str().to_owned()); + } + } + + let shortstatehash = match services().rooms.state_accessor.pdu_shortstatehash( + events_after + .last() + .map_or(&*body.event_id, |(_, e)| &*e.event_id), + )? { + Some(s) => s, + None => services() + .rooms + .state + .get_room_shortstatehash(&room_id)? + .expect("All rooms have state"), + }; + + let state_ids = services() + .rooms + .state_accessor + .state_full_ids(shortstatehash) + .await?; + + let end_token = events_after + .last() + .and_then(|(pdu_id, _)| services().rooms.timeline.pdu_count(pdu_id).ok()) + .map(|count| count.to_string()); + + let events_after: Vec<_> = events_after + .into_iter() + .map(|(_, pdu)| pdu.to_room_event()) + .collect(); + + let mut state = Vec::new(); + + for (shortstatekey, id) in state_ids { + let (event_type, state_key) = services() + .rooms + .short + .get_statekey_from_short(shortstatekey)?; + + if event_type != StateEventType::RoomMember { + let pdu = match services().rooms.timeline.get_pdu(&id)? { + Some(pdu) => pdu, + None => { + error!("Pdu in state not found: {}", id); + continue; + } + }; + state.push(pdu.to_state_event()); + } else if !lazy_load_enabled || lazy_loaded.contains(&state_key) { + let pdu = match services().rooms.timeline.get_pdu(&id)? { + Some(pdu) => pdu, + None => { + error!("Pdu in state not found: {}", id); + continue; + } + }; + state.push(pdu.to_state_event()); + } + } + + let resp = get_context::v3::Response { + start: start_token, + end: end_token, + events_before, + event: Some(base_event), + events_after, + state, + }; + + Ok(resp) +} diff --git a/src/client_server/device.rs b/src/api/client_server/device.rs similarity index 60% rename from src/client_server/device.rs rename to src/api/client_server/device.rs index 03a3004b..d4c41786 100644 --- a/src/client_server/device.rs +++ b/src/api/client_server/device.rs @@ -1,91 +1,68 @@ -use crate::{database::DatabaseGuard, utils, ConduitResult, Error, Ruma}; +use crate::{services, utils, Error, Result, Ruma}; use ruma::api::client::{ + device::{self, delete_device, delete_devices, get_device, get_devices, update_device}, error::ErrorKind, - r0::{ - device::{self, delete_device, delete_devices, get_device, get_devices, update_device}, - uiaa::{AuthFlow, AuthType, UiaaInfo}, - }, + uiaa::{AuthFlow, AuthType, UiaaInfo}, }; use super::SESSION_ID_LENGTH; -#[cfg(feature = "conduit_bin")] -use rocket::{delete, get, post, put}; /// # `GET /_matrix/client/r0/devices` /// /// Get metadata on all devices of the sender user. 
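// Illustrative sketch (not part of the patch): the device routes that follow iterate
// storage results and drop rows that fail to decode with filter_map(|r| r.ok()), the same
// "skip buggy entries" pattern used throughout this patch. Plain std; the data is invented.
fn main() {
    let rows: Vec<Result<&str, &str>> = vec![Ok("DEVICEA"), Err("corrupt row"), Ok("DEVICEB")];
    // Keep only the rows that decoded successfully, preserving order.
    let devices: Vec<&str> = rows.into_iter().filter_map(|r| r.ok()).collect();
    assert_eq!(devices, vec!["DEVICEA", "DEVICEB"]);
}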
-#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/devices", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn get_devices_route( - db: DatabaseGuard, - body: Ruma, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let devices: Vec = db + let devices: Vec = services() .users .all_devices_metadata(sender_user) .filter_map(|r| r.ok()) // Filter out buggy devices .collect(); - Ok(get_devices::Response { devices }.into()) + Ok(get_devices::v3::Response { devices }) } /// # `GET /_matrix/client/r0/devices/{deviceId}` /// /// Get metadata on a single device of the sender user. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/devices/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn get_device_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let device = db + let device = services() .users .get_device_metadata(sender_user, &body.body.device_id)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?; - Ok(get_device::Response { device }.into()) + Ok(get_device::v3::Response { device }) } /// # `PUT /_matrix/client/r0/devices/{deviceId}` /// /// Updates the metadata on a given device of the sender user. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/devices/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn update_device_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let mut device = db + let mut device = services() .users .get_device_metadata(sender_user, &body.device_id)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Device not found."))?; device.display_name = body.display_name.clone(); - db.users + services() + .users .update_device_metadata(sender_user, &body.device_id, &device)?; - db.flush()?; - - Ok(update_device::Response {}.into()) + Ok(update_device::v3::Response {}) } -/// # `PUT /_matrix/client/r0/devices/{deviceId}` +/// # `DELETE /_matrix/client/r0/devices/{deviceId}` /// /// Deletes the given device. /// @@ -94,15 +71,9 @@ pub async fn update_device_route( /// - Deletes device metadata (device id, device display name, last seen ip, last seen ts) /// - Forgets to-device events /// - Triggers device list updates -#[cfg_attr( - feature = "conduit_bin", - delete("/_matrix/client/r0/devices/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn delete_device_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -118,32 +89,29 @@ pub async fn delete_device_route( }; if let Some(auth) = &body.auth { - let (worked, uiaainfo) = db.uiaa.try_auth( - sender_user, - sender_device, - auth, - &uiaainfo, - &db.users, - &db.globals, - )?; + let (worked, uiaainfo) = + services() + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo)?; if !worked { return Err(Error::Uiaa(uiaainfo)); } // Success! 
} else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa + services() + .uiaa .create(sender_user, sender_device, &uiaainfo, &json)?; return Err(Error::Uiaa(uiaainfo)); } else { return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); } - db.users.remove_device(sender_user, &body.device_id)?; - - db.flush()?; + services() + .users + .remove_device(sender_user, &body.device_id)?; - Ok(delete_device::Response {}.into()) + Ok(delete_device::v3::Response {}) } /// # `PUT /_matrix/client/r0/devices/{deviceId}` @@ -157,15 +125,9 @@ pub async fn delete_device_route( /// - Deletes device metadata (device id, device display name, last seen ip, last seen ts) /// - Forgets to-device events /// - Triggers device list updates -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/delete_devices", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn delete_devices_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -181,21 +143,18 @@ pub async fn delete_devices_route( }; if let Some(auth) = &body.auth { - let (worked, uiaainfo) = db.uiaa.try_auth( - sender_user, - sender_device, - auth, - &uiaainfo, - &db.users, - &db.globals, - )?; + let (worked, uiaainfo) = + services() + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo)?; if !worked { return Err(Error::Uiaa(uiaainfo)); } // Success! } else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa + services() + .uiaa .create(sender_user, sender_device, &uiaainfo, &json)?; return Err(Error::Uiaa(uiaainfo)); } else { @@ -203,10 +162,8 @@ pub async fn delete_devices_route( } for device_id in &body.devices { - db.users.remove_device(sender_user, device_id)? + services().users.remove_device(sender_user, device_id)? 
} - db.flush()?; - - Ok(delete_devices::Response {}.into()) + Ok(delete_devices::v3::Response {}) } diff --git a/src/client_server/directory.rs b/src/api/client_server/directory.rs similarity index 64% rename from src/client_server/directory.rs rename to src/api/client_server/directory.rs index 490f7524..781e9666 100644 --- a/src/client_server/directory.rs +++ b/src/api/client_server/directory.rs @@ -1,55 +1,46 @@ -use std::convert::TryInto; - -use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Result, Ruma}; +use crate::{services, Error, Result, Ruma}; use ruma::{ api::{ client::{ - error::ErrorKind, - r0::{ - directory::{ - get_public_rooms, get_public_rooms_filtered, get_room_visibility, - set_room_visibility, - }, - room, + directory::{ + get_public_rooms, get_public_rooms_filtered, get_room_visibility, + set_room_visibility, }, + error::ErrorKind, + room, }, federation, }, - directory::{Filter, IncomingFilter, IncomingRoomNetwork, PublicRoomsChunk, RoomNetwork}, + directory::{ + Filter, IncomingFilter, IncomingRoomNetwork, PublicRoomJoinRule, PublicRoomsChunk, + RoomNetwork, + }, events::{ room::{ avatar::RoomAvatarEventContent, canonical_alias::RoomCanonicalAliasEventContent, + create::RoomCreateEventContent, guest_access::{GuestAccess, RoomGuestAccessEventContent}, history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, + join_rules::{JoinRule, RoomJoinRulesEventContent}, name::RoomNameEventContent, topic::RoomTopicEventContent, }, - EventType, + StateEventType, }, ServerName, UInt, }; -use tracing::{info, warn}; - -#[cfg(feature = "conduit_bin")] -use rocket::{get, post, put}; +use tracing::{error, info, warn}; /// # `POST /_matrix/client/r0/publicRooms` /// /// Lists the public rooms on this server. /// /// - Rooms are ordered by the number of joined members -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/publicRooms", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn get_public_rooms_filtered_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { get_public_rooms_filtered_helper( - &db, body.server.as_deref(), body.limit, body.since.as_deref(), @@ -64,33 +55,24 @@ pub async fn get_public_rooms_filtered_route( /// Lists the public rooms on this server. /// /// - Rooms are ordered by the number of joined members -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/publicRooms", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn get_public_rooms_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let response = get_public_rooms_filtered_helper( - &db, body.server.as_deref(), body.limit, body.since.as_deref(), &IncomingFilter::default(), &IncomingRoomNetwork::Matrix, ) - .await? - .0; + .await?; - Ok(get_public_rooms::Response { + Ok(get_public_rooms::v3::Response { chunk: response.chunk, prev_batch: response.prev_batch, next_batch: response.next_batch, total_room_count_estimate: response.total_room_count_estimate, - } - .into()) + }) } /// # `PUT /_matrix/client/r0/directory/list/room/{roomId}` @@ -98,23 +80,25 @@ pub async fn get_public_rooms_route( /// Sets the visibility of a given room in the room directory. 
///
/// - TODO: Access control checks
-#[cfg_attr(
-    feature = "conduit_bin",
-    put("/_matrix/client/r0/directory/list/room/<_>", data = "")
-)]
-#[tracing::instrument(skip(db, body))]
 pub async fn set_room_visibility_route(
-    db: DatabaseGuard,
-    body: Ruma>,
-) -> ConduitResult {
+    body: Ruma,
+) -> Result {
     let sender_user = body.sender_user.as_ref().expect("user is authenticated");

+    if !services().rooms.metadata.exists(&body.room_id)? {
+        // Return 404 if the room doesn't exist
+        return Err(Error::BadRequest(
+            ErrorKind::NotFound,
+            "Room not found",
+        ));
+    }
+
     match &body.visibility {
         room::Visibility::Public => {
-            db.rooms.set_public(&body.room_id, true)?;
+            services().rooms.directory.set_public(&body.room_id)?;
             info!("{} made {} public", sender_user, body.room_id);
         }
-        room::Visibility::Private => db.rooms.set_public(&body.room_id, false)?,
+        room::Visibility::Private => services().rooms.directory.set_not_public(&body.room_id)?,
         _ => {
             return Err(Error::BadRequest(
                 ErrorKind::InvalidParam,
@@ -123,78 +107,65 @@
         }
     }

-    db.flush()?;
-
-    Ok(set_room_visibility::Response {}.into())
+    Ok(set_room_visibility::v3::Response {})
 }

 /// # `GET /_matrix/client/r0/directory/list/room/{roomId}`
 ///
 /// Gets the visibility of a given room in the room directory.
-#[cfg_attr(
-    feature = "conduit_bin",
-    get("/_matrix/client/r0/directory/list/room/<_>", data = "")
-)]
-#[tracing::instrument(skip(db, body))]
 pub async fn get_room_visibility_route(
-    db: DatabaseGuard,
-    body: Ruma>,
-) -> ConduitResult {
-    Ok(get_room_visibility::Response {
-        visibility: if db.rooms.is_public_room(&body.room_id)?
+    body: Ruma,
+) -> Result {
+
+    if !services().rooms.metadata.exists(&body.room_id)? {
+        // Return 404 if the room doesn't exist
+        return Err(Error::BadRequest(
+            ErrorKind::NotFound,
+            "Room not found",
+        ));
+    }
+
+    Ok(get_room_visibility::v3::Response {
+        visibility: if services().rooms.directory.is_public_room(&body.room_id)?
{ room::Visibility::Public } else { room::Visibility::Private }, - } - .into()) + }) } pub(crate) async fn get_public_rooms_filtered_helper( - db: &Database, server: Option<&ServerName>, limit: Option, since: Option<&str>, filter: &IncomingFilter, _network: &IncomingRoomNetwork, -) -> ConduitResult { - if let Some(other_server) = server.filter(|server| *server != db.globals.server_name().as_str()) +) -> Result { + if let Some(other_server) = + server.filter(|server| *server != services().globals.server_name().as_str()) { - let response = db + let response = services() .sending .send_federation_request( - &db.globals, other_server, federation::directory::get_public_rooms_filtered::v1::Request { limit, - since: since.as_deref(), + since, filter: Filter { generic_search_term: filter.generic_search_term.as_deref(), + room_types: filter.room_types.clone(), }, room_network: RoomNetwork::Matrix, }, ) .await?; - return Ok(get_public_rooms_filtered::Response { - chunk: response - .chunk - .into_iter() - .map(|c| { - // Convert ruma::api::federation::directory::get_public_rooms::v1::PublicRoomsChunk - // to ruma::api::client::r0::directory::PublicRoomsChunk - serde_json::from_str( - &serde_json::to_string(&c) - .expect("PublicRoomsChunk::to_string always works"), - ) - .expect("federation and client-server PublicRoomsChunk are the same type") - }) - .collect(), + return Ok(get_public_rooms_filtered::v3::Response { + chunk: response.chunk, prev_batch: response.prev_batch, next_batch: response.next_batch, total_room_count_estimate: response.total_room_count_estimate, - } - .into()); + }); } let limit = limit.map_or(10, u64::from); @@ -223,17 +194,18 @@ pub(crate) async fn get_public_rooms_filtered_helper( } } - let mut all_rooms: Vec<_> = db + let mut all_rooms: Vec<_> = services() .rooms + .directory .public_rooms() .map(|room_id| { let room_id = room_id?; let chunk = PublicRoomsChunk { - aliases: Vec::new(), - canonical_alias: db + canonical_alias: services() .rooms - .room_state_get(&room_id, &EventType::RoomCanonicalAlias, "")? + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomCanonicalAlias, "")? .map_or(Ok(None), |s| { serde_json::from_str(s.content.get()) .map(|c: RoomCanonicalAliasEventContent| c.alias) @@ -241,9 +213,10 @@ pub(crate) async fn get_public_rooms_filtered_helper( Error::bad_database("Invalid canonical alias event in database.") }) })?, - name: db + name: services() .rooms - .room_state_get(&room_id, &EventType::RoomName, "")? + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomName, "")? .map_or(Ok(None), |s| { serde_json::from_str(s.content.get()) .map(|c: RoomNameEventContent| c.name) @@ -251,8 +224,9 @@ pub(crate) async fn get_public_rooms_filtered_helper( Error::bad_database("Invalid room name event in database.") }) })?, - num_joined_members: db + num_joined_members: services() .rooms + .state_cache .room_joined_count(&room_id)? .unwrap_or_else(|| { warn!("Room {} has no member count", room_id); @@ -260,9 +234,10 @@ pub(crate) async fn get_public_rooms_filtered_helper( }) .try_into() .expect("user count should not be that big"), - topic: db + topic: services() .rooms - .room_state_get(&room_id, &EventType::RoomTopic, "")? + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomTopic, "")? 
.map_or(Ok(None), |s| { serde_json::from_str(s.content.get()) .map(|c: RoomTopicEventContent| Some(c.topic)) @@ -270,9 +245,10 @@ pub(crate) async fn get_public_rooms_filtered_helper( Error::bad_database("Invalid room topic event in database.") }) })?, - world_readable: db + world_readable: services() .rooms - .room_state_get(&room_id, &EventType::RoomHistoryVisibility, "")? + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")? .map_or(Ok(false), |s| { serde_json::from_str(s.content.get()) .map(|c: RoomHistoryVisibilityEventContent| { @@ -284,9 +260,10 @@ pub(crate) async fn get_public_rooms_filtered_helper( ) }) })?, - guest_can_join: db + guest_can_join: services() .rooms - .room_state_get(&room_id, &EventType::RoomGuestAccess, "")? + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomGuestAccess, "")? .map_or(Ok(false), |s| { serde_json::from_str(s.content.get()) .map(|c: RoomGuestAccessEventContent| { @@ -296,9 +273,10 @@ pub(crate) async fn get_public_rooms_filtered_helper( Error::bad_database("Invalid room guest access event in database.") }) })?, - avatar_url: db + avatar_url: services() .rooms - .room_state_get(&room_id, &EventType::RoomAvatar, "")? + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomAvatar, "")? .map(|s| { serde_json::from_str(s.content.get()) .map(|c: RoomAvatarEventContent| c.url) @@ -309,6 +287,39 @@ pub(crate) async fn get_public_rooms_filtered_helper( .transpose()? // url is now an Option so we must flatten .flatten(), + join_rule: services() + .rooms + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomJoinRules, "")? + .map(|s| { + serde_json::from_str(s.content.get()) + .map(|c: RoomJoinRulesEventContent| match c.join_rule { + JoinRule::Public => Some(PublicRoomJoinRule::Public), + JoinRule::Knock => Some(PublicRoomJoinRule::Knock), + _ => None, + }) + .map_err(|e| { + error!("Invalid room join rule event in database: {}", e); + Error::BadDatabase("Invalid room join rule event in database.") + }) + }) + .transpose()? + .flatten() + .ok_or_else(|| Error::bad_database("Missing room join rule event for room."))?, + room_type: services() + .rooms + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomCreate, "")? + .map(|s| { + serde_json::from_str::(s.content.get()).map_err( + |e| { + error!("Invalid room create event in database: {}", e); + Error::BadDatabase("Invalid room create event in database.") + }, + ) + }) + .transpose()? + .and_then(|e| e.room_type), room_id, }; Ok(chunk) @@ -369,11 +380,10 @@ pub(crate) async fn get_public_rooms_filtered_helper( Some(format!("n{}", num_since + limit)) }; - Ok(get_public_rooms_filtered::Response { + Ok(get_public_rooms_filtered::v3::Response { chunk, prev_batch, next_batch, total_room_count_estimate: Some(total_room_count_estimate), - } - .into()) + }) } diff --git a/src/api/client_server/filter.rs b/src/api/client_server/filter.rs new file mode 100644 index 00000000..a0d5a192 --- /dev/null +++ b/src/api/client_server/filter.rs @@ -0,0 +1,34 @@ +use crate::{services, Error, Result, Ruma}; +use ruma::api::client::{ + error::ErrorKind, + filter::{create_filter, get_filter}, +}; + +/// # `GET /_matrix/client/r0/user/{userId}/filter/{filterId}` +/// +/// Loads a filter that was previously created. 
+/// +/// - A user can only access their own filters +pub async fn get_filter_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let filter = match services().users.get_filter(sender_user, &body.filter_id)? { + Some(filter) => filter, + None => return Err(Error::BadRequest(ErrorKind::NotFound, "Filter not found.")), + }; + + Ok(get_filter::v3::Response::new(filter)) +} + +/// # `PUT /_matrix/client/r0/user/{userId}/filter` +/// +/// Creates a new filter to be used by other endpoints. +pub async fn create_filter_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + Ok(create_filter::v3::Response::new( + services().users.create_filter(sender_user, &body.filter)?, + )) +} diff --git a/src/client_server/keys.rs b/src/api/client_server/keys.rs similarity index 57% rename from src/client_server/keys.rs rename to src/api/client_server/keys.rs index a44f5e9c..b649166a 100644 --- a/src/client_server/keys.rs +++ b/src/api/client_server/keys.rs @@ -1,83 +1,61 @@ use super::SESSION_ID_LENGTH; -use crate::{database::DatabaseGuard, utils, ConduitResult, Database, Error, Result, Ruma}; -use rocket::futures::{prelude::*, stream::FuturesUnordered}; +use crate::{services, utils, Error, Result, Ruma}; +use futures_util::{stream::FuturesUnordered, StreamExt}; use ruma::{ api::{ client::{ error::ErrorKind, - r0::{ - keys::{ - claim_keys, get_key_changes, get_keys, upload_keys, upload_signatures, - upload_signing_keys, - }, - uiaa::{AuthFlow, AuthType, UiaaInfo}, + keys::{ + claim_keys, get_key_changes, get_keys, upload_keys, upload_signatures, + upload_signing_keys, }, + uiaa::{AuthFlow, AuthType, UiaaInfo}, }, federation, }, - encryption::UnsignedDeviceInfo, - DeviceId, DeviceKeyAlgorithm, UserId, + serde::Raw, + DeviceKeyAlgorithm, OwnedDeviceId, OwnedUserId, UserId, }; use serde_json::json; use std::collections::{BTreeMap, HashMap, HashSet}; -#[cfg(feature = "conduit_bin")] -use rocket::{get, post}; - /// # `POST /_matrix/client/r0/keys/upload` /// /// Publish end-to-end encryption keys for the sender device. /// /// - Adds one time keys /// - If there are no device keys yet: Adds device keys (TODO: merge with existing keys?) -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/keys/upload", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn upload_keys_route( - db: DatabaseGuard, - body: Ruma, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); - if let Some(one_time_keys) = &body.one_time_keys { - for (key_key, key_value) in one_time_keys { - db.users.add_one_time_key( - sender_user, - sender_device, - key_key, - key_value, - &db.globals, - )?; - } + for (key_key, key_value) in &body.one_time_keys { + services() + .users + .add_one_time_key(sender_user, sender_device, key_key, key_value)?; } if let Some(device_keys) = &body.device_keys { // TODO: merge this and the existing event? // This check is needed to assure that signatures are kept - if db + if services() .users .get_device_keys(sender_user, sender_device)? 
.is_none() { - db.users.add_device_keys( - sender_user, - sender_device, - device_keys, - &db.rooms, - &db.globals, - )?; + services() + .users + .add_device_keys(sender_user, sender_device, device_keys)?; } } - db.flush()?; - - Ok(upload_keys::Response { - one_time_key_counts: db.users.count_one_time_keys(sender_user, sender_device)?, - } - .into()) + Ok(upload_keys::v3::Response { + one_time_key_counts: services() + .users + .count_one_time_keys(sender_user, sender_device)?, + }) } /// # `POST /_matrix/client/r0/keys/query` @@ -87,45 +65,26 @@ pub async fn upload_keys_route( /// - Always fetches users from other servers over federation /// - Gets master keys, self-signing keys, user signing keys and device keys. /// - The master and self-signing keys contain signatures that the user is allowed to see -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/keys/query", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn get_keys_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let response = get_keys_helper( - Some(sender_user), - &body.device_keys, - |u| u == sender_user, - &db, - ) - .await?; + let response = + get_keys_helper(Some(sender_user), &body.device_keys, |u| u == sender_user).await?; - Ok(response.into()) + Ok(response) } /// # `POST /_matrix/client/r0/keys/claim` /// /// Claims one-time keys -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/keys/claim", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn claim_keys_route( - db: DatabaseGuard, - body: Ruma, -) -> ConduitResult { - let response = claim_keys_helper(&body.one_time_keys, &db).await?; + body: Ruma, +) -> Result { + let response = claim_keys_helper(&body.one_time_keys).await?; - db.flush()?; - - Ok(response.into()) + Ok(response) } /// # `POST /_matrix/client/r0/keys/device_signing/upload` @@ -133,15 +92,9 @@ pub async fn claim_keys_route( /// Uploads end-to-end key information for the sender user. /// /// - Requires UIAA to verify password -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/unstable/keys/device_signing/upload", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn upload_signing_keys_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); @@ -157,21 +110,18 @@ pub async fn upload_signing_keys_route( }; if let Some(auth) = &body.auth { - let (worked, uiaainfo) = db.uiaa.try_auth( - sender_user, - sender_device, - auth, - &uiaainfo, - &db.users, - &db.globals, - )?; + let (worked, uiaainfo) = + services() + .uiaa + .try_auth(sender_user, sender_device, auth, &uiaainfo)?; if !worked { return Err(Error::Uiaa(uiaainfo)); } // Success! 
} else if let Some(json) = body.json_body { uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa + services() + .uiaa .create(sender_user, sender_device, &uiaainfo, &json)?; return Err(Error::Uiaa(uiaainfo)); } else { @@ -179,38 +129,43 @@ pub async fn upload_signing_keys_route( } if let Some(master_key) = &body.master_key { - db.users.add_cross_signing_keys( + services().users.add_cross_signing_keys( sender_user, master_key, &body.self_signing_key, &body.user_signing_key, - &db.rooms, - &db.globals, )?; } - db.flush()?; - - Ok(upload_signing_keys::Response {}.into()) + Ok(upload_signing_keys::v3::Response {}) } /// # `POST /_matrix/client/r0/keys/signatures/upload` /// /// Uploads end-to-end key signatures from the sender user. -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/unstable/keys/signatures/upload", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn upload_signatures_route( - db: DatabaseGuard, - body: Ruma, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - for (user_id, signed_keys) in &body.signed_keys { - for (key_id, signed_key) in signed_keys { - for signature in signed_key + for (user_id, keys) in &body.signed_keys { + for (key_id, key) in keys { + let key = serde_json::to_value(key) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid key JSON"))?; + + let is_signed_key = match key.get("usage") { + Some(usage) => usage + .as_array() + .map(|usage| !usage.contains(&json!("master"))) + .unwrap_or(false), + None => true, + }; + + if !is_signed_key { + continue; + } + + for signature in key .get("signatures") .ok_or(Error::BadRequest( ErrorKind::InvalidParam, @@ -241,21 +196,16 @@ pub async fn upload_signatures_route( ))? .to_owned(), ); - db.users.sign_key( - user_id, - key_id, - signature, - sender_user, - &db.rooms, - &db.globals, - )?; + services() + .users + .sign_key(user_id, key_id, signature, sender_user)?; } } } - db.flush()?; - - Ok(upload_signatures::Response {}.into()) + Ok(upload_signatures::v3::Response { + failures: BTreeMap::new(), // TODO: integrate + }) } /// # `POST /_matrix/client/r0/keys/changes` @@ -263,23 +213,18 @@ pub async fn upload_signatures_route( /// Gets a list of users who have updated their device identity keys since the previous sync token. 
/// /// - TODO: left users -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/keys/changes", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn get_key_changes_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut device_list_updates = HashSet::new(); device_list_updates.extend( - db.users + services() + .users .keys_changed( - &sender_user.to_string(), + sender_user.as_str(), body.from .parse() .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`."))?, @@ -292,11 +237,17 @@ pub async fn get_key_changes_route( .filter_map(|r| r.ok()), ); - for room_id in db.rooms.rooms_joined(sender_user).filter_map(|r| r.ok()) { + for room_id in services() + .rooms + .state_cache + .rooms_joined(sender_user) + .filter_map(|r| r.ok()) + { device_list_updates.extend( - db.users + services() + .users .keys_changed( - &room_id.to_string(), + room_id.as_ref(), body.from.parse().map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from`.") })?, @@ -307,19 +258,17 @@ pub async fn get_key_changes_route( .filter_map(|r| r.ok()), ); } - Ok(get_key_changes::Response { + Ok(get_key_changes::v3::Response { changed: device_list_updates.into_iter().collect(), left: Vec::new(), // TODO - } - .into()) + }) } pub(crate) async fn get_keys_helper bool>( sender_user: Option<&UserId>, - device_keys_input: &BTreeMap>>, + device_keys_input: &BTreeMap>, allowed_signatures: F, - db: &Database, -) -> Result { +) -> Result { let mut master_keys = BTreeMap::new(); let mut self_signing_keys = BTreeMap::new(); let mut user_signing_keys = BTreeMap::new(); @@ -328,7 +277,9 @@ pub(crate) async fn get_keys_helper bool>( let mut get_over_federation = HashMap::new(); for (user_id, device_ids) in device_keys_input { - if user_id.server_name() != db.globals.server_name() { + let user_id: &UserId = user_id; + + if user_id.server_name() != services().globals.server_name() { get_over_federation .entry(user_id.server_name()) .or_insert_with(Vec::new) @@ -338,57 +289,57 @@ pub(crate) async fn get_keys_helper bool>( if device_ids.is_empty() { let mut container = BTreeMap::new(); - for device_id in db.users.all_device_ids(user_id) { + for device_id in services().users.all_device_ids(user_id) { let device_id = device_id?; - if let Some(mut keys) = db.users.get_device_keys(user_id, &device_id)? { - let metadata = db + if let Some(mut keys) = services().users.get_device_keys(user_id, &device_id)? { + let metadata = services() .users .get_device_metadata(user_id, &device_id)? .ok_or_else(|| { Error::bad_database("all_device_keys contained nonexistent device.") })?; - keys.unsigned = UnsignedDeviceInfo { - device_display_name: metadata.display_name, - }; - + add_unsigned_device_display_name(&mut keys, metadata) + .map_err(|_| Error::bad_database("invalid device keys in database"))?; container.insert(device_id, keys); } } - device_keys.insert(user_id.clone(), container); + device_keys.insert(user_id.to_owned(), container); } else { for device_id in device_ids { let mut container = BTreeMap::new(); - if let Some(mut keys) = db.users.get_device_keys(&user_id.clone(), device_id)? { - let metadata = db.users.get_device_metadata(user_id, device_id)?.ok_or( - Error::BadRequest( + if let Some(mut keys) = services().users.get_device_keys(user_id, device_id)? { + let metadata = services() + .users + .get_device_metadata(user_id, device_id)? 
+ .ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Tried to get keys for nonexistent device.", - ), - )?; - - keys.unsigned = UnsignedDeviceInfo { - device_display_name: metadata.display_name, - }; + ))?; - container.insert(device_id.clone(), keys); + add_unsigned_device_display_name(&mut keys, metadata) + .map_err(|_| Error::bad_database("invalid device keys in database"))?; + container.insert(device_id.to_owned(), keys); } - device_keys.insert(user_id.clone(), container); + device_keys.insert(user_id.to_owned(), container); } } - if let Some(master_key) = db.users.get_master_key(user_id, &allowed_signatures)? { - master_keys.insert(user_id.clone(), master_key); + if let Some(master_key) = services() + .users + .get_master_key(user_id, &allowed_signatures)? + { + master_keys.insert(user_id.to_owned(), master_key); } - if let Some(self_signing_key) = db + if let Some(self_signing_key) = services() .users .get_self_signing_key(user_id, &allowed_signatures)? { - self_signing_keys.insert(user_id.clone(), self_signing_key); + self_signing_keys.insert(user_id.to_owned(), self_signing_key); } if Some(user_id) == sender_user { - if let Some(user_signing_key) = db.users.get_user_signing_key(user_id)? { - user_signing_keys.insert(user_id.clone(), user_signing_key); + if let Some(user_signing_key) = services().users.get_user_signing_key(user_id)? { + user_signing_keys.insert(user_id.to_owned(), user_signing_key); } } } @@ -400,13 +351,13 @@ pub(crate) async fn get_keys_helper bool>( .map(|(server, vec)| async move { let mut device_keys_input_fed = BTreeMap::new(); for (user_id, keys) in vec { - device_keys_input_fed.insert(user_id.clone(), keys.clone()); + device_keys_input_fed.insert(user_id.to_owned(), keys.clone()); } ( server, - db.sending + services() + .sending .send_federation_request( - &db.globals, server, federation::keys::get_keys::v1::Request { device_keys: device_keys_input_fed, @@ -430,7 +381,7 @@ pub(crate) async fn get_keys_helper bool>( } } - Ok(get_keys::Response { + Ok(get_keys::v3::Response { master_keys, self_signing_keys, user_signing_keys, @@ -439,16 +390,33 @@ pub(crate) async fn get_keys_helper bool>( }) } +fn add_unsigned_device_display_name( + keys: &mut Raw, + metadata: ruma::api::client::device::Device, +) -> serde_json::Result<()> { + if let Some(display_name) = metadata.display_name { + let mut object = keys.deserialize_as::>()?; + + let unsigned = object.entry("unsigned").or_insert_with(|| json!({})); + if let serde_json::Value::Object(unsigned_object) = unsigned { + unsigned_object.insert("device_display_name".to_owned(), display_name.into()); + } + + *keys = Raw::from_json(serde_json::value::to_raw_value(&object)?); + } + + Ok(()) +} + pub(crate) async fn claim_keys_helper( - one_time_keys_input: &BTreeMap, DeviceKeyAlgorithm>>, - db: &Database, -) -> Result { + one_time_keys_input: &BTreeMap>, +) -> Result { let mut one_time_keys = BTreeMap::new(); let mut get_over_federation = BTreeMap::new(); for (user_id, map) in one_time_keys_input { - if user_id.server_name() != db.globals.server_name() { + if user_id.server_name() != services().globals.server_name() { get_over_federation .entry(user_id.server_name()) .or_insert_with(Vec::new) @@ -458,8 +426,9 @@ pub(crate) async fn claim_keys_helper( let mut container = BTreeMap::new(); for (device_id, key_algorithm) in map { if let Some(one_time_keys) = - db.users - .take_one_time_key(user_id, device_id, key_algorithm, &db.globals)? + services() + .users + .take_one_time_key(user_id, device_id, key_algorithm)? 
{ let mut c = BTreeMap::new(); c.insert(one_time_keys.0, one_time_keys.1); @@ -471,30 +440,40 @@ pub(crate) async fn claim_keys_helper( let mut failures = BTreeMap::new(); - for (server, vec) in get_over_federation { - let mut one_time_keys_input_fed = BTreeMap::new(); - for (user_id, keys) in vec { - one_time_keys_input_fed.insert(user_id.clone(), keys.clone()); - } - // Ignore failures - if let Ok(keys) = db - .sending - .send_federation_request( - &db.globals, + let mut futures: FuturesUnordered<_> = get_over_federation + .into_iter() + .map(|(server, vec)| async move { + let mut one_time_keys_input_fed = BTreeMap::new(); + for (user_id, keys) in vec { + one_time_keys_input_fed.insert(user_id.clone(), keys.clone()); + } + ( server, - federation::keys::claim_keys::v1::Request { - one_time_keys: one_time_keys_input_fed, - }, + services() + .sending + .send_federation_request( + server, + federation::keys::claim_keys::v1::Request { + one_time_keys: one_time_keys_input_fed, + }, + ) + .await, ) - .await - { - one_time_keys.extend(keys.one_time_keys); - } else { - failures.insert(server.to_string(), json!({})); + }) + .collect(); + + while let Some((server, response)) = futures.next().await { + match response { + Ok(keys) => { + one_time_keys.extend(keys.one_time_keys); + } + Err(_e) => { + failures.insert(server.to_string(), json!({})); + } } } - Ok(claim_keys::Response { + Ok(claim_keys::v3::Response { failures, one_time_keys, }) diff --git a/src/api/client_server/media.rs b/src/api/client_server/media.rs new file mode 100644 index 00000000..ae023c95 --- /dev/null +++ b/src/api/client_server/media.rs @@ -0,0 +1,217 @@ +use crate::{service::media::FileMeta, services, utils, Error, Result, Ruma}; +use ruma::api::client::{ + error::ErrorKind, + media::{ + create_content, get_content, get_content_as_filename, get_content_thumbnail, + get_media_config, + }, +}; + +const MXC_LENGTH: usize = 32; + +/// # `GET /_matrix/media/r0/config` +/// +/// Returns max upload size. +pub async fn get_media_config_route( + _body: Ruma, +) -> Result { + Ok(get_media_config::v3::Response { + upload_size: services().globals.max_request_size().into(), + }) +} + +/// # `POST /_matrix/media/r0/upload` +/// +/// Permanently save media in the server. +/// +/// - Some metadata will be saved in the database +/// - Media will be saved in the media/ directory +pub async fn create_content_route( + body: Ruma, +) -> Result { + let mxc = format!( + "mxc://{}/{}", + services().globals.server_name(), + utils::random_string(MXC_LENGTH) + ); + + services() + .media + .create( + mxc.clone(), + body.filename + .as_ref() + .map(|filename| "inline; filename=".to_owned() + filename) + .as_deref(), + body.content_type.as_deref(), + &body.file, + ) + .await?; + + Ok(create_content::v3::Response { + content_uri: mxc.try_into().expect("Invalid mxc:// URI"), + blurhash: None, + }) +} + +pub async fn get_remote_content( + mxc: &str, + server_name: &ruma::ServerName, + media_id: &str, +) -> Result { + let content_response = services() + .sending + .send_federation_request( + server_name, + get_content::v3::Request { + allow_remote: false, + server_name, + media_id, + }, + ) + .await?; + + services() + .media + .create( + mxc.to_string(), + content_response.content_disposition.as_deref(), + content_response.content_type.as_deref(), + &content_response.file, + ) + .await?; + + Ok(content_response) +} + +/// # `GET /_matrix/media/r0/download/{serverName}/{mediaId}` +/// +/// Load media from our server or over federation. 
+///
+/// - Only allows federation if `allow_remote` is true
+pub async fn get_content_route(
+    body: Ruma,
+) -> Result {
+    let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
+
+    if let Some(FileMeta {
+        content_disposition,
+        content_type,
+        file,
+    }) = services().media.get(mxc.clone()).await?
+    {
+        Ok(get_content::v3::Response {
+            file,
+            content_type,
+            content_disposition,
+            cross_origin_resource_policy: Some("cross-origin".to_owned()),
+        })
+    } else if &*body.server_name != services().globals.server_name() && body.allow_remote {
+        let remote_content_response =
+            get_remote_content(&mxc, &body.server_name, &body.media_id).await?;
+        Ok(remote_content_response)
+    } else {
+        Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
+    }
+}
+
+/// # `GET /_matrix/media/r0/download/{serverName}/{mediaId}/{fileName}`
+///
+/// Load media from our server or over federation, permitting desired filename.
+///
+/// - Only allows federation if `allow_remote` is true
+pub async fn get_content_as_filename_route(
+    body: Ruma,
+) -> Result {
+    let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
+
+    if let Some(FileMeta {
+        content_disposition: _,
+        content_type,
+        file,
+    }) = services().media.get(mxc.clone()).await?
+    {
+        Ok(get_content_as_filename::v3::Response {
+            file,
+            content_type,
+            content_disposition: Some(format!("inline; filename={}", body.filename)),
+            cross_origin_resource_policy: Some("cross-origin".to_owned()),
+        })
+    } else if &*body.server_name != services().globals.server_name() && body.allow_remote {
+        let remote_content_response =
+            get_remote_content(&mxc, &body.server_name, &body.media_id).await?;
+
+        Ok(get_content_as_filename::v3::Response {
+            content_disposition: Some(format!("inline; filename={}", body.filename)),
+            content_type: remote_content_response.content_type,
+            file: remote_content_response.file,
+            cross_origin_resource_policy: Some("cross-origin".to_owned()),
+        })
+    } else {
+        Err(Error::BadRequest(ErrorKind::NotFound, "Media not found."))
+    }
+}
+
+/// # `GET /_matrix/media/r0/thumbnail/{serverName}/{mediaId}`
+///
+/// Load media thumbnail from our server or over federation.
+///
+/// - Only allows federation if `allow_remote` is true
+pub async fn get_content_thumbnail_route(
+    body: Ruma,
+) -> Result {
+    let mxc = format!("mxc://{}/{}", body.server_name, body.media_id);
+
+    if let Some(FileMeta {
+        content_type, file, ..
+    }) = services()
+        .media
+        .get_thumbnail(
+            mxc.clone(),
+            body.width
+                .try_into()
+                .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?,
+            body.height
+                .try_into()
+                .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Height is invalid."))?,
+        )
+        .await?
+ { + Ok(get_content_thumbnail::v3::Response { + file, + content_type, + cross_origin_resource_policy: Some("cross-origin".to_owned()), + }) + } else if &*body.server_name != services().globals.server_name() && body.allow_remote { + let get_thumbnail_response = services() + .sending + .send_federation_request( + &body.server_name, + get_content_thumbnail::v3::Request { + allow_remote: false, + height: body.height, + width: body.width, + method: body.method.clone(), + server_name: &body.server_name, + media_id: &body.media_id, + }, + ) + .await?; + + services() + .media + .upload_thumbnail( + mxc, + None, + get_thumbnail_response.content_type.as_deref(), + body.width.try_into().expect("all UInts are valid u32s"), + body.height.try_into().expect("all UInts are valid u32s"), + &get_thumbnail_response.file, + ) + .await?; + + Ok(get_thumbnail_response) + } else { + Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) + } +} diff --git a/src/client_server/membership.rs b/src/api/client_server/membership.rs similarity index 51% rename from src/client_server/membership.rs rename to src/api/client_server/membership.rs index 732f6162..7142b8ef 100644 --- a/src/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -1,14 +1,8 @@ -use crate::{ - client_server, - database::DatabaseGuard, - pdu::{EventHash, PduBuilder, PduEvent}, - server_server, utils, ConduitResult, Database, Error, Result, Ruma, -}; use ruma::{ api::{ client::{ error::ErrorKind, - r0::membership::{ + membership::{ ban_user, forget_room, get_member_events, invite_user, join_room_by_id, join_room_by_id_or_alias, joined_members, joined_rooms, kick_user, leave_room, unban_user, IncomingThirdPartySigned, @@ -16,28 +10,29 @@ use ruma::{ }, federation::{self, membership::create_invite}, }, + canonical_json::to_canonical_value, events::{ - room::{ - create::RoomCreateEventContent, - member::{MembershipState, RoomMemberEventContent}, - }, - EventType, + room::member::{MembershipState, RoomMemberEventContent}, + RoomEventType, StateEventType, }, - serde::{to_canonical_value, CanonicalJsonObject, CanonicalJsonValue}, - state_res::{self, RoomVersion}, - uint, EventId, RoomId, RoomVersionId, ServerName, UserId, + serde::Base64, + CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, OwnedServerName, + OwnedUserId, RoomId, RoomVersionId, UserId, }; use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; use std::{ collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, - convert::{TryFrom, TryInto}, sync::{Arc, RwLock}, time::{Duration, Instant}, }; use tracing::{debug, error, warn}; -#[cfg(feature = "conduit_bin")] -use rocket::{get, post}; +use crate::{ + service::pdu::{gen_event_id_canonical_json, PduBuilder}, + services, utils, Error, PduEvent, Result, Ruma, +}; + +use super::get_alias_helper; /// # `POST /_matrix/client/r0/rooms/{roomId}/join` /// @@ -45,43 +40,35 @@ use rocket::{get, post}; /// /// - If the server knowns about this room: creates the join event and does auth rules locally /// - If the server does not know about the room: asks other servers over federation -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/join", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn join_room_by_id_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let mut servers: HashSet<_> = db - .rooms - .invite_state(sender_user, 
&body.room_id)? - .unwrap_or_default() - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::try_from(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); + let mut servers = Vec::new(); // There is no body.server_name for /roomId/join + servers.extend( + services() + .rooms + .state_cache + .invite_state(sender_user, &body.room_id)? + .unwrap_or_default() + .iter() + .filter_map(|event| serde_json::from_str(event.json().get()).ok()) + .filter_map(|event: serde_json::Value| event.get("sender").cloned()) + .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) + .filter_map(|sender| UserId::parse(sender).ok()) + .map(|user| user.server_name().to_owned()), + ); - servers.insert(body.room_id.server_name().to_owned()); + servers.push(body.room_id.server_name().to_owned()); - let ret = join_room_by_id_helper( - &db, - body.sender_user.as_ref(), + join_room_by_id_helper( + body.sender_user.as_deref(), &body.room_id, &servers, body.third_party_signed.as_ref(), ) - .await; - - db.flush()?; - - ret + .await } /// # `POST /_matrix/client/r0/join/{roomIdOrAlias}` @@ -90,56 +77,50 @@ pub async fn join_room_by_id_route( /// /// - If the server knowns about this room: creates the join event and does auth rules locally /// - If the server does not know about the room: asks other servers over federation -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/join/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn join_room_by_id_or_alias_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_deref().expect("user is authenticated"); + let body = body.body; - let (servers, room_id) = match RoomId::try_from(body.room_id_or_alias.clone()) { + let (servers, room_id) = match OwnedRoomId::try_from(body.room_id_or_alias) { Ok(room_id) => { - let mut servers: HashSet<_> = db - .rooms - .invite_state(sender_user, &room_id)? - .unwrap_or_default() - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::try_from(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); - - servers.insert(room_id.server_name().to_owned()); + let mut servers = body.server_name.clone(); + servers.extend( + services() + .rooms + .state_cache + .invite_state(sender_user, &room_id)? 
+ .unwrap_or_default() + .iter() + .filter_map(|event| serde_json::from_str(event.json().get()).ok()) + .filter_map(|event: serde_json::Value| event.get("sender").cloned()) + .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) + .filter_map(|sender| UserId::parse(sender).ok()) + .map(|user| user.server_name().to_owned()), + ); + + servers.push(room_id.server_name().to_owned()); (servers, room_id) } Err(room_alias) => { - let response = client_server::get_alias_helper(&db, &room_alias).await?; + let response = get_alias_helper(&room_alias).await?; - (response.0.servers.into_iter().collect(), response.0.room_id) + (response.servers.into_iter().collect(), response.room_id) } }; let join_room_response = join_room_by_id_helper( - &db, - body.sender_user.as_ref(), + Some(sender_user), &room_id, &servers, body.third_party_signed.as_ref(), ) .await?; - db.flush()?; - - Ok(join_room_by_id_or_alias::Response { - room_id: join_room_response.0.room_id, - } - .into()) + Ok(join_room_by_id_or_alias::v3::Response { + room_id: join_room_response.room_id, + }) } /// # `POST /_matrix/client/r0/rooms/{roomId}/leave` @@ -147,42 +128,27 @@ pub async fn join_room_by_id_or_alias_route( /// Tries to leave the sender user from a room. /// /// - This should always work if the user is currently joined. -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/leave", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn leave_room_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - db.rooms.leave_room(sender_user, &body.room_id, &db).await?; - - db.flush()?; + leave_room(sender_user, &body.room_id).await?; - Ok(leave_room::Response::new().into()) + Ok(leave_room::v3::Response::new()) } /// # `POST /_matrix/client/r0/rooms/{roomId}/invite` /// /// Tries to send an invite event into the room. -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/invite", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn invite_user_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if let invite_user::IncomingInvitationRecipient::UserId { user_id } = &body.recipient { - invite_helper(sender_user, user_id, &body.room_id, &db, false).await?; - db.flush()?; - Ok(invite_user::Response {}.into()) + if let invite_user::v3::IncomingInvitationRecipient::UserId { user_id } = &body.recipient { + invite_helper(sender_user, user_id, &body.room_id, false).await?; + Ok(invite_user::v3::Response {}) } else { Err(Error::BadRequest(ErrorKind::NotFound, "User not found.")) } @@ -191,23 +157,19 @@ pub async fn invite_user_route( /// # `POST /_matrix/client/r0/rooms/{roomId}/kick` /// /// Tries to send a kick event into the room. -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/kick", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn kick_user_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut event: RoomMemberEventContent = serde_json::from_str( - db.rooms + services() + .rooms + .state_accessor .room_state_get( &body.room_id, - &EventType::RoomMember, - &body.user_id.to_string(), + &StateEventType::RoomMember, + body.user_id.as_ref(), )? 
.ok_or(Error::BadRequest( ErrorKind::BadState, @@ -222,7 +184,8 @@ pub async fn kick_user_route( // TODO: reason let mutex_state = Arc::clone( - db.globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -231,9 +194,9 @@ pub async fn kick_user_route( ); let state_lock = mutex_state.lock().await; - db.rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), unsigned: None, state_key: Some(body.user_id.to_string()), @@ -241,49 +204,42 @@ pub async fn kick_user_route( }, sender_user, &body.room_id, - &db, &state_lock, )?; drop(state_lock); - db.flush()?; - - Ok(kick_user::Response::new().into()) + Ok(kick_user::v3::Response::new()) } /// # `POST /_matrix/client/r0/rooms/{roomId}/ban` /// /// Tries to send a ban event into the room. -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/ban", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn ban_user_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // TODO: reason - let event = db + let event = services() .rooms + .state_accessor .room_state_get( &body.room_id, - &EventType::RoomMember, - &body.user_id.to_string(), + &StateEventType::RoomMember, + body.user_id.as_ref(), )? .map_or( Ok(RoomMemberEventContent { membership: MembershipState::Ban, - displayname: db.users.displayname(&body.user_id)?, - avatar_url: db.users.avatar_url(&body.user_id)?, + displayname: services().users.displayname(&body.user_id)?, + avatar_url: services().users.avatar_url(&body.user_id)?, is_direct: None, third_party_invite: None, - blurhash: db.users.blurhash(&body.user_id)?, + blurhash: services().users.blurhash(&body.user_id)?, reason: None, + join_authorized_via_users_server: None, }), |event| { serde_json::from_str(event.content.get()) @@ -296,7 +252,8 @@ pub async fn ban_user_route( )?; let mutex_state = Arc::clone( - db.globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -305,9 +262,9 @@ pub async fn ban_user_route( ); let state_lock = mutex_state.lock().await; - db.rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), unsigned: None, state_key: Some(body.user_id.to_string()), @@ -315,37 +272,30 @@ pub async fn ban_user_route( }, sender_user, &body.room_id, - &db, &state_lock, )?; drop(state_lock); - db.flush()?; - - Ok(ban_user::Response::new().into()) + Ok(ban_user::v3::Response::new()) } /// # `POST /_matrix/client/r0/rooms/{roomId}/unban` /// /// Tries to send an unban event into the room. -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/unban", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn unban_user_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut event: RoomMemberEventContent = serde_json::from_str( - db.rooms + services() + .rooms + .state_accessor .room_state_get( &body.room_id, - &EventType::RoomMember, - &body.user_id.to_string(), + &StateEventType::RoomMember, + body.user_id.as_ref(), )? 
.ok_or(Error::BadRequest( ErrorKind::BadState, @@ -359,7 +309,8 @@ pub async fn unban_user_route( event.membership = MembershipState::Leave; let mutex_state = Arc::clone( - db.globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -368,9 +319,9 @@ pub async fn unban_user_route( ); let state_lock = mutex_state.lock().await; - db.rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), unsigned: None, state_key: Some(body.user_id.to_string()), @@ -378,15 +329,12 @@ pub async fn unban_user_route( }, sender_user, &body.room_id, - &db, &state_lock, )?; drop(state_lock); - db.flush()?; - - Ok(unban_user::Response::new().into()) + Ok(unban_user::v3::Response::new()) } /// # `POST /_matrix/client/r0/rooms/{roomId}/forget` @@ -397,46 +345,35 @@ pub async fn unban_user_route( /// /// Note: Other devices of the user have no way of knowing the room was forgotten, so this has to /// be called from every device -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/forget", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn forget_room_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - db.rooms.forget(&body.room_id, sender_user)?; - - db.flush()?; + services() + .rooms + .state_cache + .forget(&body.room_id, sender_user)?; - Ok(forget_room::Response::new().into()) + Ok(forget_room::v3::Response::new()) } /// # `POST /_matrix/client/r0/joined_rooms` /// /// Lists all rooms the user has joined. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/joined_rooms", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn joined_rooms_route( - db: DatabaseGuard, - body: Ruma, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - Ok(joined_rooms::Response { - joined_rooms: db + Ok(joined_rooms::v3::Response { + joined_rooms: services() .rooms + .state_cache .rooms_joined(sender_user) .filter_map(|r| r.ok()) .collect(), - } - .into()) + }) } /// # `POST /_matrix/client/r0/rooms/{roomId}/members` @@ -444,35 +381,34 @@ pub async fn joined_rooms_route( /// Lists all joined users in a room (TODO: at a specific point in time, with a specific membership). /// /// - Only works if the user is currently joined -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_>/members", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn get_member_events_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // TODO: check history visibility? - if !db.rooms.is_joined(sender_user, &body.room_id)? { + if !services() + .rooms + .state_cache + .is_joined(sender_user, &body.room_id)? + { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", )); } - Ok(get_member_events::Response { - chunk: db + Ok(get_member_events::v3::Response { + chunk: services() .rooms - .room_state_full(&body.room_id)? + .state_accessor + .room_state_full(&body.room_id) + .await? 
.iter() - .filter(|(key, _)| key.0 == EventType::RoomMember) + .filter(|(key, _)| key.0 == StateEventType::RoomMember) .map(|(_, pdu)| pdu.to_member_event()) .collect(), - } - .into()) + }) } /// # `POST /_matrix/client/r0/rooms/{roomId}/joined_members` @@ -481,18 +417,16 @@ pub async fn get_member_events_route( /// /// - The sender user must be in the room /// - TODO: An appservice just needs a puppet joined -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_>/joined_members", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn joined_members_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !db.rooms.is_joined(sender_user, &body.room_id)? { + if !services() + .rooms + .state_cache + .is_joined(sender_user, &body.room_id)? + { return Err(Error::BadRequest( ErrorKind::Forbidden, "You aren't a member of the room.", @@ -500,58 +434,65 @@ pub async fn joined_members_route( } let mut joined = BTreeMap::new(); - for user_id in db.rooms.room_members(&body.room_id).filter_map(|r| r.ok()) { - let display_name = db.users.displayname(&user_id)?; - let avatar_url = db.users.avatar_url(&user_id)?; + for user_id in services() + .rooms + .state_cache + .room_members(&body.room_id) + .filter_map(|r| r.ok()) + { + let display_name = services().users.displayname(&user_id)?; + let avatar_url = services().users.avatar_url(&user_id)?; joined.insert( user_id, - joined_members::RoomMember { + joined_members::v3::RoomMember { display_name, avatar_url, }, ); } - Ok(joined_members::Response { joined }.into()) + Ok(joined_members::v3::Response { joined }) } -#[tracing::instrument(skip(db))] async fn join_room_by_id_helper( - db: &Database, sender_user: Option<&UserId>, room_id: &RoomId, - servers: &HashSet>, + servers: &[OwnedServerName], _third_party_signed: Option<&IncomingThirdPartySigned>, -) -> ConduitResult { +) -> Result { let sender_user = sender_user.expect("user is authenticated"); let mutex_state = Arc::clone( - db.globals + services() + .globals .roomid_mutex_state .write() .unwrap() - .entry(room_id.clone()) + .entry(room_id.to_owned()) .or_default(), ); let state_lock = mutex_state.lock().await; - // Ask a remote server if we don't have this room - if !db.rooms.exists(room_id)? && room_id.server_name() != db.globals.server_name() { + // Ask a remote server if we are not participating in this room + if !services() + .rooms + .state_cache + .server_in_room(services().globals.server_name(), room_id)? 
+ { let mut make_join_response_and_server = Err(Error::BadServerResponse( "No server available to assist in joining.", )); for remote_server in servers { - let make_join_response = db + let make_join_response = services() .sending .send_federation_request( - &db.globals, remote_server, - federation::membership::create_join_event_template::v1::Request { + federation::membership::prepare_join_event::v1::Request { room_id, user_id: sender_user, - ver: &[RoomVersionId::Version5, RoomVersionId::Version6], + ver: &services().globals.supported_room_versions(), }, ) .await; @@ -567,8 +508,10 @@ async fn join_room_by_id_helper( let room_version = match make_join_response.room_version { Some(room_version) - if room_version == RoomVersionId::Version5 - || room_version == RoomVersionId::Version6 => + if services() + .globals + .supported_room_versions() + .contains(&room_version) => { room_version } @@ -580,10 +523,19 @@ async fn join_room_by_id_helper( Error::BadServerResponse("Invalid make_join event json received from server.") })?; + let join_authorized_via_users_server = join_event_stub + .get("content") + .map(|s| { + s.as_object()? + .get("join_authorised_via_users_server")? + .as_str() + }) + .and_then(|s| OwnedUserId::try_from(s.unwrap_or_default()).ok()); + // TODO: Is origin needed? join_event_stub.insert( "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), + CanonicalJsonValue::String(services().globals.server_name().as_str().to_owned()), ); join_event_stub.insert( "origin_server_ts".to_owned(), @@ -597,12 +549,13 @@ async fn join_room_by_id_helper( "content".to_owned(), to_canonical_value(RoomMemberEventContent { membership: MembershipState::Join, - displayname: db.users.displayname(sender_user)?, - avatar_url: db.users.avatar_url(sender_user)?, + displayname: services().users.displayname(sender_user)?, + avatar_url: services().users.avatar_url(sender_user)?, is_direct: None, third_party_invite: None, - blurhash: db.users.blurhash(sender_user)?, + blurhash: services().users.blurhash(sender_user)?, reason: None, + join_authorized_via_users_server, }) .expect("event is valid, we just created it"), ); @@ -612,20 +565,21 @@ async fn join_room_by_id_helper( // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), + services().globals.server_name().as_str(), + services().globals.keypair(), &mut join_event_stub, &room_version, ) .expect("event is valid, we just created it"); // Generate event id - let event_id = EventId::try_from(&*format!( + let event_id = format!( "${}", ruma::signatures::reference_hash(&join_event_stub, &room_version) .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); + ); + let event_id = <&EventId>::try_from(event_id.as_str()) + .expect("ruma's reference hashes are valid event ids"); // Add event_id back join_event_stub.insert( @@ -636,40 +590,37 @@ async fn join_room_by_id_helper( // It has enough fields to be called a proper event now let join_event = join_event_stub; - let send_join_response = db + let send_join_response = services() .sending .send_federation_request( - &db.globals, remote_server, federation::membership::create_join_event::v2::Request { room_id, - event_id: &event_id, + event_id, pdu: &PduEvent::convert_to_outgoing_federation_event(join_event.clone()), }, ) .await?; - db.rooms.get_or_create_shortroomid(room_id, 
&db.globals)?; + services().rooms.short.get_or_create_shortroomid(room_id)?; - let pdu = PduEvent::from_id_val(&event_id, join_event.clone()) + let parsed_pdu = PduEvent::from_id_val(event_id, join_event.clone()) .map_err(|_| Error::BadServerResponse("Invalid join event PDU."))?; let mut state = HashMap::new(); let pub_key_map = RwLock::new(BTreeMap::new()); - server_server::fetch_join_signing_keys( - &send_join_response, - &room_version, - &pub_key_map, - db, - ) - .await?; + services() + .rooms + .event_handler + .fetch_join_signing_keys(&send_join_response, &room_version, &pub_key_map) + .await?; for result in send_join_response .room_state .state .iter() - .map(|pdu| validate_and_add_event_id(pdu, &room_version, &pub_key_map, db)) + .map(|pdu| validate_and_add_event_id(pdu, &room_version, &pub_key_map)) { let (event_id, value) = match result { Ok(t) => t, @@ -681,85 +632,109 @@ async fn join_room_by_id_helper( Error::BadServerResponse("Invalid PDU in send_join response.") })?; - db.rooms.add_pdu_outlier(&event_id, &value)?; + services() + .rooms + .outlier + .add_pdu_outlier(&event_id, &value)?; if let Some(state_key) = &pdu.state_key { - let shortstatekey = - db.rooms - .get_or_create_shortstatekey(&pdu.kind, state_key, &db.globals)?; + let shortstatekey = services() + .rooms + .short + .get_or_create_shortstatekey(&pdu.kind.to_string().into(), state_key)?; state.insert(shortstatekey, pdu.event_id.clone()); } } - let incoming_shortstatekey = db.rooms.get_or_create_shortstatekey( - &pdu.kind, - pdu.state_key + let incoming_shortstatekey = services().rooms.short.get_or_create_shortstatekey( + &parsed_pdu.kind.to_string().into(), + parsed_pdu + .state_key .as_ref() .expect("Pdu is a membership state event"), - &db.globals, )?; - state.insert(incoming_shortstatekey, pdu.event_id.clone()); + state.insert(incoming_shortstatekey, parsed_pdu.event_id.clone()); - let create_shortstatekey = db + let create_shortstatekey = services() .rooms - .get_shortstatekey(&EventType::RoomCreate, "")? + .short + .get_shortstatekey(&StateEventType::RoomCreate, "")? .expect("Room exists"); if state.get(&create_shortstatekey).is_none() { return Err(Error::BadServerResponse("State contained no create event.")); } - db.rooms.force_state( - room_id, - state - .into_iter() - .map(|(k, id)| db.rooms.compress_state_event(k, &id, &db.globals)) - .collect::>()?, - db, - )?; - for result in send_join_response .room_state .auth_chain .iter() - .map(|pdu| validate_and_add_event_id(pdu, &room_version, &pub_key_map, db)) + .map(|pdu| validate_and_add_event_id(pdu, &room_version, &pub_key_map)) { let (event_id, value) = match result { Ok(t) => t, Err(_) => continue, }; - db.rooms.add_pdu_outlier(&event_id, &value)?; + services() + .rooms + .outlier + .add_pdu_outlier(&event_id, &value)?; } + let (statehash_before_join, new, removed) = services().rooms.state_compressor.save_state( + room_id, + state + .into_iter() + .map(|(k, id)| { + services() + .rooms + .state_compressor + .compress_state_event(k, &id) + }) + .collect::>()?, + )?; + + services() + .rooms + .state + .force_state(room_id, statehash_before_join, new, removed, &state_lock) + .await?; + + services().rooms.state_cache.update_joined_count(room_id)?; + // We append to state before appending the pdu, so we don't have a moment in time with the // pdu without it's state. This is okay because append_pdu can't fail. 
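// Ordering sketch (as the surrounding code reads): the freshly fetched room state is
// persisted first (save_state + force_state above), then the join PDU itself is
// appended below, and only after that is the room's current-state pointer advanced,
// so there is never a moment where the join event exists without its state.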
- let statehashid = db.rooms.append_to_state(&pdu, &db.globals)?; + let statehash_after_join = services().rooms.state.append_to_state(&parsed_pdu)?; - db.rooms.append_pdu( - &pdu, - utils::to_canonical_object(&pdu).expect("Pdu is valid canonical object"), - &[pdu.event_id.clone()], - db, + services().rooms.timeline.append_pdu( + &parsed_pdu, + join_event, + vec![(*parsed_pdu.event_id).to_owned()], + &state_lock, )?; // We set the room state after inserting the pdu, so that we never have a moment in time // where events in the current room state do not exist - db.rooms.set_room_state(room_id, statehashid)?; + services() + .rooms + .state + .set_room_state(room_id, statehash_after_join, &state_lock)?; } else { let event = RoomMemberEventContent { membership: MembershipState::Join, - displayname: db.users.displayname(sender_user)?, - avatar_url: db.users.avatar_url(sender_user)?, + displayname: services().users.displayname(sender_user)?, + avatar_url: services().users.avatar_url(sender_user)?, is_direct: None, third_party_invite: None, - blurhash: db.users.blurhash(sender_user)?, + blurhash: services().users.blurhash(sender_user)?, reason: None, + join_authorized_via_users_server: None, }; - db.rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&event).expect("event is valid, we just created it"), unsigned: None, state_key: Some(sender_user.to_string()), @@ -767,43 +742,45 @@ async fn join_room_by_id_helper( }, sender_user, room_id, - db, &state_lock, )?; } drop(state_lock); - db.flush()?; - - Ok(join_room_by_id::Response::new(room_id.clone()).into()) + Ok(join_room_by_id::v3::Response::new(room_id.to_owned())) } fn validate_and_add_event_id( pdu: &RawJsonValue, room_version: &RoomVersionId, - pub_key_map: &RwLock>>, - db: &Database, -) -> Result<(EventId, CanonicalJsonObject)> { + pub_key_map: &RwLock>>, +) -> Result<(OwnedEventId, CanonicalJsonObject)> { let mut value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); Error::BadServerResponse("Invalid PDU in server response") })?; - let event_id = EventId::try_from(&*format!( + let event_id = EventId::parse(format!( "${}", ruma::signatures::reference_hash(&value, room_version) .expect("ruma can calculate reference hashes") )) .expect("ruma's reference hashes are valid event ids"); - let back_off = |id| match db.globals.bad_event_ratelimiter.write().unwrap().entry(id) { + let back_off = |id| match services() + .globals + .bad_event_ratelimiter + .write() + .unwrap() + .entry(id) + { Entry::Vacant(e) => { e.insert((Instant::now(), 1)); } Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), }; - if let Some((time, tries)) = db + if let Some((time, tries)) = services() .globals .bad_event_ratelimiter .read() @@ -846,58 +823,21 @@ pub(crate) async fn invite_helper<'a>( sender_user: &UserId, user_id: &UserId, room_id: &RoomId, - db: &Database, is_direct: bool, ) -> Result<()> { - if user_id.server_name() != db.globals.server_name() { - let (room_version_id, pdu_json, invite_room_state) = { + if user_id.server_name() != services().globals.server_name() { + let (pdu_json, invite_room_state) = { let mutex_state = Arc::clone( - db.globals + services() + .globals .roomid_mutex_state .write() .unwrap() - .entry(room_id.clone()) + .entry(room_id.to_owned()) .or_default(), ); let state_lock = mutex_state.lock().await; 
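// Note on the invite path below: the single create_hash_and_sign_event call now stands
// in for the deleted inline steps (collecting prev_events, computing depth, running the
// auth check, hashing and signing); the PduBuilder passed to it carries only the member
// event content and the invited user's state key.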
- let prev_events: Vec<_> = db - .rooms - .get_pdu_leaves(room_id)? - .into_iter() - .take(20) - .collect(); - - let create_event = db - .rooms - .room_state_get(room_id, &EventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - let create_prev_event = if prev_events.len() == 1 - && Some(&prev_events[0]) == create_event.as_ref().map(|c| &c.event_id) - { - create_event - } else { - None - }; - - // If there was no create event yet, assume we are creating a version 6 room right now - let room_version_id = create_event_content - .map_or(RoomVersionId::Version6, |create_event| { - create_event.room_version - }); - let room_version = - RoomVersion::new(&room_version_id).expect("room version is supported"); - let content = to_raw_value(&RoomMemberEventContent { avatar_url: None, displayname: None, @@ -906,130 +846,50 @@ pub(crate) async fn invite_helper<'a>( third_party_invite: None, blurhash: None, reason: None, + join_authorized_via_users_server: None, }) .expect("member event is valid value"); - let state_key = user_id.to_string(); - let kind = EventType::RoomMember; - - let auth_events = db.rooms.get_auth_events( - room_id, - &kind, + let (pdu, pdu_json) = services().rooms.timeline.create_hash_and_sign_event( + PduBuilder { + event_type: RoomEventType::RoomMember, + content, + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, sender_user, - Some(&state_key), - &content, + room_id, + &state_lock, )?; - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(db.rooms.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = BTreeMap::new(); - - if let Some(prev_pdu) = db.rooms.room_state_get(room_id, &kind, &state_key)? 
{ - unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone()); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::from_str(prev_pdu.sender.as_str()).expect("UserId is valid string"), - ); - } - - let pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater"), - room_id: room_id.clone(), - sender: sender_user.clone(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind, - content, - state_key: Some(state_key), - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts: None, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - create_prev_event, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - to_canonical_value(db.globals.server_name()) - .expect("server name is a valid CanonicalJsonValue"), - ); - - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) - .expect("event is valid, we just created it"); - - let invite_room_state = db.rooms.calculate_invite_state(&pdu)?; + let invite_room_state = services().rooms.state.calculate_invite_state(&pdu)?; drop(state_lock); - (room_version_id, pdu_json, invite_room_state) + (pdu_json, invite_room_state) }; // Generate event id - let expected_event_id = EventId::try_from(&*format!( + let expected_event_id = format!( "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); + ruma::signatures::reference_hash( + &pdu_json, + &services().rooms.state.get_room_version(room_id)? 
+ ) + .expect("ruma can calculate reference hashes") + ); + let expected_event_id = <&EventId>::try_from(expected_event_id.as_str()) + .expect("ruma's reference hashes are valid event ids"); - let response = db + let response = services() .sending .send_federation_request( - &db.globals, user_id.server_name(), create_invite::v2::Request { room_id, - event_id: &expected_event_id, - room_version: &room_version_id, + event_id: expected_event_id, + room_version: &services().rooms.state.get_room_version(room_id)?, event: &PduEvent::convert_to_outgoing_federation_event(pdu_json.clone()), invite_room_state: &invite_room_state, }, @@ -1039,7 +899,7 @@ pub(crate) async fn invite_helper<'a>( let pub_key_map = RwLock::new(BTreeMap::new()); // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(&response.event) { + let (event_id, value) = match gen_event_id_canonical_json(&response.event) { Ok(t) => t, Err(_) => { // Event could not be converted to canonical json @@ -1054,7 +914,7 @@ pub(crate) async fn invite_helper<'a>( warn!("Server {} changed invite event, that's not allowed in the spec: ours: {:?}, theirs: {:?}", user_id.server_name(), pdu_json, value); } - let origin: Box = serde_json::from_value( + let origin: OwnedServerName = serde_json::from_value( serde_json::to_value(value.get("origin").ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Event needs an origin field.", @@ -1063,59 +923,63 @@ pub(crate) async fn invite_helper<'a>( ) .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; - let pdu_id = server_server::handle_incoming_pdu( - &origin, - &event_id, - room_id, - value, - true, - db, - &pub_key_map, - ) - .await - .map_err(|_| { - Error::BadRequest( + let pdu_id: Vec = services() + .rooms + .event_handler + .handle_incoming_pdu(&origin, &event_id, room_id, value, true, &pub_key_map) + .await? + .ok_or(Error::BadRequest( ErrorKind::InvalidParam, - "Error while handling incoming PDU.", - ) - })? - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Could not accept incoming PDU as timeline event.", - ))?; + "Could not accept incoming PDU as timeline event.", + ))?; - let servers = db + // Bind to variable because of lifetimes + let servers = services() .rooms + .state_cache .room_servers(room_id) .filter_map(|r| r.ok()) - .filter(|server| &**server != db.globals.server_name()); + .filter(|server| &**server != services().globals.server_name()); - db.sending.send_pdu(servers, &pdu_id)?; + services().sending.send_pdu(servers, &pdu_id)?; return Ok(()); } + if !services() + .rooms + .state_cache + .is_joined(sender_user, room_id)? 
+ { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You don't have permission to view this room.", + )); + } + let mutex_state = Arc::clone( - db.globals + services() + .globals .roomid_mutex_state .write() .unwrap() - .entry(room_id.clone()) + .entry(room_id.to_owned()) .or_default(), ); let state_lock = mutex_state.lock().await; - db.rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Invite, - displayname: db.users.displayname(user_id)?, - avatar_url: db.users.avatar_url(user_id)?, + displayname: services().users.displayname(user_id)?, + avatar_url: services().users.avatar_url(user_id)?, is_direct: Some(is_direct), third_party_invite: None, - blurhash: db.users.blurhash(user_id)?, + blurhash: services().users.blurhash(user_id)?, reason: None, + join_authorized_via_users_server: None, }) .expect("event is valid, we just created it"), unsigned: None, @@ -1124,7 +988,6 @@ pub(crate) async fn invite_helper<'a>( }, sender_user, room_id, - db, &state_lock, )?; @@ -1132,3 +995,218 @@ pub(crate) async fn invite_helper<'a>( Ok(()) } + +// Make a user leave all their joined rooms +pub async fn leave_all_rooms(user_id: &UserId) -> Result<()> { + let all_rooms = services() + .rooms + .state_cache + .rooms_joined(user_id) + .chain( + services() + .rooms + .state_cache + .rooms_invited(user_id) + .map(|t| t.map(|(r, _)| r)), + ) + .collect::>(); + + for room_id in all_rooms { + let room_id = match room_id { + Ok(room_id) => room_id, + Err(_) => continue, + }; + + let _ = leave_room(user_id, &room_id).await; + } + + Ok(()) +} + +pub async fn leave_room(user_id: &UserId, room_id: &RoomId) -> Result<()> { + // Ask a remote server if we don't have this room + if !services().rooms.metadata.exists(room_id)? + && room_id.server_name() != services().globals.server_name() + { + if let Err(e) = remote_leave_room(user_id, room_id).await { + warn!("Failed to leave room {} remotely: {}", user_id, e); + // Don't tell the client about this error + } + + let last_state = services() + .rooms + .state_cache + .invite_state(user_id, room_id)? + .map_or_else( + || services().rooms.state_cache.left_state(user_id, room_id), + |s| Ok(Some(s)), + )?; + + // We always drop the invite, we can't rely on other servers + services().rooms.state_cache.update_membership( + room_id, + user_id, + MembershipState::Leave, + user_id, + last_state, + true, + )?; + } else { + let mutex_state = Arc::clone( + services() + .globals + .roomid_mutex_state + .write() + .unwrap() + .entry(room_id.to_owned()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + let mut event: RoomMemberEventContent = serde_json::from_str( + services() + .rooms + .state_accessor + .room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())? + .ok_or(Error::BadRequest( + ErrorKind::BadState, + "Cannot leave a room you are not a member of.", + ))? 
+ .content + .get(), + ) + .map_err(|_| Error::bad_database("Invalid member event in database."))?; + + event.membership = MembershipState::Leave; + + services().rooms.timeline.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomMember, + content: to_raw_value(&event).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + user_id, + room_id, + &state_lock, + )?; + } + + Ok(()) +} + +async fn remote_leave_room(user_id: &UserId, room_id: &RoomId) -> Result<()> { + let mut make_leave_response_and_server = Err(Error::BadServerResponse( + "No server available to assist in leaving.", + )); + + let invite_state = services() + .rooms + .state_cache + .invite_state(user_id, room_id)? + .ok_or(Error::BadRequest( + ErrorKind::BadState, + "User is not invited.", + ))?; + + let servers: HashSet<_> = invite_state + .iter() + .filter_map(|event| serde_json::from_str(event.json().get()).ok()) + .filter_map(|event: serde_json::Value| event.get("sender").cloned()) + .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) + .filter_map(|sender| UserId::parse(sender).ok()) + .map(|user| user.server_name().to_owned()) + .collect(); + + for remote_server in servers { + let make_leave_response = services() + .sending + .send_federation_request( + &remote_server, + federation::membership::prepare_leave_event::v1::Request { room_id, user_id }, + ) + .await; + + make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); + + if make_leave_response_and_server.is_ok() { + break; + } + } + + let (make_leave_response, remote_server) = make_leave_response_and_server?; + + let room_version_id = match make_leave_response.room_version { + Some(version) + if services() + .globals + .supported_room_versions() + .contains(&version) => + { + version + } + _ => return Err(Error::BadServerResponse("Room version is not supported")), + }; + + let mut leave_event_stub = serde_json::from_str::( + make_leave_response.event.get(), + ) + .map_err(|_| Error::BadServerResponse("Invalid make_leave event json received from server."))?; + + // TODO: Is origin needed? 
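// Flow of the remote leave below: take the make_leave template returned by the remote
// server, stamp it with this server's `origin` and `origin_server_ts`, drop any inline
// `event_id` (only allowed in v1/v2 rooms), hash and sign the stub, derive the event id
// from the reference hash, add it back, and hand the finished event to send_leave (v2).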
+ leave_event_stub.insert( + "origin".to_owned(), + CanonicalJsonValue::String(services().globals.server_name().as_str().to_owned()), + ); + leave_event_stub.insert( + "origin_server_ts".to_owned(), + CanonicalJsonValue::Integer( + utils::millis_since_unix_epoch() + .try_into() + .expect("Timestamp is valid js_int value"), + ), + ); + // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms + leave_event_stub.remove("event_id"); + + // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present + ruma::signatures::hash_and_sign_event( + services().globals.server_name().as_str(), + services().globals.keypair(), + &mut leave_event_stub, + &room_version_id, + ) + .expect("event is valid, we just created it"); + + // Generate event id + let event_id = EventId::parse(format!( + "${}", + ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) + .expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are valid event ids"); + + // Add event_id back + leave_event_stub.insert( + "event_id".to_owned(), + CanonicalJsonValue::String(event_id.as_str().to_owned()), + ); + + // It has enough fields to be called a proper event now + let leave_event = leave_event_stub; + + services() + .sending + .send_federation_request( + &remote_server, + federation::membership::create_leave_event::v2::Request { + room_id, + event_id: &event_id, + pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), + }, + ) + .await?; + + Ok(()) +} diff --git a/src/api/client_server/message.rs b/src/api/client_server/message.rs new file mode 100644 index 00000000..b04c2626 --- /dev/null +++ b/src/api/client_server/message.rs @@ -0,0 +1,271 @@ +use crate::{service::pdu::PduBuilder, services, utils, Error, Result, Ruma}; +use ruma::{ + api::client::{ + error::ErrorKind, + message::{get_message_events, send_message_event}, + }, + events::{RoomEventType, StateEventType}, +}; +use std::{ + collections::{BTreeMap, HashSet}, + sync::Arc, +}; + +/// # `PUT /_matrix/client/r0/rooms/{roomId}/send/{eventType}/{txnId}` +/// +/// Send a message event into the room. +/// +/// - Is a NOOP if the txn id was already used before and returns the same event id again +/// - The only requirement for the content is that it has to be valid json +/// - Tries to send the event into the room, auth rules will determine if it is allowed +pub async fn send_message_event_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_device = body.sender_device.as_deref(); + + let mutex_state = Arc::clone( + services() + .globals + .roomid_mutex_state + .write() + .unwrap() + .entry(body.room_id.clone()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + // Forbid m.room.encrypted if encryption is disabled + if RoomEventType::RoomEncrypted == body.event_type.to_string().into() + && !services().globals.allow_encryption() + { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Encryption has been disabled", + )); + } + + // Check if this is a new transaction id + if let Some(response) = + services() + .transaction_ids + .existing_txnid(sender_user, sender_device, &body.txn_id)? 
+ { + // The client might have sent a txnid of the /sendToDevice endpoint + // This txnid has no response associated with it + if response.is_empty() { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Tried to use txn id already used for an incompatible endpoint.", + )); + } + + let event_id = utils::string_from_bytes(&response) + .map_err(|_| Error::bad_database("Invalid txnid bytes in database."))? + .try_into() + .map_err(|_| Error::bad_database("Invalid event id in txnid data."))?; + return Ok(send_message_event::v3::Response { event_id }); + } + + let mut unsigned = BTreeMap::new(); + unsigned.insert("transaction_id".to_owned(), body.txn_id.to_string().into()); + + let event_id = services().rooms.timeline.build_and_append_pdu( + PduBuilder { + event_type: body.event_type.to_string().into(), + content: serde_json::from_str(body.body.body.json().get()) + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?, + unsigned: Some(unsigned), + state_key: None, + redacts: None, + }, + sender_user, + &body.room_id, + &state_lock, + )?; + + services().transaction_ids.add_txnid( + sender_user, + sender_device, + &body.txn_id, + event_id.as_bytes(), + )?; + + drop(state_lock); + + Ok(send_message_event::v3::Response::new( + (*event_id).to_owned(), + )) +} + +/// # `GET /_matrix/client/r0/rooms/{roomId}/messages` +/// +/// Allows paginating through room history. +/// +/// - Only works if the user is joined (TODO: always allow, but only show events where the user was +/// joined, depending on history_visibility) +pub async fn get_message_events_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let sender_device = body.sender_device.as_ref().expect("user is authenticated"); + + if !services() + .rooms + .state_cache + .is_joined(sender_user, &body.room_id)? + { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You don't have permission to view this room.", + )); + } + + let from = match body.from.clone() { + Some(from) => from + .parse() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from` value."))?, + + None => match body.dir { + ruma::api::client::Direction::Forward => 0, + ruma::api::client::Direction::Backward => u64::MAX, + }, + }; + + let to = body.to.as_ref().map(|t| t.parse()); + + services().rooms.lazy_loading.lazy_load_confirm_delivery( + sender_user, + sender_device, + &body.room_id, + from, + )?; + + // Use limit or else 10 + let limit = body.limit.try_into().map_or(10_usize, |l: u32| l as usize); + + let next_token; + + let mut resp = get_message_events::v3::Response::new(); + + let mut lazy_loaded = HashSet::new(); + + match body.dir { + ruma::api::client::Direction::Forward => { + let events_after: Vec<_> = services() + .rooms + .timeline + .pdus_after(sender_user, &body.room_id, from)? + .take(limit) + .filter_map(|r| r.ok()) // Filter out buggy events + .filter_map(|(pdu_id, pdu)| { + services() + .rooms + .timeline + .pdu_count(&pdu_id) + .map(|pdu_count| (pdu_count, pdu)) + .ok() + }) + .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to` + .collect(); + + for (_, event) in &events_after { + /* TODO: Remove this when these are resolved: + * https://github.com/vector-im/element-android/issues/3417 + * https://github.com/vector-im/element-web/issues/21034 + if !services().rooms.lazy_loading.lazy_load_was_sent_before( + sender_user, + sender_device, + &body.room_id, + &event.sender, + )? 
{ + lazy_loaded.insert(event.sender.clone()); + } + */ + lazy_loaded.insert(event.sender.clone()); + } + + next_token = events_after.last().map(|(count, _)| count).copied(); + + let events_after: Vec<_> = events_after + .into_iter() + .map(|(_, pdu)| pdu.to_room_event()) + .collect(); + + resp.start = from.to_string(); + resp.end = next_token.map(|count| count.to_string()); + resp.chunk = events_after; + } + ruma::api::client::Direction::Backward => { + let events_before: Vec<_> = services() + .rooms + .timeline + .pdus_until(sender_user, &body.room_id, from)? + .take(limit) + .filter_map(|r| r.ok()) // Filter out buggy events + .filter_map(|(pdu_id, pdu)| { + services() + .rooms + .timeline + .pdu_count(&pdu_id) + .map(|pdu_count| (pdu_count, pdu)) + .ok() + }) + .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to` + .collect(); + + for (_, event) in &events_before { + /* TODO: Remove this when these are resolved: + * https://github.com/vector-im/element-android/issues/3417 + * https://github.com/vector-im/element-web/issues/21034 + if !services().rooms.lazy_loading.lazy_load_was_sent_before( + sender_user, + sender_device, + &body.room_id, + &event.sender, + )? { + lazy_loaded.insert(event.sender.clone()); + } + */ + lazy_loaded.insert(event.sender.clone()); + } + + next_token = events_before.last().map(|(count, _)| count).copied(); + + let events_before: Vec<_> = events_before + .into_iter() + .map(|(_, pdu)| pdu.to_room_event()) + .collect(); + + resp.start = from.to_string(); + resp.end = next_token.map(|count| count.to_string()); + resp.chunk = events_before; + } + } + + resp.state = Vec::new(); + for ll_id in &lazy_loaded { + if let Some(member_event) = services().rooms.state_accessor.room_state_get( + &body.room_id, + &StateEventType::RoomMember, + ll_id.as_str(), + )? { + resp.state.push(member_event.to_state_event()); + } + } + + // TODO: enable again when we are sure clients can handle it + /* + if let Some(next_token) = next_token { + services().rooms.lazy_loading.lazy_load_mark_sent( + sender_user, + sender_device, + &body.room_id, + lazy_loaded, + next_token, + ); + } + */ + + Ok(resp) +} diff --git a/src/client_server/mod.rs b/src/api/client_server/mod.rs similarity index 70% rename from src/client_server/mod.rs rename to src/api/client_server/mod.rs index e0c340f1..65b7a100 100644 --- a/src/client_server/mod.rs +++ b/src/api/client_server/mod.rs @@ -16,6 +16,7 @@ mod profile; mod push; mod read_marker; mod redact; +mod report; mod room; mod search; mod session; @@ -47,6 +48,7 @@ pub use profile::*; pub use push::*; pub use read_marker::*; pub use redact::*; +pub use report::*; pub use room::*; pub use search::*; pub use session::*; @@ -60,23 +62,7 @@ pub use unversioned::*; pub use user_directory::*; pub use voip::*; -#[cfg(not(feature = "conduit_bin"))] -use super::State; -#[cfg(feature = "conduit_bin")] -use { - crate::ConduitResult, rocket::options, ruma::api::client::r0::to_device::send_event_to_device, -}; - pub const DEVICE_ID_LENGTH: usize = 10; pub const TOKEN_LENGTH: usize = 256; pub const SESSION_ID_LENGTH: usize = 256; - -/// # `OPTIONS` -/// -/// Web clients use this to get CORS headers. 
-#[cfg(feature = "conduit_bin")] -#[options("/<_..>")] -#[tracing::instrument] -pub async fn options_route() -> ConduitResult { - Ok(send_event_to_device::Response {}.into()) -} +pub const AUTO_GEN_PASSWORD_LENGTH: usize = 15; diff --git a/src/client_server/presence.rs b/src/api/client_server/presence.rs similarity index 62% rename from src/client_server/presence.rs rename to src/api/client_server/presence.rs index aaa78a92..dfac3dbd 100644 --- a/src/client_server/presence.rs +++ b/src/api/client_server/presence.rs @@ -1,35 +1,26 @@ -use crate::{database::DatabaseGuard, utils, ConduitResult, Ruma}; -use ruma::api::client::r0::presence::{get_presence, set_presence}; -use std::{convert::TryInto, time::Duration}; - -#[cfg(feature = "conduit_bin")] -use rocket::{get, put}; +use crate::{services, utils, Result, Ruma}; +use ruma::api::client::presence::{get_presence, set_presence}; +use std::time::Duration; /// # `PUT /_matrix/client/r0/presence/{userId}/status` /// /// Sets the presence state of the sender user. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/presence/<_>/status", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn set_presence_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - for room_id in db.rooms.rooms_joined(sender_user) { + for room_id in services().rooms.state_cache.rooms_joined(sender_user) { let room_id = room_id?; - db.rooms.edus.update_presence( + services().rooms.edus.presence.update_presence( sender_user, &room_id, ruma::events::presence::PresenceEvent { content: ruma::events::presence::PresenceEventContent { - avatar_url: db.users.avatar_url(sender_user)?, + avatar_url: services().users.avatar_url(sender_user)?, currently_active: None, - displayname: db.users.displayname(sender_user)?, + displayname: services().users.displayname(sender_user)?, last_active_ago: Some( utils::millis_since_unix_epoch() .try_into() @@ -40,13 +31,10 @@ pub async fn set_presence_route( }, sender: sender_user.clone(), }, - &db.globals, )?; } - db.flush()?; - - Ok(set_presence::Response {}.into()) + Ok(set_presence::v3::Response {}) } /// # `GET /_matrix/client/r0/presence/{userId}/status` @@ -54,28 +42,24 @@ pub async fn set_presence_route( /// Gets the presence state of the given user. /// /// - Only works if you share a room with the user -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/presence/<_>/status", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn get_presence_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let mut presence_event = None; - for room_id in db + for room_id in services() .rooms + .user .get_shared_rooms(vec![sender_user.clone(), body.user_id.clone()])? { let room_id = room_id?; - if let Some(presence) = db + if let Some(presence) = services() .rooms .edus + .presence .get_last_presence_event(sender_user, &room_id)? { presence_event = Some(presence); @@ -84,7 +68,7 @@ pub async fn get_presence_route( } if let Some(presence) = presence_event { - Ok(get_presence::Response { + Ok(get_presence::v3::Response { // TODO: Should ruma just use the presenceeventcontent type here? 
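// The fields below mirror the last presence event found in a room shared with the
// requesting user; `last_active_ago` is converted from milliseconds into a Duration.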
status_msg: presence.content.status_msg, currently_active: presence.content.currently_active, @@ -93,8 +77,7 @@ pub async fn get_presence_route( .last_active_ago .map(|millis| Duration::from_millis(millis.into())), presence: presence.content.presence, - } - .into()) + }) } else { todo!(); } diff --git a/src/client_server/profile.rs b/src/api/client_server/profile.rs similarity index 65% rename from src/client_server/profile.rs rename to src/api/client_server/profile.rs index 29b1ae87..5ace1777 100644 --- a/src/client_server/profile.rs +++ b/src/api/client_server/profile.rs @@ -1,58 +1,53 @@ -use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma}; +use crate::{service::pdu::PduBuilder, services, utils, Error, Result, Ruma}; use ruma::{ api::{ client::{ error::ErrorKind, - r0::profile::{ + profile::{ get_avatar_url, get_display_name, get_profile, set_avatar_url, set_display_name, }, }, federation::{self, query::get_profile_information::v1::ProfileField}, }, - events::{room::member::RoomMemberEventContent, EventType}, + events::{room::member::RoomMemberEventContent, RoomEventType, StateEventType}, }; use serde_json::value::to_raw_value; -use std::{convert::TryInto, sync::Arc}; - -#[cfg(feature = "conduit_bin")] -use rocket::{get, put}; +use std::sync::Arc; /// # `PUT /_matrix/client/r0/profile/{userId}/displayname` /// /// Updates the displayname. /// /// - Also makes sure other users receive the update using presence EDUs -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/profile/<_>/displayname", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn set_displayname_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - db.users + services() + .users .set_displayname(sender_user, body.displayname.clone())?; // Send a new membership event and presence update into all joined rooms - let all_rooms_joined: Vec<_> = db + let all_rooms_joined: Vec<_> = services() .rooms + .state_cache .rooms_joined(sender_user) .filter_map(|r| r.ok()) .map(|room_id| { Ok::<_, Error>(( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { displayname: body.displayname.clone(), ..serde_json::from_str( - db.rooms + services() + .rooms + .state_accessor .room_state_get( &room_id, - &EventType::RoomMember, - &sender_user.to_string(), + &StateEventType::RoomMember, + sender_user.as_str(), )? 
.ok_or_else(|| { Error::bad_database( @@ -78,7 +73,8 @@ pub async fn set_displayname_route( for (pdu_builder, room_id) in all_rooms_joined { let mutex_state = Arc::clone( - db.globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -87,19 +83,22 @@ pub async fn set_displayname_route( ); let state_lock = mutex_state.lock().await; - let _ = db - .rooms - .build_and_append_pdu(pdu_builder, sender_user, &room_id, &db, &state_lock); + let _ = services().rooms.timeline.build_and_append_pdu( + pdu_builder, + sender_user, + &room_id, + &state_lock, + ); // Presence update - db.rooms.edus.update_presence( + services().rooms.edus.presence.update_presence( sender_user, &room_id, ruma::events::presence::PresenceEvent { content: ruma::events::presence::PresenceEventContent { - avatar_url: db.users.avatar_url(sender_user)?, + avatar_url: services().users.avatar_url(sender_user)?, currently_active: None, - displayname: db.users.displayname(sender_user)?, + displayname: services().users.displayname(sender_user)?, last_active_ago: Some( utils::millis_since_unix_epoch() .try_into() @@ -110,13 +109,10 @@ pub async fn set_displayname_route( }, sender: sender_user.clone(), }, - &db.globals, )?; } - db.flush()?; - - Ok(set_display_name::Response {}.into()) + Ok(set_display_name::v3::Response {}) } /// # `GET /_matrix/client/r0/profile/{userId}/displayname` @@ -124,20 +120,13 @@ pub async fn set_displayname_route( /// Returns the displayname of the user. /// /// - If user is on another server: Fetches displayname over federation -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/profile/<_>/displayname", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn get_displayname_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - if body.user_id.server_name() != db.globals.server_name() { - let response = db + body: Ruma, +) -> Result { + if body.user_id.server_name() != services().globals.server_name() { + let response = services() .sending .send_federation_request( - &db.globals, body.user_id.server_name(), federation::query::get_profile_information::v1::Request { user_id: &body.user_id, @@ -146,16 +135,14 @@ pub async fn get_displayname_route( ) .await?; - return Ok(get_display_name::Response { + return Ok(get_display_name::v3::Response { displayname: response.displayname, - } - .into()); + }); } - Ok(get_display_name::Response { - displayname: db.users.displayname(&body.user_id)?, - } - .into()) + Ok(get_display_name::v3::Response { + displayname: services().users.displayname(&body.user_id)?, + }) } /// # `PUT /_matrix/client/r0/profile/{userId}/avatar_url` @@ -163,39 +150,39 @@ pub async fn get_displayname_route( /// Updates the avatar_url and blurhash. 
/// /// - Also makes sure other users receive the update using presence EDUs -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/profile/<_>/avatar_url", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn set_avatar_url_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - db.users + services() + .users .set_avatar_url(sender_user, body.avatar_url.clone())?; - db.users.set_blurhash(sender_user, body.blurhash.clone())?; + services() + .users + .set_blurhash(sender_user, body.blurhash.clone())?; // Send a new membership event and presence update into all joined rooms - let all_joined_rooms: Vec<_> = db + let all_joined_rooms: Vec<_> = services() .rooms + .state_cache .rooms_joined(sender_user) .filter_map(|r| r.ok()) .map(|room_id| { Ok::<_, Error>(( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { avatar_url: body.avatar_url.clone(), ..serde_json::from_str( - db.rooms + services() + .rooms + .state_accessor .room_state_get( &room_id, - &EventType::RoomMember, - &sender_user.to_string(), + &StateEventType::RoomMember, + sender_user.as_str(), )? .ok_or_else(|| { Error::bad_database( @@ -221,7 +208,8 @@ pub async fn set_avatar_url_route( for (pdu_builder, room_id) in all_joined_rooms { let mutex_state = Arc::clone( - db.globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -230,19 +218,22 @@ pub async fn set_avatar_url_route( ); let state_lock = mutex_state.lock().await; - let _ = db - .rooms - .build_and_append_pdu(pdu_builder, sender_user, &room_id, &db, &state_lock); + let _ = services().rooms.timeline.build_and_append_pdu( + pdu_builder, + sender_user, + &room_id, + &state_lock, + ); // Presence update - db.rooms.edus.update_presence( + services().rooms.edus.presence.update_presence( sender_user, &room_id, ruma::events::presence::PresenceEvent { content: ruma::events::presence::PresenceEventContent { - avatar_url: db.users.avatar_url(sender_user)?, + avatar_url: services().users.avatar_url(sender_user)?, currently_active: None, - displayname: db.users.displayname(sender_user)?, + displayname: services().users.displayname(sender_user)?, last_active_ago: Some( utils::millis_since_unix_epoch() .try_into() @@ -253,13 +244,10 @@ pub async fn set_avatar_url_route( }, sender: sender_user.clone(), }, - &db.globals, )?; } - db.flush()?; - - Ok(set_avatar_url::Response {}.into()) + Ok(set_avatar_url::v3::Response {}) } /// # `GET /_matrix/client/r0/profile/{userId}/avatar_url` @@ -267,20 +255,13 @@ pub async fn set_avatar_url_route( /// Returns the avatar_url and blurhash of the user. 
/// /// - If user is on another server: Fetches avatar_url and blurhash over federation -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/profile/<_>/avatar_url", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn get_avatar_url_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - if body.user_id.server_name() != db.globals.server_name() { - let response = db + body: Ruma, +) -> Result { + if body.user_id.server_name() != services().globals.server_name() { + let response = services() .sending .send_federation_request( - &db.globals, body.user_id.server_name(), federation::query::get_profile_information::v1::Request { user_id: &body.user_id, @@ -289,18 +270,16 @@ pub async fn get_avatar_url_route( ) .await?; - return Ok(get_avatar_url::Response { + return Ok(get_avatar_url::v3::Response { avatar_url: response.avatar_url, blurhash: response.blurhash, - } - .into()); + }); } - Ok(get_avatar_url::Response { - avatar_url: db.users.avatar_url(&body.user_id)?, - blurhash: db.users.blurhash(&body.user_id)?, - } - .into()) + Ok(get_avatar_url::v3::Response { + avatar_url: services().users.avatar_url(&body.user_id)?, + blurhash: services().users.blurhash(&body.user_id)?, + }) } /// # `GET /_matrix/client/r0/profile/{userId}` @@ -308,20 +287,13 @@ pub async fn get_avatar_url_route( /// Returns the displayname, avatar_url and blurhash of the user. /// /// - If user is on another server: Fetches profile over federation -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/profile/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn get_profile_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - if body.user_id.server_name() != db.globals.server_name() { - let response = db + body: Ruma, +) -> Result { + if body.user_id.server_name() != services().globals.server_name() { + let response = services() .sending .send_federation_request( - &db.globals, body.user_id.server_name(), federation::query::get_profile_information::v1::Request { user_id: &body.user_id, @@ -330,15 +302,14 @@ pub async fn get_profile_route( ) .await?; - return Ok(get_profile::Response { + return Ok(get_profile::v3::Response { displayname: response.displayname, avatar_url: response.avatar_url, blurhash: response.blurhash, - } - .into()); + }); } - if !db.users.exists(&body.user_id)? { + if !services().users.exists(&body.user_id)? 
{ // Return 404 if this user doesn't exist return Err(Error::BadRequest( ErrorKind::NotFound, @@ -346,10 +317,9 @@ pub async fn get_profile_route( )); } - Ok(get_profile::Response { - avatar_url: db.users.avatar_url(&body.user_id)?, - blurhash: db.users.blurhash(&body.user_id)?, - displayname: db.users.displayname(&body.user_id)?, - } - .into()) + Ok(get_profile::v3::Response { + avatar_url: services().users.avatar_url(&body.user_id)?, + blurhash: services().users.blurhash(&body.user_id)?, + displayname: services().users.displayname(&body.user_id)?, + }) } diff --git a/src/client_server/push.rs b/src/api/client_server/push.rs similarity index 70% rename from src/client_server/push.rs rename to src/api/client_server/push.rs index 64f27f1c..2301ddca 100644 --- a/src/client_server/push.rs +++ b/src/api/client_server/push.rs @@ -1,71 +1,71 @@ -use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; +use crate::{services, Error, Result, Ruma}; use ruma::{ api::client::{ error::ErrorKind, - r0::push::{ + push::{ delete_pushrule, get_pushers, get_pushrule, get_pushrule_actions, get_pushrule_enabled, get_pushrules_all, set_pusher, set_pushrule, set_pushrule_actions, set_pushrule_enabled, RuleKind, }, }, - events::{push_rules::PushRulesEvent, EventType}, + events::{push_rules::PushRulesEvent, GlobalAccountDataEventType}, push::{ConditionalPushRuleInit, PatternedPushRuleInit, SimplePushRuleInit}, }; -#[cfg(feature = "conduit_bin")] -use rocket::{delete, get, post, put}; - /// # `GET /_matrix/client/r0/pushrules` /// /// Retrieves the push rules event for this user. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/pushrules", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn get_pushrules_all_route( - db: DatabaseGuard, - body: Ruma, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event: PushRulesEvent = db + let event = services() .account_data - .get(None, sender_user, EventType::PushRules)? + .get( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + )? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", ))?; - Ok(get_pushrules_all::Response { - global: event.content.global, - } - .into()) + let account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))? + .content; + + Ok(get_pushrules_all::v3::Response { + global: account_data.global, + }) } /// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` /// /// Retrieves a single specified push rule for this user. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn get_pushrule_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let event: PushRulesEvent = db + let event = services() .account_data - .get(None, sender_user, EventType::PushRules)? + .get( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + )? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", ))?; - let global = event.content.global; + let account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))? 
+ .content; + + let global = account_data.global; let rule = match body.kind { RuleKind::Override => global .override_ @@ -91,7 +91,7 @@ pub async fn get_pushrule_route( }; if let Some(rule) = rule { - Ok(get_pushrule::Response { rule }.into()) + Ok(get_pushrule::v3::Response { rule }) } else { Err(Error::BadRequest( ErrorKind::NotFound, @@ -103,17 +103,11 @@ pub async fn get_pushrule_route( /// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` /// /// Creates a single specified push rule for this user. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") -)] -#[tracing::instrument(skip(db, req))] pub async fn set_pushrule_route( - db: DatabaseGuard, - req: Ruma>, -) -> ConduitResult { - let sender_user = req.sender_user.as_ref().expect("user is authenticated"); - let body = req.body; + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let body = body.body; if body.scope != "global" { return Err(Error::BadRequest( @@ -122,15 +116,22 @@ pub async fn set_pushrule_route( )); } - let mut event: PushRulesEvent = db + let event = services() .account_data - .get(None, sender_user, EventType::PushRules)? + .get( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + )? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", ))?; - let global = &mut event.content.global; + let mut account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))?; + + let global = &mut account_data.content.global; match body.kind { RuleKind::Override => { global.override_.replace( @@ -193,26 +194,22 @@ pub async fn set_pushrule_route( _ => {} } - db.account_data - .update(None, sender_user, EventType::PushRules, &event, &db.globals)?; - - db.flush()?; + services().account_data.update( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &serde_json::to_value(account_data).expect("to json value always works"), + )?; - Ok(set_pushrule::Response {}.into()) + Ok(set_pushrule::v3::Response {}) } /// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions` /// /// Gets the actions of a single specified push rule for this user. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/pushrules/<_>/<_>/<_>/actions", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn get_pushrule_actions_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if body.scope != "global" { @@ -222,15 +219,23 @@ pub async fn get_pushrule_actions_route( )); } - let mut event: PushRulesEvent = db + let event = services() .account_data - .get(None, sender_user, EventType::PushRules)? + .get( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + )? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", ))?; - let global = &mut event.content.global; + let account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))? 
+ .content; + + let global = account_data.global; let actions = match body.kind { RuleKind::Override => global .override_ @@ -255,26 +260,17 @@ pub async fn get_pushrule_actions_route( _ => None, }; - db.flush()?; - - Ok(get_pushrule_actions::Response { + Ok(get_pushrule_actions::v3::Response { actions: actions.unwrap_or_default(), - } - .into()) + }) } /// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/actions` /// /// Sets the actions of a single specified push rule for this user. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/pushrules/<_>/<_>/<_>/actions", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn set_pushrule_actions_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if body.scope != "global" { @@ -284,15 +280,22 @@ pub async fn set_pushrule_actions_route( )); } - let mut event: PushRulesEvent = db + let event = services() .account_data - .get(None, sender_user, EventType::PushRules)? + .get( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + )? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", ))?; - let global = &mut event.content.global; + let mut account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))?; + + let global = &mut account_data.content.global; match body.kind { RuleKind::Override => { if let Some(mut rule) = global.override_.get(body.rule_id.as_str()).cloned() { @@ -327,26 +330,22 @@ pub async fn set_pushrule_actions_route( _ => {} }; - db.account_data - .update(None, sender_user, EventType::PushRules, &event, &db.globals)?; - - db.flush()?; + services().account_data.update( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &serde_json::to_value(account_data).expect("to json value always works"), + )?; - Ok(set_pushrule_actions::Response {}.into()) + Ok(set_pushrule_actions::v3::Response {}) } /// # `GET /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled` /// /// Gets the enabled status of a single specified push rule for this user. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/pushrules/<_>/<_>/<_>/enabled", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn get_pushrule_enabled_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if body.scope != "global" { @@ -356,15 +355,22 @@ pub async fn get_pushrule_enabled_route( )); } - let mut event: PushRulesEvent = db + let event = services() .account_data - .get(None, sender_user, EventType::PushRules)? + .get( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + )? 
.ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", ))?; - let global = &mut event.content.global; + let account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))?; + + let global = account_data.content.global; let enabled = match body.kind { RuleKind::Override => global .override_ @@ -394,23 +400,15 @@ pub async fn get_pushrule_enabled_route( _ => false, }; - db.flush()?; - - Ok(get_pushrule_enabled::Response { enabled }.into()) + Ok(get_pushrule_enabled::v3::Response { enabled }) } /// # `PUT /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}/enabled` /// /// Sets the enabled status of a single specified push rule for this user. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/pushrules/<_>/<_>/<_>/enabled", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn set_pushrule_enabled_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if body.scope != "global" { @@ -420,15 +418,22 @@ pub async fn set_pushrule_enabled_route( )); } - let mut event: PushRulesEvent = db + let event = services() .account_data - .get(None, sender_user, EventType::PushRules)? + .get( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + )? .ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", ))?; - let global = &mut event.content.global; + let mut account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))?; + + let global = &mut account_data.content.global; match body.kind { RuleKind::Override => { if let Some(mut rule) = global.override_.get(body.rule_id.as_str()).cloned() { @@ -468,26 +473,22 @@ pub async fn set_pushrule_enabled_route( _ => {} } - db.account_data - .update(None, sender_user, EventType::PushRules, &event, &db.globals)?; + services().account_data.update( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &serde_json::to_value(account_data).expect("to json value always works"), + )?; - db.flush()?; - - Ok(set_pushrule_enabled::Response {}.into()) + Ok(set_pushrule_enabled::v3::Response {}) } /// # `DELETE /_matrix/client/r0/pushrules/{scope}/{kind}/{ruleId}` /// /// Deletes a single specified push rule for this user. -#[cfg_attr( - feature = "conduit_bin", - delete("/_matrix/client/r0/pushrules/<_>/<_>/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn delete_pushrule_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); if body.scope != "global" { @@ -497,15 +498,22 @@ pub async fn delete_pushrule_route( )); } - let mut event: PushRulesEvent = db + let event = services() .account_data - .get(None, sender_user, EventType::PushRules)? + .get( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + )? 
.ok_or(Error::BadRequest( ErrorKind::NotFound, "PushRules event not found.", ))?; - let global = &mut event.content.global; + let mut account_data = serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db."))?; + + let global = &mut account_data.content.global; match body.kind { RuleKind::Override => { if let Some(rule) = global.override_.get(body.rule_id.as_str()).cloned() { @@ -535,32 +543,27 @@ pub async fn delete_pushrule_route( _ => {} } - db.account_data - .update(None, sender_user, EventType::PushRules, &event, &db.globals)?; - - db.flush()?; + services().account_data.update( + None, + sender_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &serde_json::to_value(account_data).expect("to json value always works"), + )?; - Ok(delete_pushrule::Response {}.into()) + Ok(delete_pushrule::v3::Response {}) } /// # `GET /_matrix/client/r0/pushers` /// /// Gets all currently active pushers for the sender user. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/pushers", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn get_pushers_route( - db: DatabaseGuard, - body: Ruma, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - Ok(get_pushers::Response { - pushers: db.pusher.get_pushers(sender_user)?, - } - .into()) + Ok(get_pushers::v3::Response { + pushers: services().pusher.get_pushers(sender_user)?, + }) } /// # `POST /_matrix/client/r0/pushers/set` @@ -568,21 +571,13 @@ pub async fn get_pushers_route( /// Adds a pusher for the sender user. /// /// - TODO: Handle `append` -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/pushers/set", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn set_pushers_route( - db: DatabaseGuard, - body: Ruma, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let pusher = body.pusher.clone(); - db.pusher.set_pusher(sender_user, pusher)?; - - db.flush()?; + services().pusher.set_pusher(sender_user, pusher)?; - Ok(set_pusher::Response::default().into()) + Ok(set_pusher::v3::Response::default()) } diff --git a/src/api/client_server/read_marker.rs b/src/api/client_server/read_marker.rs new file mode 100644 index 00000000..d529c6a8 --- /dev/null +++ b/src/api/client_server/read_marker.rs @@ -0,0 +1,162 @@ +use crate::{services, Error, Result, Ruma}; +use ruma::{ + api::client::{error::ErrorKind, read_marker::set_read_marker, receipt::create_receipt}, + events::{ + receipt::{ReceiptThread, ReceiptType}, + RoomAccountDataEventType, + }, + MilliSecondsSinceUnixEpoch, +}; +use std::collections::BTreeMap; + +/// # `POST /_matrix/client/r0/rooms/{roomId}/read_markers` +/// +/// Sets different types of read markers. 
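// A minimal sketch (not from this patch) of the account-data round trip the push-rule
// handlers above now perform: the store returns the event as raw JSON, the handler
// deserializes it into a typed value, mutates `content.global`, and writes back a
// `serde_json::Value`. `PushRulesLike`/`ContentLike` are simplified stand-ins for ruma's
// `PushRulesEvent` and its content, and the rule list stands in for `Ruleset`.
use serde::{Deserialize, Serialize};

#[derive(Deserialize, Serialize)]
struct PushRulesLike {
    content: ContentLike,
}

#[derive(Deserialize, Serialize)]
struct ContentLike {
    global: Vec<String>,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // What an `account_data.get(..)`-style call would hand back as a raw event:
    let raw: Box<serde_json::value::RawValue> =
        serde_json::from_str(r#"{"content":{"global":[".m.rule.master"]}}"#)?;

    // Deserialize, mutate, re-serialize -- mirroring the new handler flow:
    let mut account_data: PushRulesLike = serde_json::from_str(raw.get())?;
    account_data.content.global.push("my.custom.rule".to_owned());
    let value = serde_json::to_value(&account_data)?;
    println!("{value}");
    Ok(())
}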
+/// +/// - Updates fully-read account data event to `fully_read` +/// - If `read_receipt` is set: Update private marker and public read receipt EDU +pub async fn set_read_marker_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if let Some(fully_read) = &body.fully_read { + let fully_read_event = ruma::events::fully_read::FullyReadEvent { + content: ruma::events::fully_read::FullyReadEventContent { + event_id: fully_read.clone(), + }, + }; + services().account_data.update( + Some(&body.room_id), + sender_user, + RoomAccountDataEventType::FullyRead, + &serde_json::to_value(fully_read_event).expect("to json value always works"), + )?; + } + + if body.private_read_receipt.is_some() || body.read_receipt.is_some() { + services() + .rooms + .user + .reset_notification_counts(sender_user, &body.room_id)?; + } + + if let Some(event) = &body.private_read_receipt { + services().rooms.edus.read_receipt.private_read_set( + &body.room_id, + sender_user, + services() + .rooms + .timeline + .get_pdu_count(event)? + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Event does not exist.", + ))?, + )?; + } + + if let Some(event) = &body.read_receipt { + let mut user_receipts = BTreeMap::new(); + user_receipts.insert( + sender_user.clone(), + ruma::events::receipt::Receipt { + ts: Some(MilliSecondsSinceUnixEpoch::now()), + thread: ReceiptThread::Unthreaded, + }, + ); + + let mut receipts = BTreeMap::new(); + receipts.insert(ReceiptType::Read, user_receipts); + + let mut receipt_content = BTreeMap::new(); + receipt_content.insert(event.to_owned(), receipts); + + services().rooms.edus.read_receipt.readreceipt_update( + sender_user, + &body.room_id, + ruma::events::receipt::ReceiptEvent { + content: ruma::events::receipt::ReceiptEventContent(receipt_content), + room_id: body.room_id.clone(), + }, + )?; + } + + Ok(set_read_marker::v3::Response {}) +} + +/// # `POST /_matrix/client/r0/rooms/{roomId}/receipt/{receiptType}/{eventId}` +/// +/// Sets private read marker and public read receipt EDU. 
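// A small sketch (not from this patch) of the wire shape produced by the nested BTreeMaps in
// `set_read_marker_route` above and `create_receipt_route` below: event id -> receipt type ->
// user id -> receipt. The ids and timestamp are made up; `ReceiptThread::Unthreaded` in the
// handlers corresponds to omitting a thread id, while threaded receipts would carry an extra
// `thread_id` field.
fn main() {
    let receipt_content = serde_json::json!({
        "$143273582443PhrSn:example.org": {
            "m.read": {
                "@alice:example.org": {
                    "ts": 1_661_000_000_000u64
                }
            }
        }
    });
    println!("{}", serde_json::to_string_pretty(&receipt_content).unwrap());
}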
+pub async fn create_receipt_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if matches!( + &body.receipt_type, + create_receipt::v3::ReceiptType::Read | create_receipt::v3::ReceiptType::ReadPrivate + ) { + services() + .rooms + .user + .reset_notification_counts(sender_user, &body.room_id)?; + } + + match body.receipt_type { + create_receipt::v3::ReceiptType::FullyRead => { + let fully_read_event = ruma::events::fully_read::FullyReadEvent { + content: ruma::events::fully_read::FullyReadEventContent { + event_id: body.event_id.clone(), + }, + }; + services().account_data.update( + Some(&body.room_id), + sender_user, + RoomAccountDataEventType::FullyRead, + &serde_json::to_value(fully_read_event).expect("to json value always works"), + )?; + } + create_receipt::v3::ReceiptType::Read => { + let mut user_receipts = BTreeMap::new(); + user_receipts.insert( + sender_user.clone(), + ruma::events::receipt::Receipt { + ts: Some(MilliSecondsSinceUnixEpoch::now()), + thread: ReceiptThread::Unthreaded, + }, + ); + let mut receipts = BTreeMap::new(); + receipts.insert(ReceiptType::Read, user_receipts); + + let mut receipt_content = BTreeMap::new(); + receipt_content.insert(body.event_id.to_owned(), receipts); + + services().rooms.edus.read_receipt.readreceipt_update( + sender_user, + &body.room_id, + ruma::events::receipt::ReceiptEvent { + content: ruma::events::receipt::ReceiptEventContent(receipt_content), + room_id: body.room_id.clone(), + }, + )?; + } + create_receipt::v3::ReceiptType::ReadPrivate => { + services().rooms.edus.read_receipt.private_read_set( + &body.room_id, + sender_user, + services() + .rooms + .timeline + .get_pdu_count(&body.event_id)? + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Event does not exist.", + ))?, + )?; + } + _ => return Err(Error::bad_database("Unsupported receipt type")), + } + + Ok(create_receipt::v3::Response {}) +} diff --git a/src/client_server/redact.rs b/src/api/client_server/redact.rs similarity index 55% rename from src/client_server/redact.rs rename to src/api/client_server/redact.rs index 7435c5c5..ab586c01 100644 --- a/src/client_server/redact.rs +++ b/src/api/client_server/redact.rs @@ -1,13 +1,11 @@ use std::sync::Arc; -use crate::{database::DatabaseGuard, pdu::PduBuilder, ConduitResult, Ruma}; +use crate::{service::pdu::PduBuilder, services, Result, Ruma}; use ruma::{ - api::client::r0::redact::redact_event, - events::{room::redaction::RoomRedactionEventContent, EventType}, + api::client::redact::redact_event, + events::{room::redaction::RoomRedactionEventContent, RoomEventType}, }; -#[cfg(feature = "conduit_bin")] -use rocket::put; use serde_json::value::to_raw_value; /// # `PUT /_matrix/client/r0/rooms/{roomId}/redact/{eventId}/{txnId}` @@ -15,19 +13,15 @@ use serde_json::value::to_raw_value; /// Tries to send a redaction event into the room. 
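// A tiny sketch (not from this patch) of the ownership step at the end of the redaction
// handler below: `build_and_append_pdu` now returns the event id behind an `Arc`, so the
// route dereferences and re-owns it before placing it in the response
// (`(*event_id).to_owned()`). The same pattern with `Arc<str>`:
use std::sync::Arc;

fn main() {
    let shared: Arc<str> = Arc::from("$somehash:example.org");
    let owned: String = (*shared).to_owned(); // deref to `str`, then clone into an owned value
    println!("{owned}");
}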
/// /// - TODO: Handle txn id -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/rooms/<_>/redact/<_>/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn redact_event_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let body = body.body; let mutex_state = Arc::clone( - db.globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -36,26 +30,24 @@ pub async fn redact_event_route( ); let state_lock = mutex_state.lock().await; - let event_id = db.rooms.build_and_append_pdu( + let event_id = services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomRedaction, + event_type: RoomEventType::RoomRedaction, content: to_raw_value(&RoomRedactionEventContent { reason: body.reason.clone(), }) .expect("event is valid, we just created it"), unsigned: None, state_key: None, - redacts: Some(body.event_id.clone()), + redacts: Some(body.event_id.into()), }, sender_user, &body.room_id, - &db, &state_lock, )?; drop(state_lock); - db.flush()?; - - Ok(redact_event::Response { event_id }.into()) + let event_id = (*event_id).to_owned(); + Ok(redact_event::v3::Response { event_id }) } diff --git a/src/api/client_server/report.rs b/src/api/client_server/report.rs new file mode 100644 index 00000000..e45820e8 --- /dev/null +++ b/src/api/client_server/report.rs @@ -0,0 +1,69 @@ +use crate::{services, utils::HtmlEscape, Error, Result, Ruma}; +use ruma::{ + api::client::{error::ErrorKind, room::report_content}, + events::room::message, + int, +}; + +/// # `POST /_matrix/client/r0/rooms/{roomId}/report/{eventId}` +/// +/// Reports an inappropriate event to homeserver admins +/// +pub async fn report_event_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let pdu = match services().rooms.timeline.get_pdu(&body.event_id)? { + Some(pdu) => pdu, + _ => { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid Event ID", + )) + } + }; + + if let Some(true) = body.score.map(|s| s > int!(0) || s < int!(-100)) { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Invalid score, must be within 0 to -100", + )); + }; + + if let Some(true) = body.reason.clone().map(|s| s.chars().count() > 250) { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Reason too long, should be 250 characters or fewer", + )); + }; + + services().admin + .send_message(message::RoomMessageEventContent::text_html( + format!( + "Report received from: {}\n\n\ + Event ID: {:?}\n\ + Room ID: {:?}\n\ + Sent By: {:?}\n\n\ + Report Score: {:?}\n\ + Report Reason: {:?}", + sender_user, pdu.event_id, pdu.room_id, pdu.sender, body.score, body.reason + ), + format!( + "
<details><summary>Report received from: <a href=\"https://matrix.to/#/{0:?}\">{0:?}\
+ </a></summary><ul><li>Event Info<ul><li>Event ID: <code>{1:?}</code>\
+ <a href=\"https://matrix.to/#/{2:?}/{1:?}\">🔗</a></li><li>Room ID: <code>{2:?}</code>\
+ </li><li>Sent By: <a href=\"https://matrix.to/#/{3:?}\">{3:?}</a></li></ul></li><li>\
+ Report Info<ul><li>Report Score: {4:?}</li><li>Report Reason: {5}</li></ul></li>\
+ </ul></details>\ +
", + sender_user, + pdu.event_id, + pdu.room_id, + pdu.sender, + body.score, + HtmlEscape(body.reason.as_deref().unwrap_or("")) + ), + )); + + Ok(report_content::v3::Response {}) +} diff --git a/src/client_server/room.rs b/src/api/client_server/room.rs similarity index 56% rename from src/client_server/room.rs rename to src/api/client_server/room.rs index 2d1fe237..097f0e14 100644 --- a/src/client_server/room.rs +++ b/src/api/client_server/room.rs @@ -1,11 +1,10 @@ use crate::{ - client_server::invite_helper, database::DatabaseGuard, pdu::PduBuilder, ConduitResult, Error, - Ruma, + api::client_server::invite_helper, service::pdu::PduBuilder, services, Error, Result, Ruma, }; use ruma::{ api::client::{ error::ErrorKind, - r0::room::{self, aliases, create_room, get_room_event, upgrade_room}, + room::{self, aliases, create_room, get_room_event, upgrade_room}, }, events::{ room::{ @@ -20,18 +19,16 @@ use ruma::{ tombstone::RoomTombstoneEventContent, topic::RoomTopicEventContent, }, - EventType, + RoomEventType, StateEventType, }, + int, serde::JsonObject, - RoomAliasId, RoomId, RoomVersionId, + CanonicalJsonObject, OwnedRoomAliasId, RoomAliasId, RoomId, }; -use serde_json::value::to_raw_value; -use std::{cmp::max, collections::BTreeMap, convert::TryFrom, sync::Arc}; +use serde_json::{json, value::to_raw_value}; +use std::{cmp::max, collections::BTreeMap, sync::Arc}; use tracing::{info, warn}; -#[cfg(feature = "conduit_bin")] -use rocket::{get, post}; - /// # `POST /_matrix/client/r0/createRoom` /// /// Creates a new room. @@ -48,23 +45,20 @@ use rocket::{get, post}; /// - Send events listed in initial state /// - Send events implied by `name` and `topic` /// - Send invite events -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/createRoom", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn create_room_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { + use create_room::v3::RoomPreset; + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let room_id = RoomId::new(db.globals.server_name()); + let room_id = RoomId::new(services().globals.server_name()); - db.rooms.get_or_create_shortroomid(&room_id, &db.globals)?; + services().rooms.short.get_or_create_shortroomid(&room_id)?; let mutex_state = Arc::clone( - db.globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -73,9 +67,9 @@ pub async fn create_room_route( ); let state_lock = mutex_state.lock().await; - if !db.globals.allow_room_creation() + if !services().globals.allow_room_creation() && !body.from_appservice - && !db.users.is_admin(sender_user, &db.rooms, &db.globals)? + && !services().users.is_admin(sender_user)? { return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -83,16 +77,24 @@ pub async fn create_room_route( )); } - let alias: Option = + let alias: Option = body.room_alias_name .as_ref() .map_or(Ok(None), |localpart| { // TODO: Check for invalid characters and maximum length - let alias = - RoomAliasId::try_from(format!("#{}:{}", localpart, db.globals.server_name())) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; - - if db.rooms.id_from_alias(&alias)?.is_some() { + let alias = RoomAliasId::parse(format!( + "#{}:{}", + localpart, + services().globals.server_name() + )) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; + + if services() + .rooms + .alias + .resolve_local_alias(&alias)? 
+ .is_some() + { Err(Error::BadRequest( ErrorKind::RoomInUse, "Room alias already exists.", @@ -102,12 +104,13 @@ pub async fn create_room_route( } })?; - let mut content = RoomCreateEventContent::new(sender_user.clone()); - content.federate = body.creation_content.federate; - content.predecessor = body.creation_content.predecessor.clone(); - content.room_version = match body.room_version.clone() { + let room_version = match body.room_version.clone() { Some(room_version) => { - if room_version == RoomVersionId::Version5 || room_version == RoomVersionId::Version6 { + if services() + .globals + .supported_room_versions() + .contains(&room_version) + { room_version } else { return Err(Error::BadRequest( @@ -116,13 +119,63 @@ pub async fn create_room_route( )); } } - None => RoomVersionId::Version6, + None => services().globals.default_room_version(), + }; + + let content = match &body.creation_content { + Some(content) => { + let mut content = content + .deserialize_as::() + .expect("Invalid creation content"); + content.insert( + "creator".into(), + json!(&sender_user).try_into().map_err(|_| { + Error::BadRequest(ErrorKind::BadJson, "Invalid creation content") + })?, + ); + content.insert( + "room_version".into(), + json!(room_version.as_str()).try_into().map_err(|_| { + Error::BadRequest(ErrorKind::BadJson, "Invalid creation content") + })?, + ); + content + } + None => { + let mut content = serde_json::from_str::( + to_raw_value(&RoomCreateEventContent::new(sender_user.clone())) + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid creation content"))? + .get(), + ) + .unwrap(); + content.insert( + "room_version".into(), + json!(room_version.as_str()).try_into().map_err(|_| { + Error::BadRequest(ErrorKind::BadJson, "Invalid creation content") + })?, + ); + content + } }; + // Validate creation content + let de_result = serde_json::from_str::( + to_raw_value(&content) + .expect("Invalid creation content") + .get(), + ); + + if de_result.is_err() { + return Err(Error::BadRequest( + ErrorKind::BadJson, + "Invalid creation content", + )); + } + // 1. The room create event - db.rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomCreate, + event_type: RoomEventType::RoomCreate, content: to_raw_value(&content).expect("event is valid, we just created it"), unsigned: None, state_key: Some("".to_owned()), @@ -130,22 +183,22 @@ pub async fn create_room_route( }, sender_user, &room_id, - &db, &state_lock, )?; // 2. Let the room creator join - db.rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Join, - displayname: db.users.displayname(sender_user)?, - avatar_url: db.users.avatar_url(sender_user)?, + displayname: services().users.displayname(sender_user)?, + avatar_url: services().users.avatar_url(sender_user)?, is_direct: Some(body.is_direct), third_party_invite: None, - blurhash: db.users.blurhash(sender_user)?, + blurhash: services().users.blurhash(sender_user)?, reason: None, + join_authorized_via_users_server: None, }) .expect("event is valid, we just created it"), unsigned: None, @@ -154,28 +207,24 @@ pub async fn create_room_route( }, sender_user, &room_id, - &db, &state_lock, )?; // 3. Power levels // Figure out preset. 
We need it for preset specific events - let preset = body - .preset - .clone() - .unwrap_or_else(|| match &body.visibility { - room::Visibility::Private => create_room::RoomPreset::PrivateChat, - room::Visibility::Public => create_room::RoomPreset::PublicChat, - _ => create_room::RoomPreset::PrivateChat, // Room visibility should not be custom - }); + let preset = body.preset.clone().unwrap_or(match &body.visibility { + room::Visibility::Private => RoomPreset::PrivateChat, + room::Visibility::Public => RoomPreset::PublicChat, + _ => RoomPreset::PrivateChat, // Room visibility should not be custom + }); let mut users = BTreeMap::new(); - users.insert(sender_user.clone(), 100.into()); + users.insert(sender_user.clone(), int!(100)); - if preset == create_room::RoomPreset::TrustedPrivateChat { + if preset == RoomPreset::TrustedPrivateChat { for invite_ in &body.invite { - users.insert(invite_.clone(), 100.into()); + users.insert(invite_.clone(), int!(100)); } } @@ -196,9 +245,9 @@ pub async fn create_room_route( } } - db.rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomPowerLevels, + event_type: RoomEventType::RoomPowerLevels, content: to_raw_value(&power_levels_content) .expect("to_raw_value always works on serde_json::Value"), unsigned: None, @@ -207,17 +256,16 @@ pub async fn create_room_route( }, sender_user, &room_id, - &db, &state_lock, )?; // 4. Canonical room alias if let Some(room_alias_id) = &alias { - db.rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomCanonicalAlias, + event_type: RoomEventType::RoomCanonicalAlias, content: to_raw_value(&RoomCanonicalAliasEventContent { - alias: Some(room_alias_id.clone()), + alias: Some(room_alias_id.to_owned()), alt_aliases: vec![], }) .expect("We checked that alias earlier, it must be fine"), @@ -227,7 +275,6 @@ pub async fn create_room_route( }, sender_user, &room_id, - &db, &state_lock, )?; } @@ -235,11 +282,11 @@ pub async fn create_room_route( // 5. 
Events set by preset // 5.1 Join Rules - db.rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomJoinRules, + event_type: RoomEventType::RoomJoinRules, content: to_raw_value(&RoomJoinRulesEventContent::new(match preset { - create_room::RoomPreset::PublicChat => JoinRule::Public, + RoomPreset::PublicChat => JoinRule::Public, // according to spec "invite" is the default _ => JoinRule::Invite, })) @@ -250,14 +297,13 @@ pub async fn create_room_route( }, sender_user, &room_id, - &db, &state_lock, )?; // 5.2 History Visibility - db.rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomHistoryVisibility, + event_type: RoomEventType::RoomHistoryVisibility, content: to_raw_value(&RoomHistoryVisibilityEventContent::new( HistoryVisibility::Shared, )) @@ -268,16 +314,15 @@ pub async fn create_room_route( }, sender_user, &room_id, - &db, &state_lock, )?; // 5.3 Guest Access - db.rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomGuestAccess, + event_type: RoomEventType::RoomGuestAccess, content: to_raw_value(&RoomGuestAccessEventContent::new(match preset { - create_room::RoomPreset::PublicChat => GuestAccess::Forbidden, + RoomPreset::PublicChat => GuestAccess::Forbidden, _ => GuestAccess::CanJoin, })) .expect("event is valid, we just created it"), @@ -287,31 +332,39 @@ pub async fn create_room_route( }, sender_user, &room_id, - &db, &state_lock, )?; // 6. Events listed in initial_state for event in &body.initial_state { - let pdu_builder = PduBuilder::from(event.deserialize().map_err(|e| { + let mut pdu_builder = event.deserialize_as::().map_err(|e| { warn!("Invalid initial state event: {:?}", e); Error::BadRequest(ErrorKind::InvalidParam, "Invalid initial state event.") - })?); + })?; + + // Implicit state key defaults to "" + pdu_builder.state_key.get_or_insert_with(|| "".to_owned()); // Silently skip encryption events if they are not allowed - if pdu_builder.event_type == EventType::RoomEncryption && !db.globals.allow_encryption() { + if pdu_builder.event_type == RoomEventType::RoomEncryption + && !services().globals.allow_encryption() + { continue; } - db.rooms - .build_and_append_pdu(pdu_builder, sender_user, &room_id, &db, &state_lock)?; + services().rooms.timeline.build_and_append_pdu( + pdu_builder, + sender_user, + &room_id, + &state_lock, + )?; } // 7. Events implied by name and topic if let Some(name) = &body.name { - db.rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomName, + event_type: RoomEventType::RoomName, content: to_raw_value(&RoomNameEventContent::new(Some(name.clone()))) .expect("event is valid, we just created it"), unsigned: None, @@ -320,15 +373,14 @@ pub async fn create_room_route( }, sender_user, &room_id, - &db, &state_lock, )?; } if let Some(topic) = &body.topic { - db.rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomTopic, + event_type: RoomEventType::RoomTopic, content: to_raw_value(&RoomTopicEventContent { topic: topic.clone(), }) @@ -339,7 +391,6 @@ pub async fn create_room_route( }, sender_user, &room_id, - &db, &state_lock, )?; } @@ -347,23 +398,21 @@ pub async fn create_room_route( // 8. 
Events implied by invite (and TODO: invite_3pid) drop(state_lock); for user_id in &body.invite { - let _ = invite_helper(sender_user, user_id, &room_id, &db, body.is_direct).await; + let _ = invite_helper(sender_user, user_id, &room_id, body.is_direct).await; } // Homeserver specific stuff if let Some(alias) = alias { - db.rooms.set_alias(&alias, Some(&room_id), &db.globals)?; + services().rooms.alias.set_alias(&alias, &room_id)?; } if body.visibility == room::Visibility::Public { - db.rooms.set_public(&room_id, true)?; + services().rooms.directory.set_public(&room_id)?; } info!("{} created a room", sender_user); - db.flush()?; - - Ok(create_room::Response::new(room_id).into()) + Ok(create_room::v3::Response::new(room_id)) } /// # `GET /_matrix/client/r0/rooms/{roomId}/event/{eventId}` @@ -371,32 +420,30 @@ pub async fn create_room_route( /// Gets a single event. /// /// - You have to currently be joined to the room (TODO: Respect history visibility) -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_>/event/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn get_room_event_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !db.rooms.is_joined(sender_user, &body.room_id)? { + if !services() + .rooms + .state_cache + .is_joined(sender_user, &body.room_id)? + { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", )); } - Ok(get_room_event::Response { - event: db + Ok(get_room_event::v3::Response { + event: services() .rooms + .timeline .get_pdu(&body.event_id)? .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))? .to_room_event(), - } - .into()) + }) } /// # `GET /_matrix/client/r0/rooms/{roomId}/aliases` @@ -404,35 +451,33 @@ pub async fn get_room_event_route( /// Lists all aliases of the room. /// /// - Only users joined to the room are allowed to call this TODO: Allow any user to call it if history_visibility is world readable -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_>/aliases", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn get_room_aliases_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !db.rooms.is_joined(sender_user, &body.room_id)? { + if !services() + .rooms + .state_cache + .is_joined(sender_user, &body.room_id)? + { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", )); } - Ok(aliases::Response { - aliases: db + Ok(aliases::v3::Response { + aliases: services() .rooms - .room_aliases(&body.room_id) + .alias + .local_aliases_for_room(&body.room_id) .filter_map(|a| a.ok()) .collect(), - } - .into()) + }) } -/// # `GET /_matrix/client/r0/rooms/{roomId}/upgrade` +/// # `POST /_matrix/client/r0/rooms/{roomId}/upgrade` /// /// Upgrades the room. 
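// A minimal sketch (not from this patch) of the insert-then-validate flow that
// `create_room_route` above and `upgrade_room_route` below now use for the m.room.create
// content: build or patch it as an untyped JSON object ("creator", "room_version",
// "predecessor"), then confirm it still deserializes into the typed content before sending.
// `CreateContentLike` is a stand-in for ruma's `RoomCreateEventContent`.
use serde::Deserialize;

#[allow(dead_code)]
#[derive(Deserialize)]
struct CreateContentLike {
    creator: String,
    room_version: String,
}

fn main() {
    let mut content = serde_json::json!({ "creator": "@alice:example.org" });
    content
        .as_object_mut()
        .expect("creation content is a JSON object")
        .insert("room_version".to_owned(), serde_json::json!("9"));

    // The real handlers reject the request ("Invalid creation content") if this fails:
    let valid = serde_json::from_value::<CreateContentLike>(content.clone()).is_ok();
    assert!(valid);
    println!("{content}");
}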
/// @@ -442,21 +487,16 @@ pub async fn get_room_aliases_route( /// - Transfers some state events /// - Moves local aliases /// - Modifies old room power levels to prevent users from speaking -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/upgrade", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn upgrade_room_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - if !matches!( - body.new_version, - RoomVersionId::Version5 | RoomVersionId::Version6 - ) { + if !services() + .globals + .supported_room_versions() + .contains(&body.new_version) + { return Err(Error::BadRequest( ErrorKind::UnsupportedRoomVersion, "This server does not support that room version.", @@ -464,12 +504,15 @@ pub async fn upgrade_room_route( } // Create a replacement room - let replacement_room = RoomId::new(db.globals.server_name()); - db.rooms - .get_or_create_shortroomid(&replacement_room, &db.globals)?; + let replacement_room = RoomId::new(services().globals.server_name()); + services() + .rooms + .short + .get_or_create_shortroomid(&replacement_room)?; let mutex_state = Arc::clone( - db.globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -480,9 +523,9 @@ pub async fn upgrade_room_route( // Send a m.room.tombstone event to the old room to indicate that it is not intended to be used any further // Fail if the sender does not have the required permissions - let tombstone_event_id = db.rooms.build_and_append_pdu( + let tombstone_event_id = services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomTombstone, + event_type: RoomEventType::RoomTombstone, content: to_raw_value(&RoomTombstoneEventContent { body: "This room has been replaced".to_owned(), replacement_room: replacement_room.clone(), @@ -494,14 +537,14 @@ pub async fn upgrade_room_route( }, sender_user, &body.room_id, - &db, &state_lock, )?; // Change lock to replacement room drop(state_lock); let mutex_state = Arc::clone( - db.globals + services() + .globals .roomid_mutex_state .write() .unwrap() @@ -510,32 +553,61 @@ pub async fn upgrade_room_route( ); let state_lock = mutex_state.lock().await; - // Get the old room federations status - let federate = serde_json::from_str::( - db.rooms - .room_state_get(&body.room_id, &EventType::RoomCreate, "")? + // Get the old room creation event + let mut create_event_content = serde_json::from_str::( + services() + .rooms + .state_accessor + .room_state_get(&body.room_id, &StateEventType::RoomCreate, "")? .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? .content .get(), ) - .map_err(|_| Error::bad_database("Invalid room event in database."))? 
- .federate; + .map_err(|_| Error::bad_database("Invalid room event in database."))?; // Use the m.room.tombstone event as the predecessor let predecessor = Some(ruma::events::room::create::PreviousRoom::new( body.room_id.clone(), - tombstone_event_id, + (*tombstone_event_id).to_owned(), )); // Send a m.room.create event containing a predecessor field and the applicable room_version - let mut create_event_content = RoomCreateEventContent::new(sender_user.clone()); - create_event_content.federate = federate; - create_event_content.room_version = body.new_version.clone(); - create_event_content.predecessor = predecessor; + create_event_content.insert( + "creator".into(), + json!(&sender_user) + .try_into() + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?, + ); + create_event_content.insert( + "room_version".into(), + json!(&body.new_version) + .try_into() + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?, + ); + create_event_content.insert( + "predecessor".into(), + json!(predecessor) + .try_into() + .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Error forming creation event"))?, + ); + + // Validate creation event content + let de_result = serde_json::from_str::( + to_raw_value(&create_event_content) + .expect("Error forming creation event") + .get(), + ); + + if de_result.is_err() { + return Err(Error::BadRequest( + ErrorKind::BadJson, + "Error forming creation event", + )); + } - db.rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomCreate, + event_type: RoomEventType::RoomCreate, content: to_raw_value(&create_event_content) .expect("event is valid, we just created it"), unsigned: None, @@ -544,22 +616,22 @@ pub async fn upgrade_room_route( }, sender_user, &replacement_room, - &db, &state_lock, )?; // Join the new room - db.rooms.build_and_append_pdu( + services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomMember, + event_type: RoomEventType::RoomMember, content: to_raw_value(&RoomMemberEventContent { membership: MembershipState::Join, - displayname: db.users.displayname(sender_user)?, - avatar_url: db.users.avatar_url(sender_user)?, + displayname: services().users.displayname(sender_user)?, + avatar_url: services().users.avatar_url(sender_user)?, is_direct: None, third_party_invite: None, - blurhash: db.users.blurhash(sender_user)?, + blurhash: services().users.blurhash(sender_user)?, reason: None, + join_authorized_via_users_server: None, }) .expect("event is valid, we just created it"), unsigned: None, @@ -568,33 +640,37 @@ pub async fn upgrade_room_route( }, sender_user, &replacement_room, - &db, &state_lock, )?; // Recommended transferable state events list from the specs let transferable_state_events = vec![ - EventType::RoomServerAcl, - EventType::RoomEncryption, - EventType::RoomName, - EventType::RoomAvatar, - EventType::RoomTopic, - EventType::RoomGuestAccess, - EventType::RoomHistoryVisibility, - EventType::RoomJoinRules, - EventType::RoomPowerLevels, + StateEventType::RoomServerAcl, + StateEventType::RoomEncryption, + StateEventType::RoomName, + StateEventType::RoomAvatar, + StateEventType::RoomTopic, + StateEventType::RoomGuestAccess, + StateEventType::RoomHistoryVisibility, + StateEventType::RoomJoinRules, + StateEventType::RoomPowerLevels, ]; // Replicate transferable state events to the new room for event_type in transferable_state_events { - let event_content = match 
db.rooms.room_state_get(&body.room_id, &event_type, "")? { - Some(v) => v.content.clone(), - None => continue, // Skipping missing events. - }; - - db.rooms.build_and_append_pdu( + let event_content = + match services() + .rooms + .state_accessor + .room_state_get(&body.room_id, &event_type, "")? + { + Some(v) => v.content.clone(), + None => continue, // Skipping missing events. + }; + + services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type, + event_type: event_type.to_string().into(), content: event_content, unsigned: None, state_key: Some("".to_owned()), @@ -602,21 +678,29 @@ pub async fn upgrade_room_route( }, sender_user, &replacement_room, - &db, &state_lock, )?; } // Moves any local aliases to the new room - for alias in db.rooms.room_aliases(&body.room_id).filter_map(|r| r.ok()) { - db.rooms - .set_alias(&alias, Some(&replacement_room), &db.globals)?; + for alias in services() + .rooms + .alias + .local_aliases_for_room(&body.room_id) + .filter_map(|r| r.ok()) + { + services() + .rooms + .alias + .set_alias(&alias, &replacement_room)?; } // Get the old room power levels let mut power_levels_event_content: RoomPowerLevelsEventContent = serde_json::from_str( - db.rooms - .room_state_get(&body.room_id, &EventType::RoomPowerLevels, "")? + services() + .rooms + .state_accessor + .room_state_get(&body.room_id, &StateEventType::RoomPowerLevels, "")? .ok_or_else(|| Error::bad_database("Found room without m.room.create event."))? .content .get(), @@ -624,17 +708,14 @@ pub async fn upgrade_room_route( .map_err(|_| Error::bad_database("Invalid room event in database."))?; // Setting events_default and invite to the greater of 50 and users_default + 1 - let new_level = max( - 50.into(), - power_levels_event_content.users_default + 1.into(), - ); + let new_level = max(int!(50), power_levels_event_content.users_default + int!(1)); power_levels_event_content.events_default = new_level; power_levels_event_content.invite = new_level; // Modify the power levels in the old room to prevent sending of events and inviting new users - let _ = db.rooms.build_and_append_pdu( + let _ = services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type: EventType::RoomPowerLevels, + event_type: RoomEventType::RoomPowerLevels, content: to_raw_value(&power_levels_event_content) .expect("event is valid, we just created it"), unsigned: None, @@ -643,14 +724,11 @@ pub async fn upgrade_room_route( }, sender_user, &body.room_id, - &db, &state_lock, )?; drop(state_lock); - db.flush()?; - // Return the replacement room id - Ok(upgrade_room::Response { replacement_room }.into()) + Ok(upgrade_room::v3::Response { replacement_room }) } diff --git a/src/client_server/search.rs b/src/api/client_server/search.rs similarity index 77% rename from src/client_server/search.rs rename to src/api/client_server/search.rs index 59c9480a..1ba9cdfe 100644 --- a/src/client_server/search.rs +++ b/src/api/client_server/search.rs @@ -1,9 +1,12 @@ -use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; -use ruma::api::client::{error::ErrorKind, r0::search::search_events}; +use crate::{services, Error, Result, Ruma}; +use ruma::api::client::{ + error::ErrorKind, + search::search_events::{ + self, + v3::{EventContextResult, ResultCategories, ResultRoomEvents, SearchResult}, + }, +}; -#[cfg(feature = "conduit_bin")] -use rocket::post; -use search_events::{EventContextResult, ResultCategories, ResultRoomEvents, SearchResult}; use std::collections::BTreeMap; /// # `POST /_matrix/client/r0/search` @@ -11,22 
+14,18 @@ use std::collections::BTreeMap; /// Searches rooms for messages. /// /// - Only works if the user is currently joined to the room (TODO: Respect history visibility) -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/search", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn search_events_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let search_criteria = body.search_categories.room_events.as_ref().unwrap(); - let filter = search_criteria.filter.clone().unwrap_or_default(); + let filter = &search_criteria.filter; let room_ids = filter.rooms.clone().unwrap_or_else(|| { - db.rooms + services() + .rooms + .state_cache .rooms_joined(sender_user) .filter_map(|r| r.ok()) .collect() @@ -37,18 +36,24 @@ pub async fn search_events_route( let mut searches = Vec::new(); for room_id in room_ids { - if !db.rooms.is_joined(sender_user, &room_id)? { + if !services() + .rooms + .state_cache + .is_joined(sender_user, &room_id)? + { return Err(Error::BadRequest( ErrorKind::Forbidden, "You don't have permission to view this room.", )); } - let search = db + if let Some(search) = services() .rooms - .search_pdus(&room_id, &search_criteria.search_term)?; - - searches.push(search.0.peekable()); + .search + .search_pdus(&room_id, &search_criteria.search_term)? + { + searches.push(search.0.peekable()); + } } let skip = match body.next_batch.as_ref().map(|s| s.parse()) { @@ -86,8 +91,9 @@ pub async fn search_events_route( start: None, }, rank: None, - result: db + result: services() .rooms + .timeline .get_pdu_from_id(result)? .map(|pdu| pdu.to_room_event()), }) @@ -103,7 +109,7 @@ pub async fn search_events_route( Some((skip + limit).to_string()) }; - Ok(search_events::Response::new(ResultCategories { + Ok(search_events::v3::Response::new(ResultCategories { room_events: ResultRoomEvents { count: Some((results.len() as u32).into()), // TODO: set this to none. Element shouldn't depend on it groups: BTreeMap::new(), // TODO @@ -116,6 +122,5 @@ pub async fn search_events_route( .map(str::to_lowercase) .collect(), }, - }) - .into()) + })) } diff --git a/src/client_server/session.rs b/src/api/client_server/session.rs similarity index 63% rename from src/client_server/session.rs rename to src/api/client_server/session.rs index 61e5519a..7c8c1288 100644 --- a/src/client_server/session.rs +++ b/src/api/client_server/session.rs @@ -1,12 +1,10 @@ use super::{DEVICE_ID_LENGTH, TOKEN_LENGTH}; -use crate::{database::DatabaseGuard, utils, ConduitResult, Error, Ruma}; +use crate::{services, utils, Error, Result, Ruma}; use ruma::{ api::client::{ error::ErrorKind, - r0::{ - session::{get_login_types, login, logout, logout_all}, - uiaa::IncomingUserIdentifier, - }, + session::{get_login_types, login, logout, logout_all}, + uiaa::IncomingUserIdentifier, }, UserId, }; @@ -16,25 +14,19 @@ use tracing::info; #[derive(Debug, Deserialize)] struct Claims { sub: String, - exp: usize, + //exp: usize, } -#[cfg(feature = "conduit_bin")] -use rocket::{get, post}; - /// # `GET /_matrix/client/r0/login` /// /// Get the supported login types of this server. One of these should be used as the `type` field /// when logging in. 
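// A minimal sketch (not from this patch) of the JWT branch of `login_route` below: the token
// only needs a `sub` claim carrying the username (`exp` is commented out in `Claims` above),
// which is lowercased and then turned into a user id. The secret here is made up, and
// depending on the jsonwebtoken version, `Validation::default()` may still insist on an
// `exp` claim.
use jsonwebtoken::{decode, DecodingKey, Validation};
use serde::Deserialize;

#[derive(Deserialize)]
struct Claims {
    sub: String,
}

fn localpart_from_token(token: &str, secret: &[u8]) -> Option<String> {
    decode::<Claims>(token, &DecodingKey::from_secret(secret), &Validation::default())
        .ok()
        .map(|data| data.claims.sub.to_lowercase())
}

fn main() {
    // A made-up token simply fails validation and yields None:
    println!("{:?}", localpart_from_token("not.a.real.jwt", b"example-shared-secret"));
}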
-#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/login"))] -#[tracing::instrument] -pub async fn get_login_types_route() -> ConduitResult { - Ok( - get_login_types::Response::new(vec![get_login_types::LoginType::Password( - Default::default(), - )]) - .into(), - ) +pub async fn get_login_types_route( + _body: Ruma, +) -> Result { + Ok(get_login_types::v3::Response::new(vec![ + get_login_types::v3::LoginType::Password(Default::default()), + ])) } /// # `POST /_matrix/client/r0/login` @@ -48,36 +40,31 @@ pub async fn get_login_types_route() -> ConduitResult /// /// Note: You can use [`GET /_matrix/client/r0/login`](fn.get_supported_versions_route.html) to see /// supported login types. -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/login", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn login_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { +pub async fn login_route(body: Ruma) -> Result { // Validate login method // TODO: Other login methods let user_id = match &body.login_info { - login::IncomingLoginInfo::Password(login::IncomingPassword { + login::v3::IncomingLoginInfo::Password(login::v3::IncomingPassword { identifier, password, }) => { - let username = if let IncomingUserIdentifier::MatrixId(matrix_id) = identifier { - matrix_id + let username = if let IncomingUserIdentifier::UserIdOrLocalpart(user_id) = identifier { + user_id.to_lowercase() } else { return Err(Error::BadRequest(ErrorKind::Forbidden, "Bad login type.")); }; let user_id = - UserId::parse_with_server_name(username.to_owned(), db.globals.server_name()) + UserId::parse_with_server_name(username, services().globals.server_name()) .map_err(|_| { Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid.") })?; - let hash = db.users.password_hash(&user_id)?.ok_or(Error::BadRequest( - ErrorKind::Forbidden, - "Wrong username or password.", - ))?; + let hash = services() + .users + .password_hash(&user_id)? + .ok_or(Error::BadRequest( + ErrorKind::Forbidden, + "Wrong username or password.", + ))?; if hash.is_empty() { return Err(Error::BadRequest( @@ -97,16 +84,16 @@ pub async fn login_route( user_id } - login::IncomingLoginInfo::Token(login::IncomingToken { token }) => { - if let Some(jwt_decoding_key) = db.globals.jwt_decoding_key() { + login::v3::IncomingLoginInfo::Token(login::v3::IncomingToken { token }) => { + if let Some(jwt_decoding_key) = services().globals.jwt_decoding_key() { let token = jsonwebtoken::decode::( token, jwt_decoding_key, &jsonwebtoken::Validation::default(), ) .map_err(|_| Error::BadRequest(ErrorKind::InvalidUsername, "Token is invalid."))?; - let username = token.claims.sub; - UserId::parse_with_server_name(username, db.globals.server_name()).map_err( + let username = token.claims.sub.to_lowercase(); + UserId::parse_with_server_name(username, services().globals.server_name()).map_err( |_| Error::BadRequest(ErrorKind::InvalidUsername, "Username is invalid."), )? 
} else { @@ -135,15 +122,16 @@ pub async fn login_route( // Determine if device_id was provided and exists in the db for this user let device_exists = body.device_id.as_ref().map_or(false, |device_id| { - db.users + services() + .users .all_device_ids(&user_id) .any(|x| x.as_ref().map_or(false, |v| v == device_id)) }); if device_exists { - db.users.set_token(&user_id, &device_id, &token)?; + services().users.set_token(&user_id, &device_id, &token)?; } else { - db.users.create_device( + services().users.create_device( &user_id, &device_id, &token, @@ -153,16 +141,15 @@ pub async fn login_route( info!("{} logged in", user_id); - db.flush()?; - - Ok(login::Response { + Ok(login::v3::Response { user_id, access_token: token, - home_server: Some(db.globals.server_name().to_owned()), + home_server: Some(services().globals.server_name().to_owned()), device_id, well_known: None, - } - .into()) + refresh_token: None, + expires_in: None, + }) } /// # `POST /_matrix/client/r0/logout` @@ -173,23 +160,13 @@ pub async fn login_route( /// - Deletes device metadata (device id, device display name, last seen ip, last seen ts) /// - Forgets to-device events /// - Triggers device list updates -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/logout", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn logout_route( - db: DatabaseGuard, - body: Ruma, -) -> ConduitResult { +pub async fn logout_route(body: Ruma) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_ref().expect("user is authenticated"); - db.users.remove_device(sender_user, sender_device)?; - - db.flush()?; + services().users.remove_device(sender_user, sender_device)?; - Ok(logout::Response::new().into()) + Ok(logout::v3::Response::new()) } /// # `POST /_matrix/client/r0/logout/all` @@ -203,22 +180,14 @@ pub async fn logout_route( /// /// Note: This is equivalent to calling [`GET /_matrix/client/r0/logout`](fn.logout_route.html) /// from each device of this user. 
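A minimal client-side sketch of how the two logout endpoints described above are called, assuming a `reqwest`/`tokio` setup; the homeserver URL and access token are placeholders, and both requests authenticate with the usual Bearer token and take an empty body:

```rust
use reqwest::Client;

#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    let hs = "https://conduit.example.org"; // placeholder homeserver
    let token = "syt_example_access_token"; // placeholder token

    let client = Client::new();

    // Invalidate only the current device's access token:
    client
        .post(format!("{hs}/_matrix/client/r0/logout"))
        .bearer_auth(token)
        .send()
        .await?
        .error_for_status()?;

    // Or invalidate the tokens of every device belonging to this user:
    client
        .post(format!("{hs}/_matrix/client/r0/logout/all"))
        .bearer_auth(token)
        .send()
        .await?
        .error_for_status()?;

    Ok(())
}
```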
-#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/logout/all", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn logout_all_route( - db: DatabaseGuard, - body: Ruma, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - for device_id in db.users.all_device_ids(sender_user).flatten() { - db.users.remove_device(sender_user, &device_id)?; + for device_id in services().users.all_device_ids(sender_user).flatten() { + services().users.remove_device(sender_user, &device_id)?; } - db.flush()?; - - Ok(logout_all::Response::new().into()) + Ok(logout_all::v3::Response::new()) } diff --git a/src/client_server/state.rs b/src/api/client_server/state.rs similarity index 71% rename from src/client_server/state.rs rename to src/api/client_server/state.rs index 307bccab..36466b8f 100644 --- a/src/client_server/state.rs +++ b/src/api/client_server/state.rs @@ -1,27 +1,22 @@ use std::sync::Arc; -use crate::{ - database::DatabaseGuard, pdu::PduBuilder, ConduitResult, Database, Error, Result, Ruma, -}; +use crate::{service::pdu::PduBuilder, services, Error, Result, Ruma, RumaResponse}; use ruma::{ api::client::{ error::ErrorKind, - r0::state::{get_state_events, get_state_events_for_key, send_state_event}, + state::{get_state_events, get_state_events_for_key, send_state_event}, }, events::{ room::{ canonical_alias::RoomCanonicalAliasEventContent, history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, }, - AnyStateEventContent, EventType, + AnyStateEventContent, StateEventType, }, serde::Raw, EventId, RoomId, UserId, }; -#[cfg(feature = "conduit_bin")] -use rocket::{get, put}; - /// # `PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}/{stateKey}` /// /// Sends a state event into the room. 
@@ -29,30 +24,22 @@ use rocket::{get, put}; /// - The only requirement for the content is that it has to be valid json /// - Tries to send the event into the room, auth rules will determine if it is allowed /// - If event is new canonical_alias: Rejects if alias is incorrect -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/rooms/<_>/state/<_>/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn send_state_event_for_key_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let event_id = send_state_event_for_key_helper( - &db, sender_user, &body.room_id, - EventType::from(&body.event_type), + &body.event_type, &body.body.body, // Yes, I hate it too body.state_key.to_owned(), ) .await?; - db.flush()?; - - Ok(send_state_event::Response { event_id }.into()) + let event_id = (*event_id).to_owned(); + Ok(send_state_event::v3::Response { event_id }) } /// # `PUT /_matrix/client/r0/rooms/{roomId}/state/{eventType}` @@ -62,19 +49,13 @@ pub async fn send_state_event_for_key_route( /// - The only requirement for the content is that it has to be valid json /// - Tries to send the event into the room, auth rules will determine if it is allowed /// - If event is new canonical_alias: Rejects if alias is incorrect -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/rooms/<_>/state/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn send_state_event_for_empty_key_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); // Forbid m.room.encryption if encryption is disabled - if &body.event_type == "m.room.encryption" && !db.globals.allow_encryption() { + if body.event_type == StateEventType::RoomEncryption && !services().globals.allow_encryption() { return Err(Error::BadRequest( ErrorKind::Forbidden, "Encryption has been disabled", @@ -82,18 +63,16 @@ pub async fn send_state_event_for_empty_key_route( } let event_id = send_state_event_for_key_helper( - &db, sender_user, &body.room_id, - EventType::from(&body.event_type), + &body.event_type.to_string().into(), &body.body.body, body.state_key.to_owned(), ) .await?; - db.flush()?; - - Ok(send_state_event::Response { event_id }.into()) + let event_id = (*event_id).to_owned(); + Ok(send_state_event::v3::Response { event_id }.into()) } /// # `GET /_matrix/client/r0/rooms/{roomid}/state` @@ -101,24 +80,23 @@ pub async fn send_state_event_for_empty_key_route( /// Get all state events for a room. /// /// - If not joined: Only works if current room history visibility is world readable -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_>/state", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn get_state_events_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); #[allow(clippy::blocks_in_if_conditions)] // Users not in the room should not be able to access the state unless history_visibility is // WorldReadable - if !db.rooms.is_joined(sender_user, &body.room_id)? + if !services() + .rooms + .state_cache + .is_joined(sender_user, &body.room_id)? && !matches!( - db.rooms - .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? 
+ services() + .rooms + .state_accessor + .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? .map(|event| { serde_json::from_str(event.content.get()) .map(|e: RoomHistoryVisibilityEventContent| e.history_visibility) @@ -137,15 +115,16 @@ pub async fn get_state_events_route( )); } - Ok(get_state_events::Response { - room_state: db + Ok(get_state_events::v3::Response { + room_state: services() .rooms - .room_state_full(&body.room_id)? + .state_accessor + .room_state_full(&body.room_id) + .await? .values() .map(|pdu| pdu.to_state_event()) .collect(), - } - .into()) + }) } /// # `GET /_matrix/client/r0/rooms/{roomid}/state/{eventType}/{stateKey}` @@ -153,24 +132,23 @@ pub async fn get_state_events_route( /// Get single state event of a room. /// /// - If not joined: Only works if current room history visibility is world readable -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_>/state/<_>/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn get_state_events_for_key_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); #[allow(clippy::blocks_in_if_conditions)] // Users not in the room should not be able to access the state unless history_visibility is // WorldReadable - if !db.rooms.is_joined(sender_user, &body.room_id)? + if !services() + .rooms + .state_cache + .is_joined(sender_user, &body.room_id)? && !matches!( - db.rooms - .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? + services() + .rooms + .state_accessor + .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? .map(|event| { serde_json::from_str(event.content.get()) .map(|e: RoomHistoryVisibilityEventContent| e.history_visibility) @@ -189,19 +167,19 @@ pub async fn get_state_events_for_key_route( )); } - let event = db + let event = services() .rooms + .state_accessor .room_state_get(&body.room_id, &body.event_type, &body.state_key)? .ok_or(Error::BadRequest( ErrorKind::NotFound, "State event not found.", ))?; - Ok(get_state_events_for_key::Response { + Ok(get_state_events_for_key::v3::Response { content: serde_json::from_str(event.content.get()) .map_err(|_| Error::bad_database("Invalid event content in database"))?, - } - .into()) + }) } /// # `GET /_matrix/client/r0/rooms/{roomid}/state/{eventType}` @@ -209,24 +187,23 @@ pub async fn get_state_events_for_key_route( /// Get single state event of a room. /// /// - If not joined: Only works if current room history visibility is world readable -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_>/state/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn get_state_events_for_empty_key_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result> { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); #[allow(clippy::blocks_in_if_conditions)] // Users not in the room should not be able to access the state unless history_visibility is // WorldReadable - if !db.rooms.is_joined(sender_user, &body.room_id)? + if !services() + .rooms + .state_cache + .is_joined(sender_user, &body.room_id)? && !matches!( - db.rooms - .room_state_get(&body.room_id, &EventType::RoomHistoryVisibility, "")? + services() + .rooms + .state_accessor + .room_state_get(&body.room_id, &StateEventType::RoomHistoryVisibility, "")? 
.map(|event| { serde_json::from_str(event.content.get()) .map(|e: RoomHistoryVisibilityEventContent| e.history_visibility) @@ -245,15 +222,16 @@ pub async fn get_state_events_for_empty_key_route( )); } - let event = db + let event = services() .rooms + .state_accessor .room_state_get(&body.room_id, &body.event_type, "")? .ok_or(Error::BadRequest( ErrorKind::NotFound, "State event not found.", ))?; - Ok(get_state_events_for_key::Response { + Ok(get_state_events_for_key::v3::Response { content: serde_json::from_str(event.content.get()) .map_err(|_| Error::bad_database("Invalid event content in database"))?, } @@ -261,13 +239,12 @@ pub async fn get_state_events_for_empty_key_route( } async fn send_state_event_for_key_helper( - db: &Database, sender: &UserId, room_id: &RoomId, - event_type: EventType, + event_type: &StateEventType, json: &Raw, state_key: String, -) -> Result { +) -> Result> { let sender_user = sender; // TODO: Review this check, error if event is unparsable, use event type, allow alias if it @@ -282,10 +259,11 @@ async fn send_state_event_for_key_helper( } for alias in aliases { - if alias.server_name() != db.globals.server_name() - || db + if alias.server_name() != services().globals.server_name() + || services() .rooms - .id_from_alias(&alias)? + .alias + .resolve_local_alias(&alias)? .filter(|room| room == room_id) // Make sure it's the right room .is_none() { @@ -299,18 +277,19 @@ async fn send_state_event_for_key_helper( } let mutex_state = Arc::clone( - db.globals + services() + .globals .roomid_mutex_state .write() .unwrap() - .entry(room_id.clone()) + .entry(room_id.to_owned()) .or_default(), ); let state_lock = mutex_state.lock().await; - let event_id = db.rooms.build_and_append_pdu( + let event_id = services().rooms.timeline.build_and_append_pdu( PduBuilder { - event_type, + event_type: event_type.to_string().into(), content: serde_json::from_str(json.json().get()).expect("content is valid json"), unsigned: None, state_key: Some(state_key), @@ -318,7 +297,6 @@ async fn send_state_event_for_key_helper( }, sender_user, room_id, - db, &state_lock, )?; diff --git a/src/api/client_server/sync.rs b/src/api/client_server/sync.rs new file mode 100644 index 00000000..828ae19c --- /dev/null +++ b/src/api/client_server/sync.rs @@ -0,0 +1,1116 @@ +use crate::{services, Error, Result, Ruma, RumaResponse}; +use ruma::{ + api::client::{ + filter::{IncomingFilterDefinition, LazyLoadOptions}, + sync::sync_events::{self, DeviceLists, UnreadNotificationsCount}, + uiaa::UiaaResponse, + }, + events::{ + room::member::{MembershipState, RoomMemberEventContent}, + RoomEventType, StateEventType, + }, + serde::Raw, + OwnedDeviceId, OwnedUserId, RoomId, UserId, +}; +use std::{ + collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, + sync::Arc, + time::Duration, +}; +use tokio::sync::watch::Sender; +use tracing::error; + +/// # `GET /_matrix/client/r0/sync` +/// +/// Synchronize the client's state with the latest state on the server. +/// +/// - This endpoint takes a `since` parameter which should be the `next_batch` value from a +/// previous request for incremental syncs. 
+/// +/// Calling this endpoint without a `since` parameter returns: +/// - Some of the most recent events of each timeline +/// - Notification counts for each room +/// - Joined and invited member counts, heroes +/// - All state events +/// +/// Calling this endpoint with a `since` parameter from a previous `next_batch` returns: +/// For joined rooms: +/// - Some of the most recent events of each timeline that happened after since +/// - If user joined the room after since: All state events (unless lazy loading is activated) and +/// all device list updates in that room +/// - If the user was already in the room: A list of all events that are in the state now, but were +/// not in the state at `since` +/// - If the state we send contains a member event: Joined and invited member counts, heroes +/// - Device list updates that happened after `since` +/// - If there are events in the timeline we send or the user send updated his read mark: Notification counts +/// - EDUs that are active now (read receipts, typing updates, presence) +/// - TODO: Allow multiple sync streams to support Pantalaimon +/// +/// For invited rooms: +/// - If the user was invited after `since`: A subset of the state of the room at the point of the invite +/// +/// For left rooms: +/// - If the user left after `since`: prev_batch token, empty state (TODO: subset of the state at the point of the leave) +/// +/// - Sync is handled in an async task, multiple requests from the same device with the same +/// `since` will be cached +pub async fn sync_events_route( + body: Ruma, +) -> Result> { + let sender_user = body.sender_user.expect("user is authenticated"); + let sender_device = body.sender_device.expect("user is authenticated"); + let body = body.body; + + let mut rx = match services() + .globals + .sync_receivers + .write() + .unwrap() + .entry((sender_user.clone(), sender_device.clone())) + { + Entry::Vacant(v) => { + let (tx, rx) = tokio::sync::watch::channel(None); + + v.insert((body.since.to_owned(), rx.clone())); + + tokio::spawn(sync_helper_wrapper( + sender_user.clone(), + sender_device.clone(), + body, + tx, + )); + + rx + } + Entry::Occupied(mut o) => { + if o.get().0 != body.since { + let (tx, rx) = tokio::sync::watch::channel(None); + + o.insert((body.since.clone(), rx.clone())); + + tokio::spawn(sync_helper_wrapper( + sender_user.clone(), + sender_device.clone(), + body, + tx, + )); + + rx + } else { + o.get().1.clone() + } + } + }; + + let we_have_to_wait = rx.borrow().is_none(); + if we_have_to_wait { + if let Err(e) = rx.changed().await { + error!("Error waiting for sync: {}", e); + } + } + + let result = match rx + .borrow() + .as_ref() + .expect("When sync channel changes it's always set to some") + { + Ok(response) => Ok(response.clone()), + Err(error) => Err(error.to_response()), + }; + + result +} + +async fn sync_helper_wrapper( + sender_user: OwnedUserId, + sender_device: OwnedDeviceId, + body: sync_events::v3::IncomingRequest, + tx: Sender>>, +) { + let since = body.since.clone(); + + let r = sync_helper(sender_user.clone(), sender_device.clone(), body).await; + + if let Ok((_, caching_allowed)) = r { + if !caching_allowed { + match services() + .globals + .sync_receivers + .write() + .unwrap() + .entry((sender_user, sender_device)) + { + Entry::Occupied(o) => { + // Only remove if the device didn't start a different /sync already + if o.get().0 == since { + o.remove(); + } + } + Entry::Vacant(_) => {} + } + } + } + + let _ = tx.send(Some(r.map(|(r, _)| r))); +} + +async fn sync_helper( + 
sender_user: OwnedUserId, + sender_device: OwnedDeviceId, + body: sync_events::v3::IncomingRequest, + // bool = caching allowed +) -> Result<(sync_events::v3::Response, bool), Error> { + use sync_events::v3::{ + Ephemeral, GlobalAccountData, IncomingFilter, InviteState, InvitedRoom, JoinedRoom, + LeftRoom, Presence, RoomAccountData, RoomSummary, Rooms, State, Timeline, ToDevice, + }; + + // TODO: match body.set_presence { + services().rooms.edus.presence.ping_presence(&sender_user)?; + + // Setup watchers, so if there's no response, we can wait for them + let watcher = services().globals.watch(&sender_user, &sender_device); + + let next_batch = services().globals.current_count()?; + let next_batch_string = next_batch.to_string(); + + // Load filter + let filter = match body.filter { + None => IncomingFilterDefinition::default(), + Some(IncomingFilter::FilterDefinition(filter)) => filter, + Some(IncomingFilter::FilterId(filter_id)) => services() + .users + .get_filter(&sender_user, &filter_id)? + .unwrap_or_default(), + }; + + let (lazy_load_enabled, lazy_load_send_redundant) = match filter.room.state.lazy_load_options { + LazyLoadOptions::Enabled { + include_redundant_members: redundant, + } => (true, redundant), + _ => (false, false), + }; + + let mut joined_rooms = BTreeMap::new(); + let since = body + .since + .clone() + .and_then(|string| string.parse().ok()) + .unwrap_or(0); + + let mut presence_updates = HashMap::new(); + let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in + let mut device_list_updates = HashSet::new(); + let mut device_list_left = HashSet::new(); + + // Look for device list updates of this account + device_list_updates.extend( + services() + .users + .keys_changed(sender_user.as_ref(), since, None) + .filter_map(|r| r.ok()), + ); + + let all_joined_rooms = services() + .rooms + .state_cache + .rooms_joined(&sender_user) + .collect::>(); + for room_id in all_joined_rooms { + let room_id = room_id?; + + { + // Get and drop the lock to wait for remaining operations to finish + // This will make sure the we have all events until next_batch + let mutex_insert = Arc::clone( + services() + .globals + .roomid_mutex_insert + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let insert_lock = mutex_insert.lock().unwrap(); + drop(insert_lock); + } + + let timeline_pdus; + let limited; + if services() + .rooms + .timeline + .last_timeline_count(&sender_user, &room_id)? + > since + { + let mut non_timeline_pdus = services() + .rooms + .timeline + .pdus_until(&sender_user, &room_id, u64::MAX)? + .filter_map(|r| { + // Filter out buggy events + if r.is_err() { + error!("Bad pdu in pdus_since: {:?}", r); + } + r.ok() + }) + .take_while(|(pduid, _)| { + services() + .rooms + .timeline + .pdu_count(pduid) + .map_or(false, |count| count > since) + }); + + // Take the last 10 events for the timeline + timeline_pdus = non_timeline_pdus + .by_ref() + .take(10) + .collect::>() + .into_iter() + .rev() + .collect::>(); + + // They /sync response doesn't always return all messages, so we say the output is + // limited unless there are events in non_timeline_pdus + limited = non_timeline_pdus.next().is_some(); + } else { + timeline_pdus = Vec::new(); + limited = false; + } + + let send_notification_counts = !timeline_pdus.is_empty() + || services() + .rooms + .edus + .read_receipt + .last_privateread_update(&sender_user, &room_id)? 
+ > since; + + let mut timeline_users = HashSet::new(); + for (_, event) in &timeline_pdus { + timeline_users.insert(event.sender.as_str().to_owned()); + } + + services().rooms.lazy_loading.lazy_load_confirm_delivery( + &sender_user, + &sender_device, + &room_id, + since, + )?; + + // Database queries: + + let current_shortstatehash = + if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? { + s + } else { + error!("Room {} has no state", room_id); + continue; + }; + + let since_shortstatehash = services() + .rooms + .user + .get_token_shortstatehash(&room_id, since)?; + + // Calculates joined_member_count, invited_member_count and heroes + let calculate_counts = || { + let joined_member_count = services() + .rooms + .state_cache + .room_joined_count(&room_id)? + .unwrap_or(0); + let invited_member_count = services() + .rooms + .state_cache + .room_invited_count(&room_id)? + .unwrap_or(0); + + // Recalculate heroes (first 5 members) + let mut heroes = Vec::new(); + + if joined_member_count + invited_member_count <= 5 { + // Go through all PDUs and for each member event, check if the user is still joined or + // invited until we have 5 or we reach the end + + for hero in services() + .rooms + .timeline + .all_pdus(&sender_user, &room_id)? + .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus + .filter(|(_, pdu)| pdu.kind == RoomEventType::RoomMember) + .map(|(_, pdu)| { + let content: RoomMemberEventContent = + serde_json::from_str(pdu.content.get()).map_err(|_| { + Error::bad_database("Invalid member event in database.") + })?; + + if let Some(state_key) = &pdu.state_key { + let user_id = UserId::parse(state_key.clone()).map_err(|_| { + Error::bad_database("Invalid UserId in member PDU.") + })?; + + // The membership was and still is invite or join + if matches!( + content.membership, + MembershipState::Join | MembershipState::Invite + ) && (services().rooms.state_cache.is_joined(&user_id, &room_id)? + || services() + .rooms + .state_cache + .is_invited(&user_id, &room_id)?) + { + Ok::<_, Error>(Some(state_key.clone())) + } else { + Ok(None) + } + } else { + Ok(None) + } + }) + // Filter out buggy users + .filter_map(|u| u.ok()) + // Filter for possible heroes + .flatten() + { + if heroes.contains(&hero) || hero == sender_user.as_str() { + continue; + } + + heroes.push(hero); + } + } + + Ok::<_, Error>(( + Some(joined_member_count), + Some(invited_member_count), + heroes, + )) + }; + + let ( + heroes, + joined_member_count, + invited_member_count, + joined_since_last_sync, + state_events, + ) = if since_shortstatehash.is_none() { + // Probably since = 0, we will do an initial sync + + let (joined_member_count, invited_member_count, heroes) = calculate_counts()?; + + let current_state_ids = services() + .rooms + .state_accessor + .state_full_ids(current_shortstatehash) + .await?; + + let mut state_events = Vec::new(); + let mut lazy_loaded = HashSet::new(); + + let mut i = 0; + for (shortstatekey, id) in current_state_ids { + let (event_type, state_key) = services() + .rooms + .short + .get_statekey_from_short(shortstatekey)?; + + if event_type != StateEventType::RoomMember { + let pdu = match services().rooms.timeline.get_pdu(&id)? 
{ + Some(pdu) => pdu, + None => { + error!("Pdu in state not found: {}", id); + continue; + } + }; + state_events.push(pdu); + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + } else if !lazy_load_enabled + || body.full_state + || timeline_users.contains(&state_key) + // TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565 + || *sender_user == state_key + { + let pdu = match services().rooms.timeline.get_pdu(&id)? { + Some(pdu) => pdu, + None => { + error!("Pdu in state not found: {}", id); + continue; + } + }; + + // This check is in case a bad user ID made it into the database + if let Ok(uid) = UserId::parse(&state_key) { + lazy_loaded.insert(uid); + } + state_events.push(pdu); + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + } + } + + // Reset lazy loading because this is an initial sync + services().rooms.lazy_loading.lazy_load_reset( + &sender_user, + &sender_device, + &room_id, + )?; + + // The state_events above should contain all timeline_users, let's mark them as lazy + // loaded. + services().rooms.lazy_loading.lazy_load_mark_sent( + &sender_user, + &sender_device, + &room_id, + lazy_loaded, + next_batch, + ); + + ( + heroes, + joined_member_count, + invited_member_count, + true, + state_events, + ) + } else if timeline_pdus.is_empty() && since_shortstatehash == Some(current_shortstatehash) { + // No state changes + (Vec::new(), None, None, false, Vec::new()) + } else { + // Incremental /sync + let since_shortstatehash = since_shortstatehash.unwrap(); + + let since_sender_member: Option = services() + .rooms + .state_accessor + .state_get( + since_shortstatehash, + &StateEventType::RoomMember, + sender_user.as_str(), + )? + .and_then(|pdu| { + serde_json::from_str(pdu.content.get()) + .map_err(|_| Error::bad_database("Invalid PDU in database.")) + .ok() + }); + + let joined_since_last_sync = since_sender_member + .map_or(true, |member| member.membership != MembershipState::Join); + + let mut state_events = Vec::new(); + let mut lazy_loaded = HashSet::new(); + + if since_shortstatehash != current_shortstatehash { + let current_state_ids = services() + .rooms + .state_accessor + .state_full_ids(current_shortstatehash) + .await?; + let since_state_ids = services() + .rooms + .state_accessor + .state_full_ids(since_shortstatehash) + .await?; + + for (key, id) in current_state_ids { + if body.full_state || since_state_ids.get(&key) != Some(&id) { + let pdu = match services().rooms.timeline.get_pdu(&id)? { + Some(pdu) => pdu, + None => { + error!("Pdu in state not found: {}", id); + continue; + } + }; + + if pdu.kind == RoomEventType::RoomMember { + match UserId::parse( + pdu.state_key + .as_ref() + .expect("State event has state key") + .clone(), + ) { + Ok(state_key_userid) => { + lazy_loaded.insert(state_key_userid); + } + Err(e) => error!("Invalid state key for member event: {}", e), + } + } + + state_events.push(pdu); + tokio::task::yield_now().await; + } + } + } + + for (_, event) in &timeline_pdus { + if lazy_loaded.contains(&event.sender) { + continue; + } + + if !services().rooms.lazy_loading.lazy_load_was_sent_before( + &sender_user, + &sender_device, + &room_id, + &event.sender, + )? || lazy_load_send_redundant + { + if let Some(member_event) = services().rooms.state_accessor.room_state_get( + &room_id, + &StateEventType::RoomMember, + event.sender.as_str(), + )? 
{ + lazy_loaded.insert(event.sender.clone()); + state_events.push(member_event); + } + } + } + + services().rooms.lazy_loading.lazy_load_mark_sent( + &sender_user, + &sender_device, + &room_id, + lazy_loaded, + next_batch, + ); + + let encrypted_room = services() + .rooms + .state_accessor + .state_get(current_shortstatehash, &StateEventType::RoomEncryption, "")? + .is_some(); + + let since_encryption = services().rooms.state_accessor.state_get( + since_shortstatehash, + &StateEventType::RoomEncryption, + "", + )?; + + // Calculations: + let new_encrypted_room = encrypted_room && since_encryption.is_none(); + + let send_member_count = state_events + .iter() + .any(|event| event.kind == RoomEventType::RoomMember); + + if encrypted_room { + for state_event in &state_events { + if state_event.kind != RoomEventType::RoomMember { + continue; + } + + if let Some(state_key) = &state_event.state_key { + let user_id = UserId::parse(state_key.clone()) + .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; + + if user_id == sender_user { + continue; + } + + let new_membership = serde_json::from_str::( + state_event.content.get(), + ) + .map_err(|_| Error::bad_database("Invalid PDU in database."))? + .membership; + + match new_membership { + MembershipState::Join => { + // A new user joined an encrypted room + if !share_encrypted_room(&sender_user, &user_id, &room_id)? { + device_list_updates.insert(user_id); + } + } + MembershipState::Leave => { + // Write down users that have left encrypted rooms we are in + left_encrypted_users.insert(user_id); + } + _ => {} + } + } + } + } + + if joined_since_last_sync && encrypted_room || new_encrypted_room { + // If the user is in a new encrypted room, give them all joined users + device_list_updates.extend( + services() + .rooms + .state_cache + .room_members(&room_id) + .flatten() + .filter(|user_id| { + // Don't send key updates from the sender to the sender + &sender_user != user_id + }) + .filter(|user_id| { + // Only send keys if the sender doesn't share an encrypted room with the target already + !share_encrypted_room(&sender_user, user_id, &room_id).unwrap_or(false) + }), + ); + } + + let (joined_member_count, invited_member_count, heroes) = if send_member_count { + calculate_counts()? + } else { + (None, None, Vec::new()) + }; + + ( + heroes, + joined_member_count, + invited_member_count, + joined_since_last_sync, + state_events, + ) + }; + + // Look for device list updates in this room + device_list_updates.extend( + services() + .users + .keys_changed(room_id.as_ref(), since, None) + .filter_map(|r| r.ok()), + ); + + let notification_count = if send_notification_counts { + Some( + services() + .rooms + .user + .notification_count(&sender_user, &room_id)? + .try_into() + .expect("notification count can't go that high"), + ) + } else { + None + }; + + let highlight_count = if send_notification_counts { + Some( + services() + .rooms + .user + .highlight_count(&sender_user, &room_id)? 
+ .try_into() + .expect("highlight count can't go that high"), + ) + } else { + None + }; + + let prev_batch = timeline_pdus + .first() + .map_or(Ok::<_, Error>(None), |(pdu_id, _)| { + Ok(Some( + services().rooms.timeline.pdu_count(pdu_id)?.to_string(), + )) + })?; + + let room_events: Vec<_> = timeline_pdus + .iter() + .map(|(_, pdu)| pdu.to_sync_room_event()) + .collect(); + + let mut edus: Vec<_> = services() + .rooms + .edus + .read_receipt + .readreceipts_since(&room_id, since) + .filter_map(|r| r.ok()) // Filter out buggy events + .map(|(_, _, v)| v) + .collect(); + + if services().rooms.edus.typing.last_typing_update(&room_id)? > since { + edus.push( + serde_json::from_str( + &serde_json::to_string(&services().rooms.edus.typing.typings_all(&room_id)?) + .expect("event is valid, we just created it"), + ) + .expect("event is valid, we just created it"), + ); + } + + // Save the state after this sync so we can send the correct state diff next sync + services().rooms.user.associate_token_shortstatehash( + &room_id, + next_batch, + current_shortstatehash, + )?; + + let joined_room = JoinedRoom { + account_data: RoomAccountData { + events: services() + .account_data + .changes_since(Some(&room_id), &sender_user, since)? + .into_iter() + .filter_map(|(_, v)| { + serde_json::from_str(v.json().get()) + .map_err(|_| Error::bad_database("Invalid account event in database.")) + .ok() + }) + .collect(), + }, + summary: RoomSummary { + heroes, + joined_member_count: joined_member_count.map(|n| (n as u32).into()), + invited_member_count: invited_member_count.map(|n| (n as u32).into()), + }, + unread_notifications: UnreadNotificationsCount { + highlight_count, + notification_count, + }, + timeline: Timeline { + limited: limited || joined_since_last_sync, + prev_batch, + events: room_events, + }, + state: State { + events: state_events + .iter() + .map(|pdu| pdu.to_sync_state_event()) + .collect(), + }, + ephemeral: Ephemeral { events: edus }, + unread_thread_notifications: BTreeMap::new(), + }; + + if !joined_room.is_empty() { + joined_rooms.insert(room_id.clone(), joined_room); + } + + // Take presence updates from this room + for (user_id, presence) in services() + .rooms + .edus + .presence + .presence_since(&room_id, since)? 
+ { + match presence_updates.entry(user_id) { + Entry::Vacant(v) => { + v.insert(presence); + } + Entry::Occupied(mut o) => { + let p = o.get_mut(); + + // Update existing presence event with more info + p.content.presence = presence.content.presence; + if let Some(status_msg) = presence.content.status_msg { + p.content.status_msg = Some(status_msg); + } + if let Some(last_active_ago) = presence.content.last_active_ago { + p.content.last_active_ago = Some(last_active_ago); + } + if let Some(displayname) = presence.content.displayname { + p.content.displayname = Some(displayname); + } + if let Some(avatar_url) = presence.content.avatar_url { + p.content.avatar_url = Some(avatar_url); + } + if let Some(currently_active) = presence.content.currently_active { + p.content.currently_active = Some(currently_active); + } + } + } + } + } + + let mut left_rooms = BTreeMap::new(); + let all_left_rooms: Vec<_> = services() + .rooms + .state_cache + .rooms_left(&sender_user) + .collect(); + for result in all_left_rooms { + let (room_id, _) = result?; + + let mut left_state_events = Vec::new(); + + { + // Get and drop the lock to wait for remaining operations to finish + let mutex_insert = Arc::clone( + services() + .globals + .roomid_mutex_insert + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let insert_lock = mutex_insert.lock().unwrap(); + drop(insert_lock); + } + + let left_count = services() + .rooms + .state_cache + .get_left_count(&room_id, &sender_user)?; + + // Left before last sync + if Some(since) >= left_count { + continue; + } + + if !services().rooms.metadata.exists(&room_id)? { + // This is just a rejected invite, not a room we know + continue; + } + + let since_shortstatehash = services() + .rooms + .user + .get_token_shortstatehash(&room_id, since)?; + + let since_state_ids = match since_shortstatehash { + Some(s) => services().rooms.state_accessor.state_full_ids(s).await?, + None => BTreeMap::new(), + }; + + let left_event_id = match services().rooms.state_accessor.room_state_get_id( + &room_id, + &StateEventType::RoomMember, + sender_user.as_str(), + )? { + Some(e) => e, + None => { + error!("Left room but no left state event"); + continue; + } + }; + + let left_shortstatehash = match services() + .rooms + .state_accessor + .pdu_shortstatehash(&left_event_id)? + { + Some(s) => s, + None => { + error!("Leave event has no state"); + continue; + } + }; + + let mut left_state_ids = services() + .rooms + .state_accessor + .state_full_ids(left_shortstatehash) + .await?; + + let leave_shortstatekey = services() + .rooms + .short + .get_or_create_shortstatekey(&StateEventType::RoomMember, &sender_user.as_str())?; + + left_state_ids.insert(leave_shortstatekey, left_event_id); + + let mut i = 0; + for (key, id) in left_state_ids { + if body.full_state || since_state_ids.get(&key) != Some(&id) { + let (event_type, state_key) = + services().rooms.short.get_statekey_from_short(key)?; + + if !lazy_load_enabled + || event_type != StateEventType::RoomMember + || body.full_state + // TODO: Delete the following line when this is resolved: https://github.com/vector-im/element-web/issues/22565 + || *sender_user == state_key + { + let pdu = match services().rooms.timeline.get_pdu(&id)? 
{ + Some(pdu) => pdu, + None => { + error!("Pdu in state not found: {}", id); + continue; + } + }; + + left_state_events.push(pdu.to_sync_state_event()); + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + } + } + } + + left_rooms.insert( + room_id.clone(), + LeftRoom { + account_data: RoomAccountData { events: Vec::new() }, + timeline: Timeline { + limited: false, + prev_batch: Some(next_batch_string.clone()), + events: Vec::new(), + }, + state: State { + events: left_state_events, + }, + }, + ); + } + + let mut invited_rooms = BTreeMap::new(); + let all_invited_rooms: Vec<_> = services() + .rooms + .state_cache + .rooms_invited(&sender_user) + .collect(); + for result in all_invited_rooms { + let (room_id, invite_state_events) = result?; + + { + // Get and drop the lock to wait for remaining operations to finish + let mutex_insert = Arc::clone( + services() + .globals + .roomid_mutex_insert + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let insert_lock = mutex_insert.lock().unwrap(); + drop(insert_lock); + } + + let invite_count = services() + .rooms + .state_cache + .get_invite_count(&room_id, &sender_user)?; + + // Invited before last sync + if Some(since) >= invite_count { + continue; + } + + invited_rooms.insert( + room_id.clone(), + InvitedRoom { + invite_state: InviteState { + events: invite_state_events, + }, + }, + ); + } + + for user_id in left_encrypted_users { + let still_share_encrypted_room = services() + .rooms + .user + .get_shared_rooms(vec![sender_user.clone(), user_id.clone()])? + .filter_map(|r| r.ok()) + .filter_map(|other_room_id| { + Some( + services() + .rooms + .state_accessor + .room_state_get(&other_room_id, &StateEventType::RoomEncryption, "") + .ok()? + .is_some(), + ) + }) + .all(|encrypted| !encrypted); + // If the user doesn't share an encrypted room with the target anymore, we need to tell + // them + if still_share_encrypted_room { + device_list_left.insert(user_id); + } + } + + // Remove all to-device events the device received *last time* + services() + .users + .remove_to_device_events(&sender_user, &sender_device, since)?; + + let response = sync_events::v3::Response { + next_batch: next_batch_string, + rooms: Rooms { + leave: left_rooms, + join: joined_rooms, + invite: invited_rooms, + knock: BTreeMap::new(), // TODO + }, + presence: Presence { + events: presence_updates + .into_values() + .map(|v| Raw::new(&v).expect("PresenceEvent always serializes successfully")) + .collect(), + }, + account_data: GlobalAccountData { + events: services() + .account_data + .changes_since(None, &sender_user, since)? 
+ .into_iter() + .filter_map(|(_, v)| { + serde_json::from_str(v.json().get()) + .map_err(|_| Error::bad_database("Invalid account event in database.")) + .ok() + }) + .collect(), + }, + device_lists: DeviceLists { + changed: device_list_updates.into_iter().collect(), + left: device_list_left.into_iter().collect(), + }, + device_one_time_keys_count: services() + .users + .count_one_time_keys(&sender_user, &sender_device)?, + to_device: ToDevice { + events: services() + .users + .get_to_device_events(&sender_user, &sender_device)?, + }, + // Fallback keys are not yet supported + device_unused_fallback_key_types: None, + }; + + // TODO: Retry the endpoint instead of returning (waiting for #118) + if !body.full_state + && response.rooms.is_empty() + && response.presence.is_empty() + && response.account_data.is_empty() + && response.device_lists.is_empty() + && response.to_device.is_empty() + { + // Hang a few seconds so requests are not spammed + // Stop hanging if new info arrives + let mut duration = body.timeout.unwrap_or_default(); + if duration.as_secs() > 30 { + duration = Duration::from_secs(30); + } + let _ = tokio::time::timeout(duration, watcher).await; + Ok((response, false)) + } else { + Ok((response, since != next_batch)) // Only cache if we made progress + } +} + +fn share_encrypted_room( + sender_user: &UserId, + user_id: &UserId, + ignore_room: &RoomId, +) -> Result { + Ok(services() + .rooms + .user + .get_shared_rooms(vec![sender_user.to_owned(), user_id.to_owned()])? + .filter_map(|r| r.ok()) + .filter(|room_id| room_id != ignore_room) + .filter_map(|other_room_id| { + Some( + services() + .rooms + .state_accessor + .room_state_get(&other_room_id, &StateEventType::RoomEncryption, "") + .ok()? + .is_some(), + ) + }) + .any(|encrypted| encrypted)) +} diff --git a/src/api/client_server/tag.rs b/src/api/client_server/tag.rs new file mode 100644 index 00000000..c87e2335 --- /dev/null +++ b/src/api/client_server/tag.rs @@ -0,0 +1,128 @@ +use crate::{services, Error, Result, Ruma}; +use ruma::{ + api::client::tag::{create_tag, delete_tag, get_tags}, + events::{ + tag::{TagEvent, TagEventContent}, + RoomAccountDataEventType, + }, +}; +use std::collections::BTreeMap; + +/// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}` +/// +/// Adds a tag to the room. +/// +/// - Inserts the tag into the tag event of the room account data. +pub async fn update_tag_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let event = services().account_data.get( + Some(&body.room_id), + sender_user, + RoomAccountDataEventType::Tag, + )?; + + let mut tags_event = event + .map(|e| { + serde_json::from_str(e.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db.")) + }) + .unwrap_or_else(|| { + Ok(TagEvent { + content: TagEventContent { + tags: BTreeMap::new(), + }, + }) + })?; + + tags_event + .content + .tags + .insert(body.tag.clone().into(), body.tag_info.clone()); + + services().account_data.update( + Some(&body.room_id), + sender_user, + RoomAccountDataEventType::Tag, + &serde_json::to_value(tags_event).expect("to json value always works"), + )?; + + Ok(create_tag::v3::Response {}) +} + +/// # `DELETE /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}` +/// +/// Deletes a tag from the room. +/// +/// - Removes the tag from the tag event of the room account data. 
+pub async fn delete_tag_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let event = services().account_data.get( + Some(&body.room_id), + sender_user, + RoomAccountDataEventType::Tag, + )?; + + let mut tags_event = event + .map(|e| { + serde_json::from_str(e.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db.")) + }) + .unwrap_or_else(|| { + Ok(TagEvent { + content: TagEventContent { + tags: BTreeMap::new(), + }, + }) + })?; + + tags_event.content.tags.remove(&body.tag.clone().into()); + + services().account_data.update( + Some(&body.room_id), + sender_user, + RoomAccountDataEventType::Tag, + &serde_json::to_value(tags_event).expect("to json value always works"), + )?; + + Ok(delete_tag::v3::Response {}) +} + +/// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags` +/// +/// Returns tags on the room. +/// +/// - Gets the tag event of the room account data. +pub async fn get_tags_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let event = services().account_data.get( + Some(&body.room_id), + sender_user, + RoomAccountDataEventType::Tag, + )?; + + let tags_event = event + .map(|e| { + serde_json::from_str(e.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db.")) + }) + .unwrap_or_else(|| { + Ok(TagEvent { + content: TagEventContent { + tags: BTreeMap::new(), + }, + }) + })?; + + Ok(get_tags::v3::Response { + tags: tags_event.content.tags, + }) +} diff --git a/src/api/client_server/thirdparty.rs b/src/api/client_server/thirdparty.rs new file mode 100644 index 00000000..5665ad6c --- /dev/null +++ b/src/api/client_server/thirdparty.rs @@ -0,0 +1,16 @@ +use crate::{Result, Ruma}; +use ruma::api::client::thirdparty::get_protocols; + +use std::collections::BTreeMap; + +/// # `GET /_matrix/client/r0/thirdparty/protocols` +/// +/// TODO: Fetches all metadata about protocols supported by the homeserver. +pub async fn get_protocols_route( + _body: Ruma, +) -> Result { + // TODO + Ok(get_protocols::v3::Response { + protocols: BTreeMap::new(), + }) +} diff --git a/src/client_server/to_device.rs b/src/api/client_server/to_device.rs similarity index 57% rename from src/client_server/to_device.rs rename to src/api/client_server/to_device.rs index 177b1234..139b845d 100644 --- a/src/client_server/to_device.rs +++ b/src/api/client_server/to_device.rs @@ -1,85 +1,75 @@ +use ruma::events::ToDeviceEventType; use std::collections::BTreeMap; -use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; +use crate::{services, Error, Result, Ruma}; use ruma::{ api::{ - client::{error::ErrorKind, r0::to_device::send_event_to_device}, + client::{error::ErrorKind, to_device::send_event_to_device}, federation::{self, transactions::edu::DirectDeviceContent}, }, - events::EventType, to_device::DeviceIdOrAllDevices, }; -#[cfg(feature = "conduit_bin")] -use rocket::put; - /// # `PUT /_matrix/client/r0/sendToDevice/{eventType}/{txnId}` /// /// Send a to-device event to a set of client devices. 
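The tag routes in this file all round-trip the `m.tag` room account data event. A small sketch (using `serde_json`, with hypothetical tag names) of the content they insert into and remove from:

```rust
use serde_json::json;

fn main() {
    // Shape of the `m.tag` content the tag routes read and write.
    // "m.favourite" is a spec-defined tag, "u.work" a user-defined one;
    // "order" is an optional float between 0 and 1 used for sorting.
    let tag_content = json!({
        "tags": {
            "m.favourite": { "order": 0.25 },
            "u.work": {}
        }
    });
    println!("{}", serde_json::to_string_pretty(&tag_content).unwrap());
}
```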
-#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/sendToDevice/<_>/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] pub async fn send_event_to_device_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { + body: Ruma, +) -> Result { let sender_user = body.sender_user.as_ref().expect("user is authenticated"); let sender_device = body.sender_device.as_deref(); - // TODO: uncomment when https://github.com/vector-im/element-android/issues/3589 is solved // Check if this is a new transaction id - /* - if db + if services() .transaction_ids .existing_txnid(sender_user, sender_device, &body.txn_id)? .is_some() { - return Ok(send_event_to_device::Response.into()); + return Ok(send_event_to_device::v3::Response {}); } - */ for (target_user_id, map) in &body.messages { for (target_device_id_maybe, event) in map { - if target_user_id.server_name() != db.globals.server_name() { + if target_user_id.server_name() != services().globals.server_name() { let mut map = BTreeMap::new(); map.insert(target_device_id_maybe.clone(), event.clone()); let mut messages = BTreeMap::new(); messages.insert(target_user_id.clone(), map); + let count = services().globals.next_count()?; - db.sending.send_reliable_edu( + services().sending.send_reliable_edu( target_user_id.server_name(), serde_json::to_vec(&federation::transactions::edu::Edu::DirectToDevice( DirectDeviceContent { sender: sender_user.clone(), - ev_type: EventType::from(&body.event_type), - message_id: body.txn_id.clone(), + ev_type: ToDeviceEventType::from(&*body.event_type), + message_id: count.to_string().into(), messages, }, )) .expect("DirectToDevice EDU can be serialized"), - db.globals.next_count()?, + count, )?; continue; } match target_device_id_maybe { - DeviceIdOrAllDevices::DeviceId(target_device_id) => db.users.add_to_device_event( - sender_user, - target_user_id, - target_device_id, - &body.event_type, - event.deserialize_as().map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid") - })?, - &db.globals, - )?, + DeviceIdOrAllDevices::DeviceId(target_device_id) => { + services().users.add_to_device_event( + sender_user, + target_user_id, + target_device_id, + &body.event_type, + event.deserialize_as().map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid") + })?, + )? 
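A sketch of the request body the handler below deserializes for `PUT /_matrix/client/r0/sendToDevice/{eventType}/{txnId}`; the user ID, device ID, and event content are placeholders, and `"*"` is the spec's wildcard that corresponds to `DeviceIdOrAllDevices::AllDevices` in the match below:

```rust
use serde_json::json;

fn main() {
    // Keys under "messages" are target user IDs; the next level is either a
    // concrete device ID or "*" for all devices of that user. The innermost
    // object is the raw event content and depends on {eventType}.
    let body = json!({
        "messages": {
            "@alice:example.org": {
                "HJKLMNOP": { "example_field": "sent to one device" },
                "*": { "example_field": "sent to every device" }
            }
        }
    });
    println!("{}", serde_json::to_string_pretty(&body).unwrap());
}
```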
+ } DeviceIdOrAllDevices::AllDevices => { - for target_device_id in db.users.all_device_ids(target_user_id) { - db.users.add_to_device_event( + for target_device_id in services().users.all_device_ids(target_user_id) { + services().users.add_to_device_event( sender_user, target_user_id, &target_device_id?, @@ -87,7 +77,6 @@ pub async fn send_event_to_device_route( event.deserialize_as().map_err(|_| { Error::BadRequest(ErrorKind::InvalidParam, "Event is invalid") })?, - &db.globals, )?; } } @@ -96,10 +85,9 @@ pub async fn send_event_to_device_route( } // Save transaction id with empty data - db.transaction_ids + services() + .transaction_ids .add_txnid(sender_user, sender_device, &body.txn_id, &[])?; - db.flush()?; - - Ok(send_event_to_device::Response {}.into()) + Ok(send_event_to_device::v3::Response {}) } diff --git a/src/api/client_server/typing.rs b/src/api/client_server/typing.rs new file mode 100644 index 00000000..ecc926f4 --- /dev/null +++ b/src/api/client_server/typing.rs @@ -0,0 +1,40 @@ +use crate::{services, utils, Error, Result, Ruma}; +use ruma::api::client::{error::ErrorKind, typing::create_typing_event}; + +/// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}` +/// +/// Sets the typing state of the sender user. +pub async fn create_typing_event_route( + body: Ruma, +) -> Result { + use create_typing_event::v3::Typing; + + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + if !services() + .rooms + .state_cache + .is_joined(sender_user, &body.room_id)? + { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "You are not in this room.", + )); + } + + if let Typing::Yes(duration) = body.state { + services().rooms.edus.typing.typing_add( + sender_user, + &body.room_id, + duration.as_millis() as u64 + utils::millis_since_unix_epoch(), + )?; + } else { + services() + .rooms + .edus + .typing + .typing_remove(sender_user, &body.room_id)?; + } + + Ok(create_typing_event::v3::Response {}) +} diff --git a/src/api/client_server/unversioned.rs b/src/api/client_server/unversioned.rs new file mode 100644 index 00000000..8a5c3d25 --- /dev/null +++ b/src/api/client_server/unversioned.rs @@ -0,0 +1,31 @@ +use std::{collections::BTreeMap, iter::FromIterator}; + +use ruma::api::client::discovery::get_supported_versions; + +use crate::{Result, Ruma}; + +/// # `GET /_matrix/client/versions` +/// +/// Get the versions of the specification and unstable features supported by this server. +/// +/// - Versions take the form MAJOR.MINOR.PATCH +/// - Only the latest PATCH release will be reported for each MAJOR.MINOR value +/// - Unstable features are namespaced and may include version information in their name +/// +/// Note: Unstable features are used while developing new features. 
Clients should avoid using +/// unstable features in their stable releases +pub async fn get_supported_versions_route( + _body: Ruma, +) -> Result { + let resp = get_supported_versions::Response { + versions: vec![ + "r0.5.0".to_owned(), + "r0.6.0".to_owned(), + "v1.1".to_owned(), + "v1.2".to_owned(), + ], + unstable_features: BTreeMap::from_iter([("org.matrix.e2e_cross_signing".to_owned(), true)]), + }; + + Ok(resp) +} diff --git a/src/api/client_server/user_directory.rs b/src/api/client_server/user_directory.rs new file mode 100644 index 00000000..518daa5e --- /dev/null +++ b/src/api/client_server/user_directory.rs @@ -0,0 +1,94 @@ +use crate::{services, Result, Ruma}; +use ruma::{ + api::client::user_directory::search_users, + events::{ + room::join_rules::{JoinRule, RoomJoinRulesEventContent}, + StateEventType, + }, +}; + +/// # `POST /_matrix/client/r0/user_directory/search` +/// +/// Searches all known users for a match. +/// +/// - Hides any local users that aren't in any public rooms (i.e. those that have the join rule set to public) +/// and don't share a room with the sender +pub async fn search_users_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + let limit = u64::from(body.limit) as usize; + + let mut users = services().users.iter().filter_map(|user_id| { + // Filter out buggy users (they should not exist, but you never know...) + let user_id = user_id.ok()?; + + let user = search_users::v3::User { + user_id: user_id.clone(), + display_name: services().users.displayname(&user_id).ok()?, + avatar_url: services().users.avatar_url(&user_id).ok()?, + }; + + let user_id_matches = user + .user_id + .to_string() + .to_lowercase() + .contains(&body.search_term.to_lowercase()); + + let user_displayname_matches = user + .display_name + .as_ref() + .filter(|name| { + name.to_lowercase() + .contains(&body.search_term.to_lowercase()) + }) + .is_some(); + + if !user_id_matches && !user_displayname_matches { + return None; + } + + let user_is_in_public_rooms = services() + .rooms + .state_cache + .rooms_joined(&user_id) + .filter_map(|r| r.ok()) + .any(|room| { + services() + .rooms + .state_accessor + .room_state_get(&room, &StateEventType::RoomJoinRules, "") + .map_or(false, |event| { + event.map_or(false, |event| { + serde_json::from_str(event.content.get()) + .map_or(false, |r: RoomJoinRulesEventContent| { + r.join_rule == JoinRule::Public + }) + }) + }) + }); + + if user_is_in_public_rooms { + return Some(user); + } + + let user_is_in_shared_rooms = services() + .rooms + .user + .get_shared_rooms(vec![sender_user.clone(), user_id]) + .ok()? + .next() + .is_some(); + + if user_is_in_shared_rooms { + return Some(user); + } + + None + }); + + let results = users.by_ref().take(limit).collect(); + let limited = users.next().is_some(); + + Ok(search_users::v3::Response { results, limited }) +} diff --git a/src/api/client_server/voip.rs b/src/api/client_server/voip.rs new file mode 100644 index 00000000..6b1ee400 --- /dev/null +++ b/src/api/client_server/voip.rs @@ -0,0 +1,47 @@ +use crate::{services, Result, Ruma}; +use hmac::{Hmac, Mac}; +use ruma::{api::client::voip::get_turn_server_info, SecondsSinceUnixEpoch}; +use sha1::Sha1; +use std::time::{Duration, SystemTime}; + +type HmacSha1 = Hmac; + +/// # `GET /_matrix/client/r0/voip/turnServer` +/// +/// TODO: Returns information about the recommended turn server. 
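The route below implements the ephemeral-credential scheme coturn calls `use-auth-secret` (the "TURN REST API" convention): the username is `<unix expiry>:<matrix user id>` and the password is the base64-encoded HMAC-SHA1 of that username under the shared secret. A standalone sketch using the same `hmac`, `sha1`, and `base64` calls as the handler; the secret and user ID are placeholders:

```rust
use hmac::{Hmac, Mac};
use sha1::Sha1;
use std::time::{Duration, SystemTime, UNIX_EPOCH};

type HmacSha1 = Hmac<Sha1>;

fn main() {
    let turn_secret = b"shared-secret-from-coturn-config"; // placeholder
    let user = "@alice:example.org"; // placeholder

    // Credentials are valid until this unix timestamp (here: 24h from now).
    let expiry = (SystemTime::now() + Duration::from_secs(86_400))
        .duration_since(UNIX_EPOCH)
        .unwrap()
        .as_secs();
    let username = format!("{expiry}:{user}");

    // password = base64(HMAC-SHA1(secret, username))
    let mut mac = HmacSha1::new_from_slice(turn_secret).expect("HMAC accepts any key length");
    mac.update(username.as_bytes());
    let password = base64::encode_config(mac.finalize().into_bytes(), base64::STANDARD);

    println!("username: {username}\npassword: {password}");
}
```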
+pub async fn turn_server_route( + body: Ruma, +) -> Result { + let sender_user = body.sender_user.as_ref().expect("user is authenticated"); + + let turn_secret = services().globals.turn_secret().clone(); + + let (username, password) = if !turn_secret.is_empty() { + let expiry = SecondsSinceUnixEpoch::from_system_time( + SystemTime::now() + Duration::from_secs(services().globals.turn_ttl()), + ) + .expect("time is valid"); + + let username: String = format!("{}:{}", expiry.get(), sender_user); + + let mut mac = HmacSha1::new_from_slice(turn_secret.as_bytes()) + .expect("HMAC can take key of any size"); + mac.update(username.as_bytes()); + + let password: String = base64::encode_config(mac.finalize().into_bytes(), base64::STANDARD); + + (username, password) + } else { + ( + services().globals.turn_username().clone(), + services().globals.turn_password().clone(), + ) + }; + + Ok(get_turn_server_info::v3::Response { + username, + password, + uris: services().globals.turn_uris().to_vec(), + ttl: Duration::from_secs(services().globals.turn_ttl()), + }) +} diff --git a/src/api/mod.rs b/src/api/mod.rs new file mode 100644 index 00000000..0d2cd664 --- /dev/null +++ b/src/api/mod.rs @@ -0,0 +1,4 @@ +pub mod appservice_server; +pub mod client_server; +pub mod ruma_wrapper; +pub mod server_server; diff --git a/src/api/ruma_wrapper/axum.rs b/src/api/ruma_wrapper/axum.rs new file mode 100644 index 00000000..d056f3f2 --- /dev/null +++ b/src/api/ruma_wrapper/axum.rs @@ -0,0 +1,364 @@ +use std::{collections::BTreeMap, iter::FromIterator, str}; + +use axum::{ + async_trait, + body::{Full, HttpBody}, + extract::{ + rejection::TypedHeaderRejectionReason, FromRequest, Path, RequestParts, TypedHeader, + }, + headers::{ + authorization::{Bearer, Credentials}, + Authorization, + }, + response::{IntoResponse, Response}, + BoxError, +}; +use bytes::{BufMut, Bytes, BytesMut}; +use http::StatusCode; +use ruma::{ + api::{client::error::ErrorKind, AuthScheme, IncomingRequest, OutgoingResponse}, + CanonicalJsonValue, OwnedDeviceId, OwnedServerName, UserId, +}; +use serde::Deserialize; +use tracing::{debug, error, warn}; + +use super::{Ruma, RumaResponse}; +use crate::{services, Error, Result}; + +#[async_trait] +impl FromRequest for Ruma +where + T: IncomingRequest, + B: HttpBody + Send, + B::Data: Send, + B::Error: Into, +{ + type Rejection = Error; + + async fn from_request(req: &mut RequestParts) -> Result { + #[derive(Deserialize)] + struct QueryParams { + access_token: Option, + user_id: Option, + } + + let metadata = T::METADATA; + let auth_header = Option::>>::from_request(req).await?; + let path_params = Path::>::from_request(req).await?; + + let query = req.uri().query().unwrap_or_default(); + let query_params: QueryParams = match ruma::serde::urlencoded::from_str(query) { + Ok(params) => params, + Err(e) => { + error!(%query, "Failed to deserialize query parameters: {}", e); + return Err(Error::BadRequest( + ErrorKind::Unknown, + "Failed to read query parameters", + )); + } + }; + + let token = match &auth_header { + Some(TypedHeader(Authorization(bearer))) => Some(bearer.token()), + None => query_params.access_token.as_deref(), + }; + + let mut body = Bytes::from_request(req) + .await + .map_err(|_| Error::BadRequest(ErrorKind::MissingToken, "Missing token."))?; + + let mut json_body = serde_json::from_slice::(&body).ok(); + + let appservices = services().appservice.all().unwrap(); + let appservice_registration = appservices.iter().find(|(_id, registration)| { + registration + .get("as_token") + 
.and_then(|as_token| as_token.as_str()) + .map_or(false, |as_token| token == Some(as_token)) + }); + + let (sender_user, sender_device, sender_servername, from_appservice) = + if let Some((_id, registration)) = appservice_registration { + match metadata.authentication { + AuthScheme::AccessToken => { + let user_id = query_params.user_id.map_or_else( + || { + UserId::parse_with_server_name( + registration + .get("sender_localpart") + .unwrap() + .as_str() + .unwrap(), + services().globals.server_name(), + ) + .unwrap() + }, + |s| UserId::parse(s).unwrap(), + ); + + if !services().users.exists(&user_id).unwrap() { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "User does not exist.", + )); + } + + // TODO: Check if appservice is allowed to be that user + (Some(user_id), None, None, true) + } + AuthScheme::ServerSignatures => (None, None, None, true), + AuthScheme::None => (None, None, None, true), + } + } else { + match metadata.authentication { + AuthScheme::AccessToken => { + let token = match token { + Some(token) => token, + _ => { + return Err(Error::BadRequest( + ErrorKind::MissingToken, + "Missing access token.", + )) + } + }; + + match services().users.find_from_token(token).unwrap() { + None => { + return Err(Error::BadRequest( + ErrorKind::UnknownToken { soft_logout: false }, + "Unknown access token.", + )) + } + Some((user_id, device_id)) => ( + Some(user_id), + Some(OwnedDeviceId::from(device_id)), + None, + false, + ), + } + } + AuthScheme::ServerSignatures => { + let TypedHeader(Authorization(x_matrix)) = + TypedHeader::>::from_request(req) + .await + .map_err(|e| { + warn!("Missing or invalid Authorization header: {}", e); + + let msg = match e.reason() { + TypedHeaderRejectionReason::Missing => { + "Missing Authorization header." + } + TypedHeaderRejectionReason::Error(_) => { + "Invalid X-Matrix signatures." 
+ } + _ => "Unknown header-related error", + }; + + Error::BadRequest(ErrorKind::Forbidden, msg) + })?; + + let origin_signatures = BTreeMap::from_iter([( + x_matrix.key.clone(), + CanonicalJsonValue::String(x_matrix.sig), + )]); + + let signatures = BTreeMap::from_iter([( + x_matrix.origin.as_str().to_owned(), + CanonicalJsonValue::Object(origin_signatures), + )]); + + let mut request_map = BTreeMap::from_iter([ + ( + "method".to_owned(), + CanonicalJsonValue::String(req.method().to_string()), + ), + ( + "uri".to_owned(), + CanonicalJsonValue::String(req.uri().to_string()), + ), + ( + "origin".to_owned(), + CanonicalJsonValue::String(x_matrix.origin.as_str().to_owned()), + ), + ( + "destination".to_owned(), + CanonicalJsonValue::String( + services().globals.server_name().as_str().to_owned(), + ), + ), + ( + "signatures".to_owned(), + CanonicalJsonValue::Object(signatures), + ), + ]); + + if let Some(json_body) = &json_body { + request_map.insert("content".to_owned(), json_body.clone()); + }; + + let keys_result = services() + .rooms + .event_handler + .fetch_signing_keys(&x_matrix.origin, vec![x_matrix.key.to_owned()]) + .await; + + let keys = match keys_result { + Ok(b) => b, + Err(e) => { + warn!("Failed to fetch signing keys: {}", e); + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Failed to fetch signing keys.", + )); + } + }; + + let pub_key_map = + BTreeMap::from_iter([(x_matrix.origin.as_str().to_owned(), keys)]); + + match ruma::signatures::verify_json(&pub_key_map, &request_map) { + Ok(()) => (None, None, Some(x_matrix.origin), false), + Err(e) => { + warn!( + "Failed to verify json request from {}: {}\n{:?}", + x_matrix.origin, e, request_map + ); + + if req.uri().to_string().contains('@') { + warn!( + "Request uri contained '@' character. 
Make sure your \ + reverse proxy gives Conduit the raw uri (apache: use \ + nocanon)" + ); + } + + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Failed to verify X-Matrix signatures.", + )); + } + } + } + AuthScheme::None => (None, None, None, false), + } + }; + + let mut http_request = http::Request::builder().uri(req.uri()).method(req.method()); + *http_request.headers_mut().unwrap() = req.headers().clone(); + + if let Some(CanonicalJsonValue::Object(json_body)) = &mut json_body { + let user_id = sender_user.clone().unwrap_or_else(|| { + UserId::parse_with_server_name("", services().globals.server_name()) + .expect("we know this is valid") + }); + + let uiaa_request = json_body + .get("auth") + .and_then(|auth| auth.as_object()) + .and_then(|auth| auth.get("session")) + .and_then(|session| session.as_str()) + .and_then(|session| { + services().uiaa.get_uiaa_request( + &user_id, + &sender_device.clone().unwrap_or_else(|| "".into()), + session, + ) + }); + + if let Some(CanonicalJsonValue::Object(initial_request)) = uiaa_request { + for (key, value) in initial_request { + json_body.entry(key).or_insert(value); + } + } + + let mut buf = BytesMut::new().writer(); + serde_json::to_writer(&mut buf, json_body).expect("value serialization can't fail"); + body = buf.into_inner().freeze(); + } + + let http_request = http_request.body(&*body).unwrap(); + + debug!("{:?}", http_request); + + let body = T::try_from_http_request(http_request, &path_params).map_err(|e| { + warn!("{:?}\n{:?}", e, json_body); + Error::BadRequest(ErrorKind::BadJson, "Failed to deserialize request.") + })?; + + Ok(Ruma { + body, + sender_user, + sender_device, + sender_servername, + from_appservice, + json_body, + }) + } +} + +struct XMatrix { + origin: OwnedServerName, + key: String, // KeyName? + sig: String, +} + +impl Credentials for XMatrix { + const SCHEME: &'static str = "X-Matrix"; + + fn decode(value: &http::HeaderValue) -> Option { + debug_assert!( + value.as_bytes().starts_with(b"X-Matrix "), + "HeaderValue to decode should start with \"X-Matrix ..\", received = {:?}", + value, + ); + + let parameters = str::from_utf8(&value.as_bytes()["X-Matrix ".len()..]) + .ok()? + .trim_start(); + + let mut origin = None; + let mut key = None; + let mut sig = None; + + for entry in parameters.split_terminator(',') { + let (name, value) = entry.split_once('=')?; + + // It's not at all clear why some fields are quoted and others not in the spec, + // let's simply accept either form for every field. 
+ let value = value + .strip_prefix('"') + .and_then(|rest| rest.strip_suffix('"')) + .unwrap_or(value); + + // FIXME: Catch multiple fields of the same name + match name { + "origin" => origin = Some(value.try_into().ok()?), + "key" => key = Some(value.to_owned()), + "sig" => sig = Some(value.to_owned()), + _ => debug!( + "Unexpected field `{}` in X-Matrix Authorization header", + name + ), + } + } + + Some(Self { + origin: origin?, + key: key?, + sig: sig?, + }) + } + + fn encode(&self) -> http::HeaderValue { + todo!() + } +} + +impl IntoResponse for RumaResponse { + fn into_response(self) -> Response { + match self.0.try_into_http_response::() { + Ok(res) => res.map(BytesMut::freeze).map(Full::new).into_response(), + Err(_) => StatusCode::INTERNAL_SERVER_ERROR.into_response(), + } + } +} diff --git a/src/api/ruma_wrapper/mod.rs b/src/api/ruma_wrapper/mod.rs new file mode 100644 index 00000000..ac4c825a --- /dev/null +++ b/src/api/ruma_wrapper/mod.rs @@ -0,0 +1,43 @@ +use crate::Error; +use ruma::{ + api::client::uiaa::UiaaResponse, CanonicalJsonValue, OwnedDeviceId, OwnedServerName, + OwnedUserId, +}; +use std::ops::Deref; + +#[cfg(feature = "conduit_bin")] +mod axum; + +/// Extractor for Ruma request structs +pub struct Ruma { + pub body: T, + pub sender_user: Option, + pub sender_device: Option, + pub sender_servername: Option, + // This is None when body is not a valid string + pub json_body: Option, + pub from_appservice: bool, +} + +impl Deref for Ruma { + type Target = T; + + fn deref(&self) -> &Self::Target { + &self.body + } +} + +#[derive(Clone)] +pub struct RumaResponse(pub T); + +impl From for RumaResponse { + fn from(t: T) -> Self { + Self(t) + } +} + +impl From for RumaResponse { + fn from(t: Error) -> Self { + t.to_response() + } +} diff --git a/src/api/server_server.rs b/src/api/server_server.rs new file mode 100644 index 00000000..320e396b --- /dev/null +++ b/src/api/server_server.rs @@ -0,0 +1,1819 @@ +use crate::{ + api::client_server::{self, claim_keys_helper, get_keys_helper}, + service::pdu::{gen_event_id_canonical_json, PduBuilder}, + services, utils, Error, PduEvent, Result, Ruma, +}; +use axum::{response::IntoResponse, Json}; +use get_profile_information::v1::ProfileField; +use http::header::{HeaderValue, AUTHORIZATION}; + +use ruma::{ + api::{ + client::error::{Error as RumaError, ErrorKind}, + federation::{ + authorization::get_event_authorization, + device::get_devices::{self, v1::UserDevice}, + directory::{get_public_rooms, get_public_rooms_filtered}, + discovery::{get_server_keys, get_server_version, ServerSigningKeys, VerifyKey}, + event::{get_event, get_missing_events, get_room_state, get_room_state_ids}, + keys::{claim_keys, get_keys}, + membership::{ + create_invite, + create_join_event::{self, RoomState}, + prepare_join_event, + }, + query::{get_profile_information, get_room_information}, + transactions::{ + edu::{DeviceListUpdateContent, DirectDeviceContent, Edu, SigningKeyUpdateContent}, + send_transaction_message, + }, + }, + EndpointError, IncomingResponse, MatrixVersion, OutgoingRequest, OutgoingResponse, + SendAccessToken, + }, + directory::{IncomingFilter, IncomingRoomNetwork}, + events::{ + receipt::{ReceiptEvent, ReceiptEventContent, ReceiptType}, + room::{ + join_rules::{JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, RoomMemberEventContent}, + }, + RoomEventType, StateEventType, + }, + serde::{Base64, JsonObject, Raw}, + to_device::DeviceIdOrAllDevices, + CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, OwnedEventId, 
OwnedRoomId, + OwnedServerName, OwnedServerSigningKeyId, OwnedUserId, RoomId, ServerName, +}; +use serde_json::value::{to_raw_value, RawValue as RawJsonValue}; +use std::{ + collections::BTreeMap, + fmt::Debug, + mem, + net::{IpAddr, SocketAddr}, + sync::{Arc, RwLock}, + time::{Duration, Instant, SystemTime}, +}; + +use tracing::{error, info, warn}; + +/// Wraps either an literal IP address plus port, or a hostname plus complement +/// (colon-plus-port if it was specified). +/// +/// Note: A `FedDest::Named` might contain an IP address in string form if there +/// was no port specified to construct a SocketAddr with. +/// +/// # Examples: +/// ```rust +/// # use conduit::api::server_server::FedDest; +/// # fn main() -> Result<(), std::net::AddrParseError> { +/// FedDest::Literal("198.51.100.3:8448".parse()?); +/// FedDest::Literal("[2001:db8::4:5]:443".parse()?); +/// FedDest::Named("matrix.example.org".to_owned(), "".to_owned()); +/// FedDest::Named("matrix.example.org".to_owned(), ":8448".to_owned()); +/// FedDest::Named("198.51.100.5".to_owned(), "".to_owned()); +/// # Ok(()) +/// # } +/// ``` +#[derive(Clone, Debug, PartialEq, Eq)] +pub enum FedDest { + Literal(SocketAddr), + Named(String, String), +} + +impl FedDest { + fn into_https_string(self) -> String { + match self { + Self::Literal(addr) => format!("https://{}", addr), + Self::Named(host, port) => format!("https://{}{}", host, port), + } + } + + fn into_uri_string(self) -> String { + match self { + Self::Literal(addr) => addr.to_string(), + Self::Named(host, ref port) => host + port, + } + } + + fn hostname(&self) -> String { + match &self { + Self::Literal(addr) => addr.ip().to_string(), + Self::Named(host, _) => host.clone(), + } + } + + fn port(&self) -> Option { + match &self { + Self::Literal(addr) => Some(addr.port()), + Self::Named(_, port) => port[1..].parse().ok(), + } + } +} + +#[tracing::instrument(skip(request))] +pub(crate) async fn send_request( + destination: &ServerName, + request: T, +) -> Result +where + T: Debug, +{ + if !services().globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let mut write_destination_to_cache = false; + + let cached_result = services() + .globals + .actual_destination_cache + .read() + .unwrap() + .get(destination) + .cloned(); + + let (actual_destination, host) = if let Some(result) = cached_result { + result + } else { + write_destination_to_cache = true; + + let result = find_actual_destination(destination).await; + + (result.0, result.1.into_uri_string()) + }; + + let actual_destination_str = actual_destination.clone().into_https_string(); + + let mut http_request = request + .try_into_http_request::>( + &actual_destination_str, + SendAccessToken::IfRequired(""), + &[MatrixVersion::V1_0], + ) + .map_err(|e| { + warn!( + "Failed to find destination {}: {}", + actual_destination_str, e + ); + Error::BadServerResponse("Invalid destination") + })?; + + let mut request_map = serde_json::Map::new(); + + if !http_request.body().is_empty() { + request_map.insert( + "content".to_owned(), + serde_json::from_slice(http_request.body()) + .expect("body is valid json, we just created it"), + ); + }; + + request_map.insert("method".to_owned(), T::METADATA.method.to_string().into()); + request_map.insert( + "uri".to_owned(), + http_request + .uri() + .path_and_query() + .expect("all requests have a path") + .to_string() + .into(), + ); + request_map.insert( + "origin".to_owned(), + services().globals.server_name().as_str().into(), + ); + 
request_map.insert("destination".to_owned(), destination.as_str().into()); + + let mut request_json = + serde_json::from_value(request_map.into()).expect("valid JSON is valid BTreeMap"); + + ruma::signatures::sign_json( + services().globals.server_name().as_str(), + services().globals.keypair(), + &mut request_json, + ) + .expect("our request json is what ruma expects"); + + let request_json: serde_json::Map = + serde_json::from_slice(&serde_json::to_vec(&request_json).unwrap()).unwrap(); + + let signatures = request_json["signatures"] + .as_object() + .unwrap() + .values() + .map(|v| { + v.as_object() + .unwrap() + .iter() + .map(|(k, v)| (k, v.as_str().unwrap())) + }); + + for signature_server in signatures { + for s in signature_server { + http_request.headers_mut().insert( + AUTHORIZATION, + HeaderValue::from_str(&format!( + "X-Matrix origin={},key=\"{}\",sig=\"{}\"", + services().globals.server_name(), + s.0, + s.1 + )) + .unwrap(), + ); + } + } + + let reqwest_request = reqwest::Request::try_from(http_request) + .expect("all http requests are valid reqwest requests"); + + let url = reqwest_request.url().clone(); + + let response = services() + .globals + .federation_client() + .execute(reqwest_request) + .await; + + match response { + Ok(mut response) => { + // reqwest::Response -> http::Response conversion + let status = response.status(); + let mut http_response_builder = http::Response::builder() + .status(status) + .version(response.version()); + mem::swap( + response.headers_mut(), + http_response_builder + .headers_mut() + .expect("http::response::Builder is usable"), + ); + + let body = response.bytes().await.unwrap_or_else(|e| { + warn!("server error {}", e); + Vec::new().into() + }); // TODO: handle timeout + + if status != 200 { + warn!( + "{} {}: {}", + url, + status, + String::from_utf8_lossy(&body) + .lines() + .collect::>() + .join(" ") + ); + } + + let http_response = http_response_builder + .body(body) + .expect("reqwest body is valid http body"); + + if status == 200 { + let response = T::IncomingResponse::try_from_http_response(http_response); + if response.is_ok() && write_destination_to_cache { + services() + .globals + .actual_destination_cache + .write() + .unwrap() + .insert( + OwnedServerName::from(destination), + (actual_destination, host), + ); + } + + response.map_err(|e| { + warn!( + "Invalid 200 response from {} on: {} {}", + &destination, url, e + ); + Error::BadServerResponse("Server returned bad 200 response.") + }) + } else { + Err(Error::FederationError( + destination.to_owned(), + RumaError::try_from_http_response(http_response).map_err(|e| { + warn!( + "Invalid {} response from {} on: {} {}", + status, &destination, url, e + ); + Error::BadServerResponse("Server returned bad error response.") + })?, + )) + } + } + Err(e) => { + warn!( + "Could not send request to {} at {}: {}", + destination, actual_destination_str, e + ); + Err(e.into()) + } + } +} + +fn get_ip_with_port(destination_str: &str) -> Option { + if let Ok(destination) = destination_str.parse::() { + Some(FedDest::Literal(destination)) + } else if let Ok(ip_addr) = destination_str.parse::() { + Some(FedDest::Literal(SocketAddr::new(ip_addr, 8448))) + } else { + None + } +} + +fn add_port_to_hostname(destination_str: &str) -> FedDest { + let (host, port) = match destination_str.find(':') { + None => (destination_str, ":8448"), + Some(pos) => destination_str.split_at(pos), + }; + FedDest::Named(host.to_owned(), port.to_owned()) +} + +/// Returns: actual_destination, host header +/// 
Implemented according to the specification at https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names +/// Numbers in comments below refer to bullet points in linked section of specification +async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDest) { + let destination_str = destination.as_str().to_owned(); + let mut hostname = destination_str.clone(); + let actual_destination = match get_ip_with_port(&destination_str) { + Some(host_port) => { + // 1: IP literal with provided or default port + host_port + } + None => { + if let Some(pos) = destination_str.find(':') { + // 2: Hostname with included port + let (host, port) = destination_str.split_at(pos); + FedDest::Named(host.to_owned(), port.to_owned()) + } else { + match request_well_known(destination.as_str()).await { + // 3: A .well-known file is available + Some(delegated_hostname) => { + hostname = add_port_to_hostname(&delegated_hostname).into_uri_string(); + match get_ip_with_port(&delegated_hostname) { + Some(host_and_port) => host_and_port, // 3.1: IP literal in .well-known file + None => { + if let Some(pos) = delegated_hostname.find(':') { + // 3.2: Hostname with port in .well-known file + let (host, port) = delegated_hostname.split_at(pos); + FedDest::Named(host.to_owned(), port.to_owned()) + } else { + // Delegated hostname has no port in this branch + if let Some(hostname_override) = + query_srv_record(&delegated_hostname).await + { + // 3.3: SRV lookup successful + let force_port = hostname_override.port(); + + if let Ok(override_ip) = services() + .globals + .dns_resolver() + .lookup_ip(hostname_override.hostname()) + .await + { + services() + .globals + .tls_name_override + .write() + .unwrap() + .insert( + delegated_hostname.clone(), + ( + override_ip.iter().collect(), + force_port.unwrap_or(8448), + ), + ); + } else { + warn!("Using SRV record, but could not resolve to IP"); + } + + if let Some(port) = force_port { + FedDest::Named(delegated_hostname, format!(":{}", port)) + } else { + add_port_to_hostname(&delegated_hostname) + } + } else { + // 3.4: No SRV records, just use the hostname from .well-known + add_port_to_hostname(&delegated_hostname) + } + } + } + } + } + // 4: No .well-known or an error occured + None => { + match query_srv_record(&destination_str).await { + // 4: SRV record found + Some(hostname_override) => { + let force_port = hostname_override.port(); + + if let Ok(override_ip) = services() + .globals + .dns_resolver() + .lookup_ip(hostname_override.hostname()) + .await + { + services() + .globals + .tls_name_override + .write() + .unwrap() + .insert( + hostname.clone(), + ( + override_ip.iter().collect(), + force_port.unwrap_or(8448), + ), + ); + } else { + warn!("Using SRV record, but could not resolve to IP"); + } + + if let Some(port) = force_port { + FedDest::Named(hostname.clone(), format!(":{}", port)) + } else { + add_port_to_hostname(&hostname) + } + } + // 5: No SRV record found + None => add_port_to_hostname(&destination_str), + } + } + } + } + } + }; + + // Can't use get_ip_with_port here because we don't want to add a port + // to an IP address if it wasn't specified + let hostname = if let Ok(addr) = hostname.parse::() { + FedDest::Literal(addr) + } else if let Ok(addr) = hostname.parse::() { + FedDest::Named(addr.to_string(), ":8448".to_owned()) + } else if let Some(pos) = hostname.find(':') { + let (host, port) = hostname.split_at(pos); + FedDest::Named(host.to_owned(), port.to_owned()) + } else { + FedDest::Named(hostname, ":8448".to_owned()) 
+ }; + (actual_destination, hostname) +} + +async fn query_srv_record(hostname: &'_ str) -> Option { + if let Ok(Some(host_port)) = services() + .globals + .dns_resolver() + .srv_lookup(format!("_matrix._tcp.{}", hostname)) + .await + .map(|srv| { + srv.iter().next().map(|result| { + FedDest::Named( + result.target().to_string().trim_end_matches('.').to_owned(), + format!(":{}", result.port()), + ) + }) + }) + { + Some(host_port) + } else { + None + } +} + +async fn request_well_known(destination: &str) -> Option { + let body: serde_json::Value = serde_json::from_str( + &services() + .globals + .default_client() + .get(&format!( + "https://{}/.well-known/matrix/server", + destination + )) + .send() + .await + .ok()? + .text() + .await + .ok()?, + ) + .ok()?; + Some(body.get("m.server")?.as_str()?.to_owned()) +} + +/// # `GET /_matrix/federation/v1/version` +/// +/// Get version information on this server. +pub async fn get_server_version_route( + _body: Ruma, +) -> Result { + if !services().globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + Ok(get_server_version::v1::Response { + server: Some(get_server_version::v1::Server { + name: Some("Conduit".to_owned()), + version: Some(env!("CARGO_PKG_VERSION").to_owned()), + }), + }) +} + +/// # `GET /_matrix/key/v2/server` +/// +/// Gets the public signing keys of this server. +/// +/// - Matrix does not support invalidating public keys, so the key returned by this will be valid +/// forever. +// Response type for this endpoint is Json because we need to calculate a signature for the response +pub async fn get_server_keys_route() -> Result { + if !services().globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let mut verify_keys: BTreeMap = BTreeMap::new(); + verify_keys.insert( + format!("ed25519:{}", services().globals.keypair().version()) + .try_into() + .expect("found invalid server signing keys in DB"), + VerifyKey { + key: Base64::new(services().globals.keypair().public_key().to_vec()), + }, + ); + let mut response = serde_json::from_slice( + get_server_keys::v2::Response { + server_key: Raw::new(&ServerSigningKeys { + server_name: services().globals.server_name().to_owned(), + verify_keys, + old_verify_keys: BTreeMap::new(), + signatures: BTreeMap::new(), + valid_until_ts: MilliSecondsSinceUnixEpoch::from_system_time( + SystemTime::now() + Duration::from_secs(86400 * 7), + ) + .expect("time is valid"), + }) + .expect("static conversion, no errors"), + } + .try_into_http_response::>() + .unwrap() + .body(), + ) + .unwrap(); + + ruma::signatures::sign_json( + services().globals.server_name().as_str(), + services().globals.keypair(), + &mut response, + ) + .unwrap(); + + Ok(Json(response)) +} + +/// # `GET /_matrix/key/v2/server/{keyId}` +/// +/// Gets the public signing keys of this server. +/// +/// - Matrix does not support invalidating public keys, so the key returned by this will be valid +/// forever. +pub async fn get_server_keys_deprecated_route() -> impl IntoResponse { + get_server_keys_route().await +} + +/// # `POST /_matrix/federation/v1/publicRooms` +/// +/// Lists the public rooms on this server. 
+pub async fn get_public_rooms_filtered_route( + body: Ruma, +) -> Result { + if !services().globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let response = client_server::get_public_rooms_filtered_helper( + None, + body.limit, + body.since.as_deref(), + &body.filter, + &body.room_network, + ) + .await?; + + Ok(get_public_rooms_filtered::v1::Response { + chunk: response.chunk, + prev_batch: response.prev_batch, + next_batch: response.next_batch, + total_room_count_estimate: response.total_room_count_estimate, + }) +} + +/// # `GET /_matrix/federation/v1/publicRooms` +/// +/// Lists the public rooms on this server. +pub async fn get_public_rooms_route( + body: Ruma, +) -> Result { + if !services().globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let response = client_server::get_public_rooms_filtered_helper( + None, + body.limit, + body.since.as_deref(), + &IncomingFilter::default(), + &IncomingRoomNetwork::Matrix, + ) + .await?; + + Ok(get_public_rooms::v1::Response { + chunk: response.chunk, + prev_batch: response.prev_batch, + next_batch: response.next_batch, + total_room_count_estimate: response.total_room_count_estimate, + }) +} + +/// # `PUT /_matrix/federation/v1/send/{txnId}` +/// +/// Push EDUs and PDUs to this server. +pub async fn send_transaction_message_route( + body: Ruma, +) -> Result { + if !services().globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + let mut resolved_map = BTreeMap::new(); + + let pub_key_map = RwLock::new(BTreeMap::new()); + + // This is all the auth_events that have been recursively fetched so they don't have to be + // deserialized over and over again. + // TODO: make this persist across requests but not in a DB Tree (in globals?) + // TODO: This could potentially also be some sort of trie (suffix tree) like structure so + // that once an auth event is known it would know (using indexes maybe) all of the auth + // events that it references. + // let mut auth_cache = EventMap::new(); + + for pdu in &body.pdus { + // We do not add the event_id field to the pdu here because of signature and hashes checks + let (event_id, value) = match gen_event_id_canonical_json(pdu) { + Ok(t) => t, + Err(_) => { + // Event could not be converted to canonical json + continue; + } + }; + + // 0. 
Check the server is in the room + let room_id: OwnedRoomId = match value + .get("room_id") + .and_then(|id| RoomId::parse(id.as_str()?).ok()) + { + Some(id) => id, + None => { + // Event is invalid + resolved_map.insert( + event_id, + Err(Error::bad_database("Event needs a valid RoomId.")), + ); + continue; + } + }; + + services() + .rooms + .event_handler + .acl_check(sender_servername, &room_id)?; + + let mutex = Arc::clone( + services() + .globals + .roomid_mutex_federation + .write() + .unwrap() + .entry(room_id.to_owned()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; + let start_time = Instant::now(); + resolved_map.insert( + event_id.clone(), + services() + .rooms + .event_handler + .handle_incoming_pdu( + sender_servername, + &event_id, + &room_id, + value, + true, + &pub_key_map, + ) + .await + .map(|_| ()), + ); + drop(mutex_lock); + + let elapsed = start_time.elapsed(); + warn!( + "Handling transaction of event {} took {}m{}s", + event_id, + elapsed.as_secs() / 60, + elapsed.as_secs() % 60 + ); + } + + for pdu in &resolved_map { + if let Err(e) = pdu.1 { + if matches!(e, Error::BadRequest(ErrorKind::NotFound, _)) { + warn!("Incoming PDU failed {:?}", pdu); + } + } + } + + for edu in body + .edus + .iter() + .filter_map(|edu| serde_json::from_str::(edu.json().get()).ok()) + { + match edu { + Edu::Presence(_) => {} + Edu::Receipt(receipt) => { + for (room_id, room_updates) in receipt.receipts { + for (user_id, user_updates) in room_updates.read { + if let Some((event_id, _)) = user_updates + .event_ids + .iter() + .filter_map(|id| { + services() + .rooms + .timeline + .get_pdu_count(id) + .ok() + .flatten() + .map(|r| (id, r)) + }) + .max_by_key(|(_, count)| *count) + { + let mut user_receipts = BTreeMap::new(); + user_receipts.insert(user_id.clone(), user_updates.data); + + let mut receipts = BTreeMap::new(); + receipts.insert(ReceiptType::Read, user_receipts); + + let mut receipt_content = BTreeMap::new(); + receipt_content.insert(event_id.to_owned(), receipts); + + let event = ReceiptEvent { + content: ReceiptEventContent(receipt_content), + room_id: room_id.clone(), + }; + services() + .rooms + .edus + .read_receipt + .readreceipt_update(&user_id, &room_id, event)?; + } else { + // TODO fetch missing events + info!("No known event ids in read receipt: {:?}", user_updates); + } + } + } + } + Edu::Typing(typing) => { + if services() + .rooms + .state_cache + .is_joined(&typing.user_id, &typing.room_id)? + { + if typing.typing { + services().rooms.edus.typing.typing_add( + &typing.user_id, + &typing.room_id, + 3000 + utils::millis_since_unix_epoch(), + )?; + } else { + services() + .rooms + .edus + .typing + .typing_remove(&typing.user_id, &typing.room_id)?; + } + } + } + Edu::DeviceListUpdate(DeviceListUpdateContent { user_id, .. }) => { + services().users.mark_device_key_update(&user_id)?; + } + Edu::DirectToDevice(DirectDeviceContent { + sender, + ev_type, + message_id, + messages, + }) => { + // Check if this is a new transaction id + if services() + .transaction_ids + .existing_txnid(&sender, None, &message_id)? 
+ .is_some() + { + continue; + } + + for (target_user_id, map) in &messages { + for (target_device_id_maybe, event) in map { + match target_device_id_maybe { + DeviceIdOrAllDevices::DeviceId(target_device_id) => { + services().users.add_to_device_event( + &sender, + target_user_id, + target_device_id, + &ev_type.to_string(), + event.deserialize_as().map_err(|e| { + warn!("To-Device event is invalid: {event:?} {e}"); + Error::BadRequest( + ErrorKind::InvalidParam, + "Event is invalid", + ) + })?, + )? + } + + DeviceIdOrAllDevices::AllDevices => { + for target_device_id in + services().users.all_device_ids(target_user_id) + { + services().users.add_to_device_event( + &sender, + target_user_id, + &target_device_id?, + &ev_type.to_string(), + event.deserialize_as().map_err(|_| { + Error::BadRequest( + ErrorKind::InvalidParam, + "Event is invalid", + ) + })?, + )?; + } + } + } + } + } + + // Save transaction id with empty data + services() + .transaction_ids + .add_txnid(&sender, None, &message_id, &[])?; + } + Edu::SigningKeyUpdate(SigningKeyUpdateContent { + user_id, + master_key, + self_signing_key, + }) => { + if user_id.server_name() != sender_servername { + continue; + } + if let Some(master_key) = master_key { + services().users.add_cross_signing_keys( + &user_id, + &master_key, + &self_signing_key, + &None, + )?; + } + } + Edu::_Custom(_) => {} + } + } + + Ok(send_transaction_message::v1::Response { + pdus: resolved_map + .into_iter() + .map(|(e, r)| (e, r.map_err(|e| e.to_string()))) + .collect(), + }) +} + +/// # `GET /_matrix/federation/v1/event/{eventId}` +/// +/// Retrieves a single event from the server. +/// +/// - Only works if a user of this server is currently invited or joined the room +pub async fn get_event_route( + body: Ruma, +) -> Result { + if !services().globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + let event = services() + .rooms + .timeline + .get_pdu_json(&body.event_id)? + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; + + let room_id_str = event + .get("room_id") + .and_then(|val| val.as_str()) + .ok_or_else(|| Error::bad_database("Invalid event in database"))?; + + let room_id = <&RoomId>::try_from(room_id_str) + .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; + + if !services() + .rooms + .state_cache + .server_in_room(sender_servername, room_id)? + { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Server is not in room", + )); + } + + Ok(get_event::v1::Response { + origin: services().globals.server_name().to_owned(), + origin_server_ts: MilliSecondsSinceUnixEpoch::now(), + pdu: PduEvent::convert_to_outgoing_federation_event(event), + }) +} + +/// # `POST /_matrix/federation/v1/get_missing_events/{roomId}` +/// +/// Retrieves events that the sender is missing. +pub async fn get_missing_events_route( + body: Ruma, +) -> Result { + if !services().globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + if !services() + .rooms + .state_cache + .server_in_room(sender_servername, &body.room_id)? 
+ { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Server is not in room", + )); + } + + services() + .rooms + .event_handler + .acl_check(sender_servername, &body.room_id)?; + + let mut queued_events = body.latest_events.clone(); + let mut events = Vec::new(); + + let mut i = 0; + while i < queued_events.len() && events.len() < u64::from(body.limit) as usize { + if let Some(pdu) = services().rooms.timeline.get_pdu_json(&queued_events[i])? { + let room_id_str = pdu + .get("room_id") + .and_then(|val| val.as_str()) + .ok_or_else(|| Error::bad_database("Invalid event in database"))?; + + let event_room_id = <&RoomId>::try_from(room_id_str) + .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; + + if event_room_id != body.room_id { + warn!( + "Evil event detected: Event {} found while searching in room {}", + queued_events[i], body.room_id + ); + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Evil event detected", + )); + } + + if body.earliest_events.contains(&queued_events[i]) { + i += 1; + continue; + } + queued_events.extend_from_slice( + &serde_json::from_value::>( + serde_json::to_value(pdu.get("prev_events").cloned().ok_or_else(|| { + Error::bad_database("Event in db has no prev_events field.") + })?) + .expect("canonical json is valid json value"), + ) + .map_err(|_| Error::bad_database("Invalid prev_events content in pdu in db."))?, + ); + events.push(PduEvent::convert_to_outgoing_federation_event(pdu)); + } + i += 1; + } + + Ok(get_missing_events::v1::Response { events }) +} + +/// # `GET /_matrix/federation/v1/event_auth/{roomId}/{eventId}` +/// +/// Retrieves the auth chain for a given event. +/// +/// - This does not include the event itself +pub async fn get_event_authorization_route( + body: Ruma, +) -> Result { + if !services().globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + if !services() + .rooms + .state_cache + .server_in_room(sender_servername, &body.room_id)? + { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Server is not in room.", + )); + } + + services() + .rooms + .event_handler + .acl_check(sender_servername, &body.room_id)?; + + let event = services() + .rooms + .timeline + .get_pdu_json(&body.event_id)? + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; + + let room_id_str = event + .get("room_id") + .and_then(|val| val.as_str()) + .ok_or_else(|| Error::bad_database("Invalid event in database"))?; + + let room_id = <&RoomId>::try_from(room_id_str) + .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; + + let auth_chain_ids = services() + .rooms + .auth_chain + .get_auth_chain(room_id, vec![Arc::from(&*body.event_id)]) + .await?; + + Ok(get_event_authorization::v1::Response { + auth_chain: auth_chain_ids + .filter_map(|id| services().rooms.timeline.get_pdu_json(&id).ok()?) + .map(PduEvent::convert_to_outgoing_federation_event) + .collect(), + }) +} + +/// # `GET /_matrix/federation/v1/state/{roomId}` +/// +/// Retrieves the current state of the room. 
+pub async fn get_room_state_route( + body: Ruma, +) -> Result { + if !services().globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + if !services() + .rooms + .state_cache + .server_in_room(sender_servername, &body.room_id)? + { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Server is not in room.", + )); + } + + services() + .rooms + .event_handler + .acl_check(sender_servername, &body.room_id)?; + + let shortstatehash = services() + .rooms + .state_accessor + .pdu_shortstatehash(&body.event_id)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Pdu state not found.", + ))?; + + let pdus = services() + .rooms + .state_accessor + .state_full_ids(shortstatehash) + .await? + .into_values() + .map(|id| { + PduEvent::convert_to_outgoing_federation_event( + services() + .rooms + .timeline + .get_pdu_json(&id) + .unwrap() + .unwrap(), + ) + }) + .collect(); + + let auth_chain_ids = services() + .rooms + .auth_chain + .get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)]) + .await?; + + Ok(get_room_state::v1::Response { + auth_chain: auth_chain_ids + .filter_map( + |id| match services().rooms.timeline.get_pdu_json(&id).ok()? { + Some(json) => Some(PduEvent::convert_to_outgoing_federation_event(json)), + None => { + error!("Could not find event json for {id} in db."); + None + } + }, + ) + .collect(), + pdus, + }) +} + +/// # `GET /_matrix/federation/v1/state_ids/{roomId}` +/// +/// Retrieves the current state of the room. +pub async fn get_room_state_ids_route( + body: Ruma, +) -> Result { + if !services().globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + if !services() + .rooms + .state_cache + .server_in_room(sender_servername, &body.room_id)? + { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Server is not in room.", + )); + } + + services() + .rooms + .event_handler + .acl_check(sender_servername, &body.room_id)?; + + let shortstatehash = services() + .rooms + .state_accessor + .pdu_shortstatehash(&body.event_id)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Pdu state not found.", + ))?; + + let pdu_ids = services() + .rooms + .state_accessor + .state_full_ids(shortstatehash) + .await? + .into_values() + .map(|id| (*id).to_owned()) + .collect(); + + let auth_chain_ids = services() + .rooms + .auth_chain + .get_auth_chain(&body.room_id, vec![Arc::from(&*body.event_id)]) + .await?; + + Ok(get_room_state_ids::v1::Response { + auth_chain_ids: auth_chain_ids.map(|id| (*id).to_owned()).collect(), + pdu_ids, + }) +} + +/// # `GET /_matrix/federation/v1/make_join/{roomId}/{userId}` +/// +/// Creates a join template. +pub async fn create_join_event_template_route( + body: Ruma, +) -> Result { + if !services().globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + if !services().rooms.metadata.exists(&body.room_id)? 
{ + return Err(Error::BadRequest( + ErrorKind::NotFound, + "Room is unknown to this server.", + )); + } + + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + services() + .rooms + .event_handler + .acl_check(sender_servername, &body.room_id)?; + + let mutex_state = Arc::clone( + services() + .globals + .roomid_mutex_state + .write() + .unwrap() + .entry(body.room_id.to_owned()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + // TODO: Conduit does not implement restricted join rules yet, we always reject + let join_rules_event = services().rooms.state_accessor.room_state_get( + &body.room_id, + &StateEventType::RoomJoinRules, + "", + )?; + + let join_rules_event_content: Option = join_rules_event + .as_ref() + .map(|join_rules_event| { + serde_json::from_str(join_rules_event.content.get()).map_err(|e| { + warn!("Invalid join rules event: {}", e); + Error::bad_database("Invalid join rules event in db.") + }) + }) + .transpose()?; + + if let Some(join_rules_event_content) = join_rules_event_content { + if matches!( + join_rules_event_content.join_rule, + JoinRule::Restricted { .. } | JoinRule::KnockRestricted { .. } + ) { + return Err(Error::BadRequest( + ErrorKind::UnableToAuthorizeJoin, + "Conduit does not support restricted rooms yet.", + )); + } + } + + let room_version_id = services().rooms.state.get_room_version(&body.room_id)?; + if !body.ver.contains(&room_version_id) { + return Err(Error::BadRequest( + ErrorKind::IncompatibleRoomVersion { + room_version: room_version_id, + }, + "Room version not supported.", + )); + } + + let content = to_raw_value(&RoomMemberEventContent { + avatar_url: None, + blurhash: None, + displayname: None, + is_direct: None, + membership: MembershipState::Join, + third_party_invite: None, + reason: None, + join_authorized_via_users_server: None, + }) + .expect("member event is valid value"); + + let (_pdu, mut pdu_json) = services().rooms.timeline.create_hash_and_sign_event( + PduBuilder { + event_type: RoomEventType::RoomMember, + content, + unsigned: None, + state_key: Some(body.user_id.to_string()), + redacts: None, + }, + &body.user_id, + &body.room_id, + &state_lock, + )?; + + drop(state_lock); + + pdu_json.remove("event_id"); + + Ok(prepare_join_event::v1::Response { + room_version: Some(room_version_id), + event: to_raw_value(&pdu_json).expect("CanonicalJson can be serialized to JSON"), + }) +} + +async fn create_join_event( + sender_servername: &ServerName, + room_id: &RoomId, + pdu: &RawJsonValue, +) -> Result { + if !services().globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + if !services().rooms.metadata.exists(room_id)? 
{ + return Err(Error::BadRequest( + ErrorKind::NotFound, + "Room is unknown to this server.", + )); + } + + services() + .rooms + .event_handler + .acl_check(sender_servername, room_id)?; + + // TODO: Conduit does not implement restricted join rules yet, we always reject + let join_rules_event = services().rooms.state_accessor.room_state_get( + room_id, + &StateEventType::RoomJoinRules, + "", + )?; + + let join_rules_event_content: Option = join_rules_event + .as_ref() + .map(|join_rules_event| { + serde_json::from_str(join_rules_event.content.get()).map_err(|e| { + warn!("Invalid join rules event: {}", e); + Error::bad_database("Invalid join rules event in db.") + }) + }) + .transpose()?; + + if let Some(join_rules_event_content) = join_rules_event_content { + if matches!( + join_rules_event_content.join_rule, + JoinRule::Restricted { .. } | JoinRule::KnockRestricted { .. } + ) { + return Err(Error::BadRequest( + ErrorKind::UnableToAuthorizeJoin, + "Conduit does not support restricted rooms yet.", + )); + } + } + + // We need to return the state prior to joining, let's keep a reference to that here + let shortstatehash = services() + .rooms + .state + .get_room_shortstatehash(room_id)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Pdu state not found.", + ))?; + + let pub_key_map = RwLock::new(BTreeMap::new()); + // let mut auth_cache = EventMap::new(); + + // We do not add the event_id field to the pdu here because of signature and hashes checks + let (event_id, value) = match gen_event_id_canonical_json(pdu) { + Ok(t) => t, + Err(_) => { + // Event could not be converted to canonical json + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Could not convert event to canonical json.", + )); + } + }; + + let origin: OwnedServerName = serde_json::from_value( + serde_json::to_value(value.get("origin").ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Event needs an origin field.", + ))?) + .expect("CanonicalJson is valid json value"), + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; + + let mutex = Arc::clone( + services() + .globals + .roomid_mutex_federation + .write() + .unwrap() + .entry(room_id.to_owned()) + .or_default(), + ); + let mutex_lock = mutex.lock().await; + let pdu_id: Vec = services() + .rooms + .event_handler + .handle_incoming_pdu(&origin, &event_id, room_id, value, true, &pub_key_map) + .await? + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Could not accept incoming PDU as timeline event.", + ))?; + drop(mutex_lock); + + let state_ids = services() + .rooms + .state_accessor + .state_full_ids(shortstatehash) + .await?; + let auth_chain_ids = services() + .rooms + .auth_chain + .get_auth_chain(room_id, state_ids.values().cloned().collect()) + .await?; + + let servers = services() + .rooms + .state_cache + .room_servers(room_id) + .filter_map(|r| r.ok()) + .filter(|server| &**server != services().globals.server_name()); + + services().sending.send_pdu(servers, &pdu_id)?; + + Ok(RoomState { + auth_chain: auth_chain_ids + .filter_map(|id| services().rooms.timeline.get_pdu_json(&id).ok().flatten()) + .map(PduEvent::convert_to_outgoing_federation_event) + .collect(), + state: state_ids + .iter() + .filter_map(|(_, id)| services().rooms.timeline.get_pdu_json(id).ok().flatten()) + .map(PduEvent::convert_to_outgoing_federation_event) + .collect(), + }) +} + +/// # `PUT /_matrix/federation/v1/send_join/{roomId}/{eventId}` +/// +/// Submits a signed join event. 
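The `send_join` routes below are the receiving side of the federation join handshake: a remote server first calls `GET /_matrix/federation/v1/make_join/{roomId}/{userId}` (handled by `create_join_event_template_route` above) to obtain an event template, fills it in and signs it, and then submits the signed event here. A heavily simplified sketch of the joining server's side, assuming `reqwest` and `serde_json`; it omits the mandatory X-Matrix request signing and event hashing/signing, and all names in it are illustrative:

```rust
use serde_json::Value;

/// Illustrative only: the joining server's half of the handshake, without
/// X-Matrix authentication or event signing (both are required in practice).
async fn join_remote_room(
    client: &reqwest::Client,
    resident_server: &str, // e.g. "https://matrix.example.org:8448"
    room_id: &str,
    user_id: &str,
) -> Result<Value, reqwest::Error> {
    // 1. Ask the resident server for a join-event template.
    let template: Value = client
        .get(format!(
            "{resident_server}/_matrix/federation/v1/make_join/{room_id}/{user_id}?ver=6"
        ))
        .send()
        .await?
        .json()
        .await?;

    // 2. A real implementation now fills in origin and origin_server_ts, adds
    //    content hashes and signs `template["event"]` with its signing key.
    let signed_event = template["event"].clone();
    let event_id = "$placeholder"; // would be the reference hash of the signed event

    // 3. Submit the signed event; the response carries the room state and
    //    auth chain the joining server needs to participate in the room.
    client
        .put(format!(
            "{resident_server}/_matrix/federation/v2/send_join/{room_id}/{event_id}"
        ))
        .json(&signed_event)
        .send()
        .await?
        .json()
        .await
}
```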
+pub async fn create_join_event_v1_route( + body: Ruma, +) -> Result { + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + let room_state = create_join_event(sender_servername, &body.room_id, &body.pdu).await?; + + Ok(create_join_event::v1::Response { room_state }) +} + +/// # `PUT /_matrix/federation/v2/send_join/{roomId}/{eventId}` +/// +/// Submits a signed join event. +pub async fn create_join_event_v2_route( + body: Ruma, +) -> Result { + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + let room_state = create_join_event(sender_servername, &body.room_id, &body.pdu).await?; + + Ok(create_join_event::v2::Response { room_state }) +} + +/// # `PUT /_matrix/federation/v2/invite/{roomId}/{eventId}` +/// +/// Invites a remote user to a room. +pub async fn create_invite_route( + body: Ruma, +) -> Result { + if !services().globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + services() + .rooms + .event_handler + .acl_check(sender_servername, &body.room_id)?; + + if !services() + .globals + .supported_room_versions() + .contains(&body.room_version) + { + return Err(Error::BadRequest( + ErrorKind::IncompatibleRoomVersion { + room_version: body.room_version.clone(), + }, + "Server does not support this room version.", + )); + } + + let mut signed_event = utils::to_canonical_object(&body.event) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invite event is invalid."))?; + + ruma::signatures::hash_and_sign_event( + services().globals.server_name().as_str(), + services().globals.keypair(), + &mut signed_event, + &body.room_version, + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Failed to sign event."))?; + + // Generate event id + let event_id = EventId::parse(format!( + "${}", + ruma::signatures::reference_hash(&signed_event, &body.room_version) + .expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are valid event ids"); + + // Add event_id back + signed_event.insert( + "event_id".to_owned(), + CanonicalJsonValue::String(event_id.to_string()), + ); + + let sender: OwnedUserId = serde_json::from_value( + signed_event + .get("sender") + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Event had no sender field.", + ))? + .clone() + .into(), + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "sender is not a user id."))?; + + let invited_user: Box<_> = serde_json::from_value( + signed_event + .get("state_key") + .ok_or(Error::BadRequest( + ErrorKind::InvalidParam, + "Event had no state_key field.", + ))? 
+ .clone() + .into(), + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "state_key is not a user id."))?; + + let mut invite_state = body.invite_room_state.clone(); + + let mut event: JsonObject = serde_json::from_str(body.event.get()) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event bytes."))?; + + event.insert("event_id".to_owned(), "$dummy".into()); + + let pdu: PduEvent = serde_json::from_value(event.into()).map_err(|e| { + warn!("Invalid invite event: {}", e); + Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event.") + })?; + + invite_state.push(pdu.to_stripped_state_event()); + + // If the room already exists, the remote server will notify us about the join via /send + if !services().rooms.metadata.exists(&pdu.room_id)? { + services().rooms.state_cache.update_membership( + &body.room_id, + &invited_user, + MembershipState::Invite, + &sender, + Some(invite_state), + true, + )?; + } + + Ok(create_invite::v2::Response { + event: PduEvent::convert_to_outgoing_federation_event(signed_event), + }) +} + +/// # `GET /_matrix/federation/v1/user/devices/{userId}` +/// +/// Gets information on all devices of the user. +pub async fn get_devices_route( + body: Ruma, +) -> Result { + if !services().globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let sender_servername = body + .sender_servername + .as_ref() + .expect("server is authenticated"); + + Ok(get_devices::v1::Response { + user_id: body.user_id.clone(), + stream_id: services() + .users + .get_devicelist_version(&body.user_id)? + .unwrap_or(0) + .try_into() + .expect("version will not grow that large"), + devices: services() + .users + .all_devices_metadata(&body.user_id) + .filter_map(|r| r.ok()) + .filter_map(|metadata| { + Some(UserDevice { + keys: services() + .users + .get_device_keys(&body.user_id, &metadata.device_id) + .ok()??, + device_id: metadata.device_id, + device_display_name: metadata.display_name, + }) + }) + .collect(), + master_key: services() + .users + .get_master_key(&body.user_id, &|u| u.server_name() == sender_servername)?, + self_signing_key: services() + .users + .get_self_signing_key(&body.user_id, &|u| u.server_name() == sender_servername)?, + }) +} + +/// # `GET /_matrix/federation/v1/query/directory` +/// +/// Resolve a room alias to a room id. +pub async fn get_room_information_route( + body: Ruma, +) -> Result { + if !services().globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let room_id = services() + .rooms + .alias + .resolve_local_alias(&body.room_alias)? + .ok_or(Error::BadRequest( + ErrorKind::NotFound, + "Room alias not found.", + ))?; + + Ok(get_room_information::v1::Response { + room_id, + servers: vec![services().globals.server_name().to_owned()], + }) +} + +/// # `GET /_matrix/federation/v1/query/profile` +/// +/// Gets information on a profile. +pub async fn get_profile_information_route( + body: Ruma, +) -> Result { + if !services().globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let mut displayname = None; + let mut avatar_url = None; + let mut blurhash = None; + + match &body.field { + Some(ProfileField::DisplayName) => { + displayname = services().users.displayname(&body.user_id)? + } + Some(ProfileField::AvatarUrl) => { + avatar_url = services().users.avatar_url(&body.user_id)?; + blurhash = services().users.blurhash(&body.user_id)? 
+ } + // TODO: what to do with custom + Some(_) => {} + None => { + displayname = services().users.displayname(&body.user_id)?; + avatar_url = services().users.avatar_url(&body.user_id)?; + blurhash = services().users.blurhash(&body.user_id)?; + } + } + + Ok(get_profile_information::v1::Response { + blurhash, + displayname, + avatar_url, + }) +} + +/// # `POST /_matrix/federation/v1/user/keys/query` +/// +/// Gets devices and identity keys for the given users. +pub async fn get_keys_route(body: Ruma) -> Result { + if !services().globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let result = get_keys_helper(None, &body.device_keys, |u| { + Some(u.server_name()) == body.sender_servername.as_deref() + }) + .await?; + + Ok(get_keys::v1::Response { + device_keys: result.device_keys, + master_keys: result.master_keys, + self_signing_keys: result.self_signing_keys, + }) +} + +/// # `POST /_matrix/federation/v1/user/keys/claim` +/// +/// Claims one-time keys. +pub async fn claim_keys_route( + body: Ruma, +) -> Result { + if !services().globals.allow_federation() { + return Err(Error::bad_config("Federation is disabled.")); + } + + let result = claim_keys_helper(&body.one_time_keys).await?; + + Ok(claim_keys::v1::Response { + one_time_keys: result.one_time_keys, + }) +} + +#[cfg(test)] +mod tests { + use super::{add_port_to_hostname, get_ip_with_port, FedDest}; + + #[test] + fn ips_get_default_ports() { + assert_eq!( + get_ip_with_port("1.1.1.1"), + Some(FedDest::Literal("1.1.1.1:8448".parse().unwrap())) + ); + assert_eq!( + get_ip_with_port("dead:beef::"), + Some(FedDest::Literal("[dead:beef::]:8448".parse().unwrap())) + ); + } + + #[test] + fn ips_keep_custom_ports() { + assert_eq!( + get_ip_with_port("1.1.1.1:1234"), + Some(FedDest::Literal("1.1.1.1:1234".parse().unwrap())) + ); + assert_eq!( + get_ip_with_port("[dead::beef]:8933"), + Some(FedDest::Literal("[dead::beef]:8933".parse().unwrap())) + ); + } + + #[test] + fn hostnames_get_default_ports() { + assert_eq!( + add_port_to_hostname("example.com"), + FedDest::Named(String::from("example.com"), String::from(":8448")) + ) + } + + #[test] + fn hostnames_keep_custom_ports() { + assert_eq!( + add_port_to_hostname("example.com:1337"), + FedDest::Named(String::from("example.com"), String::from(":1337")) + ) + } +} diff --git a/src/client_server/account.rs b/src/client_server/account.rs deleted file mode 100644 index 4b3ad0d4..00000000 --- a/src/client_server/account.rs +++ /dev/null @@ -1,765 +0,0 @@ -use std::{ - collections::BTreeMap, - convert::{TryFrom, TryInto}, - sync::Arc, -}; - -use super::{DEVICE_ID_LENGTH, SESSION_ID_LENGTH, TOKEN_LENGTH}; -use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma}; -use ruma::{ - api::client::{ - error::ErrorKind, - r0::{ - account::{ - change_password, deactivate, get_username_availability, register, whoami, - ThirdPartyIdRemovalStatus, - }, - contact::get_contacts, - uiaa::{AuthFlow, AuthType, UiaaInfo}, - }, - }, - events::{ - room::{ - canonical_alias::RoomCanonicalAliasEventContent, - create::RoomCreateEventContent, - guest_access::{GuestAccess, RoomGuestAccessEventContent}, - history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, - join_rules::{JoinRule, RoomJoinRulesEventContent}, - member::{MembershipState, RoomMemberEventContent}, - message::RoomMessageEventContent, - name::RoomNameEventContent, - power_levels::RoomPowerLevelsEventContent, - topic::RoomTopicEventContent, - }, - EventType, - }, - 
identifiers::RoomName, - push, RoomAliasId, RoomId, RoomVersionId, UserId, -}; -use serde_json::value::to_raw_value; -use tracing::info; - -use register::RegistrationKind; -#[cfg(feature = "conduit_bin")] -use rocket::{get, post}; - -const GUEST_NAME_LENGTH: usize = 10; - -/// # `GET /_matrix/client/r0/register/available` -/// -/// Checks if a username is valid and available on this server. -/// -/// Conditions for returning true: -/// - The user id is not historical -/// - The server name of the user id matches this server -/// - No user or appservice on this server already claimed this username -/// -/// Note: This will not reserve the username, so the username might become invalid when trying to register -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/register/available", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn get_register_available_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - // Validate user id - let user_id = UserId::parse_with_server_name(body.username.clone(), db.globals.server_name()) - .ok() - .filter(|user_id| { - !user_id.is_historical() && user_id.server_name() == db.globals.server_name() - }) - .ok_or(Error::BadRequest( - ErrorKind::InvalidUsername, - "Username is invalid.", - ))?; - - // Check if username is creative enough - if db.users.exists(&user_id)? { - return Err(Error::BadRequest( - ErrorKind::UserInUse, - "Desired user ID is already taken.", - )); - } - - // TODO add check for appservice namespaces - - // If no if check is true we have an username that's available to be used. - Ok(get_username_availability::Response { available: true }.into()) -} - -/// # `POST /_matrix/client/r0/register` -/// -/// Register an account on this homeserver. -/// -/// You can use [`GET /_matrix/client/r0/register/available`](fn.get_register_available_route.html) -/// to check if the user id is valid and available. -/// -/// - Only works if registration is enabled -/// - If type is guest: ignores all parameters except initial_device_display_name -/// - If sender is not appservice: Requires UIAA (but we only use a dummy stage) -/// - If type is not guest and no username is given: Always fails after UIAA check -/// - Creates a new account and populates it with default account data -/// - If `inhibit_login` is false: Creates a device and returns device id and access_token -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/register", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn register_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - if !db.globals.allow_registration() && !body.from_appservice { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Registration has been disabled.", - )); - } - - let is_guest = body.kind == RegistrationKind::Guest; - - let mut missing_username = false; - - // Validate user id - let user_id = UserId::parse_with_server_name( - if is_guest { - utils::random_string(GUEST_NAME_LENGTH) - } else { - body.username.clone().unwrap_or_else(|| { - // If the user didn't send a username field, that means the client is just trying - // the get an UIAA error to see available flows - missing_username = true; - // Just give the user a random name. He won't be able to register with it anyway. 
- utils::random_string(GUEST_NAME_LENGTH) - }) - } - .to_lowercase(), - db.globals.server_name(), - ) - .ok() - .filter(|user_id| !user_id.is_historical() && user_id.server_name() == db.globals.server_name()) - .ok_or(Error::BadRequest( - ErrorKind::InvalidUsername, - "Username is invalid.", - ))?; - - // Check if username is creative enough - if db.users.exists(&user_id)? { - return Err(Error::BadRequest( - ErrorKind::UserInUse, - "Desired user ID is already taken.", - )); - } - - // UIAA - let mut uiaainfo = UiaaInfo { - flows: vec![AuthFlow { - stages: vec![AuthType::Dummy], - }], - completed: Vec::new(), - params: Default::default(), - session: None, - auth_error: None, - }; - - if !body.from_appservice { - if let Some(auth) = &body.auth { - let (worked, uiaainfo) = db.uiaa.try_auth( - &UserId::parse_with_server_name("", db.globals.server_name()) - .expect("we know this is valid"), - "".into(), - auth, - &uiaainfo, - &db.users, - &db.globals, - )?; - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } - // Success! - } else if let Some(json) = body.json_body { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa.create( - &UserId::parse_with_server_name("", db.globals.server_name()) - .expect("we know this is valid"), - "".into(), - &uiaainfo, - &json, - )?; - return Err(Error::Uiaa(uiaainfo)); - } else { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); - } - } - - if missing_username { - return Err(Error::BadRequest( - ErrorKind::MissingParam, - "Missing username field.", - )); - } - - let password = if is_guest { - None - } else { - body.password.as_deref() - }; - - // Create user - db.users.create(&user_id, password)?; - - // Default to pretty displayname - let displayname = format!("{} ⚡️", user_id.localpart()); - db.users - .set_displayname(&user_id, Some(displayname.clone()))?; - - // Initial account data - db.account_data.update( - None, - &user_id, - EventType::PushRules, - &ruma::events::push_rules::PushRulesEvent { - content: ruma::events::push_rules::PushRulesEventContent { - global: push::Ruleset::server_default(&user_id), - }, - }, - &db.globals, - )?; - - // Inhibit login does not work for guests - if !is_guest && body.inhibit_login { - return Ok(register::Response { - access_token: None, - user_id, - device_id: None, - } - .into()); - } - - // Generate new device id if the user didn't specify one - let device_id = if is_guest { - None - } else { - body.device_id.clone() - } - .unwrap_or_else(|| utils::random_string(DEVICE_ID_LENGTH).into()); - - // Generate new token for the device - let token = utils::random_string(TOKEN_LENGTH); - - // Create device for this account - db.users.create_device( - &user_id, - &device_id, - &token, - body.initial_device_display_name.clone(), - )?; - - // If this is the first user on this server, create the admin room - if db.users.count()? 
== 1 { - // Create a user for the server - let conduit_user = UserId::parse_with_server_name("conduit", db.globals.server_name()) - .expect("@conduit:server_name is valid"); - - db.users.create(&conduit_user, None)?; - - let room_id = RoomId::new(db.globals.server_name()); - - db.rooms.get_or_create_shortroomid(&room_id, &db.globals)?; - - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.clone()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - let mut content = RoomCreateEventContent::new(conduit_user.clone()); - content.federate = true; - content.predecessor = None; - content.room_version = RoomVersionId::Version6; - - // 1. The room create event - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomCreate, - content: to_raw_value(&content).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &db, - &state_lock, - )?; - - // 2. Make conduit bot join - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Join, - displayname: None, - avatar_url: None, - is_direct: None, - third_party_invite: None, - blurhash: None, - reason: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(conduit_user.to_string()), - redacts: None, - }, - &conduit_user, - &room_id, - &db, - &state_lock, - )?; - - // 3. Power levels - let mut users = BTreeMap::new(); - users.insert(conduit_user.clone(), 100.into()); - users.insert(user_id.clone(), 100.into()); - - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomPowerLevels, - content: to_raw_value(&RoomPowerLevelsEventContent { - users, - ..Default::default() - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &db, - &state_lock, - )?; - - // 4.1 Join Rules - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomJoinRules, - content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite)) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &db, - &state_lock, - )?; - - // 4.2 History Visibility - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomHistoryVisibility, - content: to_raw_value(&RoomHistoryVisibilityEventContent::new( - HistoryVisibility::Shared, - )) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &db, - &state_lock, - )?; - - // 4.3 Guest Access - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomGuestAccess, - content: to_raw_value(&RoomGuestAccessEventContent::new(GuestAccess::Forbidden)) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &db, - &state_lock, - )?; - - // 6. 
Events implied by name and topic - let room_name = - Box::::try_from(format!("{} Admin Room", db.globals.server_name())) - .expect("Room name is valid"); - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomName, - content: to_raw_value(&RoomNameEventContent::new(Some(room_name))) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &db, - &state_lock, - )?; - - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomTopic, - content: to_raw_value(&RoomTopicEventContent { - topic: format!("Manage {}", db.globals.server_name()), - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &db, - &state_lock, - )?; - - // Room alias - let alias: RoomAliasId = format!("#admins:{}", db.globals.server_name()) - .try_into() - .expect("#admins:server_name is a valid alias name"); - - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomCanonicalAlias, - content: to_raw_value(&RoomCanonicalAliasEventContent { - alias: Some(alias.clone()), - alt_aliases: Vec::new(), - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some("".to_owned()), - redacts: None, - }, - &conduit_user, - &room_id, - &db, - &state_lock, - )?; - - db.rooms.set_alias(&alias, Some(&room_id), &db.globals)?; - - // Invite and join the real user - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Invite, - displayname: None, - avatar_url: None, - is_direct: None, - third_party_invite: None, - blurhash: None, - reason: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - &conduit_user, - &room_id, - &db, - &state_lock, - )?; - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: to_raw_value(&RoomMemberEventContent { - membership: MembershipState::Join, - displayname: Some(displayname), - avatar_url: None, - is_direct: None, - third_party_invite: None, - blurhash: None, - reason: None, - }) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - &user_id, - &room_id, - &db, - &state_lock, - )?; - - // Send welcome message - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMessage, - content: to_raw_value(&RoomMessageEventContent::text_html( - "## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`".to_owned(), - "

<h2>Thank you for trying out Conduit!</h2>\n<p>Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Website: https://conduit.rs<br>Git and Documentation: https://gitlab.com/famedly/conduit<br>Report issues: https://gitlab.com/famedly/conduit/-/issues</p>\n</blockquote>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>Conduit room (Ask questions and get notified on updates):<br><code>/join #conduit:fachschaften.org</code></p>\n<p>Conduit lounge (Off-topic, only Conduit users are allowed to join)<br><code>/join #conduit-lounge:conduit.rs</code></p>
\n".to_owned(), - )) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: None, - redacts: None, - }, - &conduit_user, - &room_id, - &db, - &state_lock, - )?; - } - - info!("{} registered on this server", user_id); - - db.flush()?; - - Ok(register::Response { - access_token: Some(token), - user_id, - device_id: Some(device_id), - } - .into()) -} - -/// # `POST /_matrix/client/r0/account/password` -/// -/// Changes the password of this account. -/// -/// - Requires UIAA to verify user password -/// - Changes the password of the sender user -/// - The password hash is calculated using argon2 with 32 character salt, the plain password is -/// not saved -/// -/// If logout_devices is true it does the following for each device except the sender device: -/// - Invalidates access token -/// - Deletes device metadata (device id, device display name, last seen ip, last seen ts) -/// - Forgets to-device events -/// - Triggers device list updates -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/account/password", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn change_password_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); - - let mut uiaainfo = UiaaInfo { - flows: vec![AuthFlow { - stages: vec![AuthType::Password], - }], - completed: Vec::new(), - params: Default::default(), - session: None, - auth_error: None, - }; - - if let Some(auth) = &body.auth { - let (worked, uiaainfo) = db.uiaa.try_auth( - sender_user, - sender_device, - auth, - &uiaainfo, - &db.users, - &db.globals, - )?; - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } - // Success! - } else if let Some(json) = body.json_body { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa - .create(sender_user, sender_device, &uiaainfo, &json)?; - return Err(Error::Uiaa(uiaainfo)); - } else { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); - } - - db.users - .set_password(sender_user, Some(&body.new_password))?; - - if body.logout_devices { - // Logout all devices except the current one - for id in db - .users - .all_device_ids(sender_user) - .filter_map(|id| id.ok()) - .filter(|id| id != sender_device) - { - db.users.remove_device(sender_user, &id)?; - } - } - - db.flush()?; - - Ok(change_password::Response {}.into()) -} - -/// # `GET _matrix/client/r0/account/whoami` -/// -/// Get user_id of the sender user. -/// -/// Note: Also works for Application Services -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/account/whoami", data = "") -)] -#[tracing::instrument(skip(body))] -pub async fn whoami_route(body: Ruma) -> ConduitResult { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - Ok(whoami::Response { - user_id: sender_user.clone(), - } - .into()) -} - -/// # `POST /_matrix/client/r0/account/deactivate` -/// -/// Deactivate sender user account. 
-/// -/// - Leaves all rooms and rejects all invitations -/// - Invalidates all access tokens -/// - Deletes all device metadata (device id, device display name, last seen ip, last seen ts) -/// - Forgets all to-device events -/// - Triggers device list updates -/// - Removes ability to log in again -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/account/deactivate", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn deactivate_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); - - let mut uiaainfo = UiaaInfo { - flows: vec![AuthFlow { - stages: vec![AuthType::Password], - }], - completed: Vec::new(), - params: Default::default(), - session: None, - auth_error: None, - }; - - if let Some(auth) = &body.auth { - let (worked, uiaainfo) = db.uiaa.try_auth( - sender_user, - sender_device, - auth, - &uiaainfo, - &db.users, - &db.globals, - )?; - if !worked { - return Err(Error::Uiaa(uiaainfo)); - } - // Success! - } else if let Some(json) = body.json_body { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - db.uiaa - .create(sender_user, sender_device, &uiaainfo, &json)?; - return Err(Error::Uiaa(uiaainfo)); - } else { - return Err(Error::BadRequest(ErrorKind::NotJson, "Not json.")); - } - - // Leave all joined rooms and reject all invitations - // TODO: work over federation invites - let all_rooms = db - .rooms - .rooms_joined(sender_user) - .chain( - db.rooms - .rooms_invited(sender_user) - .map(|t| t.map(|(r, _)| r)), - ) - .collect::>(); - - for room_id in all_rooms { - let room_id = room_id?; - let event = RoomMemberEventContent { - membership: MembershipState::Leave, - displayname: None, - avatar_url: None, - is_direct: None, - third_party_invite: None, - blurhash: None, - reason: None, - }; - - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.clone()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(sender_user.to_string()), - redacts: None, - }, - sender_user, - &room_id, - &db, - &state_lock, - )?; - } - - // Remove devices and mark account as deactivated - db.users.deactivate_account(sender_user)?; - - info!("{} deactivated their account", sender_user); - - db.flush()?; - - Ok(deactivate::Response { - id_server_unbind_result: ThirdPartyIdRemovalStatus::NoSupport, - } - .into()) -} - -/// # `GET _matrix/client/r0/account/3pid` -/// -/// Get a list of third party identifiers associated with this account. 
-/// -/// - Currently always returns empty list -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/account/3pid", data = "") -)] -pub async fn third_party_route( - body: Ruma, -) -> ConduitResult { - let _sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - Ok(get_contacts::Response::new(Vec::new()).into()) -} diff --git a/src/client_server/backup.rs b/src/client_server/backup.rs deleted file mode 100644 index bbb86726..00000000 --- a/src/client_server/backup.rs +++ /dev/null @@ -1,432 +0,0 @@ -use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; -use ruma::api::client::{ - error::ErrorKind, - r0::backup::{ - add_backup_key_session, add_backup_key_sessions, add_backup_keys, create_backup, - delete_backup, delete_backup_key_session, delete_backup_key_sessions, delete_backup_keys, - get_backup, get_backup_key_session, get_backup_key_sessions, get_backup_keys, - get_latest_backup, update_backup, - }, -}; - -#[cfg(feature = "conduit_bin")] -use rocket::{delete, get, post, put}; - -/// # `POST /_matrix/client/r0/room_keys/version` -/// -/// Creates a new backup. -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/unstable/room_keys/version", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn create_backup_route( - db: DatabaseGuard, - body: Ruma, -) -> ConduitResult { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let version = db - .key_backups - .create_backup(sender_user, &body.algorithm, &db.globals)?; - - db.flush()?; - - Ok(create_backup::Response { version }.into()) -} - -/// # `PUT /_matrix/client/r0/room_keys/version/{version}` -/// -/// Update information about an existing backup. Only `auth_data` can be modified. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/unstable/room_keys/version/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn update_backup_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - db.key_backups - .update_backup(sender_user, &body.version, &body.algorithm, &db.globals)?; - - db.flush()?; - - Ok(update_backup::Response {}.into()) -} - -/// # `GET /_matrix/client/r0/room_keys/version` -/// -/// Get information about the latest backup version. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/unstable/room_keys/version", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn get_latest_backup_route( - db: DatabaseGuard, - body: Ruma, -) -> ConduitResult { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - let (version, algorithm) = - db.key_backups - .get_latest_backup(sender_user)? - .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Key backup does not exist.", - ))?; - - Ok(get_latest_backup::Response { - algorithm, - count: (db.key_backups.count_keys(sender_user, &version)? as u32).into(), - etag: db.key_backups.get_etag(sender_user, &version)?, - version, - } - .into()) -} - -/// # `GET /_matrix/client/r0/room_keys/version` -/// -/// Get information about an existing backup. 
-#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/unstable/room_keys/version/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn get_backup_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let algorithm = db - .key_backups - .get_backup(sender_user, &body.version)? - .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Key backup does not exist.", - ))?; - - Ok(get_backup::Response { - algorithm, - count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), - etag: db.key_backups.get_etag(sender_user, &body.version)?, - version: body.version.to_owned(), - } - .into()) -} - -/// # `DELETE /_matrix/client/r0/room_keys/version/{version}` -/// -/// Delete an existing key backup. -/// -/// - Deletes both information about the backup, as well as all key data related to the backup -#[cfg_attr( - feature = "conduit_bin", - delete("/_matrix/client/unstable/room_keys/version/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn delete_backup_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - db.key_backups.delete_backup(sender_user, &body.version)?; - - db.flush()?; - - Ok(delete_backup::Response {}.into()) -} - -/// # `PUT /_matrix/client/r0/room_keys/keys` -/// -/// Add the received backup keys to the database. -/// -/// - Only manipulating the most recently created version of the backup is allowed -/// - Adds the keys to the backup -/// - Returns the new number of keys in this backup and the etag -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/unstable/room_keys/keys", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn add_backup_keys_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - if Some(&body.version) - != db - .key_backups - .get_latest_backup_version(sender_user)? - .as_ref() - { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "You may only manipulate the most recently created version of the backup.", - )); - } - - for (room_id, room) in &body.rooms { - for (session_id, key_data) in &room.sessions { - db.key_backups.add_key( - sender_user, - &body.version, - room_id, - session_id, - key_data, - &db.globals, - )? - } - } - - db.flush()?; - - Ok(add_backup_keys::Response { - count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), - etag: db.key_backups.get_etag(sender_user, &body.version)?, - } - .into()) -} - -/// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}` -/// -/// Add the received backup keys to the database. -/// -/// - Only manipulating the most recently created version of the backup is allowed -/// - Adds the keys to the backup -/// - Returns the new number of keys in this backup and the etag -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/unstable/room_keys/keys/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn add_backup_key_sessions_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - if Some(&body.version) - != db - .key_backups - .get_latest_backup_version(sender_user)? 
- .as_ref() - { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "You may only manipulate the most recently created version of the backup.", - )); - } - - for (session_id, key_data) in &body.sessions { - db.key_backups.add_key( - sender_user, - &body.version, - &body.room_id, - session_id, - key_data, - &db.globals, - )? - } - - db.flush()?; - - Ok(add_backup_key_sessions::Response { - count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), - etag: db.key_backups.get_etag(sender_user, &body.version)?, - } - .into()) -} - -/// # `PUT /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` -/// -/// Add the received backup key to the database. -/// -/// - Only manipulating the most recently created version of the backup is allowed -/// - Adds the keys to the backup -/// - Returns the new number of keys in this backup and the etag -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn add_backup_key_session_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - if Some(&body.version) - != db - .key_backups - .get_latest_backup_version(sender_user)? - .as_ref() - { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "You may only manipulate the most recently created version of the backup.", - )); - } - - db.key_backups.add_key( - sender_user, - &body.version, - &body.room_id, - &body.session_id, - &body.session_data, - &db.globals, - )?; - - db.flush()?; - - Ok(add_backup_key_session::Response { - count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), - etag: db.key_backups.get_etag(sender_user, &body.version)?, - } - .into()) -} - -/// # `GET /_matrix/client/r0/room_keys/keys` -/// -/// Retrieves all keys from the backup. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/unstable/room_keys/keys", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn get_backup_keys_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - let rooms = db.key_backups.get_all(sender_user, &body.version)?; - - Ok(get_backup_keys::Response { rooms }.into()) -} - -/// # `GET /_matrix/client/r0/room_keys/keys/{roomId}` -/// -/// Retrieves all keys from the backup for a given room. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/unstable/room_keys/keys/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn get_backup_key_sessions_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - let sessions = db - .key_backups - .get_room(sender_user, &body.version, &body.room_id)?; - - Ok(get_backup_key_sessions::Response { sessions }.into()) -} - -/// # `GET /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` -/// -/// Retrieves a key from the backup. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn get_backup_key_session_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - let key_data = db - .key_backups - .get_session(sender_user, &body.version, &body.room_id, &body.session_id)? 
- .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Backup key not found for this user's session.", - ))?; - - Ok(get_backup_key_session::Response { key_data }.into()) -} - -/// # `DELETE /_matrix/client/r0/room_keys/keys` -/// -/// Delete the keys from the backup. -#[cfg_attr( - feature = "conduit_bin", - delete("/_matrix/client/unstable/room_keys/keys", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn delete_backup_keys_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - db.key_backups.delete_all_keys(sender_user, &body.version)?; - - db.flush()?; - - Ok(delete_backup_keys::Response { - count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), - etag: db.key_backups.get_etag(sender_user, &body.version)?, - } - .into()) -} - -/// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}` -/// -/// Delete the keys from the backup for a given room. -#[cfg_attr( - feature = "conduit_bin", - delete("/_matrix/client/unstable/room_keys/keys/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn delete_backup_key_sessions_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - db.key_backups - .delete_room_keys(sender_user, &body.version, &body.room_id)?; - - db.flush()?; - - Ok(delete_backup_key_sessions::Response { - count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), - etag: db.key_backups.get_etag(sender_user, &body.version)?, - } - .into()) -} - -/// # `DELETE /_matrix/client/r0/room_keys/keys/{roomId}/{sessionId}` -/// -/// Delete a key from the backup. -#[cfg_attr( - feature = "conduit_bin", - delete("/_matrix/client/unstable/room_keys/keys/<_>/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn delete_backup_key_session_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - db.key_backups - .delete_room_key(sender_user, &body.version, &body.room_id, &body.session_id)?; - - db.flush()?; - - Ok(delete_backup_key_session::Response { - count: (db.key_backups.count_keys(sender_user, &body.version)? as u32).into(), - etag: db.key_backups.get_etag(sender_user, &body.version)?, - } - .into()) -} diff --git a/src/client_server/capabilities.rs b/src/client_server/capabilities.rs deleted file mode 100644 index f86b23b5..00000000 --- a/src/client_server/capabilities.rs +++ /dev/null @@ -1,35 +0,0 @@ -use crate::{ConduitResult, Ruma}; -use ruma::{ - api::client::r0::capabilities::{ - get_capabilities, Capabilities, RoomVersionStability, RoomVersionsCapability, - }, - RoomVersionId, -}; -use std::collections::BTreeMap; - -#[cfg(feature = "conduit_bin")] -use rocket::get; - -/// # `GET /_matrix/client/r0/capabilities` -/// -/// Get information on the supported feature set and other relevent capabilities of this server. 
-#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/capabilities", data = "<_body>") -)] -#[tracing::instrument(skip(_body))] -pub async fn get_capabilities_route( - _body: Ruma, -) -> ConduitResult { - let mut available = BTreeMap::new(); - available.insert(RoomVersionId::Version5, RoomVersionStability::Stable); - available.insert(RoomVersionId::Version6, RoomVersionStability::Stable); - - let mut capabilities = Capabilities::new(); - capabilities.room_versions = RoomVersionsCapability { - default: RoomVersionId::Version6, - available, - }; - - Ok(get_capabilities::Response { capabilities }.into()) -} diff --git a/src/client_server/context.rs b/src/client_server/context.rs deleted file mode 100644 index 97fc4fd8..00000000 --- a/src/client_server/context.rs +++ /dev/null @@ -1,109 +0,0 @@ -use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; -use ruma::api::client::{error::ErrorKind, r0::context::get_context}; -use std::convert::TryFrom; - -#[cfg(feature = "conduit_bin")] -use rocket::get; - -/// # `GET /_matrix/client/r0/rooms/{roomId}/context` -/// -/// Allows loading room history around an event. -/// -/// - Only works if the user is joined (TODO: always allow, but only show events if the user was -/// joined, depending on history_visibility) -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_>/context/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn get_context_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - if !db.rooms.is_joined(sender_user, &body.room_id)? { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "You don't have permission to view this room.", - )); - } - - let base_pdu_id = db - .rooms - .get_pdu_id(&body.event_id)? - .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Base event id not found.", - ))?; - - let base_token = db.rooms.pdu_count(&base_pdu_id)?; - - let base_event = db - .rooms - .get_pdu_from_id(&base_pdu_id)? - .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Base event not found.", - ))? - .to_room_event(); - - let events_before: Vec<_> = db - .rooms - .pdus_until(sender_user, &body.room_id, base_token)? - .take( - u32::try_from(body.limit).map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.") - })? as usize - / 2, - ) - .filter_map(|r| r.ok()) // Remove buggy events - .collect(); - - let start_token = events_before - .last() - .and_then(|(pdu_id, _)| db.rooms.pdu_count(pdu_id).ok()) - .map(|count| count.to_string()); - - let events_before: Vec<_> = events_before - .into_iter() - .map(|(_, pdu)| pdu.to_room_event()) - .collect(); - - let events_after: Vec<_> = db - .rooms - .pdus_after(sender_user, &body.room_id, base_token)? - .take( - u32::try_from(body.limit).map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "Limit value is invalid.") - })? 
as usize - / 2, - ) - .filter_map(|r| r.ok()) // Remove buggy events - .collect(); - - let end_token = events_after - .last() - .and_then(|(pdu_id, _)| db.rooms.pdu_count(pdu_id).ok()) - .map(|count| count.to_string()); - - let events_after: Vec<_> = events_after - .into_iter() - .map(|(_, pdu)| pdu.to_room_event()) - .collect(); - - let mut resp = get_context::Response::new(); - resp.start = start_token; - resp.end = end_token; - resp.events_before = events_before; - resp.event = Some(base_event); - resp.events_after = events_after; - resp.state = db // TODO: State at event - .rooms - .room_state_full(&body.room_id)? - .values() - .map(|pdu| pdu.to_state_event()) - .collect(); - - Ok(resp.into()) -} diff --git a/src/client_server/filter.rs b/src/client_server/filter.rs deleted file mode 100644 index dfb53770..00000000 --- a/src/client_server/filter.rs +++ /dev/null @@ -1,32 +0,0 @@ -use crate::{utils, ConduitResult}; -use ruma::api::client::r0::filter::{self, create_filter, get_filter}; - -#[cfg(feature = "conduit_bin")] -use rocket::{get, post}; - -/// # `GET /_matrix/client/r0/user/{userId}/filter/{filterId}` -/// -/// TODO: Loads a filter that was previously created. -#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/user/<_>/filter/<_>"))] -#[tracing::instrument] -pub async fn get_filter_route() -> ConduitResult { - // TODO - Ok(get_filter::Response::new(filter::IncomingFilterDefinition { - event_fields: None, - event_format: filter::EventFormat::default(), - account_data: filter::IncomingFilter::default(), - room: filter::IncomingRoomFilter::default(), - presence: filter::IncomingFilter::default(), - }) - .into()) -} - -/// # `PUT /_matrix/client/r0/user/{userId}/filter` -/// -/// TODO: Creates a new filter to be used by other endpoints. -#[cfg_attr(feature = "conduit_bin", post("/_matrix/client/r0/user/<_>/filter"))] -#[tracing::instrument] -pub async fn create_filter_route() -> ConduitResult { - // TODO - Ok(create_filter::Response::new(utils::random_string(10)).into()) -} diff --git a/src/client_server/media.rs b/src/client_server/media.rs deleted file mode 100644 index 0a7f4bb5..00000000 --- a/src/client_server/media.rs +++ /dev/null @@ -1,198 +0,0 @@ -use crate::{ - database::{media::FileMeta, DatabaseGuard}, - utils, ConduitResult, Error, Ruma, -}; -use ruma::api::client::{ - error::ErrorKind, - r0::media::{create_content, get_content, get_content_thumbnail, get_media_config}, -}; -use std::convert::TryInto; - -#[cfg(feature = "conduit_bin")] -use rocket::{get, post}; - -const MXC_LENGTH: usize = 32; - -/// # `GET /_matrix/media/r0/config` -/// -/// Returns max upload size. -#[cfg_attr(feature = "conduit_bin", get("/_matrix/media/r0/config"))] -#[tracing::instrument(skip(db))] -pub async fn get_media_config_route( - db: DatabaseGuard, -) -> ConduitResult { - Ok(get_media_config::Response { - upload_size: db.globals.max_request_size().into(), - } - .into()) -} - -/// # `POST /_matrix/media/r0/upload` -/// -/// Permanently save media in the server. 
-/// -/// - Some metadata will be saved in the database -/// - Media will be saved in the media/ directory -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/media/r0/upload", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn create_content_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - let mxc = format!( - "mxc://{}/{}", - db.globals.server_name(), - utils::random_string(MXC_LENGTH) - ); - - db.media - .create( - mxc.clone(), - &db.globals, - &body - .filename - .as_ref() - .map(|filename| "inline; filename=".to_owned() + filename) - .as_deref(), - &body.content_type.as_deref(), - &body.file, - ) - .await?; - - db.flush()?; - - Ok(create_content::Response { - content_uri: mxc.try_into().expect("Invalid mxc:// URI"), - blurhash: None, - } - .into()) -} - -/// # `POST /_matrix/media/r0/download/{serverName}/{mediaId}` -/// -/// Load media from our server or over federation. -/// -/// - Only allows federation if `allow_remote` is true -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/media/r0/download/<_>/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn get_content_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); - - if let Some(FileMeta { - content_disposition, - content_type, - file, - }) = db.media.get(&db.globals, &mxc).await? - { - Ok(get_content::Response { - file, - content_type, - content_disposition, - } - .into()) - } else if &*body.server_name != db.globals.server_name() && body.allow_remote { - let get_content_response = db - .sending - .send_federation_request( - &db.globals, - &body.server_name, - get_content::Request { - allow_remote: false, - server_name: &body.server_name, - media_id: &body.media_id, - }, - ) - .await?; - - db.media - .create( - mxc, - &db.globals, - &get_content_response.content_disposition.as_deref(), - &get_content_response.content_type.as_deref(), - &get_content_response.file, - ) - .await?; - - Ok(get_content_response.into()) - } else { - Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) - } -} - -/// # `POST /_matrix/media/r0/thumbnail/{serverName}/{mediaId}` -/// -/// Load media thumbnail from our server or over federation. -/// -/// - Only allows federation if `allow_remote` is true -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/media/r0/thumbnail/<_>/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn get_content_thumbnail_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - let mxc = format!("mxc://{}/{}", body.server_name, body.media_id); - - if let Some(FileMeta { - content_type, file, .. - }) = db - .media - .get_thumbnail( - mxc.clone(), - &db.globals, - body.width - .try_into() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?, - body.height - .try_into() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Width is invalid."))?, - ) - .await? 
- { - Ok(get_content_thumbnail::Response { file, content_type }.into()) - } else if &*body.server_name != db.globals.server_name() && body.allow_remote { - let get_thumbnail_response = db - .sending - .send_federation_request( - &db.globals, - &body.server_name, - get_content_thumbnail::Request { - allow_remote: false, - height: body.height, - width: body.width, - method: body.method.clone(), - server_name: &body.server_name, - media_id: &body.media_id, - }, - ) - .await?; - - db.media - .upload_thumbnail( - mxc, - &db.globals, - &None, - &get_thumbnail_response.content_type, - body.width.try_into().expect("all UInts are valid u32s"), - body.height.try_into().expect("all UInts are valid u32s"), - &get_thumbnail_response.file, - ) - .await?; - - Ok(get_thumbnail_response.into()) - } else { - Err(Error::BadRequest(ErrorKind::NotFound, "Media not found.")) - } -} diff --git a/src/client_server/message.rs b/src/client_server/message.rs deleted file mode 100644 index abbbe8ea..00000000 --- a/src/client_server/message.rs +++ /dev/null @@ -1,207 +0,0 @@ -use crate::{database::DatabaseGuard, pdu::PduBuilder, utils, ConduitResult, Error, Ruma}; -use ruma::{ - api::client::{ - error::ErrorKind, - r0::message::{get_message_events, send_message_event}, - }, - events::EventType, - EventId, -}; -use std::{ - collections::BTreeMap, - convert::{TryFrom, TryInto}, - sync::Arc, -}; - -#[cfg(feature = "conduit_bin")] -use rocket::{get, put}; - -/// # `PUT /_matrix/client/r0/rooms/{roomId}/send/{eventType}/{txnId}` -/// -/// Send a message event into the room. -/// -/// - Is a NOOP if the txn id was already used before and returns the same event id again -/// - The only requirement for the content is that it has to be valid json -/// - Tries to send the event into the room, auth rules will determine if it is allowed -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/rooms/<_>/send/<_>/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn send_message_event_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.as_deref(); - - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(body.room_id.clone()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - // Forbid m.room.encrypted if encryption is disabled - if &body.event_type == "m.room.encrypted" && !db.globals.allow_encryption() { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Encryption has been disabled", - )); - } - - // Check if this is a new transaction id - if let Some(response) = - db.transaction_ids - .existing_txnid(sender_user, sender_device, &body.txn_id)? 
- { - // The client might have sent a txnid of the /sendToDevice endpoint - // This txnid has no response associated with it - if response.is_empty() { - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Tried to use txn id already used for an incompatible endpoint.", - )); - } - - let event_id = EventId::try_from( - utils::string_from_bytes(&response) - .map_err(|_| Error::bad_database("Invalid txnid bytes in database."))?, - ) - .map_err(|_| Error::bad_database("Invalid event id in txnid data."))?; - return Ok(send_message_event::Response { event_id }.into()); - } - - let mut unsigned = BTreeMap::new(); - unsigned.insert("transaction_id".to_owned(), body.txn_id.clone().into()); - - let event_id = db.rooms.build_and_append_pdu( - PduBuilder { - event_type: EventType::from(&body.event_type), - content: serde_json::from_str(body.body.body.json().get()) - .map_err(|_| Error::BadRequest(ErrorKind::BadJson, "Invalid JSON body."))?, - unsigned: Some(unsigned), - state_key: None, - redacts: None, - }, - sender_user, - &body.room_id, - &db, - &state_lock, - )?; - - db.transaction_ids.add_txnid( - sender_user, - sender_device, - &body.txn_id, - event_id.as_bytes(), - )?; - - drop(state_lock); - - db.flush()?; - - Ok(send_message_event::Response::new(event_id).into()) -} - -/// # `GET /_matrix/client/r0/rooms/{roomId}/messages` -/// -/// Allows paginating through room history. -/// -/// - Only works if the user is joined (TODO: always allow, but only show events where the user was -/// joined, depending on history_visibility) -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/rooms/<_>/messages", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn get_message_events_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - if !db.rooms.is_joined(sender_user, &body.room_id)? { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "You don't have permission to view this room.", - )); - } - - let from = body - .from - .clone() - .parse() - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid `from` value."))?; - - let to = body.to.as_ref().map(|t| t.parse()); - - // Use limit or else 10 - let limit = body.limit.try_into().map_or(10_usize, |l: u32| l as usize); - - match body.dir { - get_message_events::Direction::Forward => { - let events_after: Vec<_> = db - .rooms - .pdus_after(sender_user, &body.room_id, from)? - .take(limit) - .filter_map(|r| r.ok()) // Filter out buggy events - .filter_map(|(pdu_id, pdu)| { - db.rooms - .pdu_count(&pdu_id) - .map(|pdu_count| (pdu_count, pdu)) - .ok() - }) - .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to` - .collect(); - - let end_token = events_after.last().map(|(count, _)| count.to_string()); - - let events_after: Vec<_> = events_after - .into_iter() - .map(|(_, pdu)| pdu.to_room_event()) - .collect(); - - let mut resp = get_message_events::Response::new(); - resp.start = Some(body.from.to_owned()); - resp.end = end_token; - resp.chunk = events_after; - resp.state = Vec::new(); - - Ok(resp.into()) - } - get_message_events::Direction::Backward => { - let events_before: Vec<_> = db - .rooms - .pdus_until(sender_user, &body.room_id, from)? 
- .take(limit) - .filter_map(|r| r.ok()) // Filter out buggy events - .filter_map(|(pdu_id, pdu)| { - db.rooms - .pdu_count(&pdu_id) - .map(|pdu_count| (pdu_count, pdu)) - .ok() - }) - .take_while(|&(k, _)| Some(Ok(k)) != to) // Stop at `to` - .collect(); - - let start_token = events_before.last().map(|(count, _)| count.to_string()); - - let events_before: Vec<_> = events_before - .into_iter() - .map(|(_, pdu)| pdu.to_room_event()) - .collect(); - - let mut resp = get_message_events::Response::new(); - resp.start = Some(body.from.to_owned()); - resp.end = start_token; - resp.chunk = events_before; - resp.state = Vec::new(); - - Ok(resp.into()) - } - } -} diff --git a/src/client_server/read_marker.rs b/src/client_server/read_marker.rs deleted file mode 100644 index 60aa4cef..00000000 --- a/src/client_server/read_marker.rs +++ /dev/null @@ -1,143 +0,0 @@ -use crate::{database::DatabaseGuard, ConduitResult, Error, Ruma}; -use ruma::{ - api::client::{ - error::ErrorKind, - r0::{read_marker::set_read_marker, receipt::create_receipt}, - }, - events::{AnyEphemeralRoomEvent, EventType}, - receipt::ReceiptType, - MilliSecondsSinceUnixEpoch, -}; -use std::collections::BTreeMap; - -#[cfg(feature = "conduit_bin")] -use rocket::post; - -/// # `POST /_matrix/client/r0/rooms/{roomId}/read_markers` -/// -/// Sets different types of read markers. -/// -/// - Updates fully-read account data event to `fully_read` -/// - If `read_receipt` is set: Update private marker and public read receipt EDU -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/read_markers", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn set_read_marker_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - let fully_read_event = ruma::events::fully_read::FullyReadEvent { - content: ruma::events::fully_read::FullyReadEventContent { - event_id: body.fully_read.clone(), - }, - }; - db.account_data.update( - Some(&body.room_id), - sender_user, - EventType::FullyRead, - &fully_read_event, - &db.globals, - )?; - - if let Some(event) = &body.read_receipt { - db.rooms.edus.private_read_set( - &body.room_id, - sender_user, - db.rooms.get_pdu_count(event)?.ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Event does not exist.", - ))?, - &db.globals, - )?; - db.rooms - .reset_notification_counts(sender_user, &body.room_id)?; - - let mut user_receipts = BTreeMap::new(); - user_receipts.insert( - sender_user.clone(), - ruma::events::receipt::Receipt { - ts: Some(MilliSecondsSinceUnixEpoch::now()), - }, - ); - - let mut receipts = BTreeMap::new(); - receipts.insert(ReceiptType::Read, user_receipts); - - let mut receipt_content = BTreeMap::new(); - receipt_content.insert(event.to_owned(), receipts); - - db.rooms.edus.readreceipt_update( - sender_user, - &body.room_id, - AnyEphemeralRoomEvent::Receipt(ruma::events::receipt::ReceiptEvent { - content: ruma::events::receipt::ReceiptEventContent(receipt_content), - room_id: body.room_id.clone(), - }), - &db.globals, - )?; - } - - db.flush()?; - - Ok(set_read_marker::Response {}.into()) -} - -/// # `POST /_matrix/client/r0/rooms/{roomId}/receipt/{receiptType}/{eventId}` -/// -/// Sets private read marker and public read receipt EDU. 
-#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/rooms/<_>/receipt/<_>/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn create_receipt_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - db.rooms.edus.private_read_set( - &body.room_id, - sender_user, - db.rooms - .get_pdu_count(&body.event_id)? - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Event does not exist.", - ))?, - &db.globals, - )?; - db.rooms - .reset_notification_counts(sender_user, &body.room_id)?; - - let mut user_receipts = BTreeMap::new(); - user_receipts.insert( - sender_user.clone(), - ruma::events::receipt::Receipt { - ts: Some(MilliSecondsSinceUnixEpoch::now()), - }, - ); - let mut receipts = BTreeMap::new(); - receipts.insert(ReceiptType::Read, user_receipts); - - let mut receipt_content = BTreeMap::new(); - receipt_content.insert(body.event_id.to_owned(), receipts); - - db.rooms.edus.readreceipt_update( - sender_user, - &body.room_id, - AnyEphemeralRoomEvent::Receipt(ruma::events::receipt::ReceiptEvent { - content: ruma::events::receipt::ReceiptEventContent(receipt_content), - room_id: body.room_id.clone(), - }), - &db.globals, - )?; - - db.flush()?; - - Ok(create_receipt::Response {}.into()) -} diff --git a/src/client_server/sync.rs b/src/client_server/sync.rs deleted file mode 100644 index 65c07bc9..00000000 --- a/src/client_server/sync.rs +++ /dev/null @@ -1,808 +0,0 @@ -use crate::{database::DatabaseGuard, ConduitResult, Database, Error, Result, Ruma, RumaResponse}; -use ruma::{ - api::client::r0::{sync::sync_events, uiaa::UiaaResponse}, - events::{ - room::member::{MembershipState, RoomMemberEventContent}, - AnySyncEphemeralRoomEvent, EventType, - }, - serde::Raw, - DeviceId, RoomId, UserId, -}; -use std::{ - collections::{hash_map::Entry, BTreeMap, HashMap, HashSet}, - convert::{TryFrom, TryInto}, - sync::Arc, - time::Duration, -}; -use tokio::sync::watch::Sender; -use tracing::error; - -#[cfg(feature = "conduit_bin")] -use rocket::{get, tokio}; - -/// # `GET /_matrix/client/r0/sync` -/// -/// Synchronize the client's state with the latest state on the server. -/// -/// - This endpoint takes a `since` parameter which should be the `next_batch` value from a -/// previous request for incremental syncs. 
-/// -/// Calling this endpoint without a `since` parameter returns: -/// - Some of the most recent events of each timeline -/// - Notification counts for each room -/// - Joined and invited member counts, heroes -/// - All state events -/// -/// Calling this endpoint with a `since` parameter from a previous `next_batch` returns: -/// For joined rooms: -/// - Some of the most recent events of each timeline that happened after since -/// - If user joined the room after since: All state events and device list updates in that room -/// - If the user was already in the room: A list of all events that are in the state now, but were -/// not in the state at `since` -/// - If the state we send contains a member event: Joined and invited member counts, heroes -/// - Device list updates that happened after `since` -/// - If there are events in the timeline we send or the user send updated his read mark: Notification counts -/// - EDUs that are active now (read receipts, typing updates, presence) -/// -/// For invited rooms: -/// - If the user was invited after `since`: A subset of the state of the room at the point of the invite -/// -/// For left rooms: -/// - If the user left after `since`: prev_batch token, empty state (TODO: subset of the state at the point of the leave) -/// -/// - Sync is handled in an async task, multiple requests from the same device with the same -/// `since` will be cached -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/sync", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn sync_events_route( - db: DatabaseGuard, - body: Ruma>, -) -> Result, RumaResponse> { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - let sender_device = body.sender_device.as_ref().expect("user is authenticated"); - - let arc_db = Arc::new(db); - - let mut rx = match arc_db - .globals - .sync_receivers - .write() - .unwrap() - .entry((sender_user.clone(), sender_device.clone())) - { - Entry::Vacant(v) => { - let (tx, rx) = tokio::sync::watch::channel(None); - - tokio::spawn(sync_helper_wrapper( - Arc::clone(&arc_db), - sender_user.clone(), - sender_device.clone(), - body.since.clone(), - body.full_state, - body.timeout, - tx, - )); - - v.insert((body.since.clone(), rx)).1.clone() - } - Entry::Occupied(mut o) => { - if o.get().0 != body.since { - let (tx, rx) = tokio::sync::watch::channel(None); - - tokio::spawn(sync_helper_wrapper( - Arc::clone(&arc_db), - sender_user.clone(), - sender_device.clone(), - body.since.clone(), - body.full_state, - body.timeout, - tx, - )); - - o.insert((body.since.clone(), rx.clone())); - - rx - } else { - o.get().1.clone() - } - } - }; - - let we_have_to_wait = rx.borrow().is_none(); - if we_have_to_wait { - if let Err(e) = rx.changed().await { - error!("Error waiting for sync: {}", e); - } - } - - let result = match rx - .borrow() - .as_ref() - .expect("When sync channel changes it's always set to some") - { - Ok(response) => Ok(response.clone()), - Err(error) => Err(error.to_response()), - }; - - result -} - -async fn sync_helper_wrapper( - db: Arc, - sender_user: UserId, - sender_device: Box, - since: Option, - full_state: bool, - timeout: Option, - tx: Sender>>, -) { - let r = sync_helper( - Arc::clone(&db), - sender_user.clone(), - sender_device.clone(), - since.clone(), - full_state, - timeout, - ) - .await; - - if let Ok((_, caching_allowed)) = r { - if !caching_allowed { - match db - .globals - .sync_receivers - .write() - .unwrap() - .entry((sender_user, sender_device)) - { - 
Entry::Occupied(o) => { - // Only remove if the device didn't start a different /sync already - if o.get().0 == since { - o.remove(); - } - } - Entry::Vacant(_) => {} - } - } - } - - drop(db); - - let _ = tx.send(Some(r.map(|(r, _)| r.into()))); -} - -async fn sync_helper( - db: Arc, - sender_user: UserId, - sender_device: Box, - since: Option, - full_state: bool, - timeout: Option, - // bool = caching allowed -) -> Result<(sync_events::Response, bool), Error> { - // TODO: match body.set_presence { - db.rooms.edus.ping_presence(&sender_user)?; - - // Setup watchers, so if there's no response, we can wait for them - let watcher = db.watch(&sender_user, &sender_device); - - let next_batch = db.globals.current_count()?; - let next_batch_string = next_batch.to_string(); - - let mut joined_rooms = BTreeMap::new(); - let since = since - .clone() - .and_then(|string| string.parse().ok()) - .unwrap_or(0); - - let mut presence_updates = HashMap::new(); - let mut left_encrypted_users = HashSet::new(); // Users that have left any encrypted rooms the sender was in - let mut device_list_updates = HashSet::new(); - let mut device_list_left = HashSet::new(); - - // Look for device list updates of this account - device_list_updates.extend( - db.users - .keys_changed(&sender_user.to_string(), since, None) - .filter_map(|r| r.ok()), - ); - - let all_joined_rooms = db.rooms.rooms_joined(&sender_user).collect::>(); - for room_id in all_joined_rooms { - let room_id = room_id?; - - // Get and drop the lock to wait for remaining operations to finish - // This will make sure the we have all events until next_batch - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - drop(insert_lock); - - let mut non_timeline_pdus = db - .rooms - .pdus_until(&sender_user, &room_id, u64::MAX)? - .filter_map(|r| { - // Filter out buggy events - if r.is_err() { - error!("Bad pdu in pdus_since: {:?}", r); - } - r.ok() - }) - .take_while(|(pduid, _)| { - db.rooms - .pdu_count(pduid) - .map_or(false, |count| count > since) - }); - - // Take the last 10 events for the timeline - let timeline_pdus: Vec<_> = non_timeline_pdus - .by_ref() - .take(10) - .collect::>() - .into_iter() - .rev() - .collect(); - - let send_notification_counts = !timeline_pdus.is_empty() - || db - .rooms - .edus - .last_privateread_update(&sender_user, &room_id)? - > since; - - // They /sync response doesn't always return all messages, so we say the output is - // limited unless there are events in non_timeline_pdus - let limited = non_timeline_pdus.next().is_some(); - - // Database queries: - - let current_shortstatehash = db - .rooms - .current_shortstatehash(&room_id)? - .expect("All rooms have state"); - - let since_shortstatehash = db.rooms.get_token_shortstatehash(&room_id, since)?; - - // Calculates joined_member_count, invited_member_count and heroes - let calculate_counts = || { - let joined_member_count = db.rooms.room_joined_count(&room_id)?.unwrap_or(0); - let invited_member_count = db.rooms.room_invited_count(&room_id)?.unwrap_or(0); - - // Recalculate heroes (first 5 members) - let mut heroes = Vec::new(); - - if joined_member_count + invited_member_count <= 5 { - // Go through all PDUs and for each member event, check if the user is still joined or - // invited until we have 5 or we reach the end - - for hero in db - .rooms - .all_pdus(&sender_user, &room_id)? 
- .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus - .filter(|(_, pdu)| pdu.kind == EventType::RoomMember) - .map(|(_, pdu)| { - let content: RoomMemberEventContent = - serde_json::from_str(pdu.content.get()).map_err(|_| { - Error::bad_database("Invalid member event in database.") - })?; - - if let Some(state_key) = &pdu.state_key { - let user_id = UserId::try_from(state_key.clone()).map_err(|_| { - Error::bad_database("Invalid UserId in member PDU.") - })?; - - // The membership was and still is invite or join - if matches!( - content.membership, - MembershipState::Join | MembershipState::Invite - ) && (db.rooms.is_joined(&user_id, &room_id)? - || db.rooms.is_invited(&user_id, &room_id)?) - { - Ok::<_, Error>(Some(state_key.clone())) - } else { - Ok(None) - } - } else { - Ok(None) - } - }) - // Filter out buggy users - .filter_map(|u| u.ok()) - // Filter for possible heroes - .flatten() - { - if heroes.contains(&hero) || hero == sender_user.as_str() { - continue; - } - - heroes.push(hero); - } - } - - Ok::<_, Error>(( - Some(joined_member_count), - Some(invited_member_count), - heroes, - )) - }; - - let ( - heroes, - joined_member_count, - invited_member_count, - joined_since_last_sync, - state_events, - ) = if since_shortstatehash.is_none() { - // Probably since = 0, we will do an initial sync - let (joined_member_count, invited_member_count, heroes) = calculate_counts()?; - - let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?; - let state_events: Vec<_> = current_state_ids - .iter() - .map(|(_, id)| db.rooms.get_pdu(id)) - .filter_map(|r| r.ok().flatten()) - .collect(); - - ( - heroes, - joined_member_count, - invited_member_count, - true, - state_events, - ) - } else if timeline_pdus.is_empty() && since_shortstatehash == Some(current_shortstatehash) { - // No state changes - (Vec::new(), None, None, false, Vec::new()) - } else { - // Incremental /sync - let since_shortstatehash = since_shortstatehash.unwrap(); - - let since_sender_member: Option = db - .rooms - .state_get( - since_shortstatehash, - &EventType::RoomMember, - sender_user.as_str(), - )? - .and_then(|pdu| { - serde_json::from_str(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid PDU in database.")) - .ok() - }); - - let joined_since_last_sync = since_sender_member - .map_or(true, |member| member.membership != MembershipState::Join); - - let current_state_ids = db.rooms.state_full_ids(current_shortstatehash)?; - - let since_state_ids = db.rooms.state_full_ids(since_shortstatehash)?; - - let state_events = if joined_since_last_sync { - current_state_ids - .iter() - .map(|(_, id)| db.rooms.get_pdu(id)) - .filter_map(|r| r.ok().flatten()) - .collect::>() - } else { - current_state_ids - .iter() - .filter(|(key, id)| since_state_ids.get(key) != Some(id)) - .map(|(_, id)| db.rooms.get_pdu(id)) - .filter_map(|r| r.ok().flatten()) - .collect() - }; - - let encrypted_room = db - .rooms - .state_get(current_shortstatehash, &EventType::RoomEncryption, "")? 
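The incremental branch above implements the behaviour described in the route's doc comment: for a user who was already joined at `since`, only the state entries whose event id differs between the `since` snapshot and the current snapshot are returned. A minimal sketch of that comparison, using plain `BTreeMap<String, String>` maps as stand-ins for Conduit's shortstatekey/PDU lookups (types and values here are illustrative, not the real database API):

```rust
use std::collections::BTreeMap;

/// Simplified stand-in types: in Conduit these are shortstatekeys and PDUs
/// resolved through the database, not plain strings.
type StateKey = String;
type EventId = String;

/// Returns the event ids that are part of the room state now but were either
/// absent at `since` or have been replaced since then. This mirrors the filter
/// the removed `sync_helper` applies for users already joined at `since`.
fn state_diff(
    since_state: &BTreeMap<StateKey, EventId>,
    current_state: &BTreeMap<StateKey, EventId>,
) -> Vec<EventId> {
    current_state
        .iter()
        .filter(|(key, id)| since_state.get(*key) != Some(*id))
        .map(|(_, id)| id.clone())
        .collect()
}

fn main() {
    let since = BTreeMap::from([
        ("m.room.name:".to_owned(), "$name_v1".to_owned()),
        ("m.room.member:@alice:example.org".to_owned(), "$alice_join".to_owned()),
    ]);
    let current = BTreeMap::from([
        ("m.room.name:".to_owned(), "$name_v2".to_owned()),   // changed since `since`
        ("m.room.member:@alice:example.org".to_owned(), "$alice_join".to_owned()), // unchanged
        ("m.room.topic:".to_owned(), "$topic_v1".to_owned()), // new since `since`
    ]);
    // Only the changed name event and the new topic event would be sent.
    assert_eq!(state_diff(&since, &current), vec!["$name_v2", "$topic_v1"]);
}
```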
- .is_some(); - - let since_encryption = - db.rooms - .state_get(since_shortstatehash, &EventType::RoomEncryption, "")?; - - // Calculations: - let new_encrypted_room = encrypted_room && since_encryption.is_none(); - - let send_member_count = state_events - .iter() - .any(|event| event.kind == EventType::RoomMember); - - if encrypted_room { - for state_event in &state_events { - if state_event.kind != EventType::RoomMember { - continue; - } - - if let Some(state_key) = &state_event.state_key { - let user_id = UserId::try_from(state_key.clone()) - .map_err(|_| Error::bad_database("Invalid UserId in member PDU."))?; - - if user_id == sender_user { - continue; - } - - let new_membership = serde_json::from_str::( - state_event.content.get(), - ) - .map_err(|_| Error::bad_database("Invalid PDU in database."))? - .membership; - - match new_membership { - MembershipState::Join => { - // A new user joined an encrypted room - if !share_encrypted_room(&db, &sender_user, &user_id, &room_id)? { - device_list_updates.insert(user_id); - } - } - MembershipState::Leave => { - // Write down users that have left encrypted rooms we are in - left_encrypted_users.insert(user_id); - } - _ => {} - } - } - } - } - - if joined_since_last_sync && encrypted_room || new_encrypted_room { - // If the user is in a new encrypted room, give them all joined users - device_list_updates.extend( - db.rooms - .room_members(&room_id) - .flatten() - .filter(|user_id| { - // Don't send key updates from the sender to the sender - &sender_user != user_id - }) - .filter(|user_id| { - // Only send keys if the sender doesn't share an encrypted room with the target already - !share_encrypted_room(&db, &sender_user, user_id, &room_id) - .unwrap_or(false) - }), - ); - } - - let (joined_member_count, invited_member_count, heroes) = if send_member_count { - calculate_counts()? - } else { - (None, None, Vec::new()) - }; - - ( - heroes, - joined_member_count, - invited_member_count, - joined_since_last_sync, - state_events, - ) - }; - - // Look for device list updates in this room - device_list_updates.extend( - db.users - .keys_changed(&room_id.to_string(), since, None) - .filter_map(|r| r.ok()), - ); - - let notification_count = if send_notification_counts { - Some( - db.rooms - .notification_count(&sender_user, &room_id)? - .try_into() - .expect("notification count can't go that high"), - ) - } else { - None - }; - - let highlight_count = if send_notification_counts { - Some( - db.rooms - .highlight_count(&sender_user, &room_id)? - .try_into() - .expect("highlight count can't go that high"), - ) - } else { - None - }; - - let prev_batch = timeline_pdus - .first() - .map_or(Ok::<_, Error>(None), |(pdu_id, _)| { - Ok(Some(db.rooms.pdu_count(pdu_id)?.to_string())) - })?; - - let room_events: Vec<_> = timeline_pdus - .iter() - .map(|(_, pdu)| pdu.to_sync_room_event()) - .collect(); - - let mut edus: Vec<_> = db - .rooms - .edus - .readreceipts_since(&room_id, since) - .filter_map(|r| r.ok()) // Filter out buggy events - .map(|(_, _, v)| v) - .collect(); - - if db.rooms.edus.last_typing_update(&room_id, &db.globals)? 
> since { - edus.push( - serde_json::from_str( - &serde_json::to_string(&AnySyncEphemeralRoomEvent::Typing( - db.rooms.edus.typings_all(&room_id)?, - )) - .expect("event is valid, we just created it"), - ) - .expect("event is valid, we just created it"), - ); - } - - // Save the state after this sync so we can send the correct state diff next sync - db.rooms - .associate_token_shortstatehash(&room_id, next_batch, current_shortstatehash)?; - - let joined_room = sync_events::JoinedRoom { - account_data: sync_events::RoomAccountData { - events: db - .account_data - .changes_since(Some(&room_id), &sender_user, since)? - .into_iter() - .filter_map(|(_, v)| { - serde_json::from_str(v.json().get()) - .map_err(|_| Error::bad_database("Invalid account event in database.")) - .ok() - }) - .collect(), - }, - summary: sync_events::RoomSummary { - heroes, - joined_member_count: joined_member_count.map(|n| (n as u32).into()), - invited_member_count: invited_member_count.map(|n| (n as u32).into()), - }, - unread_notifications: sync_events::UnreadNotificationsCount { - highlight_count, - notification_count, - }, - timeline: sync_events::Timeline { - limited: limited || joined_since_last_sync, - prev_batch, - events: room_events, - }, - state: sync_events::State { - events: state_events - .iter() - .map(|pdu| pdu.to_sync_state_event()) - .collect(), - }, - ephemeral: sync_events::Ephemeral { events: edus }, - }; - - if !joined_room.is_empty() { - joined_rooms.insert(room_id.clone(), joined_room); - } - - // Take presence updates from this room - for (user_id, presence) in - db.rooms - .edus - .presence_since(&room_id, since, &db.rooms, &db.globals)? - { - match presence_updates.entry(user_id) { - Entry::Vacant(v) => { - v.insert(presence); - } - Entry::Occupied(mut o) => { - let p = o.get_mut(); - - // Update existing presence event with more info - p.content.presence = presence.content.presence; - if let Some(status_msg) = presence.content.status_msg { - p.content.status_msg = Some(status_msg); - } - if let Some(last_active_ago) = presence.content.last_active_ago { - p.content.last_active_ago = Some(last_active_ago); - } - if let Some(displayname) = presence.content.displayname { - p.content.displayname = Some(displayname); - } - if let Some(avatar_url) = presence.content.avatar_url { - p.content.avatar_url = Some(avatar_url); - } - if let Some(currently_active) = presence.content.currently_active { - p.content.currently_active = Some(currently_active); - } - } - } - } - } - - let mut left_rooms = BTreeMap::new(); - let all_left_rooms: Vec<_> = db.rooms.rooms_left(&sender_user).collect(); - for result in all_left_rooms { - let (room_id, left_state_events) = result?; - - // Get and drop the lock to wait for remaining operations to finish - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - drop(insert_lock); - - let left_count = db.rooms.get_left_count(&room_id, &sender_user)?; - - // Left before last sync - if Some(since) >= left_count { - continue; - } - - left_rooms.insert( - room_id.clone(), - sync_events::LeftRoom { - account_data: sync_events::RoomAccountData { events: Vec::new() }, - timeline: sync_events::Timeline { - limited: false, - prev_batch: Some(next_batch_string.clone()), - events: Vec::new(), - }, - state: sync_events::State { - events: left_state_events, - }, - }, - ); - } - - let mut invited_rooms = BTreeMap::new(); - let all_invited_rooms: Vec<_> = 
db.rooms.rooms_invited(&sender_user).collect(); - for result in all_invited_rooms { - let (room_id, invite_state_events) = result?; - - // Get and drop the lock to wait for remaining operations to finish - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - drop(insert_lock); - - let invite_count = db.rooms.get_invite_count(&room_id, &sender_user)?; - - // Invited before last sync - if Some(since) >= invite_count { - continue; - } - - invited_rooms.insert( - room_id.clone(), - sync_events::InvitedRoom { - invite_state: sync_events::InviteState { - events: invite_state_events, - }, - }, - ); - } - - for user_id in left_encrypted_users { - let still_share_encrypted_room = db - .rooms - .get_shared_rooms(vec![sender_user.clone(), user_id.clone()])? - .filter_map(|r| r.ok()) - .filter_map(|other_room_id| { - Some( - db.rooms - .room_state_get(&other_room_id, &EventType::RoomEncryption, "") - .ok()? - .is_some(), - ) - }) - .all(|encrypted| !encrypted); - // If the user doesn't share an encrypted room with the target anymore, we need to tell - // them - if still_share_encrypted_room { - device_list_left.insert(user_id); - } - } - - // Remove all to-device events the device received *last time* - db.users - .remove_to_device_events(&sender_user, &sender_device, since)?; - - let response = sync_events::Response { - next_batch: next_batch_string, - rooms: sync_events::Rooms { - leave: left_rooms, - join: joined_rooms, - invite: invited_rooms, - knock: BTreeMap::new(), // TODO - }, - presence: sync_events::Presence { - events: presence_updates - .into_iter() - .map(|(_, v)| Raw::new(&v).expect("PresenceEvent always serializes successfully")) - .collect(), - }, - account_data: sync_events::GlobalAccountData { - events: db - .account_data - .changes_since(None, &sender_user, since)? - .into_iter() - .filter_map(|(_, v)| { - serde_json::from_str(v.json().get()) - .map_err(|_| Error::bad_database("Invalid account event in database.")) - .ok() - }) - .collect(), - }, - device_lists: sync_events::DeviceLists { - changed: device_list_updates.into_iter().collect(), - left: device_list_left.into_iter().collect(), - }, - device_one_time_keys_count: db.users.count_one_time_keys(&sender_user, &sender_device)?, - to_device: sync_events::ToDevice { - events: db - .users - .get_to_device_events(&sender_user, &sender_device)?, - }, - }; - - // TODO: Retry the endpoint instead of returning (waiting for #118) - if !full_state - && response.rooms.is_empty() - && response.presence.is_empty() - && response.account_data.is_empty() - && response.device_lists.is_empty() - && response.to_device.is_empty() - { - // Hang a few seconds so requests are not spammed - // Stop hanging if new info arrives - let mut duration = timeout.unwrap_or_default(); - if duration.as_secs() > 30 { - duration = Duration::from_secs(30); - } - let _ = tokio::time::timeout(duration, watcher).await; - Ok((response, false)) - } else { - Ok((response, since != next_batch)) // Only cache if we made progress - } -} - -#[tracing::instrument(skip(db))] -fn share_encrypted_room( - db: &Database, - sender_user: &UserId, - user_id: &UserId, - ignore_room: &RoomId, -) -> Result { - Ok(db - .rooms - .get_shared_rooms(vec![sender_user.clone(), user_id.clone()])? 
- .filter_map(|r| r.ok()) - .filter(|room_id| room_id != ignore_room) - .filter_map(|other_room_id| { - Some( - db.rooms - .room_state_get(&other_room_id, &EventType::RoomEncryption, "") - .ok()? - .is_some(), - ) - }) - .any(|encrypted| encrypted)) -} diff --git a/src/client_server/tag.rs b/src/client_server/tag.rs deleted file mode 100644 index 42bad4cf..00000000 --- a/src/client_server/tag.rs +++ /dev/null @@ -1,124 +0,0 @@ -use crate::{database::DatabaseGuard, ConduitResult, Ruma}; -use ruma::{ - api::client::r0::tag::{create_tag, delete_tag, get_tags}, - events::{ - tag::{TagEvent, TagEventContent}, - EventType, - }, -}; -use std::collections::BTreeMap; - -#[cfg(feature = "conduit_bin")] -use rocket::{delete, get, put}; - -/// # `PUT /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}` -/// -/// Adds a tag to the room. -/// -/// - Inserts the tag into the tag event of the room account data. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/user/<_>/rooms/<_>/tags/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn update_tag_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - let mut tags_event = db - .account_data - .get(Some(&body.room_id), sender_user, EventType::Tag)? - .unwrap_or_else(|| TagEvent { - content: TagEventContent { - tags: BTreeMap::new(), - }, - }); - tags_event - .content - .tags - .insert(body.tag.clone().into(), body.tag_info.clone()); - - db.account_data.update( - Some(&body.room_id), - sender_user, - EventType::Tag, - &tags_event, - &db.globals, - )?; - - db.flush()?; - - Ok(create_tag::Response {}.into()) -} - -/// # `DELETE /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags/{tag}` -/// -/// Deletes a tag from the room. -/// -/// - Removes the tag from the tag event of the room account data. -#[cfg_attr( - feature = "conduit_bin", - delete("/_matrix/client/r0/user/<_>/rooms/<_>/tags/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn delete_tag_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - let mut tags_event = db - .account_data - .get(Some(&body.room_id), sender_user, EventType::Tag)? - .unwrap_or_else(|| TagEvent { - content: TagEventContent { - tags: BTreeMap::new(), - }, - }); - tags_event.content.tags.remove(&body.tag.clone().into()); - - db.account_data.update( - Some(&body.room_id), - sender_user, - EventType::Tag, - &tags_event, - &db.globals, - )?; - - db.flush()?; - - Ok(delete_tag::Response {}.into()) -} - -/// # `GET /_matrix/client/r0/user/{userId}/rooms/{roomId}/tags` -/// -/// Returns tags on the room. -/// -/// - Gets the tag event of the room account data. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/user/<_>/rooms/<_>/tags", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn get_tags_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - Ok(get_tags::Response { - tags: db - .account_data - .get(Some(&body.room_id), sender_user, EventType::Tag)? 
- .unwrap_or_else(|| TagEvent { - content: TagEventContent { - tags: BTreeMap::new(), - }, - }) - .content - .tags, - } - .into()) -} diff --git a/src/client_server/thirdparty.rs b/src/client_server/thirdparty.rs deleted file mode 100644 index 4305902f..00000000 --- a/src/client_server/thirdparty.rs +++ /dev/null @@ -1,22 +0,0 @@ -use crate::ConduitResult; -use ruma::api::client::r0::thirdparty::get_protocols; - -#[cfg(feature = "conduit_bin")] -use rocket::get; -use std::collections::BTreeMap; - -/// # `GET /_matrix/client/r0/thirdparty/protocols` -/// -/// TODO: Fetches all metadata about protocols supported by the homeserver. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/client/r0/thirdparty/protocols") -)] -#[tracing::instrument] -pub async fn get_protocols_route() -> ConduitResult { - // TODO - Ok(get_protocols::Response { - protocols: BTreeMap::new(), - } - .into()) -} diff --git a/src/client_server/typing.rs b/src/client_server/typing.rs deleted file mode 100644 index 15e74b35..00000000 --- a/src/client_server/typing.rs +++ /dev/null @@ -1,36 +0,0 @@ -use crate::{database::DatabaseGuard, utils, ConduitResult, Ruma}; -use create_typing_event::Typing; -use ruma::api::client::r0::typing::create_typing_event; - -#[cfg(feature = "conduit_bin")] -use rocket::put; - -/// # `PUT /_matrix/client/r0/rooms/{roomId}/typing/{userId}` -/// -/// Sets the typing state of the sender user. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/client/r0/rooms/<_>/typing/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub fn create_typing_event_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - let sender_user = body.sender_user.as_ref().expect("user is authenticated"); - - if let Typing::Yes(duration) = body.state { - db.rooms.edus.typing_add( - sender_user, - &body.room_id, - duration.as_millis() as u64 + utils::millis_since_unix_epoch(), - &db.globals, - )?; - } else { - db.rooms - .edus - .typing_remove(sender_user, &body.room_id, &db.globals)?; - } - - Ok(create_typing_event::Response {}.into()) -} diff --git a/src/client_server/unversioned.rs b/src/client_server/unversioned.rs deleted file mode 100644 index f2624bbc..00000000 --- a/src/client_server/unversioned.rs +++ /dev/null @@ -1,27 +0,0 @@ -use crate::ConduitResult; -use ruma::api::client::unversioned::get_supported_versions; - -#[cfg(feature = "conduit_bin")] -use rocket::get; - -/// # `GET /_matrix/client/versions` -/// -/// Get the versions of the specification and unstable features supported by this server. -/// -/// - Versions take the form MAJOR.MINOR.PATCH -/// - Only the latest PATCH release will be reported for each MAJOR.MINOR value -/// - Unstable features are namespaced and may include version information in their name -/// -/// Note: Unstable features are used while developing new features. 
Clients should avoid using -/// unstable features in their stable releases -#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/versions"))] -#[tracing::instrument] -pub async fn get_supported_versions_route() -> ConduitResult { - let mut resp = - get_supported_versions::Response::new(vec!["r0.5.0".to_owned(), "r0.6.0".to_owned()]); - - resp.unstable_features - .insert("org.matrix.e2e_cross_signing".to_owned(), true); - - Ok(resp.into()) -} diff --git a/src/client_server/user_directory.rs b/src/client_server/user_directory.rs deleted file mode 100644 index cfcb9bb9..00000000 --- a/src/client_server/user_directory.rs +++ /dev/null @@ -1,59 +0,0 @@ -use crate::{database::DatabaseGuard, ConduitResult, Ruma}; -use ruma::api::client::r0::user_directory::search_users; - -#[cfg(feature = "conduit_bin")] -use rocket::post; - -/// # `POST /_matrix/client/r0/user_directory/search` -/// -/// Searches all known users for a match. -/// -/// - TODO: Hide users that are not in any public rooms? -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/client/r0/user_directory/search", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn search_users_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - let limit = u64::from(body.limit) as usize; - - let mut users = db.users.iter().filter_map(|user_id| { - // Filter out buggy users (they should not exist, but you never know...) - let user_id = user_id.ok()?; - - let user = search_users::User { - user_id: user_id.clone(), - display_name: db.users.displayname(&user_id).ok()?, - avatar_url: db.users.avatar_url(&user_id).ok()?, - }; - - let user_id_matches = user - .user_id - .to_string() - .to_lowercase() - .contains(&body.search_term.to_lowercase()); - - let user_displayname_matches = user - .display_name - .as_ref() - .filter(|name| { - name.to_lowercase() - .contains(&body.search_term.to_lowercase()) - }) - .is_some(); - - if !user_id_matches && !user_displayname_matches { - return None; - } - - Some(user) - }); - - let results = users.by_ref().take(limit).collect(); - let limited = users.next().is_some(); - - Ok(search_users::Response { results, limited }.into()) -} diff --git a/src/client_server/voip.rs b/src/client_server/voip.rs deleted file mode 100644 index 2a7f28e1..00000000 --- a/src/client_server/voip.rs +++ /dev/null @@ -1,21 +0,0 @@ -use crate::ConduitResult; -use ruma::api::client::r0::voip::get_turn_server_info; -use std::time::Duration; - -#[cfg(feature = "conduit_bin")] -use rocket::get; - -/// # `GET /_matrix/client/r0/voip/turnServer` -/// -/// TODO: Returns information about the recommended turn server. 
-#[cfg_attr(feature = "conduit_bin", get("/_matrix/client/r0/voip/turnServer"))] -#[tracing::instrument] -pub async fn turn_server_route() -> ConduitResult { - Ok(get_turn_server_info::Response { - username: "".to_owned(), - password: "".to_owned(), - uris: Vec::new(), - ttl: Duration::from_secs(60 * 60 * 24), - } - .into()) -} diff --git a/src/config/mod.rs b/src/config/mod.rs new file mode 100644 index 00000000..3c3a764a --- /dev/null +++ b/src/config/mod.rs @@ -0,0 +1,263 @@ +use std::{ + collections::BTreeMap, + fmt, + net::{IpAddr, Ipv4Addr}, +}; + +use ruma::{OwnedServerName, RoomVersionId}; +use serde::{de::IgnoredAny, Deserialize}; +use tracing::warn; + +mod proxy; + +use self::proxy::ProxyConfig; + +#[derive(Clone, Debug, Deserialize)] +pub struct Config { + #[serde(default = "default_address")] + pub address: IpAddr, + #[serde(default = "default_port")] + pub port: u16, + pub tls: Option, + + pub server_name: OwnedServerName, + #[serde(default = "default_database_backend")] + pub database_backend: String, + pub database_path: String, + #[serde(default = "default_db_cache_capacity_mb")] + pub db_cache_capacity_mb: f64, + #[serde(default = "true_fn")] + pub enable_lightning_bolt: bool, + #[serde(default = "default_conduit_cache_capacity_modifier")] + pub conduit_cache_capacity_modifier: f64, + #[serde(default = "default_rocksdb_max_open_files")] + pub rocksdb_max_open_files: i32, + #[serde(default = "default_pdu_cache_capacity")] + pub pdu_cache_capacity: u32, + #[serde(default = "default_cleanup_second_interval")] + pub cleanup_second_interval: u32, + #[serde(default = "default_max_request_size")] + pub max_request_size: u32, + #[serde(default = "default_max_concurrent_requests")] + pub max_concurrent_requests: u16, + #[serde(default = "false_fn")] + pub allow_registration: bool, + #[serde(default = "true_fn")] + pub allow_encryption: bool, + #[serde(default = "false_fn")] + pub allow_federation: bool, + #[serde(default = "true_fn")] + pub allow_room_creation: bool, + #[serde(default = "true_fn")] + pub allow_unstable_room_versions: bool, + #[serde(default = "default_default_room_version")] + pub default_room_version: RoomVersionId, + #[serde(default = "false_fn")] + pub allow_jaeger: bool, + #[serde(default = "false_fn")] + pub tracing_flame: bool, + #[serde(default)] + pub proxy: ProxyConfig, + pub jwt_secret: Option, + #[serde(default = "Vec::new")] + pub trusted_servers: Vec, + #[serde(default = "default_log")] + pub log: String, + #[serde(default)] + pub turn_username: String, + #[serde(default)] + pub turn_password: String, + #[serde(default = "Vec::new")] + pub turn_uris: Vec, + #[serde(default)] + pub turn_secret: String, + #[serde(default = "default_turn_ttl")] + pub turn_ttl: u64, + + pub emergency_password: Option, + + #[serde(flatten)] + pub catchall: BTreeMap, +} + +#[derive(Clone, Debug, Deserialize)] +pub struct TlsConfig { + pub certs: String, + pub key: String, +} + +const DEPRECATED_KEYS: &[&str] = &["cache_capacity"]; + +impl Config { + pub fn warn_deprecated(&self) { + let mut was_deprecated = false; + for key in self + .catchall + .keys() + .filter(|key| DEPRECATED_KEYS.iter().any(|s| s == key)) + { + warn!("Config parameter {} is deprecated", key); + was_deprecated = true; + } + + if was_deprecated { + warn!("Read conduit documentation and check your configuration if any new configuration parameters should be adjusted"); + } + } +} + +impl fmt::Display for Config { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + // Prepare a list of config 
values to show + let lines = [ + ("Server name", self.server_name.host()), + ("Database backend", &self.database_backend), + ("Database path", &self.database_path), + ( + "Database cache capacity (MB)", + &self.db_cache_capacity_mb.to_string(), + ), + ( + "Cache capacity modifier", + &self.conduit_cache_capacity_modifier.to_string(), + ), + #[cfg(feature = "rocksdb")] + ( + "Maximum open files for RocksDB", + &self.rocksdb_max_open_files.to_string(), + ), + ("PDU cache capacity", &self.pdu_cache_capacity.to_string()), + ( + "Cleanup interval in seconds", + &self.cleanup_second_interval.to_string(), + ), + ("Maximum request size", &self.max_request_size.to_string()), + ( + "Maximum concurrent requests", + &self.max_concurrent_requests.to_string(), + ), + ("Allow registration", &self.allow_registration.to_string()), + ( + "Enabled lightning bolt", + &self.enable_lightning_bolt.to_string(), + ), + ("Allow encryption", &self.allow_encryption.to_string()), + ("Allow federation", &self.allow_federation.to_string()), + ("Allow room creation", &self.allow_room_creation.to_string()), + ( + "JWT secret", + match self.jwt_secret { + Some(_) => "set", + None => "not set", + }, + ), + ("Trusted servers", { + let mut lst = vec![]; + for server in &self.trusted_servers { + lst.push(server.host()); + } + &lst.join(", ") + }), + ( + "TURN username", + if self.turn_username.is_empty() { + "not set" + } else { + &self.turn_username + }, + ), + ("TURN password", { + if self.turn_password.is_empty() { + "not set" + } else { + "set" + } + }), + ("TURN secret", { + if self.turn_secret.is_empty() { + "not set" + } else { + "set" + } + }), + ("Turn TTL", &self.turn_ttl.to_string()), + ("Turn URIs", { + let mut lst = vec![]; + for item in self.turn_uris.to_vec().into_iter().enumerate() { + let (_, uri): (usize, String) = item; + lst.push(uri); + } + &lst.join(", ") + }), + ]; + + let mut msg: String = "Active config values:\n\n".to_string(); + + for line in lines.into_iter().enumerate() { + msg += &format!("{}: {}\n", line.1 .0, line.1 .1); + } + + write!(f, "{}", msg) + } +} + +fn false_fn() -> bool { + false +} + +fn true_fn() -> bool { + true +} + +fn default_address() -> IpAddr { + Ipv4Addr::LOCALHOST.into() +} + +fn default_port() -> u16 { + 8000 +} + +fn default_database_backend() -> String { + "sqlite".to_owned() +} + +fn default_db_cache_capacity_mb() -> f64 { + 10.0 +} + +fn default_conduit_cache_capacity_modifier() -> f64 { + 1.0 +} + +fn default_rocksdb_max_open_files() -> i32 { + 20 +} + +fn default_pdu_cache_capacity() -> u32 { + 150_000 +} + +fn default_cleanup_second_interval() -> u32 { + 60 // every minute +} + +fn default_max_request_size() -> u32 { + 20 * 1024 * 1024 // Default to 20 MB +} + +fn default_max_concurrent_requests() -> u16 { + 100 +} + +fn default_log() -> String { + "warn,state_res=warn,_=off,sled=off".to_owned() +} + +fn default_turn_ttl() -> u64 { + 60 * 60 * 24 +} + +// I know, it's a great name +pub fn default_default_room_version() -> RoomVersionId { + RoomVersionId::V9 +} diff --git a/src/database/proxy.rs b/src/config/proxy.rs similarity index 98% rename from src/database/proxy.rs rename to src/config/proxy.rs index fb0387c9..dcf304e9 100644 --- a/src/database/proxy.rs +++ b/src/config/proxy.rs @@ -10,13 +10,13 @@ use crate::Result; /// ``` /// - Global proxy /// ```toml -/// [proxy] +/// [global.proxy] /// global = { url = "socks5h://localhost:9050" } /// ``` /// - Proxy some domains /// ```toml -/// [proxy] -/// [[proxy.by_domain]] +/// [global.proxy] +/// 
[[global.proxy.by_domain]] /// url = "socks5h://localhost:9050" /// include = ["*.onion", "matrix.myspecial.onion"] /// exclude = ["*.myspecial.onion"] diff --git a/src/database.rs b/src/database.rs deleted file mode 100644 index 8cf4f640..00000000 --- a/src/database.rs +++ /dev/null @@ -1,977 +0,0 @@ -pub mod abstraction; - -pub mod account_data; -pub mod admin; -pub mod appservice; -pub mod globals; -pub mod key_backups; -pub mod media; -pub mod proxy; -pub mod pusher; -pub mod rooms; -pub mod sending; -pub mod transaction_ids; -pub mod uiaa; -pub mod users; - -use crate::{utils, Error, Result}; -use abstraction::DatabaseEngine; -use directories::ProjectDirs; -use lru_cache::LruCache; -use rocket::{ - futures::{channel::mpsc, stream::FuturesUnordered, StreamExt}, - outcome::{try_outcome, IntoOutcome}, - request::{FromRequest, Request}, - Shutdown, State, -}; -use ruma::{DeviceId, EventId, RoomId, ServerName, UserId}; -use serde::{de::IgnoredAny, Deserialize}; -use std::{ - collections::{BTreeMap, HashMap, HashSet}, - convert::{TryFrom, TryInto}, - fs::{self, remove_dir_all}, - io::Write, - mem::size_of, - ops::Deref, - path::Path, - sync::{Arc, Mutex, RwLock}, -}; -use tokio::sync::{OwnedRwLockReadGuard, RwLock as TokioRwLock, Semaphore}; -use tracing::{debug, error, warn}; - -use self::proxy::ProxyConfig; - -#[derive(Clone, Debug, Deserialize)] -pub struct Config { - server_name: Box, - database_path: String, - #[serde(default = "default_db_cache_capacity_mb")] - db_cache_capacity_mb: f64, - #[serde(default = "default_pdu_cache_capacity")] - pdu_cache_capacity: u32, - #[serde(default = "default_sqlite_wal_clean_second_interval")] - sqlite_wal_clean_second_interval: u32, - #[serde(default = "default_max_request_size")] - max_request_size: u32, - #[serde(default = "default_max_concurrent_requests")] - max_concurrent_requests: u16, - #[serde(default = "false_fn")] - allow_registration: bool, - #[serde(default = "true_fn")] - allow_encryption: bool, - #[serde(default = "false_fn")] - allow_federation: bool, - #[serde(default = "true_fn")] - allow_room_creation: bool, - #[serde(default = "false_fn")] - pub allow_jaeger: bool, - #[serde(default = "false_fn")] - pub tracing_flame: bool, - #[serde(default)] - proxy: ProxyConfig, - jwt_secret: Option, - #[serde(default = "Vec::new")] - trusted_servers: Vec>, - #[serde(default = "default_log")] - pub log: String, - - #[serde(flatten)] - catchall: BTreeMap, -} - -const DEPRECATED_KEYS: &[&str] = &["cache_capacity"]; - -impl Config { - pub fn warn_deprecated(&self) { - let mut was_deprecated = false; - for key in self - .catchall - .keys() - .filter(|key| DEPRECATED_KEYS.iter().any(|s| s == key)) - { - warn!("Config parameter {} is deprecated", key); - was_deprecated = true; - } - - if was_deprecated { - warn!("Read conduit documentation and check your configuration if any new configuration parameters should be adjusted"); - } - } -} - -fn false_fn() -> bool { - false -} - -fn true_fn() -> bool { - true -} - -fn default_db_cache_capacity_mb() -> f64 { - 200.0 -} - -fn default_pdu_cache_capacity() -> u32 { - 100_000 -} - -fn default_sqlite_wal_clean_second_interval() -> u32 { - 1 * 60 // every minute -} - -fn default_max_request_size() -> u32 { - 20 * 1024 * 1024 // Default to 20 MB -} - -fn default_max_concurrent_requests() -> u16 { - 100 -} - -fn default_log() -> String { - "info,state_res=warn,rocket=off,_=off,sled=off".to_owned() -} - -#[cfg(feature = "sled")] -pub type Engine = abstraction::sled::Engine; - -#[cfg(feature = "sqlite")] -pub type 
Engine = abstraction::sqlite::Engine; - -#[cfg(feature = "heed")] -pub type Engine = abstraction::heed::Engine; - -pub struct Database { - _db: Arc, - pub globals: globals::Globals, - pub users: users::Users, - pub uiaa: uiaa::Uiaa, - pub rooms: rooms::Rooms, - pub account_data: account_data::AccountData, - pub media: media::Media, - pub key_backups: key_backups::KeyBackups, - pub transaction_ids: transaction_ids::TransactionIds, - pub sending: sending::Sending, - pub admin: admin::Admin, - pub appservice: appservice::Appservice, - pub pusher: pusher::PushData, -} - -impl Database { - /// Tries to remove the old database but ignores all errors. - pub fn try_remove(server_name: &str) -> Result<()> { - let mut path = ProjectDirs::from("xyz", "koesters", "conduit") - .ok_or_else(|| Error::bad_config("The OS didn't return a valid home directory path."))? - .data_dir() - .to_path_buf(); - path.push(server_name); - let _ = remove_dir_all(path); - - Ok(()) - } - - fn check_sled_or_sqlite_db(config: &Config) -> Result<()> { - #[cfg(feature = "backend_sqlite")] - { - let path = Path::new(&config.database_path); - - let sled_exists = path.join("db").exists(); - let sqlite_exists = path.join("conduit.db").exists(); - if sled_exists { - if sqlite_exists { - // most likely an in-place directory, only warn - warn!("Both sled and sqlite databases are detected in database directory"); - warn!("Currently running from the sqlite database, but consider removing sled database files to free up space") - } else { - error!( - "Sled database detected, conduit now uses sqlite for database operations" - ); - error!("This database must be converted to sqlite, go to https://github.com/ShadowJonathan/conduit_toolbox#conduit_sled_to_sqlite"); - return Err(Error::bad_config( - "sled database detected, migrate to sqlite", - )); - } - } - } - - Ok(()) - } - - /// Load an existing database or create a new one. - pub async fn load_or_create(config: &Config) -> Result>> { - Self::check_sled_or_sqlite_db(config)?; - - if !Path::new(&config.database_path).exists() { - std::fs::create_dir_all(&config.database_path) - .map_err(|_| Error::BadConfig("Database folder doesn't exists and couldn't be created (e.g. due to missing permissions). Please create the database folder yourself."))?; - } - - let builder = Engine::open(config)?; - - if config.max_request_size < 1024 { - eprintln!("ERROR: Max request size is less than 1KB. 
Please increase it."); - } - - let (admin_sender, admin_receiver) = mpsc::unbounded(); - let (sending_sender, sending_receiver) = mpsc::unbounded(); - - let db = Arc::new(TokioRwLock::from(Self { - _db: builder.clone(), - users: users::Users { - userid_password: builder.open_tree("userid_password")?, - userid_displayname: builder.open_tree("userid_displayname")?, - userid_avatarurl: builder.open_tree("userid_avatarurl")?, - userid_blurhash: builder.open_tree("userid_blurhash")?, - userdeviceid_token: builder.open_tree("userdeviceid_token")?, - userdeviceid_metadata: builder.open_tree("userdeviceid_metadata")?, - userid_devicelistversion: builder.open_tree("userid_devicelistversion")?, - token_userdeviceid: builder.open_tree("token_userdeviceid")?, - onetimekeyid_onetimekeys: builder.open_tree("onetimekeyid_onetimekeys")?, - userid_lastonetimekeyupdate: builder.open_tree("userid_lastonetimekeyupdate")?, - keychangeid_userid: builder.open_tree("keychangeid_userid")?, - keyid_key: builder.open_tree("keyid_key")?, - userid_masterkeyid: builder.open_tree("userid_masterkeyid")?, - userid_selfsigningkeyid: builder.open_tree("userid_selfsigningkeyid")?, - userid_usersigningkeyid: builder.open_tree("userid_usersigningkeyid")?, - todeviceid_events: builder.open_tree("todeviceid_events")?, - }, - uiaa: uiaa::Uiaa { - userdevicesessionid_uiaainfo: builder.open_tree("userdevicesessionid_uiaainfo")?, - userdevicesessionid_uiaarequest: builder - .open_tree("userdevicesessionid_uiaarequest")?, - }, - rooms: rooms::Rooms { - edus: rooms::RoomEdus { - readreceiptid_readreceipt: builder.open_tree("readreceiptid_readreceipt")?, - roomuserid_privateread: builder.open_tree("roomuserid_privateread")?, // "Private" read receipt - roomuserid_lastprivatereadupdate: builder - .open_tree("roomuserid_lastprivatereadupdate")?, - typingid_userid: builder.open_tree("typingid_userid")?, - roomid_lasttypingupdate: builder.open_tree("roomid_lasttypingupdate")?, - presenceid_presence: builder.open_tree("presenceid_presence")?, - userid_lastpresenceupdate: builder.open_tree("userid_lastpresenceupdate")?, - }, - pduid_pdu: builder.open_tree("pduid_pdu")?, - eventid_pduid: builder.open_tree("eventid_pduid")?, - roomid_pduleaves: builder.open_tree("roomid_pduleaves")?, - - alias_roomid: builder.open_tree("alias_roomid")?, - aliasid_alias: builder.open_tree("aliasid_alias")?, - publicroomids: builder.open_tree("publicroomids")?, - - tokenids: builder.open_tree("tokenids")?, - - roomserverids: builder.open_tree("roomserverids")?, - serverroomids: builder.open_tree("serverroomids")?, - userroomid_joined: builder.open_tree("userroomid_joined")?, - roomuserid_joined: builder.open_tree("roomuserid_joined")?, - roomid_joinedcount: builder.open_tree("roomid_joinedcount")?, - roomid_invitedcount: builder.open_tree("roomid_invitedcount")?, - roomuseroncejoinedids: builder.open_tree("roomuseroncejoinedids")?, - userroomid_invitestate: builder.open_tree("userroomid_invitestate")?, - roomuserid_invitecount: builder.open_tree("roomuserid_invitecount")?, - userroomid_leftstate: builder.open_tree("userroomid_leftstate")?, - roomuserid_leftcount: builder.open_tree("roomuserid_leftcount")?, - - userroomid_notificationcount: builder.open_tree("userroomid_notificationcount")?, - userroomid_highlightcount: builder.open_tree("userroomid_highlightcount")?, - - statekey_shortstatekey: builder.open_tree("statekey_shortstatekey")?, - shortstatekey_statekey: builder.open_tree("shortstatekey_statekey")?, - - shorteventid_authchain: 
builder.open_tree("shorteventid_authchain")?, - - roomid_shortroomid: builder.open_tree("roomid_shortroomid")?, - - shortstatehash_statediff: builder.open_tree("shortstatehash_statediff")?, - eventid_shorteventid: builder.open_tree("eventid_shorteventid")?, - shorteventid_eventid: builder.open_tree("shorteventid_eventid")?, - shorteventid_shortstatehash: builder.open_tree("shorteventid_shortstatehash")?, - roomid_shortstatehash: builder.open_tree("roomid_shortstatehash")?, - roomsynctoken_shortstatehash: builder.open_tree("roomsynctoken_shortstatehash")?, - statehash_shortstatehash: builder.open_tree("statehash_shortstatehash")?, - - eventid_outlierpdu: builder.open_tree("eventid_outlierpdu")?, - softfailedeventids: builder.open_tree("softfailedeventids")?, - - referencedevents: builder.open_tree("referencedevents")?, - pdu_cache: Mutex::new(LruCache::new( - config - .pdu_cache_capacity - .try_into() - .expect("pdu cache capacity fits into usize"), - )), - auth_chain_cache: Mutex::new(LruCache::new(1_000_000)), - shorteventid_cache: Mutex::new(LruCache::new(1_000_000)), - eventidshort_cache: Mutex::new(LruCache::new(1_000_000)), - shortstatekey_cache: Mutex::new(LruCache::new(1_000_000)), - statekeyshort_cache: Mutex::new(LruCache::new(1_000_000)), - our_real_users_cache: RwLock::new(HashMap::new()), - appservice_in_room_cache: RwLock::new(HashMap::new()), - stateinfo_cache: Mutex::new(LruCache::new(1000)), - }, - account_data: account_data::AccountData { - roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, - roomusertype_roomuserdataid: builder.open_tree("roomusertype_roomuserdataid")?, - }, - media: media::Media { - mediaid_file: builder.open_tree("mediaid_file")?, - }, - key_backups: key_backups::KeyBackups { - backupid_algorithm: builder.open_tree("backupid_algorithm")?, - backupid_etag: builder.open_tree("backupid_etag")?, - backupkeyid_backup: builder.open_tree("backupkeyid_backup")?, - }, - transaction_ids: transaction_ids::TransactionIds { - userdevicetxnid_response: builder.open_tree("userdevicetxnid_response")?, - }, - sending: sending::Sending { - servername_educount: builder.open_tree("servername_educount")?, - servernameevent_data: builder.open_tree("servernameevent_data")?, - servercurrentevent_data: builder.open_tree("servercurrentevent_data")?, - maximum_requests: Arc::new(Semaphore::new(config.max_concurrent_requests as usize)), - sender: sending_sender, - }, - admin: admin::Admin { - sender: admin_sender, - }, - appservice: appservice::Appservice { - cached_registrations: Arc::new(RwLock::new(HashMap::new())), - id_appserviceregistrations: builder.open_tree("id_appserviceregistrations")?, - }, - pusher: pusher::PushData { - senderkey_pusher: builder.open_tree("senderkey_pusher")?, - }, - globals: globals::Globals::load( - builder.open_tree("global")?, - builder.open_tree("server_signingkeys")?, - config.clone(), - )?, - })); - - { - let db = db.read().await; - // MIGRATIONS - // TODO: database versions of new dbs should probably not be 0 - if db.globals.database_version()? 
< 1 { - for (roomserverid, _) in db.rooms.roomserverids.iter() { - let mut parts = roomserverid.split(|&b| b == 0xff); - let room_id = parts.next().expect("split always returns one element"); - let servername = match parts.next() { - Some(s) => s, - None => { - error!("Migration: Invalid roomserverid in db."); - continue; - } - }; - let mut serverroomid = servername.to_vec(); - serverroomid.push(0xff); - serverroomid.extend_from_slice(room_id); - - db.rooms.serverroomids.insert(&serverroomid, &[])?; - } - - db.globals.bump_database_version(1)?; - - println!("Migration: 0 -> 1 finished"); - } - - if db.globals.database_version()? < 2 { - // We accidentally inserted hashed versions of "" into the db instead of just "" - for (userid, password) in db.users.userid_password.iter() { - let password = utils::string_from_bytes(&password); - - let empty_hashed_password = password.map_or(false, |password| { - argon2::verify_encoded(&password, b"").unwrap_or(false) - }); - - if empty_hashed_password { - db.users.userid_password.insert(&userid, b"")?; - } - } - - db.globals.bump_database_version(2)?; - - println!("Migration: 1 -> 2 finished"); - } - - if db.globals.database_version()? < 3 { - // Move media to filesystem - for (key, content) in db.media.mediaid_file.iter() { - if content.is_empty() { - continue; - } - - let path = db.globals.get_media_file(&key); - let mut file = fs::File::create(path)?; - file.write_all(&content)?; - db.media.mediaid_file.insert(&key, &[])?; - } - - db.globals.bump_database_version(3)?; - - println!("Migration: 2 -> 3 finished"); - } - - if db.globals.database_version()? < 4 { - // Add federated users to db as deactivated - for our_user in db.users.iter() { - let our_user = our_user?; - if db.users.is_deactivated(&our_user)? { - continue; - } - for room in db.rooms.rooms_joined(&our_user) { - for user in db.rooms.room_members(&room?) { - let user = user?; - if user.server_name() != db.globals.server_name() { - println!("Migration: Creating user {}", user); - db.users.create(&user, None)?; - } - } - } - } - - db.globals.bump_database_version(4)?; - - println!("Migration: 3 -> 4 finished"); - } - - if db.globals.database_version()? < 5 { - // Upgrade user data store - for (roomuserdataid, _) in db.account_data.roomuserdataid_accountdata.iter() { - let mut parts = roomuserdataid.split(|&b| b == 0xff); - let room_id = parts.next().unwrap(); - let user_id = parts.next().unwrap(); - let event_type = roomuserdataid.rsplit(|&b| b == 0xff).next().unwrap(); - - let mut key = room_id.to_vec(); - key.push(0xff); - key.extend_from_slice(user_id); - key.push(0xff); - key.extend_from_slice(event_type); - - db.account_data - .roomusertype_roomuserdataid - .insert(&key, &roomuserdataid)?; - } - - db.globals.bump_database_version(5)?; - - println!("Migration: 4 -> 5 finished"); - } - - if db.globals.database_version()? < 6 { - // Set room member count - for (roomid, _) in db.rooms.roomid_shortstatehash.iter() { - let room_id = - RoomId::try_from(utils::string_from_bytes(&roomid).unwrap()).unwrap(); - - db.rooms.update_joined_count(&room_id, &db)?; - } - - db.globals.bump_database_version(6)?; - - println!("Migration: 5 -> 6 finished"); - } - - if db.globals.database_version()? 
< 7 { - // Upgrade state store - let mut last_roomstates: HashMap = HashMap::new(); - let mut current_sstatehash: Option = None; - let mut current_room = None; - let mut current_state = HashSet::new(); - let mut counter = 0; - - let mut handle_state = - |current_sstatehash: u64, - current_room: &RoomId, - current_state: HashSet<_>, - last_roomstates: &mut HashMap<_, _>| { - counter += 1; - println!("counter: {}", counter); - let last_roomsstatehash = last_roomstates.get(current_room); - - let states_parents = last_roomsstatehash.map_or_else( - || Ok(Vec::new()), - |&last_roomsstatehash| { - db.rooms.load_shortstatehash_info(dbg!(last_roomsstatehash)) - }, - )?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew = current_state - .difference(&parent_stateinfo.1) - .copied() - .collect::>(); - - let statediffremoved = parent_stateinfo - .1 - .difference(¤t_state) - .copied() - .collect::>(); - - (statediffnew, statediffremoved) - } else { - (current_state, HashSet::new()) - }; - - db.rooms.save_state_from_diff( - dbg!(current_sstatehash), - statediffnew, - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - - /* - let mut tmp = db.rooms.load_shortstatehash_info(¤t_sstatehash, &db)?; - let state = tmp.pop().unwrap(); - println!( - "{}\t{}{:?}: {:?} + {:?} - {:?}", - current_room, - " ".repeat(tmp.len()), - utils::u64_from_bytes(¤t_sstatehash).unwrap(), - tmp.last().map(|b| utils::u64_from_bytes(&b.0).unwrap()), - state - .2 - .iter() - .map(|b| utils::u64_from_bytes(&b[size_of::()..]).unwrap()) - .collect::>(), - state - .3 - .iter() - .map(|b| utils::u64_from_bytes(&b[size_of::()..]).unwrap()) - .collect::>() - ); - */ - - Ok::<_, Error>(()) - }; - - for (k, seventid) in db._db.open_tree("stateid_shorteventid")?.iter() { - let sstatehash = utils::u64_from_bytes(&k[0..size_of::()]) - .expect("number of bytes is correct"); - let sstatekey = k[size_of::()..].to_vec(); - if Some(sstatehash) != current_sstatehash { - if let Some(current_sstatehash) = current_sstatehash { - handle_state( - current_sstatehash, - current_room.as_ref().unwrap(), - current_state, - &mut last_roomstates, - )?; - last_roomstates - .insert(current_room.clone().unwrap(), current_sstatehash); - } - current_state = HashSet::new(); - current_sstatehash = Some(sstatehash); - - let event_id = db - .rooms - .shorteventid_eventid - .get(&seventid) - .unwrap() - .unwrap(); - let event_id = - EventId::try_from(utils::string_from_bytes(&event_id).unwrap()) - .unwrap(); - let pdu = db.rooms.get_pdu(&event_id).unwrap().unwrap(); - - if Some(&pdu.room_id) != current_room.as_ref() { - current_room = Some(pdu.room_id.clone()); - } - } - - let mut val = sstatekey; - val.extend_from_slice(&seventid); - current_state.insert(val.try_into().expect("size is correct")); - } - - if let Some(current_sstatehash) = current_sstatehash { - handle_state( - current_sstatehash, - current_room.as_ref().unwrap(), - current_state, - &mut last_roomstates, - )?; - } - - db.globals.bump_database_version(7)?; - - println!("Migration: 6 -> 7 finished"); - } - - if db.globals.database_version()? 
< 8 { - // Generate short room ids for all rooms - for (room_id, _) in db.rooms.roomid_shortstatehash.iter() { - let shortroomid = db.globals.next_count()?.to_be_bytes(); - db.rooms.roomid_shortroomid.insert(&room_id, &shortroomid)?; - println!("Migration: 8"); - } - // Update pduids db layout - let mut batch = db.rooms.pduid_pdu.iter().filter_map(|(key, v)| { - if !key.starts_with(b"!") { - return None; - } - let mut parts = key.splitn(2, |&b| b == 0xff); - let room_id = parts.next().unwrap(); - let count = parts.next().unwrap(); - - let short_room_id = db - .rooms - .roomid_shortroomid - .get(room_id) - .unwrap() - .expect("shortroomid should exist"); - - let mut new_key = short_room_id; - new_key.extend_from_slice(count); - - Some((new_key, v)) - }); - - db.rooms.pduid_pdu.insert_batch(&mut batch)?; - - let mut batch2 = db.rooms.eventid_pduid.iter().filter_map(|(k, value)| { - if !value.starts_with(b"!") { - return None; - } - let mut parts = value.splitn(2, |&b| b == 0xff); - let room_id = parts.next().unwrap(); - let count = parts.next().unwrap(); - - let short_room_id = db - .rooms - .roomid_shortroomid - .get(room_id) - .unwrap() - .expect("shortroomid should exist"); - - let mut new_value = short_room_id; - new_value.extend_from_slice(count); - - Some((k, new_value)) - }); - - db.rooms.eventid_pduid.insert_batch(&mut batch2)?; - - db.globals.bump_database_version(8)?; - - println!("Migration: 7 -> 8 finished"); - } - - if db.globals.database_version()? < 9 { - // Update tokenids db layout - let mut iter = db - .rooms - .tokenids - .iter() - .filter_map(|(key, _)| { - if !key.starts_with(b"!") { - return None; - } - let mut parts = key.splitn(4, |&b| b == 0xff); - let room_id = parts.next().unwrap(); - let word = parts.next().unwrap(); - let _pdu_id_room = parts.next().unwrap(); - let pdu_id_count = parts.next().unwrap(); - - let short_room_id = db - .rooms - .roomid_shortroomid - .get(room_id) - .unwrap() - .expect("shortroomid should exist"); - let mut new_key = short_room_id; - new_key.extend_from_slice(word); - new_key.push(0xff); - new_key.extend_from_slice(pdu_id_count); - println!("old {:?}", key); - println!("new {:?}", new_key); - Some((new_key, Vec::new())) - }) - .peekable(); - - while iter.peek().is_some() { - db.rooms - .tokenids - .insert_batch(&mut iter.by_ref().take(1000))?; - println!("smaller batch done"); - } - - println!("Deleting starts"); - - let batch2: Vec<_> = db - .rooms - .tokenids - .iter() - .filter_map(|(key, _)| { - if key.starts_with(b"!") { - println!("del {:?}", key); - Some(key) - } else { - None - } - }) - .collect(); - - for key in batch2 { - println!("del"); - db.rooms.tokenids.remove(&key)?; - } - - db.globals.bump_database_version(9)?; - - println!("Migration: 8 -> 9 finished"); - } - - if db.globals.database_version()? 
< 10 { - // Add other direction for shortstatekeys - for (statekey, shortstatekey) in db.rooms.statekey_shortstatekey.iter() { - db.rooms - .shortstatekey_statekey - .insert(&shortstatekey, &statekey)?; - } - - // Force E2EE device list updates so we can send them over federation - for user_id in db.users.iter().filter_map(|r| r.ok()) { - db.users - .mark_device_key_update(&user_id, &db.rooms, &db.globals)?; - } - - db.globals.bump_database_version(10)?; - - println!("Migration: 9 -> 10 finished"); - } - } - - let guard = db.read().await; - - // This data is probably outdated - guard.rooms.edus.presenceid_presence.clear()?; - - guard.admin.start_handler(Arc::clone(&db), admin_receiver); - guard - .sending - .start_handler(Arc::clone(&db), sending_receiver); - - drop(guard); - - #[cfg(feature = "sqlite")] - { - Self::start_wal_clean_task(Arc::clone(&db), config).await; - } - - Ok(db) - } - - #[cfg(feature = "conduit_bin")] - pub async fn start_on_shutdown_tasks(db: Arc>, shutdown: Shutdown) { - use tracing::info; - - tokio::spawn(async move { - shutdown.await; - - info!(target: "shutdown-sync", "Received shutdown notification, notifying sync helpers..."); - - db.read().await.globals.rotate.fire(); - }); - } - - pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) { - let userid_bytes = user_id.as_bytes().to_vec(); - let mut userid_prefix = userid_bytes.clone(); - userid_prefix.push(0xff); - - let mut userdeviceid_prefix = userid_prefix.clone(); - userdeviceid_prefix.extend_from_slice(device_id.as_bytes()); - userdeviceid_prefix.push(0xff); - - let mut futures = FuturesUnordered::new(); - - // Return when *any* user changed his key - // TODO: only send for user they share a room with - futures.push( - self.users - .todeviceid_events - .watch_prefix(&userdeviceid_prefix), - ); - - futures.push(self.rooms.userroomid_joined.watch_prefix(&userid_prefix)); - futures.push( - self.rooms - .userroomid_invitestate - .watch_prefix(&userid_prefix), - ); - futures.push(self.rooms.userroomid_leftstate.watch_prefix(&userid_prefix)); - futures.push( - self.rooms - .userroomid_notificationcount - .watch_prefix(&userid_prefix), - ); - futures.push( - self.rooms - .userroomid_highlightcount - .watch_prefix(&userid_prefix), - ); - - // Events for rooms we are in - for room_id in self.rooms.rooms_joined(user_id).filter_map(|r| r.ok()) { - let short_roomid = self - .rooms - .get_shortroomid(&room_id) - .ok() - .flatten() - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let roomid_bytes = room_id.as_bytes().to_vec(); - let mut roomid_prefix = roomid_bytes.clone(); - roomid_prefix.push(0xff); - - // PDUs - futures.push(self.rooms.pduid_pdu.watch_prefix(&short_roomid)); - - // EDUs - futures.push( - self.rooms - .edus - .roomid_lasttypingupdate - .watch_prefix(&roomid_bytes), - ); - - futures.push( - self.rooms - .edus - .readreceiptid_readreceipt - .watch_prefix(&roomid_prefix), - ); - - // Key changes - futures.push(self.users.keychangeid_userid.watch_prefix(&roomid_prefix)); - - // Room account data - let mut roomuser_prefix = roomid_prefix.clone(); - roomuser_prefix.extend_from_slice(&userid_prefix); - - futures.push( - self.account_data - .roomusertype_roomuserdataid - .watch_prefix(&roomuser_prefix), - ); - } - - let mut globaluserdata_prefix = vec![0xff]; - globaluserdata_prefix.extend_from_slice(&userid_prefix); - - futures.push( - self.account_data - .roomusertype_roomuserdataid - .watch_prefix(&globaluserdata_prefix), - ); - - // More key changes (used when user is not joined to any 
rooms) - futures.push(self.users.keychangeid_userid.watch_prefix(&userid_prefix)); - - // One time keys - futures.push( - self.users - .userid_lastonetimekeyupdate - .watch_prefix(&userid_bytes), - ); - - futures.push(Box::pin(self.globals.rotate.watch())); - - // Wait until one of them finds something - futures.next().await; - } - - #[tracing::instrument(skip(self))] - pub fn flush(&self) -> Result<()> { - let start = std::time::Instant::now(); - - let res = self._db.flush(); - - debug!("flush: took {:?}", start.elapsed()); - - res - } - - #[cfg(feature = "sqlite")] - #[tracing::instrument(skip(self))] - pub fn flush_wal(&self) -> Result<()> { - self._db.flush_wal() - } - - #[cfg(feature = "sqlite")] - #[tracing::instrument(skip(db, config))] - pub async fn start_wal_clean_task(db: Arc>, config: &Config) { - use tokio::time::interval; - - #[cfg(unix)] - use tokio::signal::unix::{signal, SignalKind}; - use tracing::info; - - use std::time::{Duration, Instant}; - - let timer_interval = Duration::from_secs(config.sqlite_wal_clean_second_interval as u64); - - tokio::spawn(async move { - let mut i = interval(timer_interval); - #[cfg(unix)] - let mut s = signal(SignalKind::hangup()).unwrap(); - - loop { - #[cfg(unix)] - tokio::select! { - _ = i.tick() => { - info!("wal-trunc: Timer ticked"); - } - _ = s.recv() => { - info!("wal-trunc: Received SIGHUP"); - } - }; - #[cfg(not(unix))] - { - i.tick().await; - info!("wal-trunc: Timer ticked") - } - - let start = Instant::now(); - if let Err(e) = db.read().await.flush_wal() { - error!("wal-trunc: Errored: {}", e); - } else { - info!("wal-trunc: Flushed in {:?}", start.elapsed()); - } - } - }); - } -} - -pub struct DatabaseGuard(OwnedRwLockReadGuard); - -impl Deref for DatabaseGuard { - type Target = OwnedRwLockReadGuard; - - fn deref(&self) -> &Self::Target { - &self.0 - } -} - -#[rocket::async_trait] -impl<'r> FromRequest<'r> for DatabaseGuard { - type Error = (); - - async fn from_request(req: &'r Request<'_>) -> rocket::request::Outcome { - let db = try_outcome!(req.guard::<&State>>>().await); - - Ok(DatabaseGuard(Arc::clone(db).read_owned().await)).or_forward(()) - } -} - -impl From> for DatabaseGuard { - fn from(val: OwnedRwLockReadGuard) -> Self { - Self(val) - } -} diff --git a/src/database/abstraction.rs b/src/database/abstraction.rs index 11bbc3b1..93660f9f 100644 --- a/src/database/abstraction.rs +++ b/src/database/abstraction.rs @@ -12,13 +12,35 @@ pub mod sqlite; #[cfg(feature = "heed")] pub mod heed; -pub trait DatabaseEngine: Sized { - fn open(config: &Config) -> Result>; - fn open_tree(self: &Arc, name: &'static str) -> Result>; - fn flush(self: &Arc) -> Result<()>; +#[cfg(feature = "rocksdb")] +pub mod rocksdb; + +#[cfg(feature = "persy")] +pub mod persy; + +#[cfg(any( + feature = "sqlite", + feature = "rocksdb", + feature = "heed", + feature = "persy" +))] +pub mod watchers; + +pub trait KeyValueDatabaseEngine: Send + Sync { + fn open(config: &Config) -> Result + where + Self: Sized; + fn open_tree(&self, name: &'static str) -> Result>; + fn flush(&self) -> Result<()>; + fn cleanup(&self) -> Result<()> { + Ok(()) + } + fn memory_usage(&self) -> Result { + Ok("Current database engine does not support memory usage reporting.".to_owned()) + } } -pub trait Tree: Send + Sync { +pub trait KvTree: Send + Sync { fn get(&self, key: &[u8]) -> Result>>; fn insert(&self, key: &[u8], value: &[u8]) -> Result<()>; diff --git a/src/database/abstraction/heed.rs b/src/database/abstraction/heed.rs index e767e22b..9cca0975 100644 --- 
a/src/database/abstraction/heed.rs +++ b/src/database/abstraction/heed.rs @@ -1,15 +1,13 @@ -use super::super::Config; +use super::{super::Config, watchers::Watchers}; use crossbeam::channel::{bounded, Sender as ChannelSender}; use threadpool::ThreadPool; use crate::{Error, Result}; use std::{ - collections::HashMap, future::Future, pin::Pin, - sync::{Arc, Mutex, RwLock}, + sync::{Arc, Mutex}, }; -use tokio::sync::oneshot::Sender; use super::{DatabaseEngine, Tree}; @@ -23,7 +21,7 @@ pub struct Engine { pub struct EngineTree { engine: Arc, tree: Arc, - watchers: RwLock, Vec>>>, + watchers: Watchers, } fn convert_error(error: heed::Error) -> Error { @@ -60,7 +58,7 @@ impl DatabaseEngine for Engine { .create_database(Some(name)) .map_err(convert_error)?, ), - watchers: RwLock::new(HashMap::new()), + watchers: Default::default(), })) } @@ -71,7 +69,6 @@ impl DatabaseEngine for Engine { } impl EngineTree { - #[tracing::instrument(skip(self, tree, from, backwards))] fn iter_from_thread( &self, tree: Arc, @@ -96,7 +93,6 @@ impl EngineTree { } } -#[tracing::instrument(skip(tree, txn, from, backwards))] fn iter_from_thread_work( tree: Arc, txn: &heed::RoTxn<'_>, @@ -128,7 +124,6 @@ fn iter_from_thread_work( } impl Tree for EngineTree { - #[tracing::instrument(skip(self, key))] fn get(&self, key: &[u8]) -> Result>> { let txn = self.engine.env.read_txn().map_err(convert_error)?; Ok(self @@ -138,40 +133,16 @@ impl Tree for EngineTree { .map(|s| s.to_vec())) } - #[tracing::instrument(skip(self, key, value))] fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { let mut txn = self.engine.env.write_txn().map_err(convert_error)?; self.tree .put(&mut txn, &key, &value) .map_err(convert_error)?; txn.commit().map_err(convert_error)?; - - let watchers = self.watchers.read().unwrap(); - let mut triggered = Vec::new(); - - for length in 0..=key.len() { - if watchers.contains_key(&key[..length]) { - triggered.push(&key[..length]); - } - } - - drop(watchers); - - if !triggered.is_empty() { - let mut watchers = self.watchers.write().unwrap(); - for prefix in triggered { - if let Some(txs) = watchers.remove(prefix) { - for tx in txs { - let _ = tx.send(()); - } - } - } - }; - + self.watchers.wake(key); Ok(()) } - #[tracing::instrument(skip(self, key))] fn remove(&self, key: &[u8]) -> Result<()> { let mut txn = self.engine.env.write_txn().map_err(convert_error)?; self.tree.delete(&mut txn, &key).map_err(convert_error)?; @@ -179,12 +150,10 @@ impl Tree for EngineTree { Ok(()) } - #[tracing::instrument(skip(self))] fn iter<'a>(&'a self) -> Box, Vec)> + Send + 'a> { self.iter_from(&[], false) } - #[tracing::instrument(skip(self, from, backwards))] fn iter_from( &self, from: &[u8], @@ -193,7 +162,6 @@ impl Tree for EngineTree { self.iter_from_thread(Arc::clone(&self.tree), from.to_vec(), backwards) } - #[tracing::instrument(skip(self, key))] fn increment(&self, key: &[u8]) -> Result> { let mut txn = self.engine.env.write_txn().map_err(convert_error)?; @@ -210,7 +178,6 @@ impl Tree for EngineTree { Ok(new) } - #[tracing::instrument(skip(self, prefix))] fn scan_prefix<'a>( &'a self, prefix: Vec, @@ -221,20 +188,7 @@ impl Tree for EngineTree { ) } - #[tracing::instrument(skip(self, prefix))] fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { - let (tx, rx) = tokio::sync::oneshot::channel(); - - self.watchers - .write() - .unwrap() - .entry(prefix.to_vec()) - .or_default() - .push(tx); - - Box::pin(async move { - // Tx is never destroyed - rx.await.unwrap(); - }) + self.watchers.watch(prefix) } } diff 
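The `Database::watch` helper earlier in this hunk, together with `KvTree::watch_prefix` and the new shared `Watchers` type, is what drives long-polling wakeups: each `watch_prefix` call hands back a future that resolves once some key under that prefix is written, and the caller races many of them. A minimal sketch of that calling pattern, assuming only the `watch_prefix` shape from this patch and the `futures-util` crate (names below are illustrative, not from the diff):

use futures_util::{stream::FuturesUnordered, StreamExt};
use std::{future::Future, pin::Pin};

// Assumed trait surface, mirroring KvTree::watch_prefix in this patch.
trait PrefixWatch {
    fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin<Box<dyn Future<Output = ()> + Send + 'a>>;
}

// Block until *any* of the given prefixes sees a write, as Database::watch does
// for to-device events, membership changes, PDUs, EDUs and account data.
async fn wait_for_any_change(tree: &impl PrefixWatch, prefixes: &[Vec<u8>]) {
    let mut futures = FuturesUnordered::new();
    for prefix in prefixes {
        futures.push(tree.watch_prefix(prefix));
    }
    futures.next().await; // the first future to complete wakes the sync request
}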
--git a/src/database/abstraction/persy.rs b/src/database/abstraction/persy.rs new file mode 100644 index 00000000..e78e731d --- /dev/null +++ b/src/database/abstraction/persy.rs @@ -0,0 +1,197 @@ +use crate::{ + database::{ + abstraction::{watchers::Watchers, DatabaseEngine, Tree}, + Config, + }, + Result, +}; +use persy::{ByteVec, OpenOptions, Persy, Transaction, TransactionConfig, ValueMode}; + +use std::{future::Future, pin::Pin, sync::Arc}; + +use tracing::warn; + +pub struct Engine { + persy: Persy, +} + +impl DatabaseEngine for Arc { + fn open(config: &Config) -> Result { + let mut cfg = persy::Config::new(); + cfg.change_cache_size((config.db_cache_capacity_mb * 1024.0 * 1024.0) as u64); + + let persy = OpenOptions::new() + .create(true) + .config(cfg) + .open(&format!("{}/db.persy", config.database_path))?; + Ok(Arc::new(Engine { persy })) + } + + fn open_tree(&self, name: &'static str) -> Result> { + // Create if it doesn't exist + if !self.persy.exists_index(name)? { + let mut tx = self.persy.begin()?; + tx.create_index::(name, ValueMode::Replace)?; + tx.prepare()?.commit()?; + } + + Ok(Arc::new(PersyTree { + persy: self.persy.clone(), + name: name.to_owned(), + watchers: Watchers::default(), + })) + } + + fn flush(&self) -> Result<()> { + Ok(()) + } +} + +pub struct PersyTree { + persy: Persy, + name: String, + watchers: Watchers, +} + +impl PersyTree { + fn begin(&self) -> Result { + Ok(self + .persy + .begin_with(TransactionConfig::new().set_background_sync(true))?) + } +} + +impl Tree for PersyTree { + fn get(&self, key: &[u8]) -> Result>> { + let result = self + .persy + .get::(&self.name, &ByteVec::from(key))? + .next() + .map(|v| (*v).to_owned()); + Ok(result) + } + + fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { + self.insert_batch(&mut Some((key.to_owned(), value.to_owned())).into_iter())?; + self.watchers.wake(key); + Ok(()) + } + + fn insert_batch<'a>(&self, iter: &mut dyn Iterator, Vec)>) -> Result<()> { + let mut tx = self.begin()?; + for (key, value) in iter { + tx.put::( + &self.name, + ByteVec::from(key.clone()), + ByteVec::from(value), + )?; + } + tx.prepare()?.commit()?; + Ok(()) + } + + fn increment_batch<'a>(&self, iter: &mut dyn Iterator>) -> Result<()> { + let mut tx = self.begin()?; + for key in iter { + let old = tx + .get::(&self.name, &ByteVec::from(key.clone()))? + .next() + .map(|v| (*v).to_owned()); + let new = crate::utils::increment(old.as_deref()).unwrap(); + tx.put::(&self.name, ByteVec::from(key), ByteVec::from(new))?; + } + tx.prepare()?.commit()?; + Ok(()) + } + + fn remove(&self, key: &[u8]) -> Result<()> { + let mut tx = self.begin()?; + tx.remove::(&self.name, ByteVec::from(key), None)?; + tx.prepare()?.commit()?; + Ok(()) + } + + fn iter<'a>(&'a self) -> Box, Vec)> + 'a> { + let iter = self.persy.range::(&self.name, ..); + match iter { + Ok(iter) => Box::new(iter.filter_map(|(k, v)| { + v.into_iter() + .map(|val| ((*k).to_owned().into(), (*val).to_owned().into())) + .next() + })), + Err(e) => { + warn!("error iterating {:?}", e); + Box::new(std::iter::empty()) + } + } + } + + fn iter_from<'a>( + &'a self, + from: &[u8], + backwards: bool, + ) -> Box, Vec)> + 'a> { + let range = if backwards { + self.persy + .range::(&self.name, ..=ByteVec::from(from)) + } else { + self.persy + .range::(&self.name, ByteVec::from(from)..) 
+ }; + match range { + Ok(iter) => { + let map = iter.filter_map(|(k, v)| { + v.into_iter() + .map(|val| ((*k).to_owned().into(), (*val).to_owned().into())) + .next() + }); + if backwards { + Box::new(map.rev()) + } else { + Box::new(map) + } + } + Err(e) => { + warn!("error iterating with prefix {:?}", e); + Box::new(std::iter::empty()) + } + } + } + + fn increment(&self, key: &[u8]) -> Result> { + self.increment_batch(&mut Some(key.to_owned()).into_iter())?; + Ok(self.get(key)?.unwrap()) + } + + fn scan_prefix<'a>( + &'a self, + prefix: Vec, + ) -> Box, Vec)> + 'a> { + let range_prefix = ByteVec::from(prefix.clone()); + let range = self + .persy + .range::(&self.name, range_prefix..); + + match range { + Ok(iter) => { + let owned_prefix = prefix.clone(); + Box::new( + iter.take_while(move |(k, _)| (*k).starts_with(&owned_prefix)) + .filter_map(|(k, v)| { + v.into_iter() + .map(|val| ((*k).to_owned().into(), (*val).to_owned().into())) + .next() + }), + ) + } + Err(e) => { + warn!("error scanning prefix {:?}", e); + Box::new(std::iter::empty()) + } + } + } + + fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { + self.watchers.watch(prefix) + } +} diff --git a/src/database/abstraction/rocksdb.rs b/src/database/abstraction/rocksdb.rs new file mode 100644 index 00000000..34d91d29 --- /dev/null +++ b/src/database/abstraction/rocksdb.rs @@ -0,0 +1,238 @@ +use super::{super::Config, watchers::Watchers, KeyValueDatabaseEngine, KvTree}; +use crate::{utils, Result}; +use std::{ + future::Future, + pin::Pin, + sync::{Arc, RwLock}, +}; + +pub struct Engine { + rocks: rocksdb::DBWithThreadMode, + max_open_files: i32, + cache: rocksdb::Cache, + old_cfs: Vec, +} + +pub struct RocksDbEngineTree<'a> { + db: Arc, + name: &'a str, + watchers: Watchers, + write_lock: RwLock<()>, +} + +fn db_options(max_open_files: i32, rocksdb_cache: &rocksdb::Cache) -> rocksdb::Options { + let mut block_based_options = rocksdb::BlockBasedOptions::default(); + block_based_options.set_block_cache(rocksdb_cache); + + // "Difference of spinning disk" + // https://zhangyuchi.gitbooks.io/rocksdbbook/content/RocksDB-Tuning-Guide.html + block_based_options.set_block_size(4 * 1024); + block_based_options.set_cache_index_and_filter_blocks(true); + + let mut db_opts = rocksdb::Options::default(); + db_opts.set_block_based_table_factory(&block_based_options); + db_opts.set_optimize_filters_for_hits(true); + db_opts.set_skip_stats_update_on_db_open(true); + db_opts.set_level_compaction_dynamic_level_bytes(true); + db_opts.set_target_file_size_base(256 * 1024 * 1024); + //db_opts.set_compaction_readahead_size(2 * 1024 * 1024); + //db_opts.set_use_direct_reads(true); + //db_opts.set_use_direct_io_for_flush_and_compaction(true); + db_opts.create_if_missing(true); + db_opts.increase_parallelism(num_cpus::get() as i32); + db_opts.set_max_open_files(max_open_files); + db_opts.set_compression_type(rocksdb::DBCompressionType::Zstd); + db_opts.set_compaction_style(rocksdb::DBCompactionStyle::Level); + db_opts.optimize_level_style_compaction(10 * 1024 * 1024); + + let prefix_extractor = rocksdb::SliceTransform::create_fixed_prefix(1); + db_opts.set_prefix_extractor(prefix_extractor); + + db_opts +} + +impl KeyValueDatabaseEngine for Arc { + fn open(config: &Config) -> Result { + let cache_capacity_bytes = (config.db_cache_capacity_mb * 1024.0 * 1024.0) as usize; + let rocksdb_cache = rocksdb::Cache::new_lru_cache(cache_capacity_bytes).unwrap(); + + let db_opts = db_options(config.rocksdb_max_open_files, &rocksdb_cache); + + let 
cfs = rocksdb::DBWithThreadMode::::list_cf( + &db_opts, + &config.database_path, + ) + .unwrap_or_default(); + + let db = rocksdb::DBWithThreadMode::::open_cf_descriptors( + &db_opts, + &config.database_path, + cfs.iter().map(|name| { + rocksdb::ColumnFamilyDescriptor::new( + name, + db_options(config.rocksdb_max_open_files, &rocksdb_cache), + ) + }), + )?; + + Ok(Arc::new(Engine { + rocks: db, + max_open_files: config.rocksdb_max_open_files, + cache: rocksdb_cache, + old_cfs: cfs, + })) + } + + fn open_tree(&self, name: &'static str) -> Result> { + if !self.old_cfs.contains(&name.to_owned()) { + // Create if it didn't exist + let _ = self + .rocks + .create_cf(name, &db_options(self.max_open_files, &self.cache)); + } + + Ok(Arc::new(RocksDbEngineTree { + name, + db: Arc::clone(self), + watchers: Watchers::default(), + write_lock: RwLock::new(()), + })) + } + + fn flush(&self) -> Result<()> { + // TODO? + Ok(()) + } + + fn memory_usage(&self) -> Result { + let stats = + rocksdb::perf::get_memory_usage_stats(Some(&[&self.rocks]), Some(&[&self.cache]))?; + Ok(format!( + "Approximate memory usage of all the mem-tables: {:.3} MB\n\ + Approximate memory usage of un-flushed mem-tables: {:.3} MB\n\ + Approximate memory usage of all the table readers: {:.3} MB\n\ + Approximate memory usage by cache: {:.3} MB\n\ + Approximate memory usage by cache pinned: {:.3} MB\n\ + ", + stats.mem_table_total as f64 / 1024.0 / 1024.0, + stats.mem_table_unflushed as f64 / 1024.0 / 1024.0, + stats.mem_table_readers_total as f64 / 1024.0 / 1024.0, + stats.cache_total as f64 / 1024.0 / 1024.0, + self.cache.get_pinned_usage() as f64 / 1024.0 / 1024.0, + )) + } +} + +impl RocksDbEngineTree<'_> { + fn cf(&self) -> Arc> { + self.db.rocks.cf_handle(self.name).unwrap() + } +} + +impl KvTree for RocksDbEngineTree<'_> { + fn get(&self, key: &[u8]) -> Result>> { + Ok(self.db.rocks.get_cf(&self.cf(), key)?) + } + + fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { + let lock = self.write_lock.read().unwrap(); + self.db.rocks.put_cf(&self.cf(), key, value)?; + drop(lock); + + self.watchers.wake(key); + + Ok(()) + } + + fn insert_batch<'a>(&self, iter: &mut dyn Iterator, Vec)>) -> Result<()> { + for (key, value) in iter { + self.db.rocks.put_cf(&self.cf(), key, value)?; + } + + Ok(()) + } + + fn remove(&self, key: &[u8]) -> Result<()> { + Ok(self.db.rocks.delete_cf(&self.cf(), key)?) 
+ } + + fn iter<'a>(&'a self) -> Box, Vec)> + 'a> { + Box::new( + self.db + .rocks + .iterator_cf(&self.cf(), rocksdb::IteratorMode::Start) + //.map(|r| r.unwrap()) + .map(|(k, v)| (Vec::from(k), Vec::from(v))), + ) + } + + fn iter_from<'a>( + &'a self, + from: &[u8], + backwards: bool, + ) -> Box, Vec)> + 'a> { + Box::new( + self.db + .rocks + .iterator_cf( + &self.cf(), + rocksdb::IteratorMode::From( + from, + if backwards { + rocksdb::Direction::Reverse + } else { + rocksdb::Direction::Forward + }, + ), + ) + //.map(|r| r.unwrap()) + .map(|(k, v)| (Vec::from(k), Vec::from(v))), + ) + } + + fn increment(&self, key: &[u8]) -> Result> { + let lock = self.write_lock.write().unwrap(); + + let old = self.db.rocks.get_cf(&self.cf(), key)?; + let new = utils::increment(old.as_deref()).unwrap(); + self.db.rocks.put_cf(&self.cf(), key, &new)?; + + drop(lock); + Ok(new) + } + + fn increment_batch<'a>(&self, iter: &mut dyn Iterator>) -> Result<()> { + let lock = self.write_lock.write().unwrap(); + + for key in iter { + let old = self.db.rocks.get_cf(&self.cf(), &key)?; + let new = utils::increment(old.as_deref()).unwrap(); + self.db.rocks.put_cf(&self.cf(), key, new)?; + } + + drop(lock); + + Ok(()) + } + + fn scan_prefix<'a>( + &'a self, + prefix: Vec, + ) -> Box, Vec)> + 'a> { + Box::new( + self.db + .rocks + .iterator_cf( + &self.cf(), + rocksdb::IteratorMode::From(&prefix, rocksdb::Direction::Forward), + ) + //.map(|r| r.unwrap()) + .map(|(k, v)| (Vec::from(k), Vec::from(v))) + .take_while(move |(k, _)| k.starts_with(&prefix)), + ) + } + + fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { + self.watchers.watch(prefix) + } +} diff --git a/src/database/abstraction/sled.rs b/src/database/abstraction/sled.rs index 35ba1b29..87defc57 100644 --- a/src/database/abstraction/sled.rs +++ b/src/database/abstraction/sled.rs @@ -39,7 +39,6 @@ impl Tree for SledEngineTree { Ok(()) } - #[tracing::instrument(skip(self, iter))] fn insert_batch<'a>(&self, iter: &mut dyn Iterator, Vec)>) -> Result<()> { for (key, value) in iter { self.0.insert(key, value)?; diff --git a/src/database/abstraction/sqlite.rs b/src/database/abstraction/sqlite.rs index 1d2038c5..4961fd74 100644 --- a/src/database/abstraction/sqlite.rs +++ b/src/database/abstraction/sqlite.rs @@ -1,17 +1,15 @@ -use super::{DatabaseEngine, Tree}; +use super::{watchers::Watchers, KeyValueDatabaseEngine, KvTree}; use crate::{database::Config, Result}; -use parking_lot::{Mutex, MutexGuard, RwLock}; +use parking_lot::{Mutex, MutexGuard}; use rusqlite::{Connection, DatabaseName::Main, OptionalExtension}; use std::{ cell::RefCell, - collections::{hash_map, HashMap}, future::Future, path::{Path, PathBuf}, pin::Pin, sync::Arc, }; use thread_local::ThreadLocal; -use tokio::sync::watch; use tracing::debug; thread_local! { @@ -21,7 +19,7 @@ thread_local! 
{ struct PreparedStatementIterator<'a> { pub iterator: Box + 'a>, - pub statement_ref: NonAliasingBox>, + pub _statement_ref: NonAliasingBox>, } impl Iterator for PreparedStatementIterator<'_> { @@ -50,13 +48,13 @@ pub struct Engine { impl Engine { fn prepare_conn(path: &Path, cache_size_kb: u32) -> Result { - let conn = Connection::open(&path)?; + let conn = Connection::open(path)?; - conn.pragma_update(Some(Main), "page_size", &2048)?; - conn.pragma_update(Some(Main), "journal_mode", &"WAL")?; - conn.pragma_update(Some(Main), "synchronous", &"NORMAL")?; - conn.pragma_update(Some(Main), "cache_size", &(-i64::from(cache_size_kb)))?; - conn.pragma_update(Some(Main), "wal_autocheckpoint", &0)?; + conn.pragma_update(Some(Main), "page_size", 2048)?; + conn.pragma_update(Some(Main), "journal_mode", "WAL")?; + conn.pragma_update(Some(Main), "synchronous", "NORMAL")?; + conn.pragma_update(Some(Main), "cache_size", -i64::from(cache_size_kb))?; + conn.pragma_update(Some(Main), "wal_autocheckpoint", 0)?; Ok(conn) } @@ -77,13 +75,13 @@ impl Engine { pub fn flush_wal(self: &Arc) -> Result<()> { self.write_lock() - .pragma_update(Some(Main), "wal_checkpoint", &"RESTART")?; + .pragma_update(Some(Main), "wal_checkpoint", "RESTART")?; Ok(()) } } -impl DatabaseEngine for Engine { - fn open(config: &Config) -> Result> { +impl KeyValueDatabaseEngine for Arc { + fn open(config: &Config) -> Result { let path = Path::new(&config.database_path).join("conduit.db"); // calculates cache-size per permanent connection @@ -94,7 +92,7 @@ impl DatabaseEngine for Engine { / ((num_cpus::get().max(1) * 2) + 1) as f64) as u32; - let writer = Mutex::new(Self::prepare_conn(&path, cache_size_per_thread)?); + let writer = Mutex::new(Engine::prepare_conn(&path, cache_size_per_thread)?); let arc = Arc::new(Engine { writer, @@ -107,32 +105,35 @@ impl DatabaseEngine for Engine { Ok(arc) } - fn open_tree(self: &Arc, name: &str) -> Result> { + fn open_tree(&self, name: &str) -> Result> { self.write_lock().execute(&format!("CREATE TABLE IF NOT EXISTS {} ( \"key\" BLOB PRIMARY KEY, \"value\" BLOB NOT NULL )", name), [])?; Ok(Arc::new(SqliteTable { engine: Arc::clone(self), name: name.to_owned(), - watchers: RwLock::new(HashMap::new()), + watchers: Watchers::default(), })) } - fn flush(self: &Arc) -> Result<()> { + fn flush(&self) -> Result<()> { // we enabled PRAGMA synchronous=normal, so this should not be necessary Ok(()) } + + fn cleanup(&self) -> Result<()> { + self.flush_wal() + } } pub struct SqliteTable { engine: Arc, name: String, - watchers: RwLock, (watch::Sender<()>, watch::Receiver<()>)>>, + watchers: Watchers, } type TupleOfBytes = (Vec, Vec); impl SqliteTable { - #[tracing::instrument(skip(self, guard, key))] fn get_with_guard(&self, guard: &Connection, key: &[u8]) -> Result>> { //dbg!(&self.name); Ok(guard @@ -141,7 +142,6 @@ impl SqliteTable { .optional()?) 
} - #[tracing::instrument(skip(self, guard, key, value))] fn insert_with_guard(&self, guard: &Connection, key: &[u8], value: &[u8]) -> Result<()> { //dbg!(&self.name); guard.execute( @@ -184,47 +184,24 @@ impl SqliteTable { Box::new(PreparedStatementIterator { iterator, - statement_ref, + _statement_ref: statement_ref, }) } } -impl Tree for SqliteTable { - #[tracing::instrument(skip(self, key))] +impl KvTree for SqliteTable { fn get(&self, key: &[u8]) -> Result>> { self.get_with_guard(self.engine.read_lock(), key) } - #[tracing::instrument(skip(self, key, value))] fn insert(&self, key: &[u8], value: &[u8]) -> Result<()> { let guard = self.engine.write_lock(); self.insert_with_guard(&guard, key, value)?; drop(guard); - - let watchers = self.watchers.read(); - let mut triggered = Vec::new(); - - for length in 0..=key.len() { - if watchers.contains_key(&key[..length]) { - triggered.push(&key[..length]); - } - } - - drop(watchers); - - if !triggered.is_empty() { - let mut watchers = self.watchers.write(); - for prefix in triggered { - if let Some(tx) = watchers.remove(prefix) { - let _ = tx.0.send(()); - } - } - }; - + self.watchers.wake(key); Ok(()) } - #[tracing::instrument(skip(self, iter))] fn insert_batch<'a>(&self, iter: &mut dyn Iterator, Vec)>) -> Result<()> { let guard = self.engine.write_lock(); @@ -239,7 +216,6 @@ impl Tree for SqliteTable { Ok(()) } - #[tracing::instrument(skip(self, iter))] fn increment_batch<'a>(&self, iter: &mut dyn Iterator>) -> Result<()> { let guard = self.engine.write_lock(); @@ -257,7 +233,6 @@ impl Tree for SqliteTable { Ok(()) } - #[tracing::instrument(skip(self, key))] fn remove(&self, key: &[u8]) -> Result<()> { let guard = self.engine.write_lock(); @@ -269,14 +244,12 @@ impl Tree for SqliteTable { Ok(()) } - #[tracing::instrument(skip(self))] fn iter<'a>(&'a self) -> Box + 'a> { let guard = self.engine.read_lock_iterator(); self.iter_with_guard(guard) } - #[tracing::instrument(skip(self, from, backwards))] fn iter_from<'a>( &'a self, from: &[u8], @@ -310,7 +283,7 @@ impl Tree for SqliteTable { ); Box::new(PreparedStatementIterator { iterator, - statement_ref, + _statement_ref: statement_ref, }) } else { let statement = Box::leak(Box::new( @@ -336,12 +309,11 @@ impl Tree for SqliteTable { Box::new(PreparedStatementIterator { iterator, - statement_ref, + _statement_ref: statement_ref, }) } } - #[tracing::instrument(skip(self, key))] fn increment(&self, key: &[u8]) -> Result> { let guard = self.engine.write_lock(); @@ -355,7 +327,6 @@ impl Tree for SqliteTable { Ok(new) } - #[tracing::instrument(skip(self, prefix))] fn scan_prefix<'a>(&'a self, prefix: Vec) -> Box + 'a> { Box::new( self.iter_from(&prefix, false) @@ -363,24 +334,10 @@ impl Tree for SqliteTable { ) } - #[tracing::instrument(skip(self, prefix))] fn watch_prefix<'a>(&'a self, prefix: &[u8]) -> Pin + Send + 'a>> { - let mut rx = match self.watchers.write().entry(prefix.to_vec()) { - hash_map::Entry::Occupied(o) => o.get().1.clone(), - hash_map::Entry::Vacant(v) => { - let (tx, rx) = tokio::sync::watch::channel(()); - v.insert((tx, rx.clone())); - rx - } - }; - - Box::pin(async move { - // Tx is never destroyed - rx.changed().await.unwrap(); - }) + self.watchers.watch(prefix) } - #[tracing::instrument(skip(self))] fn clear(&self) -> Result<()> { debug!("clear: running"); self.engine diff --git a/src/database/abstraction/watchers.rs b/src/database/abstraction/watchers.rs new file mode 100644 index 00000000..55cb60b3 --- /dev/null +++ b/src/database/abstraction/watchers.rs @@ -0,0 +1,54 @@ +use 
std::{ + collections::{hash_map, HashMap}, + future::Future, + pin::Pin, + sync::RwLock, +}; +use tokio::sync::watch; + +#[derive(Default)] +pub(super) struct Watchers { + watchers: RwLock, (watch::Sender<()>, watch::Receiver<()>)>>, +} + +impl Watchers { + pub(super) fn watch<'a>( + &'a self, + prefix: &[u8], + ) -> Pin + Send + 'a>> { + let mut rx = match self.watchers.write().unwrap().entry(prefix.to_vec()) { + hash_map::Entry::Occupied(o) => o.get().1.clone(), + hash_map::Entry::Vacant(v) => { + let (tx, rx) = tokio::sync::watch::channel(()); + v.insert((tx, rx.clone())); + rx + } + }; + + Box::pin(async move { + // Tx is never destroyed + rx.changed().await.unwrap(); + }) + } + pub(super) fn wake(&self, key: &[u8]) { + let watchers = self.watchers.read().unwrap(); + let mut triggered = Vec::new(); + + for length in 0..=key.len() { + if watchers.contains_key(&key[..length]) { + triggered.push(&key[..length]); + } + } + + drop(watchers); + + if !triggered.is_empty() { + let mut watchers = self.watchers.write().unwrap(); + for prefix in triggered { + if let Some(tx) = watchers.remove(prefix) { + let _ = tx.0.send(()); + } + } + }; + } +} diff --git a/src/database/admin.rs b/src/database/admin.rs deleted file mode 100644 index 8d8559a5..00000000 --- a/src/database/admin.rs +++ /dev/null @@ -1,130 +0,0 @@ -use std::{ - convert::{TryFrom, TryInto}, - sync::Arc, -}; - -use crate::{pdu::PduBuilder, Database}; -use rocket::futures::{channel::mpsc, stream::StreamExt}; -use ruma::{ - events::{room::message::RoomMessageEventContent, EventType}, - UserId, -}; -use serde_json::value::to_raw_value; -use tokio::sync::{MutexGuard, RwLock, RwLockReadGuard}; -use tracing::warn; - -pub enum AdminCommand { - RegisterAppservice(serde_yaml::Value), - ListAppservices, - SendMessage(RoomMessageEventContent), -} - -#[derive(Clone)] -pub struct Admin { - pub sender: mpsc::UnboundedSender, -} - -impl Admin { - pub fn start_handler( - &self, - db: Arc>, - mut receiver: mpsc::UnboundedReceiver, - ) { - tokio::spawn(async move { - // TODO: Use futures when we have long admin commands - //let mut futures = FuturesUnordered::new(); - - let guard = db.read().await; - - let conduit_user = - UserId::try_from(format!("@conduit:{}", guard.globals.server_name())) - .expect("@conduit:server_name is valid"); - - let conduit_room = guard - .rooms - .id_from_alias( - &format!("#admins:{}", guard.globals.server_name()) - .try_into() - .expect("#admins:server_name is a valid room alias"), - ) - .unwrap(); - - let conduit_room = match conduit_room { - None => { - warn!("Conduit instance does not have an #admins room. Logging to that room will not work. Restart Conduit after creating a user to fix this."); - return; - } - Some(r) => r, - }; - - drop(guard); - - let send_message = |message: RoomMessageEventContent, - guard: RwLockReadGuard<'_, Database>, - mutex_lock: &MutexGuard<'_, ()>| { - guard - .rooms - .build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMessage, - content: to_raw_value(&message) - .expect("event is valid, we just created it"), - unsigned: None, - state_key: None, - redacts: None, - }, - &conduit_user, - &conduit_room, - &guard, - mutex_lock, - ) - .unwrap(); - }; - - loop { - tokio::select! 
{ - Some(event) = receiver.next() => { - let guard = db.read().await; - let mutex_state = Arc::clone( - guard.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(conduit_room.clone()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - match event { - AdminCommand::RegisterAppservice(yaml) => { - guard.appservice.register_appservice(yaml).unwrap(); // TODO handle error - } - AdminCommand::ListAppservices => { - if let Ok(appservices) = guard.appservice.iter_ids().map(|ids| ids.collect::>()) { - let count = appservices.len(); - let output = format!( - "Appservices ({}): {}", - count, - appservices.into_iter().filter_map(|r| r.ok()).collect::>().join(", ") - ); - send_message(RoomMessageEventContent::text_plain(output), guard, &state_lock); - } else { - send_message(RoomMessageEventContent::text_plain("Failed to get appservices."), guard, &state_lock); - } - } - AdminCommand::SendMessage(message) => { - send_message(message, guard, &state_lock); - } - } - - drop(state_lock); - } - } - } - }); - } - - pub fn send(&self, command: AdminCommand) { - self.sender.unbounded_send(command).unwrap(); - } -} diff --git a/src/database/globals.rs b/src/database/globals.rs deleted file mode 100644 index f1cbbd92..00000000 --- a/src/database/globals.rs +++ /dev/null @@ -1,322 +0,0 @@ -use crate::{database::Config, server_server::FedDest, utils, ConduitResult, Error, Result}; -use ruma::{ - api::{ - client::r0::sync::sync_events, - federation::discovery::{ServerSigningKeys, VerifyKey}, - }, - DeviceId, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, ServerSigningKeyId, UserId, -}; -use std::{ - collections::{BTreeMap, HashMap}, - fs, - future::Future, - net::IpAddr, - path::PathBuf, - sync::{Arc, Mutex, RwLock}, - time::{Duration, Instant}, -}; -use tokio::sync::{broadcast, watch::Receiver, Mutex as TokioMutex, Semaphore}; -use tracing::error; -use trust_dns_resolver::TokioAsyncResolver; - -use super::abstraction::Tree; - -pub const COUNTER: &[u8] = b"c"; - -type WellKnownMap = HashMap, (FedDest, String)>; -type TlsNameMap = HashMap, u16)>; -type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries -type SyncHandle = ( - Option, // since - Receiver>>, // rx -); - -pub struct Globals { - pub actual_destination_cache: Arc>, // actual_destination, host - pub tls_name_override: Arc>, - pub(super) globals: Arc, - config: Config, - keypair: Arc, - dns_resolver: TokioAsyncResolver, - jwt_decoding_key: Option>, - pub(super) server_signingkeys: Arc, - pub bad_event_ratelimiter: Arc>>, - pub bad_signature_ratelimiter: Arc, RateLimitState>>>, - pub servername_ratelimiter: Arc, Arc>>>, - pub sync_receivers: RwLock), SyncHandle>>, - pub roomid_mutex_insert: RwLock>>>, - pub roomid_mutex_state: RwLock>>>, - pub roomid_mutex_federation: RwLock>>>, // this lock will be held longer - pub rotate: RotationHandler, -} - -/// Handles "rotation" of long-polling requests. "Rotation" in this context is similar to "rotation" of log files and the like. -/// -/// This is utilized to have sync workers return early and release read locks on the database. 
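A hedged sketch of how a long-polling worker is meant to consume the handler defined just below (this usage is assumed rather than shown in the diff; `data_changed` stands in for whichever prefix watcher the worker is blocked on, and tokio is assumed as the runtime):

use std::future::Future;

// Return as soon as either real data arrives or a rotation is fired, so the
// worker can drop its database read lock and re-enter the wait loop later.
async fn wait_for_update(rotate: &RotationHandler, data_changed: impl Future<Output = ()>) {
    tokio::select! {
        _ = data_changed => {}    // a watched prefix was written
        _ = rotate.watch() => {}  // rotation fired: release locks, then retry
    }
}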
-pub struct RotationHandler(broadcast::Sender<()>, broadcast::Receiver<()>); - -impl RotationHandler { - pub fn new() -> Self { - let (s, r) = broadcast::channel(1); - Self(s, r) - } - - pub fn watch(&self) -> impl Future { - let mut r = self.0.subscribe(); - - async move { - let _ = r.recv().await; - } - } - - pub fn fire(&self) { - let _ = self.0.send(()); - } -} - -impl Default for RotationHandler { - fn default() -> Self { - Self::new() - } -} - -impl Globals { - pub fn load( - globals: Arc, - server_signingkeys: Arc, - config: Config, - ) -> Result { - let keypair_bytes = globals.get(b"keypair")?.map_or_else( - || { - let keypair = utils::generate_keypair(); - globals.insert(b"keypair", &keypair)?; - Ok::<_, Error>(keypair) - }, - |s| Ok(s.to_vec()), - )?; - - let mut parts = keypair_bytes.splitn(2, |&b| b == 0xff); - - let keypair = utils::string_from_bytes( - // 1. version - parts - .next() - .expect("splitn always returns at least one element"), - ) - .map_err(|_| Error::bad_database("Invalid version bytes in keypair.")) - .and_then(|version| { - // 2. key - parts - .next() - .ok_or_else(|| Error::bad_database("Invalid keypair format in database.")) - .map(|key| (version, key)) - }) - .and_then(|(version, key)| { - ruma::signatures::Ed25519KeyPair::from_der(key, version) - .map_err(|_| Error::bad_database("Private or public keys are invalid.")) - }); - - let keypair = match keypair { - Ok(k) => k, - Err(e) => { - error!("Keypair invalid. Deleting..."); - globals.remove(b"keypair")?; - return Err(e); - } - }; - - let tls_name_override = Arc::new(RwLock::new(TlsNameMap::new())); - - let jwt_decoding_key = config - .jwt_secret - .as_ref() - .map(|secret| jsonwebtoken::DecodingKey::from_secret(secret.as_bytes()).into_static()); - - let s = Self { - globals, - config, - keypair: Arc::new(keypair), - dns_resolver: TokioAsyncResolver::tokio_from_system_conf().map_err(|_| { - Error::bad_config("Failed to set up trust dns resolver with system config.") - })?, - actual_destination_cache: Arc::new(RwLock::new(WellKnownMap::new())), - tls_name_override, - server_signingkeys, - jwt_decoding_key, - bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())), - bad_signature_ratelimiter: Arc::new(RwLock::new(HashMap::new())), - servername_ratelimiter: Arc::new(RwLock::new(HashMap::new())), - roomid_mutex_state: RwLock::new(HashMap::new()), - roomid_mutex_insert: RwLock::new(HashMap::new()), - roomid_mutex_federation: RwLock::new(HashMap::new()), - sync_receivers: RwLock::new(HashMap::new()), - rotate: RotationHandler::new(), - }; - - fs::create_dir_all(s.get_media_folder())?; - - Ok(s) - } - - /// Returns this server's keypair. - pub fn keypair(&self) -> &ruma::signatures::Ed25519KeyPair { - &self.keypair - } - - /// Returns a reqwest client which can be used to send requests. - pub fn reqwest_client(&self) -> Result { - let mut reqwest_client_builder = reqwest::Client::builder() - .connect_timeout(Duration::from_secs(30)) - .timeout(Duration::from_secs(60 * 3)) - .pool_max_idle_per_host(1); - if let Some(proxy) = self.config.proxy.to_proxy()? { - reqwest_client_builder = reqwest_client_builder.proxy(proxy); - } - - Ok(reqwest_client_builder) - } - - #[tracing::instrument(skip(self))] - pub fn next_count(&self) -> Result { - utils::u64_from_bytes(&self.globals.increment(COUNTER)?) 
- .map_err(|_| Error::bad_database("Count has invalid bytes.")) - } - - #[tracing::instrument(skip(self))] - pub fn current_count(&self) -> Result { - self.globals.get(COUNTER)?.map_or(Ok(0_u64), |bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Count has invalid bytes.")) - }) - } - - pub fn server_name(&self) -> &ServerName { - self.config.server_name.as_ref() - } - - pub fn max_request_size(&self) -> u32 { - self.config.max_request_size - } - - pub fn allow_registration(&self) -> bool { - self.config.allow_registration - } - - pub fn allow_encryption(&self) -> bool { - self.config.allow_encryption - } - - pub fn allow_federation(&self) -> bool { - self.config.allow_federation - } - - pub fn allow_room_creation(&self) -> bool { - self.config.allow_room_creation - } - - pub fn trusted_servers(&self) -> &[Box] { - &self.config.trusted_servers - } - - pub fn dns_resolver(&self) -> &TokioAsyncResolver { - &self.dns_resolver - } - - pub fn jwt_decoding_key(&self) -> Option<&jsonwebtoken::DecodingKey<'_>> { - self.jwt_decoding_key.as_ref() - } - - /// TODO: the key valid until timestamp is only honored in room version > 4 - /// Remove the outdated keys and insert the new ones. - /// - /// This doesn't actually check that the keys provided are newer than the old set. - pub fn add_signing_key( - &self, - origin: &ServerName, - new_keys: ServerSigningKeys, - ) -> Result> { - // Not atomic, but this is not critical - let signingkeys = self.server_signingkeys.get(origin.as_bytes())?; - - let mut keys = signingkeys - .and_then(|keys| serde_json::from_slice(&keys).ok()) - .unwrap_or_else(|| { - // Just insert "now", it doesn't matter - ServerSigningKeys::new(origin.to_owned(), MilliSecondsSinceUnixEpoch::now()) - }); - - let ServerSigningKeys { - verify_keys, - old_verify_keys, - .. - } = new_keys; - - keys.verify_keys.extend(verify_keys.into_iter()); - keys.old_verify_keys.extend(old_verify_keys.into_iter()); - - self.server_signingkeys.insert( - origin.as_bytes(), - &serde_json::to_vec(&keys).expect("serversigningkeys can be serialized"), - )?; - - let mut tree = keys.verify_keys; - tree.extend( - keys.old_verify_keys - .into_iter() - .map(|old| (old.0, VerifyKey::new(old.1.key))), - ); - - Ok(tree) - } - - /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server. - pub fn signing_keys_for( - &self, - origin: &ServerName, - ) -> Result> { - let signingkeys = self - .server_signingkeys - .get(origin.as_bytes())? 
- .and_then(|bytes| serde_json::from_slice(&bytes).ok()) - .map(|keys: ServerSigningKeys| { - let mut tree = keys.verify_keys; - tree.extend( - keys.old_verify_keys - .into_iter() - .map(|old| (old.0, VerifyKey::new(old.1.key))), - ); - tree - }) - .unwrap_or_else(BTreeMap::new); - - Ok(signingkeys) - } - - pub fn database_version(&self) -> Result { - self.globals.get(b"version")?.map_or(Ok(0), |version| { - utils::u64_from_bytes(&version) - .map_err(|_| Error::bad_database("Database version id is invalid.")) - }) - } - - pub fn bump_database_version(&self, new_version: u64) -> Result<()> { - self.globals - .insert(b"version", &new_version.to_be_bytes())?; - Ok(()) - } - - pub fn get_media_folder(&self) -> PathBuf { - let mut r = PathBuf::new(); - r.push(self.config.database_path.clone()); - r.push("media"); - r - } - - pub fn get_media_file(&self, key: &[u8]) -> PathBuf { - let mut r = PathBuf::new(); - r.push(self.config.database_path.clone()); - r.push("media"); - r.push(base64::encode_config(key, base64::URL_SAFE_NO_PAD)); - r - } -} diff --git a/src/database/account_data.rs b/src/database/key_value/account_data.rs similarity index 74% rename from src/database/account_data.rs rename to src/database/key_value/account_data.rs index 456283bd..e1eef966 100644 --- a/src/database/account_data.rs +++ b/src/database/key_value/account_data.rs @@ -1,30 +1,23 @@ -use crate::{utils, Error, Result}; +use std::collections::HashMap; + use ruma::{ api::client::error::ErrorKind, - events::{AnyEphemeralRoomEvent, EventType}, + events::{AnyEphemeralRoomEvent, RoomAccountDataEventType}, serde::Raw, RoomId, UserId, }; -use serde::{de::DeserializeOwned, Serialize}; -use std::{collections::HashMap, convert::TryFrom, sync::Arc}; - -use super::abstraction::Tree; -pub struct AccountData { - pub(super) roomuserdataid_accountdata: Arc, // RoomUserDataId = Room + User + Count + Type - pub(super) roomusertype_roomuserdataid: Arc, // RoomUserType = Room + User + Type -} +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; -impl AccountData { +impl service::account_data::Data for KeyValueDatabase { /// Places one event in the account data of the user and removes the previous entry. 
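On the get_media_file helper in the globals.rs code above: content keys are turned into filenames with URL-safe, unpadded base64, so arbitrary key bytes stay filesystem-safe. A small sketch of the resulting on-disk layout (assumes the same base64 crate API the patch itself calls):

use std::path::PathBuf;

// {database_path}/media/{base64_url_nopad(key)}
fn media_path(database_path: &str, key: &[u8]) -> PathBuf {
    let mut path = PathBuf::from(database_path);
    path.push("media");
    path.push(base64::encode_config(key, base64::URL_SAFE_NO_PAD));
    path
}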
- #[tracing::instrument(skip(self, room_id, user_id, event_type, data, globals))] - pub fn update( + #[tracing::instrument(skip(self, room_id, user_id, event_type, data))] + fn update( &self, room_id: Option<&RoomId>, user_id: &UserId, - event_type: EventType, - data: &T, - globals: &super::globals::Globals, + event_type: RoomAccountDataEventType, + data: &serde_json::Value, ) -> Result<()> { let mut prefix = room_id .map(|r| r.to_string()) @@ -36,15 +29,14 @@ impl AccountData { prefix.push(0xff); let mut roomuserdataid = prefix.clone(); - roomuserdataid.extend_from_slice(&globals.next_count()?.to_be_bytes()); + roomuserdataid.extend_from_slice(&services().globals.next_count()?.to_be_bytes()); roomuserdataid.push(0xff); - roomuserdataid.extend_from_slice(event_type.as_bytes()); + roomuserdataid.extend_from_slice(event_type.to_string().as_bytes()); let mut key = prefix; - key.extend_from_slice(event_type.as_bytes()); + key.extend_from_slice(event_type.to_string().as_bytes()); - let json = serde_json::to_value(data).expect("all types here can be serialized"); // TODO: maybe add error handling - if json.get("type").is_none() || json.get("content").is_none() { + if data.get("type").is_none() || data.get("content").is_none() { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Account data doesn't have all required fields.", @@ -53,7 +45,7 @@ impl AccountData { self.roomuserdataid_accountdata.insert( &roomuserdataid, - &serde_json::to_vec(&json).expect("to_vec always works on json values"), + &serde_json::to_vec(&data).expect("to_vec always works on json values"), )?; let prev = self.roomusertype_roomuserdataid.get(&key)?; @@ -71,12 +63,12 @@ impl AccountData { /// Searches the account data for a specific kind. #[tracing::instrument(skip(self, room_id, user_id, kind))] - pub fn get( + fn get( &self, room_id: Option<&RoomId>, user_id: &UserId, - kind: EventType, - ) -> Result> { + kind: RoomAccountDataEventType, + ) -> Result>> { let mut key = room_id .map(|r| r.to_string()) .unwrap_or_default() @@ -85,7 +77,7 @@ impl AccountData { key.push(0xff); key.extend_from_slice(user_id.as_bytes()); key.push(0xff); - key.extend_from_slice(kind.as_ref().as_bytes()); + key.extend_from_slice(kind.to_string().as_bytes()); self.roomusertype_roomuserdataid .get(&key)? @@ -104,12 +96,12 @@ impl AccountData { /// Returns all changes to the account data that happened after `since`. #[tracing::instrument(skip(self, room_id, user_id, since))] - pub fn changes_since( + fn changes_since( &self, room_id: Option<&RoomId>, user_id: &UserId, since: u64, - ) -> Result>> { + ) -> Result>> { let mut userdata = HashMap::new(); let mut prefix = room_id @@ -131,7 +123,7 @@ impl AccountData { .take_while(move |(k, _)| k.starts_with(&prefix)) .map(|(k, v)| { Ok::<_, Error>(( - EventType::try_from( + RoomAccountDataEventType::try_from( utils::string_from_bytes(k.rsplit(|&b| b == 0xff).next().ok_or_else( || Error::bad_database("RoomUserData ID in db is invalid."), )?) 
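The account data implementation above uses plain byte concatenation with 0xff separators as its key schema: roomusertype_roomuserdataid is keyed by room (empty for global data), user and event type, while roomuserdataid_accountdata additionally embeds the next_count() value so changes_since can scan forward from a position. A small illustrative helper for the first of those keys (not part of the patch):

// Key layout, as built by update()/get() above:
//   roomusertype_roomuserdataid:  room_id? 0xff user_id 0xff event_type
//   roomuserdataid_accountdata:   room_id? 0xff user_id 0xff count(u64 BE) 0xff event_type
fn roomusertype_key(room_id: Option<&str>, user_id: &str, event_type: &str) -> Vec<u8> {
    // An absent room id is encoded as the empty string, i.e. global account data.
    let mut key = room_id.unwrap_or_default().as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(user_id.as_bytes());
    key.push(0xff);
    key.extend_from_slice(event_type.as_bytes());
    key
}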
diff --git a/src/database/appservice.rs b/src/database/key_value/appservice.rs similarity index 51% rename from src/database/appservice.rs rename to src/database/key_value/appservice.rs index 7cc91372..9a821a65 100644 --- a/src/database/appservice.rs +++ b/src/database/key_value/appservice.rs @@ -1,18 +1,8 @@ -use crate::{utils, Error, Result}; -use std::{ - collections::HashMap, - sync::{Arc, RwLock}, -}; +use crate::{database::KeyValueDatabase, service, utils, Error, Result}; -use super::abstraction::Tree; - -pub struct Appservice { - pub(super) cached_registrations: Arc>>, - pub(super) id_appserviceregistrations: Arc, -} - -impl Appservice { - pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result<()> { +impl service::appservice::Data for KeyValueDatabase { + /// Registers an appservice and returns the ID to the caller + fn register_appservice(&self, yaml: serde_yaml::Value) -> Result { // TODO: Rumaify let id = yaml.get("id").unwrap().as_str().unwrap(); self.id_appserviceregistrations.insert( @@ -22,12 +12,27 @@ impl Appservice { self.cached_registrations .write() .unwrap() - .insert(id.to_owned(), yaml); + .insert(id.to_owned(), yaml.to_owned()); + + Ok(id.to_owned()) + } + /// Remove an appservice registration + /// + /// # Arguments + /// + /// * `service_name` - the name you send to register the service previously + fn unregister_appservice(&self, service_name: &str) -> Result<()> { + self.id_appserviceregistrations + .remove(service_name.as_bytes())?; + self.cached_registrations + .write() + .unwrap() + .remove(service_name); Ok(()) } - pub fn get_registration(&self, id: &str) -> Result> { + fn get_registration(&self, id: &str) -> Result> { self.cached_registrations .read() .unwrap() @@ -49,14 +54,17 @@ impl Appservice { ) } - pub fn iter_ids(&self) -> Result> + '_> { - Ok(self.id_appserviceregistrations.iter().map(|(id, _)| { - utils::string_from_bytes(&id) - .map_err(|_| Error::bad_database("Invalid id bytes in id_appserviceregistrations.")) - })) + fn iter_ids<'a>(&'a self) -> Result> + 'a>> { + Ok(Box::new(self.id_appserviceregistrations.iter().map( + |(id, _)| { + utils::string_from_bytes(&id).map_err(|_| { + Error::bad_database("Invalid id bytes in id_appserviceregistrations.") + }) + }, + ))) } - pub fn all(&self) -> Result> { + fn all(&self) -> Result> { self.iter_ids()? .filter_map(|id| id.ok()) .map(move |id| { diff --git a/src/database/key_value/globals.rs b/src/database/key_value/globals.rs new file mode 100644 index 00000000..7b7675ca --- /dev/null +++ b/src/database/key_value/globals.rs @@ -0,0 +1,233 @@ +use std::collections::BTreeMap; + +use async_trait::async_trait; +use futures_util::{stream::FuturesUnordered, StreamExt}; +use ruma::{ + api::federation::discovery::{ServerSigningKeys, VerifyKey}, + signatures::Ed25519KeyPair, + DeviceId, MilliSecondsSinceUnixEpoch, OwnedServerSigningKeyId, ServerName, UserId, +}; + +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; + +pub const COUNTER: &[u8] = b"c"; + +#[async_trait] +impl service::globals::Data for KeyValueDatabase { + fn next_count(&self) -> Result { + utils::u64_from_bytes(&self.global.increment(COUNTER)?) 
+ .map_err(|_| Error::bad_database("Count has invalid bytes.")) + } + + fn current_count(&self) -> Result { + self.global.get(COUNTER)?.map_or(Ok(0_u64), |bytes| { + utils::u64_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Count has invalid bytes.")) + }) + } + + async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { + let userid_bytes = user_id.as_bytes().to_vec(); + let mut userid_prefix = userid_bytes.clone(); + userid_prefix.push(0xff); + + let mut userdeviceid_prefix = userid_prefix.clone(); + userdeviceid_prefix.extend_from_slice(device_id.as_bytes()); + userdeviceid_prefix.push(0xff); + + let mut futures = FuturesUnordered::new(); + + // Return when *any* user changed his key + // TODO: only send for user they share a room with + futures.push(self.todeviceid_events.watch_prefix(&userdeviceid_prefix)); + + futures.push(self.userroomid_joined.watch_prefix(&userid_prefix)); + futures.push(self.userroomid_invitestate.watch_prefix(&userid_prefix)); + futures.push(self.userroomid_leftstate.watch_prefix(&userid_prefix)); + futures.push( + self.userroomid_notificationcount + .watch_prefix(&userid_prefix), + ); + futures.push(self.userroomid_highlightcount.watch_prefix(&userid_prefix)); + + // Events for rooms we are in + for room_id in services() + .rooms + .state_cache + .rooms_joined(user_id) + .filter_map(|r| r.ok()) + { + let short_roomid = services() + .rooms + .short + .get_shortroomid(&room_id) + .ok() + .flatten() + .expect("room exists") + .to_be_bytes() + .to_vec(); + + let roomid_bytes = room_id.as_bytes().to_vec(); + let mut roomid_prefix = roomid_bytes.clone(); + roomid_prefix.push(0xff); + + // PDUs + futures.push(self.pduid_pdu.watch_prefix(&short_roomid)); + + // EDUs + futures.push(self.roomid_lasttypingupdate.watch_prefix(&roomid_bytes)); + + futures.push(self.readreceiptid_readreceipt.watch_prefix(&roomid_prefix)); + + // Key changes + futures.push(self.keychangeid_userid.watch_prefix(&roomid_prefix)); + + // Room account data + let mut roomuser_prefix = roomid_prefix.clone(); + roomuser_prefix.extend_from_slice(&userid_prefix); + + futures.push( + self.roomusertype_roomuserdataid + .watch_prefix(&roomuser_prefix), + ); + } + + let mut globaluserdata_prefix = vec![0xff]; + globaluserdata_prefix.extend_from_slice(&userid_prefix); + + futures.push( + self.roomusertype_roomuserdataid + .watch_prefix(&globaluserdata_prefix), + ); + + // More key changes (used when user is not joined to any rooms) + futures.push(self.keychangeid_userid.watch_prefix(&userid_prefix)); + + // One time keys + futures.push(self.userid_lastonetimekeyupdate.watch_prefix(&userid_bytes)); + + futures.push(Box::pin(services().globals.rotate.watch())); + + // Wait until one of them finds something + futures.next().await; + + Ok(()) + } + + fn cleanup(&self) -> Result<()> { + self._db.cleanup() + } + + fn memory_usage(&self) -> Result { + self._db.memory_usage() + } + + fn load_keypair(&self) -> Result { + let keypair_bytes = self.global.get(b"keypair")?.map_or_else( + || { + let keypair = utils::generate_keypair(); + self.global.insert(b"keypair", &keypair)?; + Ok::<_, Error>(keypair) + }, + |s| Ok(s.to_vec()), + )?; + + let mut parts = keypair_bytes.splitn(2, |&b| b == 0xff); + + utils::string_from_bytes( + // 1. version + parts + .next() + .expect("splitn always returns at least one element"), + ) + .map_err(|_| Error::bad_database("Invalid version bytes in keypair.")) + .and_then(|version| { + // 2. 
key + parts + .next() + .ok_or_else(|| Error::bad_database("Invalid keypair format in database.")) + .map(|key| (version, key)) + }) + .and_then(|(version, key)| { + Ed25519KeyPair::from_der(key, version) + .map_err(|_| Error::bad_database("Private or public keys are invalid.")) + }) + } + fn remove_keypair(&self) -> Result<()> { + self.global.remove(b"keypair") + } + + fn add_signing_key( + &self, + origin: &ServerName, + new_keys: ServerSigningKeys, + ) -> Result> { + // Not atomic, but this is not critical + let signingkeys = self.server_signingkeys.get(origin.as_bytes())?; + + let mut keys = signingkeys + .and_then(|keys| serde_json::from_slice(&keys).ok()) + .unwrap_or_else(|| { + // Just insert "now", it doesn't matter + ServerSigningKeys::new(origin.to_owned(), MilliSecondsSinceUnixEpoch::now()) + }); + + let ServerSigningKeys { + verify_keys, + old_verify_keys, + .. + } = new_keys; + + keys.verify_keys.extend(verify_keys.into_iter()); + keys.old_verify_keys.extend(old_verify_keys.into_iter()); + + self.server_signingkeys.insert( + origin.as_bytes(), + &serde_json::to_vec(&keys).expect("serversigningkeys can be serialized"), + )?; + + let mut tree = keys.verify_keys; + tree.extend( + keys.old_verify_keys + .into_iter() + .map(|old| (old.0, VerifyKey::new(old.1.key))), + ); + + Ok(tree) + } + + /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server. + fn signing_keys_for( + &self, + origin: &ServerName, + ) -> Result> { + let signingkeys = self + .server_signingkeys + .get(origin.as_bytes())? + .and_then(|bytes| serde_json::from_slice(&bytes).ok()) + .map(|keys: ServerSigningKeys| { + let mut tree = keys.verify_keys; + tree.extend( + keys.old_verify_keys + .into_iter() + .map(|old| (old.0, VerifyKey::new(old.1.key))), + ); + tree + }) + .unwrap_or_else(BTreeMap::new); + + Ok(signingkeys) + } + + fn database_version(&self) -> Result { + self.global.get(b"version")?.map_or(Ok(0), |version| { + utils::u64_from_bytes(&version) + .map_err(|_| Error::bad_database("Database version id is invalid.")) + }) + } + + fn bump_database_version(&self, new_version: u64) -> Result<()> { + self.global.insert(b"version", &new_version.to_be_bytes())?; + Ok(()) + } +} diff --git a/src/database/key_backups.rs b/src/database/key_value/key_backups.rs similarity index 79% rename from src/database/key_backups.rs rename to src/database/key_value/key_backups.rs index 98ea0111..900b700b 100644 --- a/src/database/key_backups.rs +++ b/src/database/key_value/key_backups.rs @@ -1,29 +1,23 @@ -use crate::{utils, Error, Result}; +use std::collections::BTreeMap; + use ruma::{ api::client::{ + backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, error::ErrorKind, - r0::backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, }, - RoomId, UserId, + serde::Raw, + OwnedRoomId, RoomId, UserId, }; -use std::{collections::BTreeMap, convert::TryFrom, sync::Arc}; - -use super::abstraction::Tree; -pub struct KeyBackups { - pub(super) backupid_algorithm: Arc, // BackupId = UserId + Version(Count) - pub(super) backupid_etag: Arc, // BackupId = UserId + Version(Count) - pub(super) backupkeyid_backup: Arc, // BackupKeyId = UserId + Version + RoomId + SessionId -} +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; -impl KeyBackups { - pub fn create_backup( +impl service::key_backups::Data for KeyValueDatabase { + fn create_backup( &self, user_id: &UserId, - backup_metadata: &BackupAlgorithm, - globals: &super::globals::Globals, + backup_metadata: &Raw, ) -> Result 
{ - let version = globals.next_count()?.to_string(); + let version = services().globals.next_count()?.to_string(); let mut key = user_id.as_bytes().to_vec(); key.push(0xff); @@ -34,11 +28,11 @@ impl KeyBackups { &serde_json::to_vec(backup_metadata).expect("BackupAlgorithm::to_vec always works"), )?; self.backupid_etag - .insert(&key, &globals.next_count()?.to_be_bytes())?; + .insert(&key, &services().globals.next_count()?.to_be_bytes())?; Ok(version) } - pub fn delete_backup(&self, user_id: &UserId, version: &str) -> Result<()> { + fn delete_backup(&self, user_id: &UserId, version: &str) -> Result<()> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(version.as_bytes()); @@ -55,12 +49,11 @@ impl KeyBackups { Ok(()) } - pub fn update_backup( + fn update_backup( &self, user_id: &UserId, version: &str, - backup_metadata: &BackupAlgorithm, - globals: &super::globals::Globals, + backup_metadata: &Raw, ) -> Result { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); @@ -73,18 +66,14 @@ impl KeyBackups { )); } - self.backupid_algorithm.insert( - &key, - serde_json::to_string(backup_metadata) - .expect("BackupAlgorithm::to_string always works") - .as_bytes(), - )?; + self.backupid_algorithm + .insert(&key, backup_metadata.json().get().as_bytes())?; self.backupid_etag - .insert(&key, &globals.next_count()?.to_be_bytes())?; + .insert(&key, &services().globals.next_count()?.to_be_bytes())?; Ok(version.to_owned()) } - pub fn get_latest_backup_version(&self, user_id: &UserId) -> Result> { + fn get_latest_backup_version(&self, user_id: &UserId) -> Result> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); let mut last_possible_key = prefix.clone(); @@ -105,7 +94,10 @@ impl KeyBackups { .transpose() } - pub fn get_latest_backup(&self, user_id: &UserId) -> Result> { + fn get_latest_backup( + &self, + user_id: &UserId, + ) -> Result)>> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); let mut last_possible_key = prefix.clone(); @@ -133,7 +125,7 @@ impl KeyBackups { .transpose() } - pub fn get_backup(&self, user_id: &UserId, version: &str) -> Result> { + fn get_backup(&self, user_id: &UserId, version: &str) -> Result>> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(version.as_bytes()); @@ -146,14 +138,13 @@ impl KeyBackups { }) } - pub fn add_key( + fn add_key( &self, user_id: &UserId, version: &str, room_id: &RoomId, session_id: &str, - key_data: &KeyBackupData, - globals: &super::globals::Globals, + key_data: &Raw, ) -> Result<()> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); @@ -167,22 +158,20 @@ impl KeyBackups { } self.backupid_etag - .insert(&key, &globals.next_count()?.to_be_bytes())?; + .insert(&key, &services().globals.next_count()?.to_be_bytes())?; key.push(0xff); key.extend_from_slice(room_id.as_bytes()); key.push(0xff); key.extend_from_slice(session_id.as_bytes()); - self.backupkeyid_backup.insert( - &key, - &serde_json::to_vec(&key_data).expect("KeyBackupData::to_vec always works"), - )?; + self.backupkeyid_backup + .insert(&key, key_data.json().get().as_bytes())?; Ok(()) } - pub fn count_keys(&self, user_id: &UserId, version: &str) -> Result { + fn count_keys(&self, user_id: &UserId, version: &str) -> Result { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(version.as_bytes()); @@ -190,7 +179,7 @@ impl KeyBackups { Ok(self.backupkeyid_backup.scan_prefix(prefix).count()) } - pub fn get_etag(&self, user_id: &UserId, version: &str) 
-> Result { + fn get_etag(&self, user_id: &UserId, version: &str) -> Result { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(version.as_bytes()); @@ -205,17 +194,17 @@ impl KeyBackups { .to_string()) } - pub fn get_all( + fn get_all( &self, user_id: &UserId, version: &str, - ) -> Result> { + ) -> Result> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(version.as_bytes()); prefix.push(0xff); - let mut rooms = BTreeMap::::new(); + let mut rooms = BTreeMap::::new(); for result in self .backupkeyid_backup @@ -231,7 +220,7 @@ impl KeyBackups { Error::bad_database("backupkeyid_backup session_id is invalid.") })?; - let room_id = RoomId::try_from( + let room_id = RoomId::parse( utils::string_from_bytes(parts.next().ok_or_else(|| { Error::bad_database("backupkeyid_backup key is invalid.") })?) @@ -261,12 +250,12 @@ impl KeyBackups { Ok(rooms) } - pub fn get_room( + fn get_room( &self, user_id: &UserId, version: &str, room_id: &RoomId, - ) -> Result> { + ) -> Result>> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(version.as_bytes()); @@ -298,13 +287,13 @@ impl KeyBackups { .collect()) } - pub fn get_session( + fn get_session( &self, user_id: &UserId, version: &str, room_id: &RoomId, session_id: &str, - ) -> Result> { + ) -> Result>> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(version.as_bytes()); @@ -323,7 +312,7 @@ impl KeyBackups { .transpose() } - pub fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()> { + fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(version.as_bytes()); @@ -336,12 +325,7 @@ impl KeyBackups { Ok(()) } - pub fn delete_room_keys( - &self, - user_id: &UserId, - version: &str, - room_id: &RoomId, - ) -> Result<()> { + fn delete_room_keys(&self, user_id: &UserId, version: &str, room_id: &RoomId) -> Result<()> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(version.as_bytes()); @@ -356,7 +340,7 @@ impl KeyBackups { Ok(()) } - pub fn delete_room_key( + fn delete_room_key( &self, user_id: &UserId, version: &str, diff --git a/src/database/key_value/media.rs b/src/database/key_value/media.rs new file mode 100644 index 00000000..6abe5ba5 --- /dev/null +++ b/src/database/key_value/media.rs @@ -0,0 +1,82 @@ +use ruma::api::client::error::ErrorKind; + +use crate::{database::KeyValueDatabase, service, utils, Error, Result}; + +impl service::media::Data for KeyValueDatabase { + fn create_file_metadata( + &self, + mxc: String, + width: u32, + height: u32, + content_disposition: Option<&str>, + content_type: Option<&str>, + ) -> Result> { + let mut key = mxc.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(&width.to_be_bytes()); + key.extend_from_slice(&height.to_be_bytes()); + key.push(0xff); + key.extend_from_slice( + content_disposition + .as_ref() + .map(|f| f.as_bytes()) + .unwrap_or_default(), + ); + key.push(0xff); + key.extend_from_slice( + content_type + .as_ref() + .map(|c| c.as_bytes()) + .unwrap_or_default(), + ); + + self.mediaid_file.insert(&key, &[])?; + + Ok(key) + } + + fn search_file_metadata( + &self, + mxc: String, + width: u32, + height: u32, + ) -> Result<(Option, Option, Vec)> { + let mut prefix = mxc.as_bytes().to_vec(); + prefix.push(0xff); + prefix.extend_from_slice(&width.to_be_bytes()); + 
prefix.extend_from_slice(&height.to_be_bytes()); + prefix.push(0xff); + + let (key, _) = self + .mediaid_file + .scan_prefix(prefix) + .next() + .ok_or(Error::BadRequest(ErrorKind::NotFound, "Media not found"))?; + + let mut parts = key.rsplit(|&b| b == 0xff); + + let content_type = parts + .next() + .map(|bytes| { + utils::string_from_bytes(bytes).map_err(|_| { + Error::bad_database("Content type in mediaid_file is invalid unicode.") + }) + }) + .transpose()?; + + let content_disposition_bytes = parts + .next() + .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?; + + let content_disposition = if content_disposition_bytes.is_empty() { + None + } else { + Some( + utils::string_from_bytes(content_disposition_bytes).map_err(|_| { + Error::bad_database("Content Disposition in mediaid_file is invalid unicode.") + })?, + ) + }; + Ok((content_disposition, content_type, key)) + } +} diff --git a/src/database/key_value/mod.rs b/src/database/key_value/mod.rs new file mode 100644 index 00000000..c4496af8 --- /dev/null +++ b/src/database/key_value/mod.rs @@ -0,0 +1,13 @@ +mod account_data; +//mod admin; +mod appservice; +mod globals; +mod key_backups; +mod media; +//mod pdu; +mod pusher; +mod rooms; +mod sending; +mod transaction_ids; +mod uiaa; +mod users; diff --git a/src/database/key_value/pusher.rs b/src/database/key_value/pusher.rs new file mode 100644 index 00000000..3dfceb6a --- /dev/null +++ b/src/database/key_value/pusher.rs @@ -0,0 +1,81 @@ +use ruma::{ + api::client::push::{get_pushers, set_pusher}, + UserId, +}; + +use crate::{database::KeyValueDatabase, service, utils, Error, Result}; + +impl service::pusher::Data for KeyValueDatabase { + fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()> { + let mut key = sender.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(pusher.pushkey.as_bytes()); + + // There are 2 kinds of pushers but the spec says: null deletes the pusher. + if pusher.kind.is_none() { + return self + .senderkey_pusher + .remove(&key) + .map(|_| ()) + .map_err(Into::into); + } + + self.senderkey_pusher.insert( + &key, + &serde_json::to_vec(&pusher).expect("Pusher is valid JSON value"), + )?; + + Ok(()) + } + + fn get_pusher( + &self, + sender: &UserId, + pushkey: &str, + ) -> Result> { + let mut senderkey = sender.as_bytes().to_vec(); + senderkey.push(0xff); + senderkey.extend_from_slice(pushkey.as_bytes()); + + self.senderkey_pusher + .get(&senderkey)? 
+ .map(|push| { + serde_json::from_slice(&push) + .map_err(|_| Error::bad_database("Invalid Pusher in db.")) + }) + .transpose() + } + + fn get_pushers(&self, sender: &UserId) -> Result> { + let mut prefix = sender.as_bytes().to_vec(); + prefix.push(0xff); + + self.senderkey_pusher + .scan_prefix(prefix) + .map(|(_, push)| { + serde_json::from_slice(&push) + .map_err(|_| Error::bad_database("Invalid Pusher in db.")) + }) + .collect() + } + + fn get_pushkeys<'a>( + &'a self, + sender: &UserId, + ) -> Box> + 'a> { + let mut prefix = sender.as_bytes().to_vec(); + prefix.push(0xff); + + Box::new(self.senderkey_pusher.scan_prefix(prefix).map(|(k, _)| { + let mut parts = k.splitn(2, |&b| b == 0xff); + let _senderkey = parts.next(); + let push_key = parts + .next() + .ok_or_else(|| Error::bad_database("Invalid senderkey_pusher in db"))?; + let push_key_string = utils::string_from_bytes(push_key) + .map_err(|_| Error::bad_database("Invalid pusher bytes in senderkey_pusher"))?; + + Ok(push_key_string) + })) + } +} diff --git a/src/database/key_value/rooms/alias.rs b/src/database/key_value/rooms/alias.rs new file mode 100644 index 00000000..6f230323 --- /dev/null +++ b/src/database/key_value/rooms/alias.rs @@ -0,0 +1,60 @@ +use ruma::{api::client::error::ErrorKind, OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId}; + +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; + +impl service::rooms::alias::Data for KeyValueDatabase { + fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()> { + self.alias_roomid + .insert(alias.alias().as_bytes(), room_id.as_bytes())?; + let mut aliasid = room_id.as_bytes().to_vec(); + aliasid.push(0xff); + aliasid.extend_from_slice(&services().globals.next_count()?.to_be_bytes()); + self.aliasid_alias.insert(&aliasid, alias.as_bytes())?; + Ok(()) + } + + fn remove_alias(&self, alias: &RoomAliasId) -> Result<()> { + if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { + let mut prefix = room_id.to_vec(); + prefix.push(0xff); + + for (key, _) in self.aliasid_alias.scan_prefix(prefix) { + self.aliasid_alias.remove(&key)?; + } + self.alias_roomid.remove(alias.alias().as_bytes())?; + } else { + return Err(Error::BadRequest( + ErrorKind::NotFound, + "Alias does not exist.", + )); + } + Ok(()) + } + + fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result> { + self.alias_roomid + .get(alias.alias().as_bytes())? + .map(|bytes| { + RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Room ID in alias_roomid is invalid unicode.") + })?) + .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) + }) + .transpose() + } + + fn local_aliases_for_room<'a>( + &'a self, + room_id: &RoomId, + ) -> Box> + 'a> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + Box::new(self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { + utils::string_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? 
+ .try_into() + .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) + })) + } +} diff --git a/src/database/key_value/rooms/auth_chain.rs b/src/database/key_value/rooms/auth_chain.rs new file mode 100644 index 00000000..60057ac1 --- /dev/null +++ b/src/database/key_value/rooms/auth_chain.rs @@ -0,0 +1,61 @@ +use std::{collections::HashSet, mem::size_of, sync::Arc}; + +use crate::{database::KeyValueDatabase, service, utils, Result}; + +impl service::rooms::auth_chain::Data for KeyValueDatabase { + fn get_cached_eventid_authchain(&self, key: &[u64]) -> Result>>> { + // Check RAM cache + if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { + return Ok(Some(Arc::clone(result))); + } + + // We only save auth chains for single events in the db + if key.len() == 1 { + // Check DB cache + let chain = self + .shorteventid_authchain + .get(&key[0].to_be_bytes())? + .map(|chain| { + chain + .chunks_exact(size_of::()) + .map(|chunk| utils::u64_from_bytes(chunk).expect("byte length is correct")) + .collect() + }); + + if let Some(chain) = chain { + let chain = Arc::new(chain); + + // Cache in RAM + self.auth_chain_cache + .lock() + .unwrap() + .insert(vec![key[0]], Arc::clone(&chain)); + + return Ok(Some(chain)); + } + } + + Ok(None) + } + + fn cache_auth_chain(&self, key: Vec, auth_chain: Arc>) -> Result<()> { + // Only persist single events in db + if key.len() == 1 { + self.shorteventid_authchain.insert( + &key[0].to_be_bytes(), + &auth_chain + .iter() + .flat_map(|s| s.to_be_bytes().to_vec()) + .collect::>(), + )?; + } + + // Cache in RAM + self.auth_chain_cache + .lock() + .unwrap() + .insert(key, auth_chain); + + Ok(()) + } +} diff --git a/src/database/key_value/rooms/directory.rs b/src/database/key_value/rooms/directory.rs new file mode 100644 index 00000000..e05dee82 --- /dev/null +++ b/src/database/key_value/rooms/directory.rs @@ -0,0 +1,28 @@ +use ruma::{OwnedRoomId, RoomId}; + +use crate::{database::KeyValueDatabase, service, utils, Error, Result}; + +impl service::rooms::directory::Data for KeyValueDatabase { + fn set_public(&self, room_id: &RoomId) -> Result<()> { + self.publicroomids.insert(room_id.as_bytes(), &[]) + } + + fn set_not_public(&self, room_id: &RoomId) -> Result<()> { + self.publicroomids.remove(room_id.as_bytes()) + } + + fn is_public_room(&self, room_id: &RoomId) -> Result { + Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) + } + + fn public_rooms<'a>(&'a self) -> Box> + 'a> { + Box::new(self.publicroomids.iter().map(|(bytes, _)| { + RoomId::parse( + utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Room ID in publicroomids is invalid unicode.") + })?, + ) + .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) + })) + } +} diff --git a/src/database/key_value/rooms/edus/mod.rs b/src/database/key_value/rooms/edus/mod.rs new file mode 100644 index 00000000..6c652918 --- /dev/null +++ b/src/database/key_value/rooms/edus/mod.rs @@ -0,0 +1,7 @@ +mod presence; +mod read_receipt; +mod typing; + +use crate::{database::KeyValueDatabase, service}; + +impl service::rooms::edus::Data for KeyValueDatabase {} diff --git a/src/database/key_value/rooms/edus/presence.rs b/src/database/key_value/rooms/edus/presence.rs new file mode 100644 index 00000000..904b1c44 --- /dev/null +++ b/src/database/key_value/rooms/edus/presence.rs @@ -0,0 +1,152 @@ +use std::collections::HashMap; + +use ruma::{ + events::presence::PresenceEvent, presence::PresenceState, OwnedUserId, RoomId, UInt, UserId, +}; + +use 
crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; + +impl service::rooms::edus::presence::Data for KeyValueDatabase { + fn update_presence( + &self, + user_id: &UserId, + room_id: &RoomId, + presence: PresenceEvent, + ) -> Result<()> { + // TODO: Remove old entry? Or maybe just wipe completely from time to time? + + let count = services().globals.next_count()?.to_be_bytes(); + + let mut presence_id = room_id.as_bytes().to_vec(); + presence_id.push(0xff); + presence_id.extend_from_slice(&count); + presence_id.push(0xff); + presence_id.extend_from_slice(presence.sender.as_bytes()); + + self.presenceid_presence.insert( + &presence_id, + &serde_json::to_vec(&presence).expect("PresenceEvent can be serialized"), + )?; + + self.userid_lastpresenceupdate.insert( + user_id.as_bytes(), + &utils::millis_since_unix_epoch().to_be_bytes(), + )?; + + Ok(()) + } + + fn ping_presence(&self, user_id: &UserId) -> Result<()> { + self.userid_lastpresenceupdate.insert( + user_id.as_bytes(), + &utils::millis_since_unix_epoch().to_be_bytes(), + )?; + + Ok(()) + } + + fn last_presence_update(&self, user_id: &UserId) -> Result> { + self.userid_lastpresenceupdate + .get(user_id.as_bytes())? + .map(|bytes| { + utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") + }) + }) + .transpose() + } + + fn get_presence_event( + &self, + room_id: &RoomId, + user_id: &UserId, + count: u64, + ) -> Result> { + let mut presence_id = room_id.as_bytes().to_vec(); + presence_id.push(0xff); + presence_id.extend_from_slice(&count.to_be_bytes()); + presence_id.push(0xff); + presence_id.extend_from_slice(user_id.as_bytes()); + + self.presenceid_presence + .get(&presence_id)? + .map(|value| parse_presence_event(&value)) + .transpose() + } + + fn presence_since( + &self, + room_id: &RoomId, + since: u64, + ) -> Result> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + let mut first_possible_edu = prefix.clone(); + first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since + let mut hashmap = HashMap::new(); + + for (key, value) in self + .presenceid_presence + .iter_from(&first_possible_edu, false) + .take_while(|(key, _)| key.starts_with(&prefix)) + { + let user_id = UserId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| Error::bad_database("Invalid UserId bytes in presenceid_presence."))?, + ) + .map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?; + + let presence = parse_presence_event(&value)?; + + hashmap.insert(user_id, presence); + } + + Ok(hashmap) + } + + /* + fn presence_maintain(&self, db: Arc>) { + // TODO @M0dEx: move this to a timed tasks module + tokio::spawn(async move { + loop { + select! { + Some(user_id) = self.presence_timers.next() { + // TODO @M0dEx: would it be better to acquire the lock outside the loop? 
+ let guard = db.read().await; + + // TODO @M0dEx: add self.presence_timers + // TODO @M0dEx: maintain presence + } + } + } + }); + } + */ +} + +fn parse_presence_event(bytes: &[u8]) -> Result { + let mut presence: PresenceEvent = serde_json::from_slice(bytes) + .map_err(|_| Error::bad_database("Invalid presence event in db."))?; + + let current_timestamp: UInt = utils::millis_since_unix_epoch() + .try_into() + .expect("time is valid"); + + if presence.content.presence == PresenceState::Online { + // Don't set last_active_ago when the user is online + presence.content.last_active_ago = None; + } else { + // Convert from timestamp to duration + presence.content.last_active_ago = presence + .content + .last_active_ago + .map(|timestamp| current_timestamp - timestamp); + } + + Ok(presence) +} diff --git a/src/database/key_value/rooms/edus/read_receipt.rs b/src/database/key_value/rooms/edus/read_receipt.rs new file mode 100644 index 00000000..fa97ea34 --- /dev/null +++ b/src/database/key_value/rooms/edus/read_receipt.rs @@ -0,0 +1,150 @@ +use std::mem; + +use ruma::{ + events::receipt::ReceiptEvent, serde::Raw, CanonicalJsonObject, OwnedUserId, RoomId, UserId, +}; + +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; + +impl service::rooms::edus::read_receipt::Data for KeyValueDatabase { + fn readreceipt_update( + &self, + user_id: &UserId, + room_id: &RoomId, + event: ReceiptEvent, + ) -> Result<()> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + let mut last_possible_key = prefix.clone(); + last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); + + // Remove old entry + if let Some((old, _)) = self + .readreceiptid_readreceipt + .iter_from(&last_possible_key, true) + .take_while(|(key, _)| key.starts_with(&prefix)) + .find(|(key, _)| { + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element") + == user_id.as_bytes() + }) + { + // This is the old room_latest + self.readreceiptid_readreceipt.remove(&old)?; + } + + let mut room_latest_id = prefix; + room_latest_id.extend_from_slice(&services().globals.next_count()?.to_be_bytes()); + room_latest_id.push(0xff); + room_latest_id.extend_from_slice(user_id.as_bytes()); + + self.readreceiptid_readreceipt.insert( + &room_latest_id, + &serde_json::to_vec(&event).expect("EduEvent::to_string always works"), + )?; + + Ok(()) + } + + fn readreceipts_since<'a>( + &'a self, + room_id: &RoomId, + since: u64, + ) -> Box< + dyn Iterator< + Item = Result<( + OwnedUserId, + u64, + Raw, + )>, + > + 'a, + > { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + let prefix2 = prefix.clone(); + + let mut first_possible_edu = prefix.clone(); + first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since + + Box::new( + self.readreceiptid_readreceipt + .iter_from(&first_possible_edu, false) + .take_while(move |(k, _)| k.starts_with(&prefix2)) + .map(move |(k, v)| { + let count = utils::u64_from_bytes( + &k[prefix.len()..prefix.len() + mem::size_of::()], + ) + .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; + let user_id = UserId::parse( + utils::string_from_bytes(&k[prefix.len() + mem::size_of::() + 1..]) + .map_err(|_| { + Error::bad_database("Invalid readreceiptid userid bytes in db.") + })?, + ) + .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?; + + let mut json = + serde_json::from_slice::(&v).map_err(|_| { + Error::bad_database( + "Read receipt in 
roomlatestid_roomlatest is invalid json.", + ) + })?; + json.remove("room_id"); + + Ok(( + user_id, + count, + Raw::from_json( + serde_json::value::to_raw_value(&json) + .expect("json is valid raw value"), + ), + )) + }), + ) + } + + fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()> { + let mut key = room_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(user_id.as_bytes()); + + self.roomuserid_privateread + .insert(&key, &count.to_be_bytes())?; + + self.roomuserid_lastprivatereadupdate + .insert(&key, &services().globals.next_count()?.to_be_bytes()) + } + + fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { + let mut key = room_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(user_id.as_bytes()); + + self.roomuserid_privateread + .get(&key)? + .map_or(Ok(None), |v| { + Ok(Some(utils::u64_from_bytes(&v).map_err(|_| { + Error::bad_database("Invalid private read marker bytes") + })?)) + }) + } + + fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let mut key = room_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(user_id.as_bytes()); + + Ok(self + .roomuserid_lastprivatereadupdate + .get(&key)? + .map(|bytes| { + utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.") + }) + }) + .transpose()? + .unwrap_or(0)) + } +} diff --git a/src/database/key_value/rooms/edus/typing.rs b/src/database/key_value/rooms/edus/typing.rs new file mode 100644 index 00000000..4a2f0f96 --- /dev/null +++ b/src/database/key_value/rooms/edus/typing.rs @@ -0,0 +1,86 @@ +use std::collections::HashSet; + +use ruma::{OwnedUserId, RoomId, UserId}; + +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; + +impl service::rooms::edus::typing::Data for KeyValueDatabase { + fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + let count = services().globals.next_count()?.to_be_bytes(); + + let mut room_typing_id = prefix; + room_typing_id.extend_from_slice(&timeout.to_be_bytes()); + room_typing_id.push(0xff); + room_typing_id.extend_from_slice(&count); + + self.typingid_userid + .insert(&room_typing_id, user_id.as_bytes())?; + + self.roomid_lasttypingupdate + .insert(room_id.as_bytes(), &count)?; + + Ok(()) + } + + fn typing_remove(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + let user_id = user_id.to_string(); + + let mut found_outdated = false; + + // Maybe there are multiple ones from calling roomtyping_add multiple times + for outdated_edu in self + .typingid_userid + .scan_prefix(prefix) + .filter(|(_, v)| &**v == user_id.as_bytes()) + { + self.typingid_userid.remove(&outdated_edu.0)?; + found_outdated = true; + } + + if found_outdated { + self.roomid_lasttypingupdate.insert( + room_id.as_bytes(), + &services().globals.next_count()?.to_be_bytes(), + )?; + } + + Ok(()) + } + + fn last_typing_update(&self, room_id: &RoomId) -> Result { + Ok(self + .roomid_lasttypingupdate + .get(room_id.as_bytes())? + .map(|bytes| { + utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") + }) + }) + .transpose()? 
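// Sketch, illustrative only: `.transpose()?` above turns `Option<Result<u64>>` into
// `Result<Option<u64>>` and `?` unwraps it, so the `.unwrap_or(0)` that follows makes
// a room with no recorded typing update read as count 0. The same idiom in miniature
// (hypothetical helper):
fn counter_or_zero(raw: Option<[u8; 8]>) -> u64 {
    raw.map(u64::from_be_bytes).unwrap_or(0)
}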
+ .unwrap_or(0)) + } + + fn typings_all(&self, room_id: &RoomId) -> Result> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + let mut user_ids = HashSet::new(); + + for (_, user_id) in self.typingid_userid.scan_prefix(prefix) { + let user_id = UserId::parse(utils::string_from_bytes(&user_id).map_err(|_| { + Error::bad_database("User ID in typingid_userid is invalid unicode.") + })?) + .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid."))?; + + user_ids.insert(user_id); + } + + Ok(user_ids) + } +} diff --git a/src/database/key_value/rooms/lazy_load.rs b/src/database/key_value/rooms/lazy_load.rs new file mode 100644 index 00000000..a19d52cb --- /dev/null +++ b/src/database/key_value/rooms/lazy_load.rs @@ -0,0 +1,65 @@ +use ruma::{DeviceId, RoomId, UserId}; + +use crate::{database::KeyValueDatabase, service, Result}; + +impl service::rooms::lazy_loading::Data for KeyValueDatabase { + fn lazy_load_was_sent_before( + &self, + user_id: &UserId, + device_id: &DeviceId, + room_id: &RoomId, + ll_user: &UserId, + ) -> Result { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(device_id.as_bytes()); + key.push(0xff); + key.extend_from_slice(room_id.as_bytes()); + key.push(0xff); + key.extend_from_slice(ll_user.as_bytes()); + Ok(self.lazyloadedids.get(&key)?.is_some()) + } + + fn lazy_load_confirm_delivery( + &self, + user_id: &UserId, + device_id: &DeviceId, + room_id: &RoomId, + confirmed_user_ids: &mut dyn Iterator, + ) -> Result<()> { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xff); + prefix.extend_from_slice(device_id.as_bytes()); + prefix.push(0xff); + prefix.extend_from_slice(room_id.as_bytes()); + prefix.push(0xff); + + for ll_id in confirmed_user_ids { + let mut key = prefix.clone(); + key.extend_from_slice(ll_id.as_bytes()); + self.lazyloadedids.insert(&key, &[])?; + } + + Ok(()) + } + + fn lazy_load_reset( + &self, + user_id: &UserId, + device_id: &DeviceId, + room_id: &RoomId, + ) -> Result<()> { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xff); + prefix.extend_from_slice(device_id.as_bytes()); + prefix.push(0xff); + prefix.extend_from_slice(room_id.as_bytes()); + prefix.push(0xff); + + for (key, _) in self.lazyloadedids.scan_prefix(prefix) { + self.lazyloadedids.remove(&key)?; + } + + Ok(()) + } +} diff --git a/src/database/key_value/rooms/metadata.rs b/src/database/key_value/rooms/metadata.rs new file mode 100644 index 00000000..57540c40 --- /dev/null +++ b/src/database/key_value/rooms/metadata.rs @@ -0,0 +1,45 @@ +use ruma::{OwnedRoomId, RoomId}; + +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; + +impl service::rooms::metadata::Data for KeyValueDatabase { + fn exists(&self, room_id: &RoomId) -> Result { + let prefix = match services().rooms.short.get_shortroomid(room_id)? { + Some(b) => b.to_be_bytes().to_vec(), + None => return Ok(false), + }; + + // Look for PDUs in that room. 
+ Ok(self + .pduid_pdu + .iter_from(&prefix, false) + .next() + .filter(|(k, _)| k.starts_with(&prefix)) + .is_some()) + } + + fn iter_ids<'a>(&'a self) -> Box> + 'a> { + Box::new(self.roomid_shortroomid.iter().map(|(bytes, _)| { + RoomId::parse( + utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Room ID in publicroomids is invalid unicode.") + })?, + ) + .map_err(|_| Error::bad_database("Room ID in roomid_shortroomid is invalid.")) + })) + } + + fn is_disabled(&self, room_id: &RoomId) -> Result { + Ok(self.disabledroomids.get(room_id.as_bytes())?.is_some()) + } + + fn disable_room(&self, room_id: &RoomId, disabled: bool) -> Result<()> { + if disabled { + self.disabledroomids.insert(room_id.as_bytes(), &[])?; + } else { + self.disabledroomids.remove(room_id.as_bytes())?; + } + + Ok(()) + } +} diff --git a/src/database/key_value/rooms/mod.rs b/src/database/key_value/rooms/mod.rs new file mode 100644 index 00000000..406943ed --- /dev/null +++ b/src/database/key_value/rooms/mod.rs @@ -0,0 +1,20 @@ +mod alias; +mod auth_chain; +mod directory; +mod edus; +mod lazy_load; +mod metadata; +mod outlier; +mod pdu_metadata; +mod search; +mod short; +mod state; +mod state_accessor; +mod state_cache; +mod state_compressor; +mod timeline; +mod user; + +use crate::{database::KeyValueDatabase, service}; + +impl service::rooms::Data for KeyValueDatabase {} diff --git a/src/database/key_value/rooms/outlier.rs b/src/database/key_value/rooms/outlier.rs new file mode 100644 index 00000000..7985ba81 --- /dev/null +++ b/src/database/key_value/rooms/outlier.rs @@ -0,0 +1,28 @@ +use ruma::{CanonicalJsonObject, EventId}; + +use crate::{database::KeyValueDatabase, service, Error, PduEvent, Result}; + +impl service::rooms::outlier::Data for KeyValueDatabase { + fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { + self.eventid_outlierpdu + .get(event_id.as_bytes())? + .map_or(Ok(None), |pdu| { + serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) + }) + } + + fn get_outlier_pdu(&self, event_id: &EventId) -> Result> { + self.eventid_outlierpdu + .get(event_id.as_bytes())? 
+ .map_or(Ok(None), |pdu| { + serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) + }) + } + + fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { + self.eventid_outlierpdu.insert( + event_id.as_bytes(), + &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), + ) + } +} diff --git a/src/database/key_value/rooms/pdu_metadata.rs b/src/database/key_value/rooms/pdu_metadata.rs new file mode 100644 index 00000000..76ec7346 --- /dev/null +++ b/src/database/key_value/rooms/pdu_metadata.rs @@ -0,0 +1,33 @@ +use std::sync::Arc; + +use ruma::{EventId, RoomId}; + +use crate::{database::KeyValueDatabase, service, Result}; + +impl service::rooms::pdu_metadata::Data for KeyValueDatabase { + fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { + for prev in event_ids { + let mut key = room_id.as_bytes().to_vec(); + key.extend_from_slice(prev.as_bytes()); + self.referencedevents.insert(&key, &[])?; + } + + Ok(()) + } + + fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { + let mut key = room_id.as_bytes().to_vec(); + key.extend_from_slice(event_id.as_bytes()); + Ok(self.referencedevents.get(&key)?.is_some()) + } + + fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { + self.softfailedeventids.insert(event_id.as_bytes(), &[]) + } + + fn is_event_soft_failed(&self, event_id: &EventId) -> Result { + self.softfailedeventids + .get(event_id.as_bytes()) + .map(|o| o.is_some()) + } +} diff --git a/src/database/key_value/rooms/search.rs b/src/database/key_value/rooms/search.rs new file mode 100644 index 00000000..19ae57b4 --- /dev/null +++ b/src/database/key_value/rooms/search.rs @@ -0,0 +1,75 @@ +use std::mem::size_of; + +use ruma::RoomId; + +use crate::{database::KeyValueDatabase, service, services, utils, Result}; + +impl service::rooms::search::Data for KeyValueDatabase { + fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> { + let mut batch = message_body + .split_terminator(|c: char| !c.is_alphanumeric()) + .filter(|s| !s.is_empty()) + .filter(|word| word.len() <= 50) + .map(str::to_lowercase) + .map(|word| { + let mut key = shortroomid.to_be_bytes().to_vec(); + key.extend_from_slice(word.as_bytes()); + key.push(0xff); + key.extend_from_slice(pdu_id); + (key, Vec::new()) + }); + + self.tokenids.insert_batch(&mut batch) + } + + fn search_pdus<'a>( + &'a self, + room_id: &RoomId, + search_string: &str, + ) -> Result> + 'a>, Vec)>> { + let prefix = services() + .rooms + .short + .get_shortroomid(room_id)? 
+ .expect("room exists") + .to_be_bytes() + .to_vec(); + let prefix_clone = prefix.clone(); + + let words: Vec<_> = search_string + .split_terminator(|c: char| !c.is_alphanumeric()) + .filter(|s| !s.is_empty()) + .map(str::to_lowercase) + .collect(); + + let iterators = words.clone().into_iter().map(move |word| { + let mut prefix2 = prefix.clone(); + prefix2.extend_from_slice(word.as_bytes()); + prefix2.push(0xff); + + let mut last_possible_id = prefix2.clone(); + last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); + + self.tokenids + .iter_from(&last_possible_id, true) // Newest pdus first + .take_while(move |(k, _)| k.starts_with(&prefix2)) + .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) + }); + + let common_elements = match utils::common_elements(iterators, |a, b| { + // We compare b with a because we reversed the iterator earlier + b.cmp(a) + }) { + Some(it) => it, + None => return Ok(None), + }; + + let mapped = common_elements.map(move |id| { + let mut pduid = prefix_clone.clone(); + pduid.extend_from_slice(&id); + pduid + }); + + Ok(Some((Box::new(mapped), words))) + } +} diff --git a/src/database/key_value/rooms/short.rs b/src/database/key_value/rooms/short.rs new file mode 100644 index 00000000..c0223170 --- /dev/null +++ b/src/database/key_value/rooms/short.rs @@ -0,0 +1,218 @@ +use std::sync::Arc; + +use ruma::{events::StateEventType, EventId, RoomId}; + +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; + +impl service::rooms::short::Data for KeyValueDatabase { + fn get_or_create_shorteventid(&self, event_id: &EventId) -> Result { + if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { + return Ok(*short); + } + + let short = match self.eventid_shorteventid.get(event_id.as_bytes())? { + Some(shorteventid) => utils::u64_from_bytes(&shorteventid) + .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, + None => { + let shorteventid = services().globals.next_count()?; + self.eventid_shorteventid + .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; + self.shorteventid_eventid + .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; + shorteventid + } + }; + + self.eventidshort_cache + .lock() + .unwrap() + .insert(event_id.to_owned(), short); + + Ok(short) + } + + fn get_shortstatekey( + &self, + event_type: &StateEventType, + state_key: &str, + ) -> Result> { + if let Some(short) = self + .statekeyshort_cache + .lock() + .unwrap() + .get_mut(&(event_type.clone(), state_key.to_owned())) + { + return Ok(Some(*short)); + } + + let mut statekey = event_type.to_string().as_bytes().to_vec(); + statekey.push(0xff); + statekey.extend_from_slice(state_key.as_bytes()); + + let short = self + .statekey_shortstatekey + .get(&statekey)? 
+ .map(|shortstatekey| { + utils::u64_from_bytes(&shortstatekey) + .map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) + }) + .transpose()?; + + if let Some(s) = short { + self.statekeyshort_cache + .lock() + .unwrap() + .insert((event_type.clone(), state_key.to_owned()), s); + } + + Ok(short) + } + + fn get_or_create_shortstatekey( + &self, + event_type: &StateEventType, + state_key: &str, + ) -> Result { + if let Some(short) = self + .statekeyshort_cache + .lock() + .unwrap() + .get_mut(&(event_type.clone(), state_key.to_owned())) + { + return Ok(*short); + } + + let mut statekey = event_type.to_string().as_bytes().to_vec(); + statekey.push(0xff); + statekey.extend_from_slice(state_key.as_bytes()); + + let short = match self.statekey_shortstatekey.get(&statekey)? { + Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) + .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, + None => { + let shortstatekey = services().globals.next_count()?; + self.statekey_shortstatekey + .insert(&statekey, &shortstatekey.to_be_bytes())?; + self.shortstatekey_statekey + .insert(&shortstatekey.to_be_bytes(), &statekey)?; + shortstatekey + } + }; + + self.statekeyshort_cache + .lock() + .unwrap() + .insert((event_type.clone(), state_key.to_owned()), short); + + Ok(short) + } + + fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { + if let Some(id) = self + .shorteventid_cache + .lock() + .unwrap() + .get_mut(&shorteventid) + { + return Ok(Arc::clone(id)); + } + + let bytes = self + .shorteventid_eventid + .get(&shorteventid.to_be_bytes())? + .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; + + let event_id = EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") + })?) + .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?; + + self.shorteventid_cache + .lock() + .unwrap() + .insert(shorteventid, Arc::clone(&event_id)); + + Ok(event_id) + } + + fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { + if let Some(id) = self + .shortstatekey_cache + .lock() + .unwrap() + .get_mut(&shortstatekey) + { + return Ok(id.clone()); + } + + let bytes = self + .shortstatekey_statekey + .get(&shortstatekey.to_be_bytes())? + .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; + + let mut parts = bytes.splitn(2, |&b| b == 0xff); + let eventtype_bytes = parts.next().expect("split always returns one entry"); + let statekey_bytes = parts + .next() + .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; + + let event_type = + StateEventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { + Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") + })?) + .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; + + let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { + Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") + })?; + + let result = (event_type, state_key); + + self.shortstatekey_cache + .lock() + .unwrap() + .insert(shortstatekey, result.clone()); + + Ok(result) + } + + /// Returns (shortstatehash, already_existed) + fn get_or_create_shortstatehash(&self, state_hash: &[u8]) -> Result<(u64, bool)> { + Ok(match self.statehash_shortstatehash.get(state_hash)? 
{ + Some(shortstatehash) => ( + utils::u64_from_bytes(&shortstatehash) + .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, + true, + ), + None => { + let shortstatehash = services().globals.next_count()?; + self.statehash_shortstatehash + .insert(state_hash, &shortstatehash.to_be_bytes())?; + (shortstatehash, false) + } + }) + } + + fn get_shortroomid(&self, room_id: &RoomId) -> Result> { + self.roomid_shortroomid + .get(room_id.as_bytes())? + .map(|bytes| { + utils::u64_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) + }) + .transpose() + } + + fn get_or_create_shortroomid(&self, room_id: &RoomId) -> Result { + Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? { + Some(short) => utils::u64_from_bytes(&short) + .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, + None => { + let short = services().globals.next_count()?; + self.roomid_shortroomid + .insert(room_id.as_bytes(), &short.to_be_bytes())?; + short + } + }) + } +} diff --git a/src/database/key_value/rooms/state.rs b/src/database/key_value/rooms/state.rs new file mode 100644 index 00000000..f17d37bb --- /dev/null +++ b/src/database/key_value/rooms/state.rs @@ -0,0 +1,73 @@ +use ruma::{EventId, OwnedEventId, RoomId}; +use std::collections::HashSet; + +use std::sync::Arc; +use tokio::sync::MutexGuard; + +use crate::{database::KeyValueDatabase, service, utils, Error, Result}; + +impl service::rooms::state::Data for KeyValueDatabase { + fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { + self.roomid_shortstatehash + .get(room_id.as_bytes())? + .map_or(Ok(None), |bytes| { + Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") + })?)) + }) + } + + fn set_room_state( + &self, + room_id: &RoomId, + new_shortstatehash: u64, + _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + ) -> Result<()> { + self.roomid_shortstatehash + .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; + Ok(()) + } + + fn set_event_state(&self, shorteventid: u64, shortstatehash: u64) -> Result<()> { + self.shorteventid_shortstatehash + .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; + Ok(()) + } + + fn get_forward_extremities(&self, room_id: &RoomId) -> Result>> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + self.roomid_pduleaves + .scan_prefix(prefix) + .map(|(_, bytes)| { + EventId::parse_arc(utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") + })?) 
+ .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) + }) + .collect() + } + + fn set_forward_extremities<'a>( + &self, + room_id: &RoomId, + event_ids: Vec, + _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + ) -> Result<()> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { + self.roomid_pduleaves.remove(&key)?; + } + + for event_id in event_ids { + let mut key = prefix.to_owned(); + key.extend_from_slice(event_id.as_bytes()); + self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; + } + + Ok(()) + } +} diff --git a/src/database/key_value/rooms/state_accessor.rs b/src/database/key_value/rooms/state_accessor.rs new file mode 100644 index 00000000..70e59acb --- /dev/null +++ b/src/database/key_value/rooms/state_accessor.rs @@ -0,0 +1,189 @@ +use std::{ + collections::{BTreeMap, HashMap}, + sync::Arc, +}; + +use crate::{database::KeyValueDatabase, service, services, utils, Error, PduEvent, Result}; +use async_trait::async_trait; +use ruma::{events::StateEventType, EventId, RoomId}; + +#[async_trait] +impl service::rooms::state_accessor::Data for KeyValueDatabase { + async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { + let full_state = services() + .rooms + .state_compressor + .load_shortstatehash_info(shortstatehash)? + .pop() + .expect("there is always one layer") + .1; + let mut result = BTreeMap::new(); + let mut i = 0; + for compressed in full_state.into_iter() { + let parsed = services() + .rooms + .state_compressor + .parse_compressed_state_event(&compressed)?; + result.insert(parsed.0, parsed.1); + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + } + Ok(result) + } + + async fn state_full( + &self, + shortstatehash: u64, + ) -> Result>> { + let full_state = services() + .rooms + .state_compressor + .load_shortstatehash_info(shortstatehash)? + .pop() + .expect("there is always one layer") + .1; + + let mut result = HashMap::new(); + let mut i = 0; + for compressed in full_state { + let (_, eventid) = services() + .rooms + .state_compressor + .parse_compressed_state_event(&compressed)?; + if let Some(pdu) = services().rooms.timeline.get_pdu(&eventid)? { + result.insert( + ( + pdu.kind.to_string().into(), + pdu.state_key + .as_ref() + .ok_or_else(|| Error::bad_database("State event has no state key."))? + .clone(), + ), + pdu, + ); + } + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + } + + Ok(result) + } + + /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). + fn state_get_id( + &self, + shortstatehash: u64, + event_type: &StateEventType, + state_key: &str, + ) -> Result>> { + let shortstatekey = match services() + .rooms + .short + .get_shortstatekey(event_type, state_key)? + { + Some(s) => s, + None => return Ok(None), + }; + let full_state = services() + .rooms + .state_compressor + .load_shortstatehash_info(shortstatehash)? + .pop() + .expect("there is always one layer") + .1; + Ok(full_state + .into_iter() + .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) + .and_then(|compressed| { + services() + .rooms + .state_compressor + .parse_compressed_state_event(&compressed) + .ok() + .map(|(_, id)| id) + })) + } + + /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). 
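// Sketch, illustrative only: in this scheme a compressed state entry is 16 bytes,
// `shortstatekey` (8 bytes, big-endian) followed by `shorteventid` (8 bytes,
// big-endian), which is why `state_get_id` above can select the right event with
// `starts_with(&shortstatekey.to_be_bytes())`. A hypothetical encoder for one entry:
fn compress_state_event(shortstatekey: u64, shorteventid: u64) -> [u8; 16] {
    let mut entry = [0u8; 16];
    entry[..8].copy_from_slice(&shortstatekey.to_be_bytes());
    entry[8..].copy_from_slice(&shorteventid.to_be_bytes());
    entry
}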
+ fn state_get( + &self, + shortstatehash: u64, + event_type: &StateEventType, + state_key: &str, + ) -> Result>> { + self.state_get_id(shortstatehash, event_type, state_key)? + .map_or(Ok(None), |event_id| { + services().rooms.timeline.get_pdu(&event_id) + }) + } + + /// Returns the state hash for this pdu. + fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { + self.eventid_shorteventid + .get(event_id.as_bytes())? + .map_or(Ok(None), |shorteventid| { + self.shorteventid_shortstatehash + .get(&shorteventid)? + .map(|bytes| { + utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database( + "Invalid shortstatehash bytes in shorteventid_shortstatehash", + ) + }) + }) + .transpose() + }) + } + + /// Returns the full room state. + async fn room_state_full( + &self, + room_id: &RoomId, + ) -> Result>> { + if let Some(current_shortstatehash) = + services().rooms.state.get_room_shortstatehash(room_id)? + { + self.state_full(current_shortstatehash).await + } else { + Ok(HashMap::new()) + } + } + + /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). + fn room_state_get_id( + &self, + room_id: &RoomId, + event_type: &StateEventType, + state_key: &str, + ) -> Result>> { + if let Some(current_shortstatehash) = + services().rooms.state.get_room_shortstatehash(room_id)? + { + self.state_get_id(current_shortstatehash, event_type, state_key) + } else { + Ok(None) + } + } + + /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). + fn room_state_get( + &self, + room_id: &RoomId, + event_type: &StateEventType, + state_key: &str, + ) -> Result>> { + if let Some(current_shortstatehash) = + services().rooms.state.get_room_shortstatehash(room_id)? + { + self.state_get(current_shortstatehash, event_type, state_key) + } else { + Ok(None) + } + } +} diff --git a/src/database/key_value/rooms/state_cache.rs b/src/database/key_value/rooms/state_cache.rs new file mode 100644 index 00000000..d0ea0c2c --- /dev/null +++ b/src/database/key_value/rooms/state_cache.rs @@ -0,0 +1,622 @@ +use std::{collections::HashSet, sync::Arc}; + +use regex::Regex; +use ruma::{ + events::{AnyStrippedStateEvent, AnySyncStateEvent}, + serde::Raw, + OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, +}; + +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; + +impl service::rooms::state_cache::Data for KeyValueDatabase { + fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + self.roomuseroncejoinedids.insert(&userroom_id, &[]) + } + + fn mark_as_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { + let mut roomuser_id = room_id.as_bytes().to_vec(); + roomuser_id.push(0xff); + roomuser_id.extend_from_slice(user_id.as_bytes()); + + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + + self.userroomid_joined.insert(&userroom_id, &[])?; + self.roomuserid_joined.insert(&roomuser_id, &[])?; + self.userroomid_invitestate.remove(&userroom_id)?; + self.roomuserid_invitecount.remove(&roomuser_id)?; + self.userroomid_leftstate.remove(&userroom_id)?; + self.roomuserid_leftcount.remove(&roomuser_id)?; + + Ok(()) + } + + fn mark_as_invited( + &self, + user_id: &UserId, + room_id: &RoomId, + last_state: Option>>, + ) -> Result<()> { + let mut roomuser_id = room_id.as_bytes().to_vec(); + 
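// Sketch, illustrative only: membership is double-indexed. Every change writes a
// `userroomid_*` key (user ++ 0xff ++ room, "which rooms does this user have?") and a
// matching `roomuserid_*` key (room ++ 0xff ++ user, "who is in this room?"), and the
// trees for the other membership states of the same pair are cleared, so a user is
// joined, invited or left, never two at once. Hypothetical helpers for the two shapes:
fn userroom_key(user_id: &ruma::UserId, room_id: &ruma::RoomId) -> Vec<u8> {
    let mut key = user_id.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(room_id.as_bytes());
    key
}

fn roomuser_key(room_id: &ruma::RoomId, user_id: &ruma::UserId) -> Vec<u8> {
    let mut key = room_id.as_bytes().to_vec();
    key.push(0xff);
    key.extend_from_slice(user_id.as_bytes());
    key
}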
roomuser_id.push(0xff); + roomuser_id.extend_from_slice(user_id.as_bytes()); + + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + + self.userroomid_invitestate.insert( + &userroom_id, + &serde_json::to_vec(&last_state.unwrap_or_default()) + .expect("state to bytes always works"), + )?; + self.roomuserid_invitecount.insert( + &roomuser_id, + &services().globals.next_count()?.to_be_bytes(), + )?; + self.userroomid_joined.remove(&userroom_id)?; + self.roomuserid_joined.remove(&roomuser_id)?; + self.userroomid_leftstate.remove(&userroom_id)?; + self.roomuserid_leftcount.remove(&roomuser_id)?; + + Ok(()) + } + + fn mark_as_left(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { + let mut roomuser_id = room_id.as_bytes().to_vec(); + roomuser_id.push(0xff); + roomuser_id.extend_from_slice(user_id.as_bytes()); + + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + + self.userroomid_leftstate.insert( + &userroom_id, + &serde_json::to_vec(&Vec::>::new()).unwrap(), + )?; // TODO + self.roomuserid_leftcount.insert( + &roomuser_id, + &services().globals.next_count()?.to_be_bytes(), + )?; + self.userroomid_joined.remove(&userroom_id)?; + self.roomuserid_joined.remove(&roomuser_id)?; + self.userroomid_invitestate.remove(&userroom_id)?; + self.roomuserid_invitecount.remove(&roomuser_id)?; + + Ok(()) + } + + fn update_joined_count(&self, room_id: &RoomId) -> Result<()> { + let mut joinedcount = 0_u64; + let mut invitedcount = 0_u64; + let mut joined_servers = HashSet::new(); + let mut real_users = HashSet::new(); + + for joined in self.room_members(room_id).filter_map(|r| r.ok()) { + joined_servers.insert(joined.server_name().to_owned()); + if joined.server_name() == services().globals.server_name() + && !services().users.is_deactivated(&joined).unwrap_or(true) + { + real_users.insert(joined); + } + joinedcount += 1; + } + + for _invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { + invitedcount += 1; + } + + self.roomid_joinedcount + .insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?; + + self.roomid_invitedcount + .insert(room_id.as_bytes(), &invitedcount.to_be_bytes())?; + + self.our_real_users_cache + .write() + .unwrap() + .insert(room_id.to_owned(), Arc::new(real_users)); + + for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { + if !joined_servers.remove(&old_joined_server) { + // Server not in room anymore + let mut roomserver_id = room_id.as_bytes().to_vec(); + roomserver_id.push(0xff); + roomserver_id.extend_from_slice(old_joined_server.as_bytes()); + + let mut serverroom_id = old_joined_server.as_bytes().to_vec(); + serverroom_id.push(0xff); + serverroom_id.extend_from_slice(room_id.as_bytes()); + + self.roomserverids.remove(&roomserver_id)?; + self.serverroomids.remove(&serverroom_id)?; + } + } + + // Now only new servers are in joined_servers anymore + for server in joined_servers { + let mut roomserver_id = room_id.as_bytes().to_vec(); + roomserver_id.push(0xff); + roomserver_id.extend_from_slice(server.as_bytes()); + + let mut serverroom_id = server.as_bytes().to_vec(); + serverroom_id.push(0xff); + serverroom_id.extend_from_slice(room_id.as_bytes()); + + self.roomserverids.insert(&roomserver_id, &[])?; + self.serverroomids.insert(&serverroom_id, &[])?; + } + + self.appservice_in_room_cache + .write() + .unwrap() + .remove(room_id); + + Ok(()) + } + + 
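// Sketch, illustrative only: `update_joined_count` above keeps the paired
// `roomserverids` / `serverroomids` trees consistent by set difference: every
// previously-known server is removed from `joined_servers`, servers whose removal
// fails are no longer in the room and have their keys deleted, and whatever remains
// in the set is new and gets inserted. The same idea in miniature over plain strings
// (hypothetical helper):
fn diff_servers(
    old: &std::collections::HashSet<String>,
    new: &std::collections::HashSet<String>,
) -> (Vec<String>, Vec<String>) {
    let added: Vec<String> = new.difference(old).cloned().collect();
    let removed: Vec<String> = old.difference(new).cloned().collect();
    (added, removed)
}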
#[tracing::instrument(skip(self, room_id))] + fn get_our_real_users(&self, room_id: &RoomId) -> Result>> { + let maybe = self + .our_real_users_cache + .read() + .unwrap() + .get(room_id) + .cloned(); + if let Some(users) = maybe { + Ok(users) + } else { + self.update_joined_count(room_id)?; + Ok(Arc::clone( + self.our_real_users_cache + .read() + .unwrap() + .get(room_id) + .unwrap(), + )) + } + } + + #[tracing::instrument(skip(self, room_id, appservice))] + fn appservice_in_room( + &self, + room_id: &RoomId, + appservice: &(String, serde_yaml::Value), + ) -> Result { + let maybe = self + .appservice_in_room_cache + .read() + .unwrap() + .get(room_id) + .and_then(|map| map.get(&appservice.0)) + .copied(); + + if let Some(b) = maybe { + Ok(b) + } else if let Some(namespaces) = appservice.1.get("namespaces") { + let users = namespaces + .get("users") + .and_then(|users| users.as_sequence()) + .map_or_else(Vec::new, |users| { + users + .iter() + .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) + .collect::>() + }); + + let bridge_user_id = appservice + .1 + .get("sender_localpart") + .and_then(|string| string.as_str()) + .and_then(|string| { + UserId::parse_with_server_name(string, services().globals.server_name()).ok() + }); + + let in_room = bridge_user_id + .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) + || self.room_members(room_id).any(|userid| { + userid.map_or(false, |userid| { + users.iter().any(|r| r.is_match(userid.as_str())) + }) + }); + + self.appservice_in_room_cache + .write() + .unwrap() + .entry(room_id.to_owned()) + .or_default() + .insert(appservice.0.clone(), in_room); + + Ok(in_room) + } else { + Ok(false) + } + } + + /// Makes a user forget a room. + #[tracing::instrument(skip(self))] + fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + + let mut roomuser_id = room_id.as_bytes().to_vec(); + roomuser_id.push(0xff); + roomuser_id.extend_from_slice(user_id.as_bytes()); + + self.userroomid_leftstate.remove(&userroom_id)?; + self.roomuserid_leftcount.remove(&roomuser_id)?; + + Ok(()) + } + + /// Returns an iterator of all servers participating in this room. + #[tracing::instrument(skip(self))] + fn room_servers<'a>( + &'a self, + room_id: &RoomId, + ) -> Box> + 'a> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + Box::new(self.roomserverids.scan_prefix(prefix).map(|(key, _)| { + ServerName::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("Server name in roomserverids is invalid unicode.") + })?, + ) + .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) + })) + } + + #[tracing::instrument(skip(self))] + fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { + let mut key = server.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(room_id.as_bytes()); + + self.serverroomids.get(&key).map(|o| o.is_some()) + } + + /// Returns an iterator of all rooms a server participates in (as far as we know). 
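// Sketch, illustrative only: the iterators below recover the trailing component of a
// `prefix ++ 0xff ++ id` key with `rsplit(..).next()`, which cannot panic because
// `rsplit` always yields at least one (possibly empty) segment. That step on its own:
fn last_segment(key: &[u8]) -> &[u8] {
    key.rsplit(|&b| b == 0xff)
        .next()
        .expect("rsplit always returns an element")
}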
+ #[tracing::instrument(skip(self))] + fn server_rooms<'a>( + &'a self, + server: &ServerName, + ) -> Box> + 'a> { + let mut prefix = server.as_bytes().to_vec(); + prefix.push(0xff); + + Box::new(self.serverroomids.scan_prefix(prefix).map(|(key, _)| { + RoomId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, + ) + .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) + })) + } + + /// Returns an iterator over all joined members of a room. + #[tracing::instrument(skip(self))] + fn room_members<'a>( + &'a self, + room_id: &RoomId, + ) -> Box> + 'a> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + Box::new(self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { + UserId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("User ID in roomuserid_joined is invalid unicode.") + })?, + ) + .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) + })) + } + + #[tracing::instrument(skip(self))] + fn room_joined_count(&self, room_id: &RoomId) -> Result> { + self.roomid_joinedcount + .get(room_id.as_bytes())? + .map(|b| { + utils::u64_from_bytes(&b) + .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) + }) + .transpose() + } + + #[tracing::instrument(skip(self))] + fn room_invited_count(&self, room_id: &RoomId) -> Result> { + self.roomid_invitedcount + .get(room_id.as_bytes())? + .map(|b| { + utils::u64_from_bytes(&b) + .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) + }) + .transpose() + } + + /// Returns an iterator over all User IDs who ever joined a room. + #[tracing::instrument(skip(self))] + fn room_useroncejoined<'a>( + &'a self, + room_id: &RoomId, + ) -> Box> + 'a> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + Box::new( + self.roomuseroncejoinedids + .scan_prefix(prefix) + .map(|(key, _)| { + UserId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database( + "User ID in room_useroncejoined is invalid unicode.", + ) + })?, + ) + .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) + }), + ) + } + + /// Returns an iterator over all invited members of a room. + #[tracing::instrument(skip(self))] + fn room_members_invited<'a>( + &'a self, + room_id: &RoomId, + ) -> Box> + 'a> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + Box::new( + self.roomuserid_invitecount + .scan_prefix(prefix) + .map(|(key, _)| { + UserId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("User ID in roomuserid_invited is invalid unicode.") + })?, + ) + .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) + }), + ) + } + + #[tracing::instrument(skip(self))] + fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { + let mut key = room_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(user_id.as_bytes()); + + self.roomuserid_invitecount + .get(&key)? 
+ .map_or(Ok(None), |bytes| { + Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Invalid invitecount in db.") + })?)) + }) + } + + #[tracing::instrument(skip(self))] + fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { + let mut key = room_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(user_id.as_bytes()); + + self.roomuserid_leftcount + .get(&key)? + .map(|bytes| { + utils::u64_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Invalid leftcount in db.")) + }) + .transpose() + } + + /// Returns an iterator over all rooms this user joined. + #[tracing::instrument(skip(self))] + fn rooms_joined<'a>( + &'a self, + user_id: &UserId, + ) -> Box> + 'a> { + Box::new( + self.userroomid_joined + .scan_prefix(user_id.as_bytes().to_vec()) + .map(|(key, _)| { + RoomId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("Room ID in userroomid_joined is invalid unicode.") + })?, + ) + .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) + }), + ) + } + + /// Returns an iterator over all rooms a user was invited to. + #[tracing::instrument(skip(self))] + fn rooms_invited<'a>( + &'a self, + user_id: &UserId, + ) -> Box>)>> + 'a> { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xff); + + Box::new( + self.userroomid_invitestate + .scan_prefix(prefix) + .map(|(key, state)| { + let room_id = RoomId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("Room ID in userroomid_invited is invalid unicode.") + })?, + ) + .map_err(|_| { + Error::bad_database("Room ID in userroomid_invited is invalid.") + })?; + + let state = serde_json::from_slice(&state).map_err(|_| { + Error::bad_database("Invalid state in userroomid_invitestate.") + })?; + + Ok((room_id, state)) + }), + ) + } + + #[tracing::instrument(skip(self))] + fn invite_state( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result>>> { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(room_id.as_bytes()); + + self.userroomid_invitestate + .get(&key)? + .map(|state| { + let state = serde_json::from_slice(&state) + .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; + + Ok(state) + }) + .transpose() + } + + #[tracing::instrument(skip(self))] + fn left_state( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result>>> { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(room_id.as_bytes()); + + self.userroomid_leftstate + .get(&key)? + .map(|state| { + let state = serde_json::from_slice(&state) + .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; + + Ok(state) + }) + .transpose() + } + + /// Returns an iterator over all rooms a user left. 
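// Sketch, illustrative only: the value kept in `userroomid_invitestate` and
// `userroomid_leftstate` is simply the JSON of a `Vec<Raw<AnyStrippedStateEvent>>`,
// with an empty vec when no stripped state is known (see `mark_as_left` above).
// A hypothetical encoder for that value:
fn encode_stripped_state(
    state: &[ruma::serde::Raw<ruma::events::AnyStrippedStateEvent>],
) -> Vec<u8> {
    serde_json::to_vec(state).expect("stripped state events are valid JSON")
}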
+ #[tracing::instrument(skip(self))] + fn rooms_left<'a>( + &'a self, + user_id: &UserId, + ) -> Box>)>> + 'a> { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xff); + + Box::new( + self.userroomid_leftstate + .scan_prefix(prefix) + .map(|(key, state)| { + let room_id = RoomId::parse( + utils::string_from_bytes( + key.rsplit(|&b| b == 0xff) + .next() + .expect("rsplit always returns an element"), + ) + .map_err(|_| { + Error::bad_database("Room ID in userroomid_invited is invalid unicode.") + })?, + ) + .map_err(|_| { + Error::bad_database("Room ID in userroomid_invited is invalid.") + })?; + + let state = serde_json::from_slice(&state).map_err(|_| { + Error::bad_database("Invalid state in userroomid_leftstate.") + })?; + + Ok((room_id, state)) + }), + ) + } + + #[tracing::instrument(skip(self))] + fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + + Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) + } + + #[tracing::instrument(skip(self))] + fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + + Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) + } + + #[tracing::instrument(skip(self))] + fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + + Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) + } + + #[tracing::instrument(skip(self))] + fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + + Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) + } +} diff --git a/src/database/key_value/rooms/state_compressor.rs b/src/database/key_value/rooms/state_compressor.rs new file mode 100644 index 00000000..d0a9be48 --- /dev/null +++ b/src/database/key_value/rooms/state_compressor.rs @@ -0,0 +1,61 @@ +use std::{collections::HashSet, mem::size_of}; + +use crate::{ + database::KeyValueDatabase, + service::{self, rooms::state_compressor::data::StateDiff}, + utils, Error, Result, +}; + +impl service::rooms::state_compressor::Data for KeyValueDatabase { + fn get_statediff(&self, shortstatehash: u64) -> Result { + let value = self + .shortstatehash_statediff + .get(&shortstatehash.to_be_bytes())? 
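// Sketch, illustrative only: the value fetched here is laid out as
//   parent_shortstatehash (8 bytes, 0 meaning "no parent")
//   ++ added entries       (16 bytes each: shortstatekey ++ shorteventid)
//   ++ 0u64 sentinel       (only written when something was removed)
//   ++ removed entries     (16 bytes each)
// For a diff with parent 7, one added entry (3, 9) and nothing removed, the stored
// bytes would therefore be:
fn example_statediff_value() -> Vec<u8> {
    let mut value = 7u64.to_be_bytes().to_vec();
    value.extend_from_slice(&3u64.to_be_bytes());
    value.extend_from_slice(&9u64.to_be_bytes());
    value
}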
+ .ok_or_else(|| Error::bad_database("State hash does not exist"))?; + let parent = + utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); + let parent = if parent != 0 { Some(parent) } else { None }; + + let mut add_mode = true; + let mut added = HashSet::new(); + let mut removed = HashSet::new(); + + let mut i = size_of::(); + while let Some(v) = value.get(i..i + 2 * size_of::()) { + if add_mode && v.starts_with(&0_u64.to_be_bytes()) { + add_mode = false; + i += size_of::(); + continue; + } + if add_mode { + added.insert(v.try_into().expect("we checked the size above")); + } else { + removed.insert(v.try_into().expect("we checked the size above")); + } + i += 2 * size_of::(); + } + + Ok(StateDiff { + parent, + added, + removed, + }) + } + + fn save_statediff(&self, shortstatehash: u64, diff: StateDiff) -> Result<()> { + let mut value = diff.parent.unwrap_or(0).to_be_bytes().to_vec(); + for new in &diff.added { + value.extend_from_slice(&new[..]); + } + + if !diff.removed.is_empty() { + value.extend_from_slice(&0_u64.to_be_bytes()); + for removed in &diff.removed { + value.extend_from_slice(&removed[..]); + } + } + + self.shortstatehash_statediff + .insert(&shortstatehash.to_be_bytes(), &value) + } +} diff --git a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs new file mode 100644 index 00000000..336317da --- /dev/null +++ b/src/database/key_value/rooms/timeline.rs @@ -0,0 +1,370 @@ +use std::{collections::hash_map, mem::size_of, sync::Arc}; + +use ruma::{ + api::client::error::ErrorKind, CanonicalJsonObject, EventId, OwnedUserId, RoomId, UserId, +}; +use tracing::error; + +use crate::{database::KeyValueDatabase, service, services, utils, Error, PduEvent, Result}; + +impl service::rooms::timeline::Data for KeyValueDatabase { + fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { + let prefix = services() + .rooms + .short + .get_shortroomid(room_id)? + .expect("room exists") + .to_be_bytes() + .to_vec(); + + // Look for PDUs in that room. + self.pduid_pdu + .iter_from(&prefix, false) + .filter(|(k, _)| k.starts_with(&prefix)) + .map(|(_, pdu)| { + serde_json::from_slice(&pdu) + .map_err(|_| Error::bad_database("Invalid first PDU in db.")) + .map(Arc::new) + }) + .next() + .transpose() + } + + fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { + match self + .lasttimelinecount_cache + .lock() + .unwrap() + .entry(room_id.to_owned()) + { + hash_map::Entry::Vacant(v) => { + if let Some(last_count) = self + .pdus_until(sender_user, room_id, u64::MAX)? + .filter_map(|r| { + // Filter out buggy events + if r.is_err() { + error!("Bad pdu in pdus_since: {:?}", r); + } + r.ok() + }) + .map(|(pduid, _)| self.pdu_count(&pduid)) + .next() + { + Ok(*v.insert(last_count?)) + } else { + Ok(0) + } + } + hash_map::Entry::Occupied(o) => Ok(*o.get()), + } + } + + /// Returns the `count` of this pdu's id. + fn get_pdu_count(&self, event_id: &EventId) -> Result> { + self.eventid_pduid + .get(event_id.as_bytes())? + .map(|pdu_id| self.pdu_count(&pdu_id)) + .transpose() + } + + /// Returns the json of a pdu. + fn get_pdu_json(&self, event_id: &EventId) -> Result> { + self.eventid_pduid + .get(event_id.as_bytes())? + .map_or_else( + || self.eventid_outlierpdu.get(event_id.as_bytes()), + |pduid| { + Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { + Error::bad_database("Invalid pduid in eventid_pduid.") + })?)) + }, + )? 
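// Sketch, illustrative only: the chain above resolves an event in two hops,
// `eventid_pduid` (event id -> timeline position) then `pduid_pdu` (position -> JSON),
// and falls back to `eventid_outlierpdu` for events that were received but never
// appended to a timeline. The same flow over plain maps (hypothetical; note the real
// code treats a dangling pdu id as a database error instead of returning None):
fn lookup_pdu_json<'a>(
    eventid_pduid: &std::collections::HashMap<String, Vec<u8>>,
    pduid_pdu: &'a std::collections::HashMap<Vec<u8>, Vec<u8>>,
    eventid_outlierpdu: &'a std::collections::HashMap<String, Vec<u8>>,
    event_id: &str,
) -> Option<&'a Vec<u8>> {
    match eventid_pduid.get(event_id) {
        Some(pdu_id) => pduid_pdu.get(pdu_id),
        None => eventid_outlierpdu.get(event_id),
    }
}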
+ .map(|pdu| { + serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) + }) + .transpose() + } + + /// Returns the json of a pdu. + fn get_non_outlier_pdu_json(&self, event_id: &EventId) -> Result> { + self.eventid_pduid + .get(event_id.as_bytes())? + .map(|pduid| { + self.pduid_pdu + .get(&pduid)? + .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) + }) + .transpose()? + .map(|pdu| { + serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) + }) + .transpose() + } + + /// Returns the pdu's id. + fn get_pdu_id(&self, event_id: &EventId) -> Result>> { + self.eventid_pduid.get(event_id.as_bytes()) + } + + /// Returns the pdu. + /// + /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. + fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { + self.eventid_pduid + .get(event_id.as_bytes())? + .map(|pduid| { + self.pduid_pdu + .get(&pduid)? + .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) + }) + .transpose()? + .map(|pdu| { + serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) + }) + .transpose() + } + + /// Returns the pdu. + /// + /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. + fn get_pdu(&self, event_id: &EventId) -> Result>> { + if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { + return Ok(Some(Arc::clone(p))); + } + + if let Some(pdu) = self + .eventid_pduid + .get(event_id.as_bytes())? + .map_or_else( + || self.eventid_outlierpdu.get(event_id.as_bytes()), + |pduid| { + Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { + Error::bad_database("Invalid pduid in eventid_pduid.") + })?)) + }, + )? + .map(|pdu| { + serde_json::from_slice(&pdu) + .map_err(|_| Error::bad_database("Invalid PDU in db.")) + .map(Arc::new) + }) + .transpose()? + { + self.pdu_cache + .lock() + .unwrap() + .insert(event_id.to_owned(), Arc::clone(&pdu)); + Ok(Some(pdu)) + } else { + Ok(None) + } + } + + /// Returns the pdu. + /// + /// This does __NOT__ check the outliers `Tree`. + fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { + self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { + Ok(Some( + serde_json::from_slice(&pdu) + .map_err(|_| Error::bad_database("Invalid PDU in db."))?, + )) + }) + } + + /// Returns the pdu as a `BTreeMap`. + fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { + self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { + Ok(Some( + serde_json::from_slice(&pdu) + .map_err(|_| Error::bad_database("Invalid PDU in db."))?, + )) + }) + } + + /// Returns the `count` of this pdu's id. + fn pdu_count(&self, pdu_id: &[u8]) -> Result { + utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) + .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) + } + + fn append_pdu( + &self, + pdu_id: &[u8], + pdu: &PduEvent, + json: &CanonicalJsonObject, + count: u64, + ) -> Result<()> { + self.pduid_pdu.insert( + pdu_id, + &serde_json::to_vec(json).expect("CanonicalJsonObject is always a valid"), + )?; + + self.lasttimelinecount_cache + .lock() + .unwrap() + .insert(pdu.room_id.clone(), count); + + self.eventid_pduid.insert(pdu.event_id.as_bytes(), pdu_id)?; + self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; + + Ok(()) + } + + /// Removes a pdu and creates a new one with the same id. 
+ fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { + if self.pduid_pdu.get(pdu_id)?.is_some() { + self.pduid_pdu.insert( + pdu_id, + &serde_json::to_vec(pdu).expect("CanonicalJsonObject is always a valid"), + )?; + Ok(()) + } else { + Err(Error::BadRequest( + ErrorKind::NotFound, + "PDU does not exist.", + )) + } + } + + /// Returns an iterator over all events in a room that happened after the event with id `since` + /// in chronological order. + fn pdus_since<'a>( + &'a self, + user_id: &UserId, + room_id: &RoomId, + since: u64, + ) -> Result, PduEvent)>> + 'a>> { + let prefix = services() + .rooms + .short + .get_shortroomid(room_id)? + .expect("room exists") + .to_be_bytes() + .to_vec(); + + // Skip the first pdu if it's exactly at since, because we sent that last time + let mut first_pdu_id = prefix.clone(); + first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); + + let user_id = user_id.to_owned(); + + Ok(Box::new( + self.pduid_pdu + .iter_from(&first_pdu_id, false) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .map(move |(pdu_id, v)| { + let mut pdu = serde_json::from_slice::(&v) + .map_err(|_| Error::bad_database("PDU in db is invalid."))?; + if pdu.sender != user_id { + pdu.remove_transaction_id()?; + } + Ok((pdu_id, pdu)) + }), + )) + } + + /// Returns an iterator over all events and their tokens in a room that happened before the + /// event with id `until` in reverse-chronological order. + fn pdus_until<'a>( + &'a self, + user_id: &UserId, + room_id: &RoomId, + until: u64, + ) -> Result, PduEvent)>> + 'a>> { + // Create the first part of the full pdu id + let prefix = services() + .rooms + .short + .get_shortroomid(room_id)? + .expect("room exists") + .to_be_bytes() + .to_vec(); + + let mut current = prefix.clone(); + current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` + + let current: &[u8] = ¤t; + + let user_id = user_id.to_owned(); + + Ok(Box::new( + self.pduid_pdu + .iter_from(current, true) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .map(move |(pdu_id, v)| { + let mut pdu = serde_json::from_slice::(&v) + .map_err(|_| Error::bad_database("PDU in db is invalid."))?; + if pdu.sender != user_id { + pdu.remove_transaction_id()?; + } + Ok((pdu_id, pdu)) + }), + )) + } + + fn pdus_after<'a>( + &'a self, + user_id: &UserId, + room_id: &RoomId, + from: u64, + ) -> Result, PduEvent)>> + 'a>> { + // Create the first part of the full pdu id + let prefix = services() + .rooms + .short + .get_shortroomid(room_id)? 
+ .expect("room exists") + .to_be_bytes() + .to_vec(); + + let mut current = prefix.clone(); + current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event + + let current: &[u8] = ¤t; + + let user_id = user_id.to_owned(); + + Ok(Box::new( + self.pduid_pdu + .iter_from(current, false) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .map(move |(pdu_id, v)| { + let mut pdu = serde_json::from_slice::(&v) + .map_err(|_| Error::bad_database("PDU in db is invalid."))?; + if pdu.sender != user_id { + pdu.remove_transaction_id()?; + } + Ok((pdu_id, pdu)) + }), + )) + } + + fn increment_notification_counts( + &self, + room_id: &RoomId, + notifies: Vec, + highlights: Vec, + ) -> Result<()> { + let mut notifies_batch = Vec::new(); + let mut highlights_batch = Vec::new(); + for user in notifies { + let mut userroom_id = user.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + notifies_batch.push(userroom_id); + } + for user in highlights { + let mut userroom_id = user.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + highlights_batch.push(userroom_id); + } + + self.userroomid_notificationcount + .increment_batch(&mut notifies_batch.into_iter())?; + self.userroomid_highlightcount + .increment_batch(&mut highlights_batch.into_iter())?; + Ok(()) + } +} diff --git a/src/database/key_value/rooms/user.rs b/src/database/key_value/rooms/user.rs new file mode 100644 index 00000000..3d8d1c85 --- /dev/null +++ b/src/database/key_value/rooms/user.rs @@ -0,0 +1,124 @@ +use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId}; + +use crate::{database::KeyValueDatabase, service, services, utils, Error, Result}; + +impl service::rooms::user::Data for KeyValueDatabase { + fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + + self.userroomid_notificationcount + .insert(&userroom_id, &0_u64.to_be_bytes())?; + self.userroomid_highlightcount + .insert(&userroom_id, &0_u64.to_be_bytes())?; + + Ok(()) + } + + fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + + self.userroomid_notificationcount + .get(&userroom_id)? + .map(|bytes| { + utils::u64_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Invalid notification count in db.")) + }) + .unwrap_or(Ok(0)) + } + + fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { + let mut userroom_id = user_id.as_bytes().to_vec(); + userroom_id.push(0xff); + userroom_id.extend_from_slice(room_id.as_bytes()); + + self.userroomid_highlightcount + .get(&userroom_id)? + .map(|bytes| { + utils::u64_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Invalid highlight count in db.")) + }) + .unwrap_or(Ok(0)) + } + + fn associate_token_shortstatehash( + &self, + room_id: &RoomId, + token: u64, + shortstatehash: u64, + ) -> Result<()> { + let shortroomid = services() + .rooms + .short + .get_shortroomid(room_id)? 
+ .expect("room exists"); + + let mut key = shortroomid.to_be_bytes().to_vec(); + key.extend_from_slice(&token.to_be_bytes()); + + self.roomsynctoken_shortstatehash + .insert(&key, &shortstatehash.to_be_bytes()) + } + + fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { + let shortroomid = services() + .rooms + .short + .get_shortroomid(room_id)? + .expect("room exists"); + + let mut key = shortroomid.to_be_bytes().to_vec(); + key.extend_from_slice(&token.to_be_bytes()); + + self.roomsynctoken_shortstatehash + .get(&key)? + .map(|bytes| { + utils::u64_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") + }) + }) + .transpose() + } + + fn get_shared_rooms<'a>( + &'a self, + users: Vec, + ) -> Result> + 'a>> { + let iterators = users.into_iter().map(move |user_id| { + let mut prefix = user_id.as_bytes().to_vec(); + prefix.push(0xff); + + self.userroomid_joined + .scan_prefix(prefix) + .map(|(key, _)| { + let roomid_index = key + .iter() + .enumerate() + .find(|(_, &b)| b == 0xff) + .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? + .0 + + 1; // +1 because the room id starts AFTER the separator + + let room_id = key[roomid_index..].to_vec(); + + Ok::<_, Error>(room_id) + }) + .filter_map(|r| r.ok()) + }); + + // We use the default compare function because keys are sorted correctly (not reversed) + Ok(Box::new( + utils::common_elements(iterators, Ord::cmp) + .expect("users is not empty") + .map(|bytes| { + RoomId::parse(utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database("Invalid RoomId bytes in userroomid_joined") + })?) + .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) + }), + )) + } +} diff --git a/src/database/key_value/sending.rs b/src/database/key_value/sending.rs new file mode 100644 index 00000000..3fc3e042 --- /dev/null +++ b/src/database/key_value/sending.rs @@ -0,0 +1,205 @@ +use ruma::{ServerName, UserId}; + +use crate::{ + database::KeyValueDatabase, + service::{ + self, + sending::{OutgoingKind, SendingEventType}, + }, + services, utils, Error, Result, +}; + +impl service::sending::Data for KeyValueDatabase { + fn active_requests<'a>( + &'a self, + ) -> Box, OutgoingKind, SendingEventType)>> + 'a> { + Box::new( + self.servercurrentevent_data + .iter() + .map(|(key, v)| parse_servercurrentevent(&key, v).map(|(k, e)| (key, k, e))), + ) + } + + fn active_requests_for<'a>( + &'a self, + outgoing_kind: &OutgoingKind, + ) -> Box, SendingEventType)>> + 'a> { + let prefix = outgoing_kind.get_prefix(); + Box::new( + self.servercurrentevent_data + .scan_prefix(prefix) + .map(|(key, v)| parse_servercurrentevent(&key, v).map(|(_, e)| (key, e))), + ) + } + + fn delete_active_request(&self, key: Vec) -> Result<()> { + self.servercurrentevent_data.remove(&key) + } + + fn delete_all_active_requests_for(&self, outgoing_kind: &OutgoingKind) -> Result<()> { + let prefix = outgoing_kind.get_prefix(); + for (key, _) in self.servercurrentevent_data.scan_prefix(prefix) { + self.servercurrentevent_data.remove(&key)?; + } + + Ok(()) + } + + fn delete_all_requests_for(&self, outgoing_kind: &OutgoingKind) -> Result<()> { + let prefix = outgoing_kind.get_prefix(); + for (key, _) in self.servercurrentevent_data.scan_prefix(prefix.clone()) { + self.servercurrentevent_data.remove(&key).unwrap(); + } + + for (key, _) in self.servernameevent_data.scan_prefix(prefix) { + self.servernameevent_data.remove(&key).unwrap(); + } + + Ok(()) + } + + fn queue_requests( 
+ &self, + requests: &[(&OutgoingKind, SendingEventType)], + ) -> Result>> { + let mut batch = Vec::new(); + let mut keys = Vec::new(); + for (outgoing_kind, event) in requests { + let mut key = outgoing_kind.get_prefix(); + if let SendingEventType::Pdu(value) = &event { + key.extend_from_slice(value) + } else { + key.extend_from_slice(&services().globals.next_count()?.to_be_bytes()) + } + let value = if let SendingEventType::Edu(value) = &event { + &**value + } else { + &[] + }; + batch.push((key.clone(), value.to_owned())); + keys.push(key); + } + self.servernameevent_data + .insert_batch(&mut batch.into_iter())?; + Ok(keys) + } + + fn queued_requests<'a>( + &'a self, + outgoing_kind: &OutgoingKind, + ) -> Box)>> + 'a> { + let prefix = outgoing_kind.get_prefix(); + return Box::new( + self.servernameevent_data + .scan_prefix(prefix) + .map(|(k, v)| parse_servercurrentevent(&k, v).map(|(_, ev)| (ev, k))), + ); + } + + fn mark_as_active(&self, events: &[(SendingEventType, Vec)]) -> Result<()> { + for (e, key) in events { + let value = if let SendingEventType::Edu(value) = &e { + &**value + } else { + &[] + }; + self.servercurrentevent_data.insert(key, value)?; + self.servernameevent_data.remove(key)?; + } + + Ok(()) + } + + fn set_latest_educount(&self, server_name: &ServerName, last_count: u64) -> Result<()> { + self.servername_educount + .insert(server_name.as_bytes(), &last_count.to_be_bytes()) + } + + fn get_latest_educount(&self, server_name: &ServerName) -> Result { + self.servername_educount + .get(server_name.as_bytes())? + .map_or(Ok(0), |bytes| { + utils::u64_from_bytes(&bytes) + .map_err(|_| Error::bad_database("Invalid u64 in servername_educount.")) + }) + } +} + +#[tracing::instrument(skip(key))] +fn parse_servercurrentevent( + key: &[u8], + value: Vec, +) -> Result<(OutgoingKind, SendingEventType)> { + // Appservices start with a plus + Ok::<_, Error>(if key.starts_with(b"+") { + let mut parts = key[1..].splitn(2, |&b| b == 0xff); + + let server = parts.next().expect("splitn always returns one element"); + let event = parts + .next() + .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; + + let server = utils::string_from_bytes(server).map_err(|_| { + Error::bad_database("Invalid server bytes in server_currenttransaction") + })?; + + ( + OutgoingKind::Appservice(server), + if value.is_empty() { + SendingEventType::Pdu(event.to_vec()) + } else { + SendingEventType::Edu(value) + }, + ) + } else if key.starts_with(b"$") { + let mut parts = key[1..].splitn(3, |&b| b == 0xff); + + let user = parts.next().expect("splitn always returns one element"); + let user_string = utils::string_from_bytes(user) + .map_err(|_| Error::bad_database("Invalid user string in servercurrentevent"))?; + let user_id = UserId::parse(user_string) + .map_err(|_| Error::bad_database("Invalid user id in servercurrentevent"))?; + + let pushkey = parts + .next() + .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; + let pushkey_string = utils::string_from_bytes(pushkey) + .map_err(|_| Error::bad_database("Invalid pushkey in servercurrentevent"))?; + + let event = parts + .next() + .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; + + ( + OutgoingKind::Push(user_id, pushkey_string), + if value.is_empty() { + SendingEventType::Pdu(event.to_vec()) + } else { + // I'm pretty sure this should never be called + SendingEventType::Edu(value) + }, + ) + } else { + let mut parts = key.splitn(2, |&b| b == 0xff); + + let server = 
parts.next().expect("splitn always returns one element"); + let event = parts + .next() + .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; + + let server = utils::string_from_bytes(server).map_err(|_| { + Error::bad_database("Invalid server bytes in server_currenttransaction") + })?; + + ( + OutgoingKind::Normal(ServerName::parse(server).map_err(|_| { + Error::bad_database("Invalid server string in server_currenttransaction") + })?), + if value.is_empty() { + SendingEventType::Pdu(event.to_vec()) + } else { + SendingEventType::Edu(value) + }, + ) + }) +} diff --git a/src/database/transaction_ids.rs b/src/database/key_value/transaction_ids.rs similarity index 71% rename from src/database/transaction_ids.rs rename to src/database/key_value/transaction_ids.rs index f3467572..2ea6ad4a 100644 --- a/src/database/transaction_ids.rs +++ b/src/database/key_value/transaction_ids.rs @@ -1,20 +1,13 @@ -use std::sync::Arc; +use ruma::{DeviceId, TransactionId, UserId}; -use crate::Result; -use ruma::{DeviceId, UserId}; +use crate::{database::KeyValueDatabase, service, Result}; -use super::abstraction::Tree; - -pub struct TransactionIds { - pub(super) userdevicetxnid_response: Arc, // Response can be empty (/sendToDevice) or the event id (/send) -} - -impl TransactionIds { - pub fn add_txnid( +impl service::transaction_ids::Data for KeyValueDatabase { + fn add_txnid( &self, user_id: &UserId, device_id: Option<&DeviceId>, - txn_id: &str, + txn_id: &TransactionId, data: &[u8], ) -> Result<()> { let mut key = user_id.as_bytes().to_vec(); @@ -28,11 +21,11 @@ impl TransactionIds { Ok(()) } - pub fn existing_txnid( + fn existing_txnid( &self, user_id: &UserId, device_id: Option<&DeviceId>, - txn_id: &str, + txn_id: &TransactionId, ) -> Result>> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); diff --git a/src/database/key_value/uiaa.rs b/src/database/key_value/uiaa.rs new file mode 100644 index 00000000..5fd91b07 --- /dev/null +++ b/src/database/key_value/uiaa.rs @@ -0,0 +1,89 @@ +use ruma::{ + api::client::{error::ErrorKind, uiaa::UiaaInfo}, + CanonicalJsonValue, DeviceId, UserId, +}; + +use crate::{database::KeyValueDatabase, service, Error, Result}; + +impl service::uiaa::Data for KeyValueDatabase { + fn set_uiaa_request( + &self, + user_id: &UserId, + device_id: &DeviceId, + session: &str, + request: &CanonicalJsonValue, + ) -> Result<()> { + self.userdevicesessionid_uiaarequest + .write() + .unwrap() + .insert( + (user_id.to_owned(), device_id.to_owned(), session.to_owned()), + request.to_owned(), + ); + + Ok(()) + } + + fn get_uiaa_request( + &self, + user_id: &UserId, + device_id: &DeviceId, + session: &str, + ) -> Option { + self.userdevicesessionid_uiaarequest + .read() + .unwrap() + .get(&(user_id.to_owned(), device_id.to_owned(), session.to_owned())) + .map(|j| j.to_owned()) + } + + fn update_uiaa_session( + &self, + user_id: &UserId, + device_id: &DeviceId, + session: &str, + uiaainfo: Option<&UiaaInfo>, + ) -> Result<()> { + let mut userdevicesessionid = user_id.as_bytes().to_vec(); + userdevicesessionid.push(0xff); + userdevicesessionid.extend_from_slice(device_id.as_bytes()); + userdevicesessionid.push(0xff); + userdevicesessionid.extend_from_slice(session.as_bytes()); + + if let Some(uiaainfo) = uiaainfo { + self.userdevicesessionid_uiaainfo.insert( + &userdevicesessionid, + &serde_json::to_vec(&uiaainfo).expect("UiaaInfo::to_vec always works"), + )?; + } else { + self.userdevicesessionid_uiaainfo + .remove(&userdevicesessionid)?; + } + + Ok(()) + } + + 
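A minimal standalone sketch of the 0xff-delimited key convention these key-value implementations share (the helper names below are illustrative, not taken from the codebase): components are joined with a 0xff byte, which can never occur in valid UTF-8, so a key can be split back into its parts unambiguously.

fn make_composite_key(parts: &[&[u8]]) -> Vec<u8> {
    let mut key = Vec::new();
    for (i, part) in parts.iter().enumerate() {
        if i != 0 {
            key.push(0xff); // separator; never appears inside UTF-8 encoded IDs
        }
        key.extend_from_slice(part);
    }
    key
}

fn split_composite_key<'a>(key: &'a [u8]) -> impl Iterator<Item = &'a [u8]> + 'a {
    key.split(|&b| b == 0xff)
}

// For example, make_composite_key(&[user_id.as_bytes(), device_id.as_bytes(), session.as_bytes()])
// mirrors the `userdevicesessionid` key built in `update_uiaa_session` above, and splitting on
// 0xff is how `parse_servercurrentevent` recovers the server, push key, and event parts.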
fn get_uiaa_session( + &self, + user_id: &UserId, + device_id: &DeviceId, + session: &str, + ) -> Result { + let mut userdevicesessionid = user_id.as_bytes().to_vec(); + userdevicesessionid.push(0xff); + userdevicesessionid.extend_from_slice(device_id.as_bytes()); + userdevicesessionid.push(0xff); + userdevicesessionid.extend_from_slice(session.as_bytes()); + + serde_json::from_slice( + &self + .userdevicesessionid_uiaainfo + .get(&userdevicesessionid)? + .ok_or(Error::BadRequest( + ErrorKind::Forbidden, + "UIAA session does not exist.", + ))?, + ) + .map_err(|_| Error::bad_database("UiaaInfo in userdeviceid_uiaainfo is invalid.")) + } +} diff --git a/src/database/users.rs b/src/database/key_value/users.rs similarity index 62% rename from src/database/users.rs rename to src/database/key_value/users.rs index d0da0714..cd5a5352 100644 --- a/src/database/users.rs +++ b/src/database/key_value/users.rs @@ -1,49 +1,29 @@ -use crate::{utils, Error, Result}; +use std::{collections::BTreeMap, mem::size_of}; + use ruma::{ - api::client::{error::ErrorKind, r0::device::Device}, + api::client::{device::Device, error::ErrorKind, filter::IncomingFilterDefinition}, encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, - events::{AnyToDeviceEvent, EventType}, - identifiers::MxcUri, + events::{AnyToDeviceEvent, StateEventType}, serde::Raw, - DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, RoomAliasId, UInt, - UserId, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, MilliSecondsSinceUnixEpoch, OwnedDeviceId, + OwnedDeviceKeyId, OwnedMxcUri, OwnedUserId, UInt, UserId, }; -use std::{collections::BTreeMap, convert::TryFrom, mem, sync::Arc}; use tracing::warn; -use super::abstraction::Tree; - -pub struct Users { - pub(super) userid_password: Arc, - pub(super) userid_displayname: Arc, - pub(super) userid_avatarurl: Arc, - pub(super) userid_blurhash: Arc, - pub(super) userdeviceid_token: Arc, - pub(super) userdeviceid_metadata: Arc, // This is also used to check if a device exists - pub(super) userid_devicelistversion: Arc, // DevicelistVersion = u64 - pub(super) token_userdeviceid: Arc, - - pub(super) onetimekeyid_onetimekeys: Arc, // OneTimeKeyId = UserId + DeviceKeyId - pub(super) userid_lastonetimekeyupdate: Arc, // LastOneTimeKeyUpdate = Count - pub(super) keychangeid_userid: Arc, // KeyChangeId = UserId/RoomId + Count - pub(super) keyid_key: Arc, // KeyId = UserId + KeyId (depends on key type) - pub(super) userid_masterkeyid: Arc, - pub(super) userid_selfsigningkeyid: Arc, - pub(super) userid_usersigningkeyid: Arc, - - pub(super) todeviceid_events: Arc, // ToDeviceId = UserId + DeviceId + Count -} +use crate::{ + database::KeyValueDatabase, + service::{self, users::clean_signatures}, + services, utils, Error, Result, +}; -impl Users { +impl service::users::Data for KeyValueDatabase { /// Check if a user has an account on this homeserver. - #[tracing::instrument(skip(self, user_id))] - pub fn exists(&self, user_id: &UserId) -> Result { + fn exists(&self, user_id: &UserId) -> Result { Ok(self.userid_password.get(user_id.as_bytes())?.is_some()) } /// Check if account is deactivated - #[tracing::instrument(skip(self, user_id))] - pub fn is_deactivated(&self, user_id: &UserId) -> Result { + fn is_deactivated(&self, user_id: &UserId) -> Result { Ok(self .userid_password .get(user_id.as_bytes())? 
@@ -54,38 +34,13 @@ impl Users { .is_empty()) } - /// Check if a user is an admin - #[tracing::instrument(skip(self, user_id, rooms, globals))] - pub fn is_admin( - &self, - user_id: &UserId, - rooms: &super::rooms::Rooms, - globals: &super::globals::Globals, - ) -> Result { - let admin_room_alias_id = - RoomAliasId::try_from(format!("#admins:{}", globals.server_name())) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; - let admin_room_id = rooms.id_from_alias(&admin_room_alias_id)?.unwrap(); - - Ok(rooms.is_joined(user_id, &admin_room_id)?) - } - - /// Create a new user account on this homeserver. - #[tracing::instrument(skip(self, user_id, password))] - pub fn create(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { - self.set_password(user_id, password)?; - Ok(()) - } - /// Returns the number of users registered on this server. - #[tracing::instrument(skip(self))] - pub fn count(&self) -> Result { + fn count(&self) -> Result { Ok(self.userid_password.iter().count()) } /// Find out which user an access token belongs to. - #[tracing::instrument(skip(self, token))] - pub fn find_from_token(&self, token: &str) -> Result> { + fn find_from_token(&self, token: &str) -> Result> { self.token_userdeviceid .get(token.as_bytes())? .map_or(Ok(None), |bytes| { @@ -98,7 +53,7 @@ impl Users { })?; Ok(Some(( - UserId::try_from(utils::string_from_bytes(user_bytes).map_err(|_| { + UserId::parse(utils::string_from_bytes(user_bytes).map_err(|_| { Error::bad_database("User ID in token_userdeviceid is invalid unicode.") })?) .map_err(|_| { @@ -112,19 +67,29 @@ impl Users { } /// Returns an iterator over all users on this homeserver. - #[tracing::instrument(skip(self))] - pub fn iter(&self) -> impl Iterator> + '_ { - self.userid_password.iter().map(|(bytes, _)| { - UserId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { + fn iter<'a>(&'a self) -> Box> + 'a> { + Box::new(self.userid_password.iter().map(|(bytes, _)| { + UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { Error::bad_database("User ID in userid_password is invalid unicode.") })?) .map_err(|_| Error::bad_database("User ID in userid_password is invalid.")) - }) + })) + } + + /// Returns a list of local users as list of usernames. + /// + /// A user account is considered `local` if the length of it's password is greater then zero. + fn list_local_users(&self) -> Result> { + let users: Vec = self + .userid_password + .iter() + .filter_map(|(username, pw)| get_username_with_valid_password(&username, &pw)) + .collect(); + Ok(users) } /// Returns the password hash for the given user. - #[tracing::instrument(skip(self, user_id))] - pub fn password_hash(&self, user_id: &UserId) -> Result> { + fn password_hash(&self, user_id: &UserId) -> Result> { self.userid_password .get(user_id.as_bytes())? .map_or(Ok(None), |bytes| { @@ -135,10 +100,9 @@ impl Users { } /// Hash and set the user's password to the Argon2 hash - #[tracing::instrument(skip(self, user_id, password))] - pub fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { + fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { if let Some(password) = password { - if let Ok(hash) = utils::calculate_hash(password) { + if let Ok(hash) = utils::calculate_password_hash(password) { self.userid_password .insert(user_id.as_bytes(), hash.as_bytes())?; Ok(()) @@ -155,8 +119,7 @@ impl Users { } /// Returns the displayname of a user on this homeserver. 
- #[tracing::instrument(skip(self, user_id))] - pub fn displayname(&self, user_id: &UserId) -> Result> { + fn displayname(&self, user_id: &UserId) -> Result> { self.userid_displayname .get(user_id.as_bytes())? .map_or(Ok(None), |bytes| { @@ -167,8 +130,7 @@ impl Users { } /// Sets a new displayname or removes it if displayname is None. You still need to nofify all rooms of this change. - #[tracing::instrument(skip(self, user_id, displayname))] - pub fn set_displayname(&self, user_id: &UserId, displayname: Option) -> Result<()> { + fn set_displayname(&self, user_id: &UserId, displayname: Option) -> Result<()> { if let Some(displayname) = displayname { self.userid_displayname .insert(user_id.as_bytes(), displayname.as_bytes())?; @@ -180,21 +142,20 @@ impl Users { } /// Get the avatar_url of a user. - #[tracing::instrument(skip(self, user_id))] - pub fn avatar_url(&self, user_id: &UserId) -> Result> { + fn avatar_url(&self, user_id: &UserId) -> Result> { self.userid_avatarurl .get(user_id.as_bytes())? .map(|bytes| { let s = utils::string_from_bytes(&bytes) .map_err(|_| Error::bad_database("Avatar URL in db is invalid."))?; - MxcUri::try_from(s).map_err(|_| Error::bad_database("Avatar URL in db is invalid.")) + s.try_into() + .map_err(|_| Error::bad_database("Avatar URL in db is invalid.")) }) .transpose() } /// Sets a new avatar_url or removes it if avatar_url is None. - #[tracing::instrument(skip(self, user_id, avatar_url))] - pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option) -> Result<()> { + fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option) -> Result<()> { if let Some(avatar_url) = avatar_url { self.userid_avatarurl .insert(user_id.as_bytes(), avatar_url.to_string().as_bytes())?; @@ -206,8 +167,7 @@ impl Users { } /// Get the blurhash of a user. - #[tracing::instrument(skip(self, user_id))] - pub fn blurhash(&self, user_id: &UserId) -> Result> { + fn blurhash(&self, user_id: &UserId) -> Result> { self.userid_blurhash .get(user_id.as_bytes())? .map(|bytes| { @@ -220,8 +180,7 @@ impl Users { } /// Sets a new avatar_url or removes it if avatar_url is None. - #[tracing::instrument(skip(self, user_id, blurhash))] - pub fn set_blurhash(&self, user_id: &UserId, blurhash: Option) -> Result<()> { + fn set_blurhash(&self, user_id: &UserId, blurhash: Option) -> Result<()> { if let Some(blurhash) = blurhash { self.userid_blurhash .insert(user_id.as_bytes(), blurhash.as_bytes())?; @@ -233,8 +192,7 @@ impl Users { } /// Adds a new device to a user. - #[tracing::instrument(skip(self, user_id, device_id, token, initial_device_display_name))] - pub fn create_device( + fn create_device( &self, user_id: &UserId, device_id: &DeviceId, @@ -268,8 +226,7 @@ impl Users { } /// Removes a device from a user. - #[tracing::instrument(skip(self, user_id, device_id))] - pub fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { + fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { let mut userdeviceid = user_id.as_bytes().to_vec(); userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); @@ -299,31 +256,32 @@ impl Users { } /// Returns an iterator over all device ids of this user. 
- #[tracing::instrument(skip(self, user_id))] - pub fn all_device_ids<'a>( + fn all_device_ids<'a>( &'a self, user_id: &UserId, - ) -> impl Iterator>> + 'a { + ) -> Box> + 'a> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); // All devices have metadata - self.userdeviceid_metadata - .scan_prefix(prefix) - .map(|(bytes, _)| { - Ok(utils::string_from_bytes( - bytes - .rsplit(|&b| b == 0xff) - .next() - .ok_or_else(|| Error::bad_database("UserDevice ID in db is invalid."))?, - ) - .map_err(|_| Error::bad_database("Device ID in userdeviceid_metadata is invalid."))? - .into()) - }) + Box::new( + self.userdeviceid_metadata + .scan_prefix(prefix) + .map(|(bytes, _)| { + Ok(utils::string_from_bytes( + bytes.rsplit(|&b| b == 0xff).next().ok_or_else(|| { + Error::bad_database("UserDevice ID in db is invalid.") + })?, + ) + .map_err(|_| { + Error::bad_database("Device ID in userdeviceid_metadata is invalid.") + })? + .into()) + }), + ) } /// Replaces the access token of one device. - #[tracing::instrument(skip(self, user_id, device_id, token))] - pub fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> { + fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> { let mut userdeviceid = user_id.as_bytes().to_vec(); userdeviceid.push(0xff); userdeviceid.extend_from_slice(device_id.as_bytes()); @@ -346,21 +304,12 @@ impl Users { Ok(()) } - #[tracing::instrument(skip( - self, - user_id, - device_id, - one_time_key_key, - one_time_key_value, - globals - ))] - pub fn add_one_time_key( + fn add_one_time_key( &self, user_id: &UserId, device_id: &DeviceId, one_time_key_key: &DeviceKeyId, - one_time_key_value: &OneTimeKey, - globals: &super::globals::Globals, + one_time_key_value: &Raw, ) -> Result<()> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); @@ -384,14 +333,15 @@ impl Users { &serde_json::to_vec(&one_time_key_value).expect("OneTimeKey::to_vec always works"), )?; - self.userid_lastonetimekeyupdate - .insert(user_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; + self.userid_lastonetimekeyupdate.insert( + user_id.as_bytes(), + &services().globals.next_count()?.to_be_bytes(), + )?; Ok(()) } - #[tracing::instrument(skip(self, user_id))] - pub fn last_one_time_keys_update(&self, user_id: &UserId) -> Result { + fn last_one_time_keys_update(&self, user_id: &UserId) -> Result { self.userid_lastonetimekeyupdate .get(user_id.as_bytes())? 
.map(|bytes| { @@ -402,14 +352,12 @@ impl Users { .unwrap_or(Ok(0)) } - #[tracing::instrument(skip(self, user_id, device_id, key_algorithm, globals))] - pub fn take_one_time_key( + fn take_one_time_key( &self, user_id: &UserId, device_id: &DeviceId, key_algorithm: &DeviceKeyAlgorithm, - globals: &super::globals::Globals, - ) -> Result> { + ) -> Result)>> { let mut prefix = user_id.as_bytes().to_vec(); prefix.push(0xff); prefix.extend_from_slice(device_id.as_bytes()); @@ -418,8 +366,10 @@ impl Users { prefix.extend_from_slice(key_algorithm.as_ref().as_bytes()); prefix.push(b':'); - self.userid_lastonetimekeyupdate - .insert(user_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; + self.userid_lastonetimekeyupdate.insert( + user_id.as_bytes(), + &services().globals.next_count()?.to_be_bytes(), + )?; self.onetimekeyid_onetimekeys .scan_prefix(prefix) @@ -429,21 +379,19 @@ impl Users { Ok(( serde_json::from_slice( - &*key - .rsplit(|&b| b == 0xff) + key.rsplit(|&b| b == 0xff) .next() .ok_or_else(|| Error::bad_database("OneTimeKeyId in db is invalid."))?, ) .map_err(|_| Error::bad_database("OneTimeKeyId in db is invalid."))?, - serde_json::from_slice(&*value) + serde_json::from_slice(&value) .map_err(|_| Error::bad_database("OneTimeKeys in db are invalid."))?, )) }) .transpose() } - #[tracing::instrument(skip(self, user_id, device_id))] - pub fn count_one_time_keys( + fn count_one_time_keys( &self, user_id: &UserId, device_id: &DeviceId, @@ -459,8 +407,8 @@ impl Users { .scan_prefix(userdeviceid) .map(|(bytes, _)| { Ok::<_, Error>( - serde_json::from_slice::( - &*bytes.rsplit(|&b| b == 0xff).next().ok_or_else(|| { + serde_json::from_slice::( + bytes.rsplit(|&b| b == 0xff).next().ok_or_else(|| { Error::bad_database("OneTimeKey ID in db is invalid.") })?, ) @@ -475,14 +423,11 @@ impl Users { Ok(counts) } - #[tracing::instrument(skip(self, user_id, device_id, device_keys, rooms, globals))] - pub fn add_device_keys( + fn add_device_keys( &self, user_id: &UserId, device_id: &DeviceId, - device_keys: &DeviceKeys, - rooms: &super::rooms::Rooms, - globals: &super::globals::Globals, + device_keys: &Raw, ) -> Result<()> { let mut userdeviceid = user_id.as_bytes().to_vec(); userdeviceid.push(0xff); @@ -493,27 +438,17 @@ impl Users { &serde_json::to_vec(&device_keys).expect("DeviceKeys::to_vec always works"), )?; - self.mark_device_key_update(user_id, rooms, globals)?; + self.mark_device_key_update(user_id)?; Ok(()) } - #[tracing::instrument(skip( - self, - master_key, - self_signing_key, - user_signing_key, - rooms, - globals - ))] - pub fn add_cross_signing_keys( + fn add_cross_signing_keys( &self, user_id: &UserId, - master_key: &CrossSigningKey, - self_signing_key: &Option, - user_signing_key: &Option, - rooms: &super::rooms::Rooms, - globals: &super::globals::Globals, + master_key: &Raw, + self_signing_key: &Option>, + user_signing_key: &Option>, ) -> Result<()> { // TODO: Check signatures @@ -521,7 +456,12 @@ impl Users { prefix.push(0xff); // Master key - let mut master_key_ids = master_key.keys.values(); + let mut master_key_ids = master_key + .deserialize() + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid master key"))? 
+ .keys + .into_values(); + let master_key_id = master_key_ids.next().ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Master key contained no key.", @@ -537,17 +477,22 @@ impl Users { let mut master_key_key = prefix.clone(); master_key_key.extend_from_slice(master_key_id.as_bytes()); - self.keyid_key.insert( - &master_key_key, - &serde_json::to_vec(&master_key).expect("CrossSigningKey::to_vec always works"), - )?; + self.keyid_key + .insert(&master_key_key, master_key.json().get().as_bytes())?; self.userid_masterkeyid .insert(user_id.as_bytes(), &master_key_key)?; // Self-signing key if let Some(self_signing_key) = self_signing_key { - let mut self_signing_key_ids = self_signing_key.keys.values(); + let mut self_signing_key_ids = self_signing_key + .deserialize() + .map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Invalid self signing key") + })? + .keys + .into_values(); + let self_signing_key_id = self_signing_key_ids.next().ok_or(Error::BadRequest( ErrorKind::InvalidParam, "Self signing key contained no key.", @@ -565,8 +510,7 @@ impl Users { self.keyid_key.insert( &self_signing_key_key, - &serde_json::to_vec(&self_signing_key) - .expect("CrossSigningKey::to_vec always works"), + self_signing_key.json().get().as_bytes(), )?; self.userid_selfsigningkeyid @@ -575,7 +519,14 @@ impl Users { // User-signing key if let Some(user_signing_key) = user_signing_key { - let mut user_signing_key_ids = user_signing_key.keys.values(); + let mut user_signing_key_ids = user_signing_key + .deserialize() + .map_err(|_| { + Error::BadRequest(ErrorKind::InvalidParam, "Invalid user signing key") + })? + .keys + .into_values(); + let user_signing_key_id = user_signing_key_ids.next().ok_or(Error::BadRequest( ErrorKind::InvalidParam, "User signing key contained no key.", @@ -593,28 +544,24 @@ impl Users { self.keyid_key.insert( &user_signing_key_key, - &serde_json::to_vec(&user_signing_key) - .expect("CrossSigningKey::to_vec always works"), + user_signing_key.json().get().as_bytes(), )?; self.userid_usersigningkeyid .insert(user_id.as_bytes(), &user_signing_key_key)?; } - self.mark_device_key_update(user_id, rooms, globals)?; + self.mark_device_key_update(user_id)?; Ok(()) } - #[tracing::instrument(skip(self, target_id, key_id, signature, sender_id, rooms, globals))] - pub fn sign_key( + fn sign_key( &self, target_id: &UserId, key_id: &str, signature: (String, String), sender_id: &UserId, - rooms: &super::rooms::Rooms, - globals: &super::globals::Globals, ) -> Result<()> { let mut key = target_id.as_bytes().to_vec(); key.push(0xff); @@ -632,7 +579,7 @@ impl Users { .ok_or_else(|| Error::bad_database("key in keyid_key has no signatures field."))? .as_object_mut() .ok_or_else(|| Error::bad_database("key in keyid_key has invalid signatures field."))? - .entry(sender_id.clone()) + .entry(sender_id.to_string()) .or_insert_with(|| serde_json::Map::new().into()); signatures @@ -646,18 +593,17 @@ impl Users { )?; // TODO: Should we notify about this change? 
- self.mark_device_key_update(target_id, rooms, globals)?; + self.mark_device_key_update(target_id)?; Ok(()) } - #[tracing::instrument(skip(self, user_or_room_id, from, to))] - pub fn keys_changed<'a>( + fn keys_changed<'a>( &'a self, user_or_room_id: &str, from: u64, to: Option, - ) -> impl Iterator> + 'a { + ) -> Box> + 'a> { let mut prefix = user_or_room_id.as_bytes().to_vec(); prefix.push(0xff); @@ -666,42 +612,49 @@ impl Users { let to = to.unwrap_or(u64::MAX); - self.keychangeid_userid - .iter_from(&start, false) - .take_while(move |(k, _)| { - k.starts_with(&prefix) - && if let Some(current) = k.splitn(2, |&b| b == 0xff).nth(1) { - if let Ok(c) = utils::u64_from_bytes(current) { - c <= to + Box::new( + self.keychangeid_userid + .iter_from(&start, false) + .take_while(move |(k, _)| { + k.starts_with(&prefix) + && if let Some(current) = k.splitn(2, |&b| b == 0xff).nth(1) { + if let Ok(c) = utils::u64_from_bytes(current) { + c <= to + } else { + warn!("BadDatabase: Could not parse keychangeid_userid bytes"); + false + } } else { - warn!("BadDatabase: Could not parse keychangeid_userid bytes"); + warn!("BadDatabase: Could not parse keychangeid_userid"); false } - } else { - warn!("BadDatabase: Could not parse keychangeid_userid"); - false - } - }) - .map(|(_, bytes)| { - UserId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("User ID in devicekeychangeid_userid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("User ID in devicekeychangeid_userid is invalid.")) - }) - } - - #[tracing::instrument(skip(self, user_id, rooms, globals))] - pub fn mark_device_key_update( - &self, - user_id: &UserId, - rooms: &super::rooms::Rooms, - globals: &super::globals::Globals, - ) -> Result<()> { - let count = globals.next_count()?.to_be_bytes(); - for room_id in rooms.rooms_joined(user_id).filter_map(|r| r.ok()) { + }) + .map(|(_, bytes)| { + UserId::parse(utils::string_from_bytes(&bytes).map_err(|_| { + Error::bad_database( + "User ID in devicekeychangeid_userid is invalid unicode.", + ) + })?) + .map_err(|_| { + Error::bad_database("User ID in devicekeychangeid_userid is invalid.") + }) + }), + ) + } + + fn mark_device_key_update(&self, user_id: &UserId) -> Result<()> { + let count = services().globals.next_count()?.to_be_bytes(); + for room_id in services() + .rooms + .state_cache + .rooms_joined(user_id) + .filter_map(|r| r.ok()) + { // Don't send key updates to unencrypted rooms - if rooms - .room_state_get(&room_id, &EventType::RoomEncryption, "")? + if services() + .rooms + .state_accessor + .room_state_get(&room_id, &StateEventType::RoomEncryption, "")? .is_none() { continue; @@ -722,12 +675,11 @@ impl Users { Ok(()) } - #[tracing::instrument(skip(self, user_id, device_id))] - pub fn get_device_keys( + fn get_device_keys( &self, user_id: &UserId, device_id: &DeviceId, - ) -> Result> { + ) -> Result>> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(device_id.as_bytes()); @@ -739,64 +691,49 @@ impl Users { }) } - #[tracing::instrument(skip(self, user_id, allowed_signatures))] - pub fn get_master_key bool>( + fn get_master_key( &self, user_id: &UserId, - allowed_signatures: F, - ) -> Result> { + allowed_signatures: &dyn Fn(&UserId) -> bool, + ) -> Result>> { self.userid_masterkeyid .get(user_id.as_bytes())? 
.map_or(Ok(None), |key| { self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { - let mut cross_signing_key = serde_json::from_slice::(&bytes) - .map_err(|_| { - Error::bad_database("CrossSigningKey in db is invalid.") - })?; - - // A user is not allowed to see signatures from users other than himself and - // the target user - cross_signing_key.signatures = cross_signing_key - .signatures - .into_iter() - .filter(|(user, _)| allowed_signatures(user)) - .collect(); - - Ok(Some(cross_signing_key)) + let mut cross_signing_key = serde_json::from_slice::(&bytes) + .map_err(|_| Error::bad_database("CrossSigningKey in db is invalid."))?; + clean_signatures(&mut cross_signing_key, user_id, allowed_signatures)?; + + Ok(Some(Raw::from_json( + serde_json::value::to_raw_value(&cross_signing_key) + .expect("Value to RawValue serialization"), + ))) }) }) } - #[tracing::instrument(skip(self, user_id, allowed_signatures))] - pub fn get_self_signing_key bool>( + fn get_self_signing_key( &self, user_id: &UserId, - allowed_signatures: F, - ) -> Result> { + allowed_signatures: &dyn Fn(&UserId) -> bool, + ) -> Result>> { self.userid_selfsigningkeyid .get(user_id.as_bytes())? .map_or(Ok(None), |key| { self.keyid_key.get(&key)?.map_or(Ok(None), |bytes| { - let mut cross_signing_key = serde_json::from_slice::(&bytes) - .map_err(|_| { - Error::bad_database("CrossSigningKey in db is invalid.") - })?; - - // A user is not allowed to see signatures from users other than himself and - // the target user - cross_signing_key.signatures = cross_signing_key - .signatures - .into_iter() - .filter(|(user, _)| user == user_id || allowed_signatures(user)) - .collect(); - - Ok(Some(cross_signing_key)) + let mut cross_signing_key = serde_json::from_slice::(&bytes) + .map_err(|_| Error::bad_database("CrossSigningKey in db is invalid."))?; + clean_signatures(&mut cross_signing_key, user_id, allowed_signatures)?; + + Ok(Some(Raw::from_json( + serde_json::value::to_raw_value(&cross_signing_key) + .expect("Value to RawValue serialization"), + ))) }) }) } - #[tracing::instrument(skip(self, user_id))] - pub fn get_user_signing_key(&self, user_id: &UserId) -> Result> { + fn get_user_signing_key(&self, user_id: &UserId) -> Result>> { self.userid_usersigningkeyid .get(user_id.as_bytes())? 
.map_or(Ok(None), |key| { @@ -808,29 +745,19 @@ impl Users { }) } - #[tracing::instrument(skip( - self, - sender, - target_user_id, - target_device_id, - event_type, - content, - globals - ))] - pub fn add_to_device_event( + fn add_to_device_event( &self, sender: &UserId, target_user_id: &UserId, target_device_id: &DeviceId, event_type: &str, content: serde_json::Value, - globals: &super::globals::Globals, ) -> Result<()> { let mut key = target_user_id.as_bytes().to_vec(); key.push(0xff); key.extend_from_slice(target_device_id.as_bytes()); key.push(0xff); - key.extend_from_slice(&globals.next_count()?.to_be_bytes()); + key.extend_from_slice(&services().globals.next_count()?.to_be_bytes()); let mut json = serde_json::Map::new(); json.insert("type".to_owned(), event_type.to_owned().into()); @@ -844,8 +771,7 @@ impl Users { Ok(()) } - #[tracing::instrument(skip(self, user_id, device_id))] - pub fn get_to_device_events( + fn get_to_device_events( &self, user_id: &UserId, device_id: &DeviceId, @@ -867,8 +793,7 @@ impl Users { Ok(events) } - #[tracing::instrument(skip(self, user_id, device_id, until))] - pub fn remove_to_device_events( + fn remove_to_device_events( &self, user_id: &UserId, device_id: &DeviceId, @@ -889,7 +814,7 @@ impl Users { .map(|(key, _)| { Ok::<_, Error>(( key.clone(), - utils::u64_from_bytes(&key[key.len() - mem::size_of::()..key.len()]) + utils::u64_from_bytes(&key[key.len() - size_of::()..key.len()]) .map_err(|_| Error::bad_database("ToDeviceId has invalid count bytes."))?, )) }) @@ -902,8 +827,7 @@ impl Users { Ok(()) } - #[tracing::instrument(skip(self, user_id, device_id, device))] - pub fn update_device_metadata( + fn update_device_metadata( &self, user_id: &UserId, device_id: &DeviceId, @@ -928,8 +852,7 @@ impl Users { } /// Get device metadata. - #[tracing::instrument(skip(self, user_id, device_id))] - pub fn get_device_metadata( + fn get_device_metadata( &self, user_id: &UserId, device_id: &DeviceId, @@ -947,8 +870,7 @@ impl Users { }) } - #[tracing::instrument(skip(self, user_id))] - pub fn get_devicelist_version(&self, user_id: &UserId) -> Result> { + fn get_devicelist_version(&self, user_id: &UserId) -> Result> { self.userid_devicelistversion .get(user_id.as_bytes())? .map_or(Ok(None), |bytes| { @@ -958,36 +880,78 @@ impl Users { }) } - #[tracing::instrument(skip(self, user_id))] - pub fn all_devices_metadata<'a>( + fn all_devices_metadata<'a>( &'a self, user_id: &UserId, - ) -> impl Iterator> + 'a { + ) -> Box> + 'a> { let mut key = user_id.as_bytes().to_vec(); key.push(0xff); - self.userdeviceid_metadata - .scan_prefix(key) - .map(|(_, bytes)| { - serde_json::from_slice::(&bytes) - .map_err(|_| Error::bad_database("Device in userdeviceid_metadata is invalid.")) - }) + Box::new( + self.userdeviceid_metadata + .scan_prefix(key) + .map(|(_, bytes)| { + serde_json::from_slice::(&bytes).map_err(|_| { + Error::bad_database("Device in userdeviceid_metadata is invalid.") + }) + }), + ) } - /// Deactivate account - #[tracing::instrument(skip(self, user_id))] - pub fn deactivate_account(&self, user_id: &UserId) -> Result<()> { - // Remove all associated devices - for device_id in self.all_device_ids(user_id) { - self.remove_device(user_id, &device_id?)?; - } + /// Creates a new sync filter. Returns the filter id. + fn create_filter(&self, user_id: &UserId, filter: &IncomingFilterDefinition) -> Result { + let filter_id = utils::random_string(4); - // Set the password to "" to indicate a deactivated account. 
Hashes will never result in an - // empty string, so the user will not be able to log in again. Systems like changing the - // password without logging in should check if the account is deactivated. - self.userid_password.insert(user_id.as_bytes(), &[])?; + let mut key = user_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(filter_id.as_bytes()); - // TODO: Unhook 3PID - Ok(()) + self.userfilterid_filter.insert( + &key, + &serde_json::to_vec(&filter).expect("filter is valid json"), + )?; + + Ok(filter_id) + } + + fn get_filter( + &self, + user_id: &UserId, + filter_id: &str, + ) -> Result> { + let mut key = user_id.as_bytes().to_vec(); + key.push(0xff); + key.extend_from_slice(filter_id.as_bytes()); + + let raw = self.userfilterid_filter.get(&key)?; + + if let Some(raw) = raw { + serde_json::from_slice(&raw) + .map_err(|_| Error::bad_database("Invalid filter event in db.")) + } else { + Ok(None) + } + } +} + +/// Will only return with Some(username) if the password was not empty and the +/// username could be successfully parsed. +/// If utils::string_from_bytes(...) returns an error that username will be skipped +/// and the error will be logged. +fn get_username_with_valid_password(username: &[u8], password: &[u8]) -> Option { + // A valid password is not empty + if password.is_empty() { + None + } else { + match utils::string_from_bytes(username) { + Ok(u) => Some(u), + Err(e) => { + warn!( + "Failed to parse username while calling get_local_users(): {}", + e.to_string() + ); + None + } + } } } diff --git a/src/database/media.rs b/src/database/media.rs deleted file mode 100644 index 46630131..00000000 --- a/src/database/media.rs +++ /dev/null @@ -1,358 +0,0 @@ -use crate::database::globals::Globals; -use image::{imageops::FilterType, GenericImageView}; - -use super::abstraction::Tree; -use crate::{utils, Error, Result}; -use std::{mem, sync::Arc}; -use tokio::{ - fs::File, - io::{AsyncReadExt, AsyncWriteExt}, -}; - -pub struct FileMeta { - pub content_disposition: Option, - pub content_type: Option, - pub file: Vec, -} - -pub struct Media { - pub(super) mediaid_file: Arc, // MediaId = MXC + WidthHeight + ContentDisposition + ContentType -} - -impl Media { - /// Uploads a file. - pub async fn create( - &self, - mxc: String, - globals: &Globals, - content_disposition: &Option<&str>, - content_type: &Option<&str>, - file: &[u8], - ) -> Result<()> { - let mut key = mxc.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(&0_u32.to_be_bytes()); // Width = 0 if it's not a thumbnail - key.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail - key.push(0xff); - key.extend_from_slice( - content_disposition - .as_ref() - .map(|f| f.as_bytes()) - .unwrap_or_default(), - ); - key.push(0xff); - key.extend_from_slice( - content_type - .as_ref() - .map(|c| c.as_bytes()) - .unwrap_or_default(), - ); - - let path = globals.get_media_file(&key); - let mut f = File::create(path).await?; - f.write_all(file).await?; - - self.mediaid_file.insert(&key, &[])?; - Ok(()) - } - - /// Uploads or replaces a file thumbnail. 
- #[allow(clippy::too_many_arguments)] - pub async fn upload_thumbnail( - &self, - mxc: String, - globals: &Globals, - content_disposition: &Option, - content_type: &Option, - width: u32, - height: u32, - file: &[u8], - ) -> Result<()> { - let mut key = mxc.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(&width.to_be_bytes()); - key.extend_from_slice(&height.to_be_bytes()); - key.push(0xff); - key.extend_from_slice( - content_disposition - .as_ref() - .map(|f| f.as_bytes()) - .unwrap_or_default(), - ); - key.push(0xff); - key.extend_from_slice( - content_type - .as_ref() - .map(|c| c.as_bytes()) - .unwrap_or_default(), - ); - - let path = globals.get_media_file(&key); - let mut f = File::create(path).await?; - f.write_all(file).await?; - - self.mediaid_file.insert(&key, &[])?; - - Ok(()) - } - - /// Downloads a file. - pub async fn get(&self, globals: &Globals, mxc: &str) -> Result> { - let mut prefix = mxc.as_bytes().to_vec(); - prefix.push(0xff); - prefix.extend_from_slice(&0_u32.to_be_bytes()); // Width = 0 if it's not a thumbnail - prefix.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail - prefix.push(0xff); - - let first = self.mediaid_file.scan_prefix(prefix).next(); - if let Some((key, _)) = first { - let path = globals.get_media_file(&key); - let mut file = Vec::new(); - File::open(path).await?.read_to_end(&mut file).await?; - let mut parts = key.rsplit(|&b| b == 0xff); - - let content_type = parts - .next() - .map(|bytes| { - utils::string_from_bytes(bytes).map_err(|_| { - Error::bad_database("Content type in mediaid_file is invalid unicode.") - }) - }) - .transpose()?; - - let content_disposition_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?; - - let content_disposition = if content_disposition_bytes.is_empty() { - None - } else { - Some( - utils::string_from_bytes(content_disposition_bytes).map_err(|_| { - Error::bad_database( - "Content Disposition in mediaid_file is invalid unicode.", - ) - })?, - ) - }; - - Ok(Some(FileMeta { - content_disposition, - content_type, - file, - })) - } else { - Ok(None) - } - } - - /// Returns width, height of the thumbnail and whether it should be cropped. Returns None when - /// the server should send the original file. - pub fn thumbnail_properties(&self, width: u32, height: u32) -> Option<(u32, u32, bool)> { - match (width, height) { - (0..=32, 0..=32) => Some((32, 32, true)), - (0..=96, 0..=96) => Some((96, 96, true)), - (0..=320, 0..=240) => Some((320, 240, false)), - (0..=640, 0..=480) => Some((640, 480, false)), - (0..=800, 0..=600) => Some((800, 600, false)), - _ => None, - } - } - - /// Downloads a file's thumbnail. - /// - /// Here's an example on how it works: - /// - /// - Client requests an image with width=567, height=567 - /// - Server rounds that up to (800, 600), so it doesn't have to save too many thumbnails - /// - Server rounds that up again to (958, 600) to fix the aspect ratio (only for width,height>96) - /// - Server creates the thumbnail and sends it to the user - /// - /// For width,height <= 96 the server uses another thumbnailing algorithm which crops the image afterwards. 
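The "rounds that up to (800, 600)" step in the example above is the bucket lookup performed by thumbnail_properties. A small self-contained sketch of that lookup, reusing the buckets shown above (the function name here is illustrative), with the worked 567x567 case:

fn thumbnail_bucket(width: u32, height: u32) -> Option<(u32, u32, bool)> {
    match (width, height) {
        (0..=32, 0..=32) => Some((32, 32, true)),   // tiny, cropped
        (0..=96, 0..=96) => Some((96, 96, true)),   // avatar-sized, cropped
        (0..=320, 0..=240) => Some((320, 240, false)),
        (0..=640, 0..=480) => Some((640, 480, false)),
        (0..=800, 0..=600) => Some((800, 600, false)),
        _ => None, // anything larger: serve the original file
    }
}

fn main() {
    // 567 > 480, so the 640x480 bucket does not fit; the request lands in 800x600, uncropped.
    assert_eq!(thumbnail_bucket(567, 567), Some((800, 600, false)));
}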
- pub async fn get_thumbnail( - &self, - mxc: String, - globals: &Globals, - width: u32, - height: u32, - ) -> Result> { - let (width, height, crop) = self - .thumbnail_properties(width, height) - .unwrap_or((0, 0, false)); // 0, 0 because that's the original file - - let mut main_prefix = mxc.as_bytes().to_vec(); - main_prefix.push(0xff); - - let mut thumbnail_prefix = main_prefix.clone(); - thumbnail_prefix.extend_from_slice(&width.to_be_bytes()); - thumbnail_prefix.extend_from_slice(&height.to_be_bytes()); - thumbnail_prefix.push(0xff); - - let mut original_prefix = main_prefix; - original_prefix.extend_from_slice(&0_u32.to_be_bytes()); // Width = 0 if it's not a thumbnail - original_prefix.extend_from_slice(&0_u32.to_be_bytes()); // Height = 0 if it's not a thumbnail - original_prefix.push(0xff); - - let first_thumbnailprefix = self.mediaid_file.scan_prefix(thumbnail_prefix).next(); - let first_originalprefix = self.mediaid_file.scan_prefix(original_prefix).next(); - if let Some((key, _)) = first_thumbnailprefix { - // Using saved thumbnail - let path = globals.get_media_file(&key); - let mut file = Vec::new(); - File::open(path).await?.read_to_end(&mut file).await?; - let mut parts = key.rsplit(|&b| b == 0xff); - - let content_type = parts - .next() - .map(|bytes| { - utils::string_from_bytes(bytes).map_err(|_| { - Error::bad_database("Content type in mediaid_file is invalid unicode.") - }) - }) - .transpose()?; - - let content_disposition_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?; - - let content_disposition = if content_disposition_bytes.is_empty() { - None - } else { - Some( - utils::string_from_bytes(content_disposition_bytes).map_err(|_| { - Error::bad_database("Content Disposition in db is invalid.") - })?, - ) - }; - - Ok(Some(FileMeta { - content_disposition, - content_type, - file: file.to_vec(), - })) - } else if let Some((key, _)) = first_originalprefix { - // Generate a thumbnail - let path = globals.get_media_file(&key); - let mut file = Vec::new(); - File::open(path).await?.read_to_end(&mut file).await?; - - let mut parts = key.rsplit(|&b| b == 0xff); - - let content_type = parts - .next() - .map(|bytes| { - utils::string_from_bytes(bytes).map_err(|_| { - Error::bad_database("Content type in mediaid_file is invalid unicode.") - }) - }) - .transpose()?; - - let content_disposition_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Media ID in db is invalid."))?; - - let content_disposition = if content_disposition_bytes.is_empty() { - None - } else { - Some( - utils::string_from_bytes(content_disposition_bytes).map_err(|_| { - Error::bad_database( - "Content Disposition in mediaid_file is invalid unicode.", - ) - })?, - ) - }; - - if let Ok(image) = image::load_from_memory(&file) { - let original_width = image.width(); - let original_height = image.height(); - if width > original_width || height > original_height { - return Ok(Some(FileMeta { - content_disposition, - content_type, - file: file.to_vec(), - })); - } - - let thumbnail = if crop { - image.resize_to_fill(width, height, FilterType::CatmullRom) - } else { - let (exact_width, exact_height) = { - // Copied from image::dynimage::resize_dimensions - let ratio = u64::from(original_width) * u64::from(height); - let nratio = u64::from(width) * u64::from(original_height); - - let use_width = nratio <= ratio; - let intermediate = if use_width { - u64::from(original_height) * u64::from(width) - / u64::from(original_width) - } else { - u64::from(original_width) 
* u64::from(height) - / u64::from(original_height) - }; - if use_width { - if intermediate <= u64::from(::std::u32::MAX) { - (width, intermediate as u32) - } else { - ( - (u64::from(width) * u64::from(::std::u32::MAX) / intermediate) - as u32, - ::std::u32::MAX, - ) - } - } else if intermediate <= u64::from(::std::u32::MAX) { - (intermediate as u32, height) - } else { - ( - ::std::u32::MAX, - (u64::from(height) * u64::from(::std::u32::MAX) / intermediate) - as u32, - ) - } - }; - - image.thumbnail_exact(exact_width, exact_height) - }; - - let mut thumbnail_bytes = Vec::new(); - thumbnail.write_to(&mut thumbnail_bytes, image::ImageOutputFormat::Png)?; - - // Save thumbnail in database so we don't have to generate it again next time - let mut thumbnail_key = key.to_vec(); - let width_index = thumbnail_key - .iter() - .position(|&b| b == 0xff) - .ok_or_else(|| Error::bad_database("Media in db is invalid."))? - + 1; - let mut widthheight = width.to_be_bytes().to_vec(); - widthheight.extend_from_slice(&height.to_be_bytes()); - - thumbnail_key.splice( - width_index..width_index + 2 * mem::size_of::(), - widthheight, - ); - - let path = globals.get_media_file(&thumbnail_key); - let mut f = File::create(path).await?; - f.write_all(&thumbnail_bytes).await?; - - self.mediaid_file.insert(&thumbnail_key, &[])?; - - Ok(Some(FileMeta { - content_disposition, - content_type, - file: thumbnail_bytes.to_vec(), - })) - } else { - // Couldn't parse file to generate thumbnail, send original - Ok(Some(FileMeta { - content_disposition, - content_type, - file: file.to_vec(), - })) - } - } else { - Ok(None) - } - } -} diff --git a/src/database/mod.rs b/src/database/mod.rs new file mode 100644 index 00000000..15ee1373 --- /dev/null +++ b/src/database/mod.rs @@ -0,0 +1,936 @@ +pub mod abstraction; +pub mod key_value; + +use crate::{services, utils, Config, Error, PduEvent, Result, Services, SERVICES}; +use abstraction::{KeyValueDatabaseEngine, KvTree}; +use directories::ProjectDirs; +use lru_cache::LruCache; +use ruma::{ + events::{ + push_rules::PushRulesEventContent, room::message::RoomMessageEventContent, + GlobalAccountDataEvent, GlobalAccountDataEventType, StateEventType, + }, + push::Ruleset, + CanonicalJsonValue, EventId, OwnedDeviceId, OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, + UserId, +}; +use std::{ + collections::{BTreeMap, HashMap, HashSet}, + fs::{self, remove_dir_all}, + io::Write, + mem::size_of, + path::Path, + sync::{Arc, Mutex, RwLock}, +}; + +use tracing::{debug, error, info, warn}; + +pub struct KeyValueDatabase { + _db: Arc, + + //pub globals: globals::Globals, + pub(super) global: Arc, + pub(super) server_signingkeys: Arc, + + //pub users: users::Users, + pub(super) userid_password: Arc, + pub(super) userid_displayname: Arc, + pub(super) userid_avatarurl: Arc, + pub(super) userid_blurhash: Arc, + pub(super) userdeviceid_token: Arc, + pub(super) userdeviceid_metadata: Arc, // This is also used to check if a device exists + pub(super) userid_devicelistversion: Arc, // DevicelistVersion = u64 + pub(super) token_userdeviceid: Arc, + + pub(super) onetimekeyid_onetimekeys: Arc, // OneTimeKeyId = UserId + DeviceKeyId + pub(super) userid_lastonetimekeyupdate: Arc, // LastOneTimeKeyUpdate = Count + pub(super) keychangeid_userid: Arc, // KeyChangeId = UserId/RoomId + Count + pub(super) keyid_key: Arc, // KeyId = UserId + KeyId (depends on key type) + pub(super) userid_masterkeyid: Arc, + pub(super) userid_selfsigningkeyid: Arc, + pub(super) userid_usersigningkeyid: Arc, + + pub(super) 
userfilterid_filter: Arc, // UserFilterId = UserId + FilterId + + pub(super) todeviceid_events: Arc, // ToDeviceId = UserId + DeviceId + Count + + //pub uiaa: uiaa::Uiaa, + pub(super) userdevicesessionid_uiaainfo: Arc, // User-interactive authentication + pub(super) userdevicesessionid_uiaarequest: + RwLock>, + + //pub edus: RoomEdus, + pub(super) readreceiptid_readreceipt: Arc, // ReadReceiptId = RoomId + Count + UserId + pub(super) roomuserid_privateread: Arc, // RoomUserId = Room + User, PrivateRead = Count + pub(super) roomuserid_lastprivatereadupdate: Arc, // LastPrivateReadUpdate = Count + pub(super) typingid_userid: Arc, // TypingId = RoomId + TimeoutTime + Count + pub(super) roomid_lasttypingupdate: Arc, // LastRoomTypingUpdate = Count + pub(super) presenceid_presence: Arc, // PresenceId = RoomId + Count + UserId + pub(super) userid_lastpresenceupdate: Arc, // LastPresenceUpdate = Count + + //pub rooms: rooms::Rooms, + pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count + pub(super) eventid_pduid: Arc, + pub(super) roomid_pduleaves: Arc, + pub(super) alias_roomid: Arc, + pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count + pub(super) publicroomids: Arc, + + pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount + + /// Participating servers in a room. + pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName + pub(super) serverroomids: Arc, // ServerRoomId = ServerName + RoomId + + pub(super) userroomid_joined: Arc, + pub(super) roomuserid_joined: Arc, + pub(super) roomid_joinedcount: Arc, + pub(super) roomid_invitedcount: Arc, + pub(super) roomuseroncejoinedids: Arc, + pub(super) userroomid_invitestate: Arc, // InviteState = Vec> + pub(super) roomuserid_invitecount: Arc, // InviteCount = Count + pub(super) userroomid_leftstate: Arc, + pub(super) roomuserid_leftcount: Arc, + + pub(super) disabledroomids: Arc, // Rooms where incoming federation handling is disabled + + pub(super) lazyloadedids: Arc, // LazyLoadedIds = UserId + DeviceId + RoomId + LazyLoadedUserId + + pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 + pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 + + /// Remember the current state hash of a room. + pub(super) roomid_shortstatehash: Arc, + pub(super) roomsynctoken_shortstatehash: Arc, + /// Remember the state hash at events in the past. + pub(super) shorteventid_shortstatehash: Arc, + /// StateKey = EventType + StateKey, ShortStateKey = Count + pub(super) statekey_shortstatekey: Arc, + pub(super) shortstatekey_statekey: Arc, + + pub(super) roomid_shortroomid: Arc, + + pub(super) shorteventid_eventid: Arc, + pub(super) eventid_shorteventid: Arc, + + pub(super) statehash_shortstatehash: Arc, + pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) + + pub(super) shorteventid_authchain: Arc, + + /// RoomId + EventId -> outlier PDU. + /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. + pub(super) eventid_outlierpdu: Arc, + pub(super) softfailedeventids: Arc, + + /// RoomId + EventId -> Parent PDU EventId. 
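The key formats noted in the comments above (RoomUserId = Room + User, BackupId = UserId + Version(Count), and so on) all follow one convention: the components are concatenated with a 0xff separator byte, which cannot appear inside Matrix identifiers. A minimal sketch of that convention, with illustrative helper names that are not part of this patch:

    // Compose a `roomuserid`-style key as RoomId + 0xff + UserId.
    fn make_room_user_key(room_id: &RoomId, user_id: &UserId) -> Vec<u8> {
        let mut key = room_id.as_bytes().to_vec();
        key.push(0xff);
        key.extend_from_slice(user_id.as_bytes());
        key
    }

    // Splitting mirrors the `split(|&b| b == 0xff)` calls used throughout this module.
    fn split_room_user_key(key: &[u8]) -> Option<(&[u8], &[u8])> {
        let mut parts = key.splitn(2, |&b| b == 0xff);
        Some((parts.next()?, parts.next()?))
    }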
+ pub(super) referencedevents: Arc, + + //pub account_data: account_data::AccountData, + pub(super) roomuserdataid_accountdata: Arc, // RoomUserDataId = Room + User + Count + Type + pub(super) roomusertype_roomuserdataid: Arc, // RoomUserType = Room + User + Type + + //pub media: media::Media, + pub(super) mediaid_file: Arc, // MediaId = MXC + WidthHeight + ContentDisposition + ContentType + //pub key_backups: key_backups::KeyBackups, + pub(super) backupid_algorithm: Arc, // BackupId = UserId + Version(Count) + pub(super) backupid_etag: Arc, // BackupId = UserId + Version(Count) + pub(super) backupkeyid_backup: Arc, // BackupKeyId = UserId + Version + RoomId + SessionId + + //pub transaction_ids: transaction_ids::TransactionIds, + pub(super) userdevicetxnid_response: Arc, // Response can be empty (/sendToDevice) or the event id (/send) + //pub sending: sending::Sending, + pub(super) servername_educount: Arc, // EduCount: Count of last EDU sync + pub(super) servernameevent_data: Arc, // ServernameEvent = (+ / $)SenderKey / ServerName / UserId + PduId / Id (for edus), Data = EDU content + pub(super) servercurrentevent_data: Arc, // ServerCurrentEvents = (+ / $)ServerName / UserId + PduId / Id (for edus), Data = EDU content + + //pub appservice: appservice::Appservice, + pub(super) id_appserviceregistrations: Arc, + + //pub pusher: pusher::PushData, + pub(super) senderkey_pusher: Arc, + + pub(super) cached_registrations: Arc>>, + pub(super) pdu_cache: Mutex>>, + pub(super) shorteventid_cache: Mutex>>, + pub(super) auth_chain_cache: Mutex, Arc>>>, + pub(super) eventidshort_cache: Mutex>, + pub(super) statekeyshort_cache: Mutex>, + pub(super) shortstatekey_cache: Mutex>, + pub(super) our_real_users_cache: RwLock>>>, + pub(super) appservice_in_room_cache: RwLock>>, + pub(super) lasttimelinecount_cache: Mutex>, +} + +impl KeyValueDatabase { + /// Tries to remove the old database but ignores all errors. + pub fn try_remove(server_name: &str) -> Result<()> { + let mut path = ProjectDirs::from("xyz", "koesters", "conduit") + .ok_or_else(|| Error::bad_config("The OS didn't return a valid home directory path."))? + .data_dir() + .to_path_buf(); + path.push(server_name); + let _ = remove_dir_all(path); + + Ok(()) + } + + fn check_db_setup(config: &Config) -> Result<()> { + let path = Path::new(&config.database_path); + + let sled_exists = path.join("db").exists(); + let sqlite_exists = path.join("conduit.db").exists(); + let rocksdb_exists = path.join("IDENTITY").exists(); + + let mut count = 0; + + if sled_exists { + count += 1; + } + + if sqlite_exists { + count += 1; + } + + if rocksdb_exists { + count += 1; + } + + if count > 1 { + warn!("Multiple databases at database_path detected"); + return Ok(()); + } + + if sled_exists && config.database_backend != "sled" { + return Err(Error::bad_config( + "Found sled at database_path, but is not specified in config.", + )); + } + + if sqlite_exists && config.database_backend != "sqlite" { + return Err(Error::bad_config( + "Found sqlite at database_path, but is not specified in config.", + )); + } + + if rocksdb_exists && config.database_backend != "rocksdb" { + return Err(Error::bad_config( + "Found rocksdb at database_path, but is not specified in config.", + )); + } + + Ok(()) + } + + /// Load an existing database or create a new one. 
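check_db_setup above infers which backend created an existing database_path from marker files (a db/ directory for sled, conduit.db for sqlite, an IDENTITY file for rocksdb) and refuses to start when that disagrees with config.database_backend. A condensed sketch of the detection idea, assuming a hypothetical detect_backend helper; the real check additionally warns when more than one marker is present:

    fn detect_backend(path: &std::path::Path) -> Option<&'static str> {
        if path.join("db").exists() {
            Some("sled")
        } else if path.join("conduit.db").exists() {
            Some("sqlite")
        } else if path.join("IDENTITY").exists() {
            Some("rocksdb")
        } else {
            None
        }
    }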
+ pub async fn load_or_create(config: Config) -> Result<()> { + Self::check_db_setup(&config)?; + + if !Path::new(&config.database_path).exists() { + std::fs::create_dir_all(&config.database_path) + .map_err(|_| Error::BadConfig("Database folder doesn't exists and couldn't be created (e.g. due to missing permissions). Please create the database folder yourself."))?; + } + + let builder: Arc = match &*config.database_backend { + "sqlite" => { + #[cfg(not(feature = "sqlite"))] + return Err(Error::BadConfig("Database backend not found.")); + #[cfg(feature = "sqlite")] + Arc::new(Arc::::open(&config)?) + } + "rocksdb" => { + #[cfg(not(feature = "rocksdb"))] + return Err(Error::BadConfig("Database backend not found.")); + #[cfg(feature = "rocksdb")] + Arc::new(Arc::::open(&config)?) + } + "persy" => { + #[cfg(not(feature = "persy"))] + return Err(Error::BadConfig("Database backend not found.")); + #[cfg(feature = "persy")] + Arc::new(Arc::::open(&config)?) + } + _ => { + return Err(Error::BadConfig("Database backend not found.")); + } + }; + + if config.max_request_size < 1024 { + eprintln!("ERROR: Max request size is less than 1KB. Please increase it."); + } + + let db_raw = Box::new(Self { + _db: builder.clone(), + userid_password: builder.open_tree("userid_password")?, + userid_displayname: builder.open_tree("userid_displayname")?, + userid_avatarurl: builder.open_tree("userid_avatarurl")?, + userid_blurhash: builder.open_tree("userid_blurhash")?, + userdeviceid_token: builder.open_tree("userdeviceid_token")?, + userdeviceid_metadata: builder.open_tree("userdeviceid_metadata")?, + userid_devicelistversion: builder.open_tree("userid_devicelistversion")?, + token_userdeviceid: builder.open_tree("token_userdeviceid")?, + onetimekeyid_onetimekeys: builder.open_tree("onetimekeyid_onetimekeys")?, + userid_lastonetimekeyupdate: builder.open_tree("userid_lastonetimekeyupdate")?, + keychangeid_userid: builder.open_tree("keychangeid_userid")?, + keyid_key: builder.open_tree("keyid_key")?, + userid_masterkeyid: builder.open_tree("userid_masterkeyid")?, + userid_selfsigningkeyid: builder.open_tree("userid_selfsigningkeyid")?, + userid_usersigningkeyid: builder.open_tree("userid_usersigningkeyid")?, + userfilterid_filter: builder.open_tree("userfilterid_filter")?, + todeviceid_events: builder.open_tree("todeviceid_events")?, + + userdevicesessionid_uiaainfo: builder.open_tree("userdevicesessionid_uiaainfo")?, + userdevicesessionid_uiaarequest: RwLock::new(BTreeMap::new()), + readreceiptid_readreceipt: builder.open_tree("readreceiptid_readreceipt")?, + roomuserid_privateread: builder.open_tree("roomuserid_privateread")?, // "Private" read receipt + roomuserid_lastprivatereadupdate: builder + .open_tree("roomuserid_lastprivatereadupdate")?, + typingid_userid: builder.open_tree("typingid_userid")?, + roomid_lasttypingupdate: builder.open_tree("roomid_lasttypingupdate")?, + presenceid_presence: builder.open_tree("presenceid_presence")?, + userid_lastpresenceupdate: builder.open_tree("userid_lastpresenceupdate")?, + pduid_pdu: builder.open_tree("pduid_pdu")?, + eventid_pduid: builder.open_tree("eventid_pduid")?, + roomid_pduleaves: builder.open_tree("roomid_pduleaves")?, + + alias_roomid: builder.open_tree("alias_roomid")?, + aliasid_alias: builder.open_tree("aliasid_alias")?, + publicroomids: builder.open_tree("publicroomids")?, + + tokenids: builder.open_tree("tokenids")?, + + roomserverids: builder.open_tree("roomserverids")?, + serverroomids: builder.open_tree("serverroomids")?, + userroomid_joined: 
builder.open_tree("userroomid_joined")?, + roomuserid_joined: builder.open_tree("roomuserid_joined")?, + roomid_joinedcount: builder.open_tree("roomid_joinedcount")?, + roomid_invitedcount: builder.open_tree("roomid_invitedcount")?, + roomuseroncejoinedids: builder.open_tree("roomuseroncejoinedids")?, + userroomid_invitestate: builder.open_tree("userroomid_invitestate")?, + roomuserid_invitecount: builder.open_tree("roomuserid_invitecount")?, + userroomid_leftstate: builder.open_tree("userroomid_leftstate")?, + roomuserid_leftcount: builder.open_tree("roomuserid_leftcount")?, + + disabledroomids: builder.open_tree("disabledroomids")?, + + lazyloadedids: builder.open_tree("lazyloadedids")?, + + userroomid_notificationcount: builder.open_tree("userroomid_notificationcount")?, + userroomid_highlightcount: builder.open_tree("userroomid_highlightcount")?, + + statekey_shortstatekey: builder.open_tree("statekey_shortstatekey")?, + shortstatekey_statekey: builder.open_tree("shortstatekey_statekey")?, + + shorteventid_authchain: builder.open_tree("shorteventid_authchain")?, + + roomid_shortroomid: builder.open_tree("roomid_shortroomid")?, + + shortstatehash_statediff: builder.open_tree("shortstatehash_statediff")?, + eventid_shorteventid: builder.open_tree("eventid_shorteventid")?, + shorteventid_eventid: builder.open_tree("shorteventid_eventid")?, + shorteventid_shortstatehash: builder.open_tree("shorteventid_shortstatehash")?, + roomid_shortstatehash: builder.open_tree("roomid_shortstatehash")?, + roomsynctoken_shortstatehash: builder.open_tree("roomsynctoken_shortstatehash")?, + statehash_shortstatehash: builder.open_tree("statehash_shortstatehash")?, + + eventid_outlierpdu: builder.open_tree("eventid_outlierpdu")?, + softfailedeventids: builder.open_tree("softfailedeventids")?, + + referencedevents: builder.open_tree("referencedevents")?, + roomuserdataid_accountdata: builder.open_tree("roomuserdataid_accountdata")?, + roomusertype_roomuserdataid: builder.open_tree("roomusertype_roomuserdataid")?, + mediaid_file: builder.open_tree("mediaid_file")?, + backupid_algorithm: builder.open_tree("backupid_algorithm")?, + backupid_etag: builder.open_tree("backupid_etag")?, + backupkeyid_backup: builder.open_tree("backupkeyid_backup")?, + userdevicetxnid_response: builder.open_tree("userdevicetxnid_response")?, + servername_educount: builder.open_tree("servername_educount")?, + servernameevent_data: builder.open_tree("servernameevent_data")?, + servercurrentevent_data: builder.open_tree("servercurrentevent_data")?, + id_appserviceregistrations: builder.open_tree("id_appserviceregistrations")?, + senderkey_pusher: builder.open_tree("senderkey_pusher")?, + global: builder.open_tree("global")?, + server_signingkeys: builder.open_tree("server_signingkeys")?, + + cached_registrations: Arc::new(RwLock::new(HashMap::new())), + pdu_cache: Mutex::new(LruCache::new( + config + .pdu_cache_capacity + .try_into() + .expect("pdu cache capacity fits into usize"), + )), + auth_chain_cache: Mutex::new(LruCache::new( + (100_000.0 * config.conduit_cache_capacity_modifier) as usize, + )), + shorteventid_cache: Mutex::new(LruCache::new( + (100_000.0 * config.conduit_cache_capacity_modifier) as usize, + )), + eventidshort_cache: Mutex::new(LruCache::new( + (100_000.0 * config.conduit_cache_capacity_modifier) as usize, + )), + shortstatekey_cache: Mutex::new(LruCache::new( + (100_000.0 * config.conduit_cache_capacity_modifier) as usize, + )), + statekeyshort_cache: Mutex::new(LruCache::new( + (100_000.0 * 
config.conduit_cache_capacity_modifier) as usize, + )), + our_real_users_cache: RwLock::new(HashMap::new()), + appservice_in_room_cache: RwLock::new(HashMap::new()), + lasttimelinecount_cache: Mutex::new(HashMap::new()), + }); + + let db = Box::leak(db_raw); + + let services_raw = Box::new(Services::build(db, config)?); + + // This is the first and only time we initialize the SERVICE static + *SERVICES.write().unwrap() = Some(Box::leak(services_raw)); + + // Matrix resource ownership is based on the server name; changing it + // requires recreating the database from scratch. + if services().users.count()? > 0 { + let conduit_user = + UserId::parse_with_server_name("conduit", services().globals.server_name()) + .expect("@conduit:server_name is valid"); + + if !services().users.exists(&conduit_user)? { + error!( + "The {} server user does not exist, and the database is not new.", + conduit_user + ); + return Err(Error::bad_database( + "Cannot reuse an existing database after changing the server name, please delete the old one first." + )); + } + } + + // If the database has any data, perform data migrations before starting + let latest_database_version = 11; + + if services().users.count()? > 0 { + // MIGRATIONS + if services().globals.database_version()? < 1 { + for (roomserverid, _) in db.roomserverids.iter() { + let mut parts = roomserverid.split(|&b| b == 0xff); + let room_id = parts.next().expect("split always returns one element"); + let servername = match parts.next() { + Some(s) => s, + None => { + error!("Migration: Invalid roomserverid in db."); + continue; + } + }; + let mut serverroomid = servername.to_vec(); + serverroomid.push(0xff); + serverroomid.extend_from_slice(room_id); + + db.serverroomids.insert(&serverroomid, &[])?; + } + + services().globals.bump_database_version(1)?; + + warn!("Migration: 0 -> 1 finished"); + } + + if services().globals.database_version()? < 2 { + // We accidentally inserted hashed versions of "" into the db instead of just "" + for (userid, password) in db.userid_password.iter() { + let password = utils::string_from_bytes(&password); + + let empty_hashed_password = password.map_or(false, |password| { + argon2::verify_encoded(&password, b"").unwrap_or(false) + }); + + if empty_hashed_password { + db.userid_password.insert(&userid, b"")?; + } + } + + services().globals.bump_database_version(2)?; + + warn!("Migration: 1 -> 2 finished"); + } + + if services().globals.database_version()? < 3 { + // Move media to filesystem + for (key, content) in db.mediaid_file.iter() { + if content.is_empty() { + continue; + } + + let path = services().globals.get_media_file(&key); + let mut file = fs::File::create(path)?; + file.write_all(&content)?; + db.mediaid_file.insert(&key, &[])?; + } + + services().globals.bump_database_version(3)?; + + warn!("Migration: 2 -> 3 finished"); + } + + if services().globals.database_version()? < 4 { + // Add federated users to services() as deactivated + for our_user in services().users.iter() { + let our_user = our_user?; + if services().users.is_deactivated(&our_user)? { + continue; + } + for room in services().rooms.state_cache.rooms_joined(&our_user) { + for user in services().rooms.state_cache.room_members(&room?) 
{ + let user = user?; + if user.server_name() != services().globals.server_name() { + println!("Migration: Creating user {}", user); + services().users.create(&user, None)?; + } + } + } + } + + services().globals.bump_database_version(4)?; + + warn!("Migration: 3 -> 4 finished"); + } + + if services().globals.database_version()? < 5 { + // Upgrade user data store + for (roomuserdataid, _) in db.roomuserdataid_accountdata.iter() { + let mut parts = roomuserdataid.split(|&b| b == 0xff); + let room_id = parts.next().unwrap(); + let user_id = parts.next().unwrap(); + let event_type = roomuserdataid.rsplit(|&b| b == 0xff).next().unwrap(); + + let mut key = room_id.to_vec(); + key.push(0xff); + key.extend_from_slice(user_id); + key.push(0xff); + key.extend_from_slice(event_type); + + db.roomusertype_roomuserdataid + .insert(&key, &roomuserdataid)?; + } + + services().globals.bump_database_version(5)?; + + warn!("Migration: 4 -> 5 finished"); + } + + if services().globals.database_version()? < 6 { + // Set room member count + for (roomid, _) in db.roomid_shortstatehash.iter() { + let string = utils::string_from_bytes(&roomid).unwrap(); + let room_id = <&RoomId>::try_from(string.as_str()).unwrap(); + services().rooms.state_cache.update_joined_count(room_id)?; + } + + services().globals.bump_database_version(6)?; + + warn!("Migration: 5 -> 6 finished"); + } + + if services().globals.database_version()? < 7 { + // Upgrade state store + let mut last_roomstates: HashMap = HashMap::new(); + let mut current_sstatehash: Option = None; + let mut current_room = None; + let mut current_state = HashSet::new(); + let mut counter = 0; + + let mut handle_state = + |current_sstatehash: u64, + current_room: &RoomId, + current_state: HashSet<_>, + last_roomstates: &mut HashMap<_, _>| { + counter += 1; + println!("counter: {}", counter); + let last_roomsstatehash = last_roomstates.get(current_room); + + let states_parents = last_roomsstatehash.map_or_else( + || Ok(Vec::new()), + |&last_roomsstatehash| { + services() + .rooms + .state_compressor + .load_shortstatehash_info(last_roomsstatehash) + }, + )?; + + let (statediffnew, statediffremoved) = + if let Some(parent_stateinfo) = states_parents.last() { + let statediffnew = current_state + .difference(&parent_stateinfo.1) + .copied() + .collect::>(); + + let statediffremoved = parent_stateinfo + .1 + .difference(¤t_state) + .copied() + .collect::>(); + + (statediffnew, statediffremoved) + } else { + (current_state, HashSet::new()) + }; + + services().rooms.state_compressor.save_state_from_diff( + current_sstatehash, + statediffnew, + statediffremoved, + 2, // every state change is 2 event changes on average + states_parents, + )?; + + /* + let mut tmp = services().rooms.load_shortstatehash_info(¤t_sstatehash)?; + let state = tmp.pop().unwrap(); + println!( + "{}\t{}{:?}: {:?} + {:?} - {:?}", + current_room, + " ".repeat(tmp.len()), + utils::u64_from_bytes(¤t_sstatehash).unwrap(), + tmp.last().map(|b| utils::u64_from_bytes(&b.0).unwrap()), + state + .2 + .iter() + .map(|b| utils::u64_from_bytes(&b[size_of::()..]).unwrap()) + .collect::>(), + state + .3 + .iter() + .map(|b| utils::u64_from_bytes(&b[size_of::()..]).unwrap()) + .collect::>() + ); + */ + + Ok::<_, Error>(()) + }; + + for (k, seventid) in db._db.open_tree("stateid_shorteventid")?.iter() { + let sstatehash = utils::u64_from_bytes(&k[0..size_of::()]) + .expect("number of bytes is correct"); + let sstatekey = k[size_of::()..].to_vec(); + if Some(sstatehash) != current_sstatehash { + if let 
Some(current_sstatehash) = current_sstatehash { + handle_state( + current_sstatehash, + current_room.as_deref().unwrap(), + current_state, + &mut last_roomstates, + )?; + last_roomstates + .insert(current_room.clone().unwrap(), current_sstatehash); + } + current_state = HashSet::new(); + current_sstatehash = Some(sstatehash); + + let event_id = db.shorteventid_eventid.get(&seventid).unwrap().unwrap(); + let string = utils::string_from_bytes(&event_id).unwrap(); + let event_id = <&EventId>::try_from(string.as_str()).unwrap(); + let pdu = services() + .rooms + .timeline + .get_pdu(event_id) + .unwrap() + .unwrap(); + + if Some(&pdu.room_id) != current_room.as_ref() { + current_room = Some(pdu.room_id.clone()); + } + } + + let mut val = sstatekey; + val.extend_from_slice(&seventid); + current_state.insert(val.try_into().expect("size is correct")); + } + + if let Some(current_sstatehash) = current_sstatehash { + handle_state( + current_sstatehash, + current_room.as_deref().unwrap(), + current_state, + &mut last_roomstates, + )?; + } + + services().globals.bump_database_version(7)?; + + warn!("Migration: 6 -> 7 finished"); + } + + if services().globals.database_version()? < 8 { + // Generate short room ids for all rooms + for (room_id, _) in db.roomid_shortstatehash.iter() { + let shortroomid = services().globals.next_count()?.to_be_bytes(); + db.roomid_shortroomid.insert(&room_id, &shortroomid)?; + info!("Migration: 8"); + } + // Update pduids db layout + let mut batch = db.pduid_pdu.iter().filter_map(|(key, v)| { + if !key.starts_with(b"!") { + return None; + } + let mut parts = key.splitn(2, |&b| b == 0xff); + let room_id = parts.next().unwrap(); + let count = parts.next().unwrap(); + + let short_room_id = db + .roomid_shortroomid + .get(room_id) + .unwrap() + .expect("shortroomid should exist"); + + let mut new_key = short_room_id; + new_key.extend_from_slice(count); + + Some((new_key, v)) + }); + + db.pduid_pdu.insert_batch(&mut batch)?; + + let mut batch2 = db.eventid_pduid.iter().filter_map(|(k, value)| { + if !value.starts_with(b"!") { + return None; + } + let mut parts = value.splitn(2, |&b| b == 0xff); + let room_id = parts.next().unwrap(); + let count = parts.next().unwrap(); + + let short_room_id = db + .roomid_shortroomid + .get(room_id) + .unwrap() + .expect("shortroomid should exist"); + + let mut new_value = short_room_id; + new_value.extend_from_slice(count); + + Some((k, new_value)) + }); + + db.eventid_pduid.insert_batch(&mut batch2)?; + + services().globals.bump_database_version(8)?; + + warn!("Migration: 7 -> 8 finished"); + } + + if services().globals.database_version()? 
< 9 { + // Update tokenids db layout + let mut iter = db + .tokenids + .iter() + .filter_map(|(key, _)| { + if !key.starts_with(b"!") { + return None; + } + let mut parts = key.splitn(4, |&b| b == 0xff); + let room_id = parts.next().unwrap(); + let word = parts.next().unwrap(); + let _pdu_id_room = parts.next().unwrap(); + let pdu_id_count = parts.next().unwrap(); + + let short_room_id = db + .roomid_shortroomid + .get(room_id) + .unwrap() + .expect("shortroomid should exist"); + let mut new_key = short_room_id; + new_key.extend_from_slice(word); + new_key.push(0xff); + new_key.extend_from_slice(pdu_id_count); + println!("old {:?}", key); + println!("new {:?}", new_key); + Some((new_key, Vec::new())) + }) + .peekable(); + + while iter.peek().is_some() { + db.tokenids.insert_batch(&mut iter.by_ref().take(1000))?; + println!("smaller batch done"); + } + + info!("Deleting starts"); + + let batch2: Vec<_> = db + .tokenids + .iter() + .filter_map(|(key, _)| { + if key.starts_with(b"!") { + println!("del {:?}", key); + Some(key) + } else { + None + } + }) + .collect(); + + for key in batch2 { + println!("del"); + db.tokenids.remove(&key)?; + } + + services().globals.bump_database_version(9)?; + + warn!("Migration: 8 -> 9 finished"); + } + + if services().globals.database_version()? < 10 { + // Add other direction for shortstatekeys + for (statekey, shortstatekey) in db.statekey_shortstatekey.iter() { + db.shortstatekey_statekey + .insert(&shortstatekey, &statekey)?; + } + + // Force E2EE device list updates so we can send them over federation + for user_id in services().users.iter().filter_map(|r| r.ok()) { + services().users.mark_device_key_update(&user_id)?; + } + + services().globals.bump_database_version(10)?; + + warn!("Migration: 9 -> 10 finished"); + } + + if services().globals.database_version()? < 11 { + db._db + .open_tree("userdevicesessionid_uiaarequest")? + .clear()?; + services().globals.bump_database_version(11)?; + + warn!("Migration: 10 -> 11 finished"); + } + + assert_eq!(11, latest_database_version); + + info!( + "Loaded {} database with version {}", + services().globals.config.database_backend, + latest_database_version + ); + } else { + services() + .globals + .bump_database_version(latest_database_version)?; + + // Create the admin room and server user on first run + services().admin.create_admin_room().await?; + + warn!( + "Created new {} database with version {}", + services().globals.config.database_backend, + latest_database_version + ); + } + + // This data is probably outdated + db.presenceid_presence.clear()?; + + services().admin.start_handler(); + + // Set emergency access for the conduit user + match set_emergency_access() { + Ok(pwd_set) => { + if pwd_set { + warn!("The Conduit account emergency password is set! Please unset it as soon as you finish admin account recovery!"); + services().admin.send_message(RoomMessageEventContent::text_plain("The Conduit account emergency password is set! 
Please unset it as soon as you finish admin account recovery!")); + } + } + Err(e) => { + error!( + "Could not set the configured emergency password for the conduit user: {}", + e + ) + } + }; + + services().sending.start_handler(); + + Self::start_cleanup_task().await; + + Ok(()) + } + + #[tracing::instrument(skip(self))] + pub fn flush(&self) -> Result<()> { + let start = std::time::Instant::now(); + + let res = self._db.flush(); + + debug!("flush: took {:?}", start.elapsed()); + + res + } + + #[tracing::instrument] + pub async fn start_cleanup_task() { + use tokio::time::interval; + + #[cfg(unix)] + use tokio::signal::unix::{signal, SignalKind}; + use tracing::info; + + use std::time::{Duration, Instant}; + + let timer_interval = + Duration::from_secs(services().globals.config.cleanup_second_interval as u64); + + tokio::spawn(async move { + let mut i = interval(timer_interval); + #[cfg(unix)] + let mut s = signal(SignalKind::hangup()).unwrap(); + + loop { + #[cfg(unix)] + tokio::select! { + _ = i.tick() => { + info!("cleanup: Timer ticked"); + } + _ = s.recv() => { + info!("cleanup: Received SIGHUP"); + } + }; + #[cfg(not(unix))] + { + i.tick().await; + info!("cleanup: Timer ticked") + } + + let start = Instant::now(); + if let Err(e) = services().globals.cleanup() { + error!("cleanup: Errored: {}", e); + } else { + info!("cleanup: Finished in {:?}", start.elapsed()); + } + } + }); + } +} + +/// Sets the emergency password and push rules for the @conduit account in case emergency password is set +fn set_emergency_access() -> Result { + let conduit_user = UserId::parse_with_server_name("conduit", services().globals.server_name()) + .expect("@conduit:server_name is a valid UserId"); + + services().users.set_password( + &conduit_user, + services().globals.emergency_password().as_deref(), + )?; + + let (ruleset, res) = match services().globals.emergency_password() { + Some(_) => (Ruleset::server_default(&conduit_user), Ok(true)), + None => (Ruleset::new(), Ok(false)), + }; + + services().account_data.update( + None, + &conduit_user, + GlobalAccountDataEventType::PushRules.to_string().into(), + &serde_json::to_value(&GlobalAccountDataEvent { + content: PushRulesEventContent { global: ruleset }, + }) + .expect("to json value always works"), + )?; + + res +} diff --git a/src/database/pusher.rs b/src/database/pusher.rs deleted file mode 100644 index f53f137b..00000000 --- a/src/database/pusher.rs +++ /dev/null @@ -1,348 +0,0 @@ -use crate::{Database, Error, PduEvent, Result}; -use bytes::BytesMut; -use ruma::{ - api::{ - client::r0::push::{get_pushers, set_pusher, PusherKind}, - push_gateway::send_event_notification::{ - self, - v1::{Device, Notification, NotificationCounts, NotificationPriority}, - }, - IncomingResponse, OutgoingRequest, SendAccessToken, - }, - events::{ - room::{name::RoomNameEventContent, power_levels::RoomPowerLevelsEventContent}, - AnySyncRoomEvent, EventType, - }, - push::{Action, PushConditionRoomCtx, PushFormat, Ruleset, Tweak}, - serde::Raw, - uint, RoomId, UInt, UserId, -}; -use tracing::{error, info, warn}; - -use std::{convert::TryFrom, fmt::Debug, mem, sync::Arc}; - -use super::abstraction::Tree; - -pub struct PushData { - /// UserId + pushkey -> Pusher - pub(super) senderkey_pusher: Arc, -} - -impl PushData { - #[tracing::instrument(skip(self, sender, pusher))] - pub fn set_pusher(&self, sender: &UserId, pusher: set_pusher::Pusher) -> Result<()> { - let mut key = sender.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(pusher.pushkey.as_bytes()); - - 
// There are 2 kinds of pushers but the spec says: null deletes the pusher. - if pusher.kind.is_none() { - return self - .senderkey_pusher - .remove(&key) - .map(|_| ()) - .map_err(Into::into); - } - - self.senderkey_pusher.insert( - &key, - &serde_json::to_vec(&pusher).expect("Pusher is valid JSON value"), - )?; - - Ok(()) - } - - #[tracing::instrument(skip(self, senderkey))] - pub fn get_pusher(&self, senderkey: &[u8]) -> Result> { - self.senderkey_pusher - .get(senderkey)? - .map(|push| { - serde_json::from_slice(&*push) - .map_err(|_| Error::bad_database("Invalid Pusher in db.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self, sender))] - pub fn get_pushers(&self, sender: &UserId) -> Result> { - let mut prefix = sender.as_bytes().to_vec(); - prefix.push(0xff); - - self.senderkey_pusher - .scan_prefix(prefix) - .map(|(_, push)| { - serde_json::from_slice(&*push) - .map_err(|_| Error::bad_database("Invalid Pusher in db.")) - }) - .collect() - } - - #[tracing::instrument(skip(self, sender))] - pub fn get_pusher_senderkeys<'a>( - &'a self, - sender: &UserId, - ) -> impl Iterator> + 'a { - let mut prefix = sender.as_bytes().to_vec(); - prefix.push(0xff); - - self.senderkey_pusher.scan_prefix(prefix).map(|(k, _)| k) - } -} - -#[tracing::instrument(skip(globals, destination, request))] -pub async fn send_request( - globals: &crate::database::globals::Globals, - destination: &str, - request: T, -) -> Result -where - T: Debug, -{ - let destination = destination.replace("/_matrix/push/v1/notify", ""); - - let http_request = request - .try_into_http_request::(&destination, SendAccessToken::IfRequired("")) - .map_err(|e| { - warn!("Failed to find destination {}: {}", destination, e); - Error::BadServerResponse("Invalid destination") - })? - .map(|body| body.freeze()); - - let reqwest_request = reqwest::Request::try_from(http_request) - .expect("all http requests are valid reqwest requests"); - - // TODO: we could keep this very short and let expo backoff do it's thing... - //*reqwest_request.timeout_mut() = Some(Duration::from_secs(5)); - - let url = reqwest_request.url().clone(); - let response = globals - .reqwest_client()? - .build()? 
- .execute(reqwest_request) - .await; - - match response { - Ok(mut response) => { - // reqwest::Response -> http::Response conversion - let status = response.status(); - let mut http_response_builder = http::Response::builder() - .status(status) - .version(response.version()); - mem::swap( - response.headers_mut(), - http_response_builder - .headers_mut() - .expect("http::response::Builder is usable"), - ); - - let body = response.bytes().await.unwrap_or_else(|e| { - warn!("server error {}", e); - Vec::new().into() - }); // TODO: handle timeout - - if status != 200 { - info!( - "Push gateway returned bad response {} {}\n{}\n{:?}", - destination, - status, - url, - crate::utils::string_from_bytes(&body) - ); - } - - let response = T::IncomingResponse::try_from_http_response( - http_response_builder - .body(body) - .expect("reqwest body is valid http body"), - ); - response.map_err(|_| { - info!( - "Push gateway returned invalid response bytes {}\n{}", - destination, url - ); - Error::BadServerResponse("Push gateway returned bad response.") - }) - } - Err(e) => Err(e.into()), - } -} - -#[tracing::instrument(skip(user, unread, pusher, ruleset, pdu, db))] -pub async fn send_push_notice( - user: &UserId, - unread: UInt, - pusher: &get_pushers::Pusher, - ruleset: Ruleset, - pdu: &PduEvent, - db: &Database, -) -> Result<()> { - let mut notify = None; - let mut tweaks = Vec::new(); - - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &EventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - for action in get_actions( - user, - &ruleset, - &power_levels, - &pdu.to_sync_room_event(), - &pdu.room_id, - db, - )? { - let n = match action { - Action::DontNotify => false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => true, - Action::SetTweak(tweak) => { - tweaks.push(tweak.clone()); - continue; - } - }; - - if notify.is_some() { - return Err(Error::bad_database( - r#"Malformed pushrule contains more than one of these actions: ["dont_notify", "notify", "coalesce"]"#, - )); - } - - notify = Some(n); - } - - if notify == Some(true) { - send_notice(unread, pusher, tweaks, pdu, db).await?; - } - // Else the event triggered no actions - - Ok(()) -} - -#[tracing::instrument(skip(user, ruleset, pdu, db))] -pub fn get_actions<'a>( - user: &UserId, - ruleset: &'a Ruleset, - power_levels: &RoomPowerLevelsEventContent, - pdu: &Raw, - room_id: &RoomId, - db: &Database, -) -> Result<&'a [Action]> { - let ctx = PushConditionRoomCtx { - room_id: room_id.clone(), - member_count: 10_u32.into(), // TODO: get member count efficiently - user_display_name: db - .users - .displayname(user)? - .unwrap_or_else(|| user.localpart().to_owned()), - users_power_levels: power_levels.users.clone(), - default_power_level: power_levels.users_default, - notification_power_levels: power_levels.notifications.clone(), - }; - - Ok(ruleset.get_actions(pdu, &ctx)) -} - -#[tracing::instrument(skip(unread, pusher, tweaks, event, db))] -async fn send_notice( - unread: UInt, - pusher: &get_pushers::Pusher, - tweaks: Vec, - event: &PduEvent, - db: &Database, -) -> Result<()> { - // TODO: email - if pusher.kind == PusherKind::Email { - return Ok(()); - } - - // TODO: - // Two problems with this - // 1. if "event_id_only" is the only format kind it seems we should never add more info - // 2. 
can pusher/devices have conflicting formats - let event_id_only = pusher.data.format == Some(PushFormat::EventIdOnly); - let url = if let Some(url) = &pusher.data.url { - url - } else { - error!("Http Pusher must have URL specified."); - return Ok(()); - }; - - let mut device = Device::new(pusher.app_id.clone(), pusher.pushkey.clone()); - let mut data_minus_url = pusher.data.clone(); - // The url must be stripped off according to spec - data_minus_url.url = None; - device.data = Some(data_minus_url); - - // Tweaks are only added if the format is NOT event_id_only - if !event_id_only { - device.tweaks = tweaks.clone(); - } - - let d = &[device]; - let mut notifi = Notification::new(d); - - notifi.prio = NotificationPriority::Low; - notifi.event_id = Some(&event.event_id); - notifi.room_id = Some(&event.room_id); - // TODO: missed calls - notifi.counts = NotificationCounts::new(unread, uint!(0)); - - if event.kind == EventType::RoomEncrypted - || tweaks - .iter() - .any(|t| matches!(t, Tweak::Highlight(true) | Tweak::Sound(_))) - { - notifi.prio = NotificationPriority::High - } - - if event_id_only { - send_request( - &db.globals, - url, - send_event_notification::v1::Request::new(notifi), - ) - .await?; - } else { - notifi.sender = Some(&event.sender); - notifi.event_type = Some(&event.kind); - let content = serde_json::value::to_raw_value(&event.content).ok(); - notifi.content = content.as_deref(); - - if event.kind == EventType::RoomMember { - notifi.user_is_target = event.state_key.as_deref() == Some(event.sender.as_str()); - } - - let user_name = db.users.displayname(&event.sender)?; - notifi.sender_display_name = user_name.as_deref(); - - let room_name = if let Some(room_name_pdu) = - db.rooms - .room_state_get(&event.room_id, &EventType::RoomName, "")? - { - serde_json::from_str::(room_name_pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid room name event in database."))? 
- .name - } else { - None - }; - - notifi.room_name = room_name.as_deref(); - - send_request( - &db.globals, - url, - send_event_notification::v1::Request::new(notifi), - ) - .await?; - } - - // TODO: email - - Ok(()) -} diff --git a/src/database/rooms.rs b/src/database/rooms.rs deleted file mode 100644 index c5b795bd..00000000 --- a/src/database/rooms.rs +++ /dev/null @@ -1,3451 +0,0 @@ -mod edus; - -pub use edus::RoomEdus; - -use crate::{ - pdu::{EventHash, PduBuilder}, - server_server, utils, Database, Error, PduEvent, Result, -}; -use lru_cache::LruCache; -use regex::Regex; -use ring::digest; -use rocket::http::RawStr; -use ruma::{ - api::{client::error::ErrorKind, federation}, - events::{ - direct::DirectEvent, - ignored_user_list::IgnoredUserListEvent, - push_rules::PushRulesEvent, - room::{ - create::RoomCreateEventContent, - member::{MembershipState, RoomMemberEventContent}, - message::RoomMessageEventContent, - power_levels::RoomPowerLevelsEventContent, - }, - tag::TagEvent, - AnyStrippedStateEvent, AnySyncStateEvent, EventType, - }, - push::{Action, Ruleset, Tweak}, - serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res::{self, RoomVersion, StateMap}, - uint, EventId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, -}; -use serde::Deserialize; -use serde_json::value::to_raw_value; -use std::{ - borrow::Cow, - collections::{BTreeMap, HashMap, HashSet}, - convert::{TryFrom, TryInto}, - mem::size_of, - sync::{Arc, Mutex, RwLock}, - time::Instant, -}; -use tokio::sync::MutexGuard; -use tracing::{error, warn}; - -use super::{abstraction::Tree, admin::AdminCommand, pusher}; - -/// The unique identifier of each state group. -/// -/// This is created when a state group is added to the database by -/// hashing the entire state. -pub type StateHashId = Vec; -pub type CompressedStateEvent = [u8; 2 * size_of::()]; - -pub struct Rooms { - pub edus: RoomEdus, - pub(super) pduid_pdu: Arc, // PduId = ShortRoomId + Count - pub(super) eventid_pduid: Arc, - pub(super) roomid_pduleaves: Arc, - pub(super) alias_roomid: Arc, - pub(super) aliasid_alias: Arc, // AliasId = RoomId + Count - pub(super) publicroomids: Arc, - - pub(super) tokenids: Arc, // TokenId = ShortRoomId + Token + PduIdCount - - /// Participating servers in a room. - pub(super) roomserverids: Arc, // RoomServerId = RoomId + ServerName - pub(super) serverroomids: Arc, // ServerRoomId = ServerName + RoomId - - pub(super) userroomid_joined: Arc, - pub(super) roomuserid_joined: Arc, - pub(super) roomid_joinedcount: Arc, - pub(super) roomid_invitedcount: Arc, - pub(super) roomuseroncejoinedids: Arc, - pub(super) userroomid_invitestate: Arc, // InviteState = Vec> - pub(super) roomuserid_invitecount: Arc, // InviteCount = Count - pub(super) userroomid_leftstate: Arc, - pub(super) roomuserid_leftcount: Arc, - - pub(super) userroomid_notificationcount: Arc, // NotifyCount = u64 - pub(super) userroomid_highlightcount: Arc, // HightlightCount = u64 - - /// Remember the current state hash of a room. - pub(super) roomid_shortstatehash: Arc, - pub(super) roomsynctoken_shortstatehash: Arc, - /// Remember the state hash at events in the past. 
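The CompressedStateEvent alias above is a fixed 16-byte value: the 8-byte big-endian shortstatekey followed by the 8-byte big-endian shorteventid, which is what compress_state_event and parse_compressed_state_event encode and decode further down. A self-contained sketch of that layout (the compress/parse names are illustrative):

    fn compress(shortstatekey: u64, shorteventid: u64) -> [u8; 16] {
        let mut out = [0u8; 16];
        out[..8].copy_from_slice(&shortstatekey.to_be_bytes());
        out[8..].copy_from_slice(&shorteventid.to_be_bytes());
        out
    }

    fn parse(compressed: [u8; 16]) -> (u64, u64) {
        let mut key = [0u8; 8];
        let mut event = [0u8; 8];
        key.copy_from_slice(&compressed[..8]);
        event.copy_from_slice(&compressed[8..]);
        (u64::from_be_bytes(key), u64::from_be_bytes(event))
    }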
- pub(super) shorteventid_shortstatehash: Arc, - /// StateKey = EventType + StateKey, ShortStateKey = Count - pub(super) statekey_shortstatekey: Arc, - pub(super) shortstatekey_statekey: Arc, - - pub(super) roomid_shortroomid: Arc, - - pub(super) shorteventid_eventid: Arc, - pub(super) eventid_shorteventid: Arc, - - pub(super) statehash_shortstatehash: Arc, - pub(super) shortstatehash_statediff: Arc, // StateDiff = parent (or 0) + (shortstatekey+shorteventid++) + 0_u64 + (shortstatekey+shorteventid--) - - pub(super) shorteventid_authchain: Arc, - - /// RoomId + EventId -> outlier PDU. - /// Any pdu that has passed the steps 1-8 in the incoming event /federation/send/txn. - pub(super) eventid_outlierpdu: Arc, - pub(super) softfailedeventids: Arc, - - /// RoomId + EventId -> Parent PDU EventId. - pub(super) referencedevents: Arc, - - pub(super) pdu_cache: Mutex>>, - pub(super) shorteventid_cache: Mutex>>, - pub(super) auth_chain_cache: Mutex, Arc>>>, - pub(super) eventidshort_cache: Mutex>, - pub(super) statekeyshort_cache: Mutex>, - pub(super) shortstatekey_cache: Mutex>, - pub(super) our_real_users_cache: RwLock>>>, - pub(super) appservice_in_room_cache: RwLock>>, - pub(super) stateinfo_cache: Mutex< - LruCache< - u64, - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - >, - >, -} - -impl Rooms { - /// Builds a StateMap by iterating over all keys that start - /// with state_hash, this gives the full state for the given state_hash. - #[tracing::instrument(skip(self))] - pub fn state_full_ids(&self, shortstatehash: u64) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - full_state - .into_iter() - .map(|compressed| self.parse_compressed_state_event(compressed)) - .collect() - } - - #[tracing::instrument(skip(self))] - pub fn state_full( - &self, - shortstatehash: u64, - ) -> Result>> { - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .into_iter() - .map(|compressed| self.parse_compressed_state_event(compressed)) - .filter_map(|r| r.ok()) - .map(|(_, eventid)| self.get_pdu(&eventid)) - .filter_map(|r| r.ok().flatten()) - .map(|pdu| { - Ok::<_, Error>(( - ( - pdu.kind.clone(), - pdu.state_key - .as_ref() - .ok_or_else(|| Error::bad_database("State event has no state key."))? - .clone(), - ), - pdu, - )) - }) - .filter_map(|r| r.ok()) - .collect()) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get_id( - &self, - shortstatehash: u64, - event_type: &EventType, - state_key: &str, - ) -> Result>> { - let shortstatekey = match self.get_shortstatekey(event_type, state_key)? { - Some(s) => s, - None => return Ok(None), - }; - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - Ok(full_state - .into_iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - .and_then(|compressed| { - self.parse_compressed_state_event(compressed) - .ok() - .map(|(_, id)| id) - })) - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn state_get( - &self, - shortstatehash: u64, - event_type: &EventType, - state_key: &str, - ) -> Result>> { - self.state_get_id(shortstatehash, event_type, state_key)? 
- .map_or(Ok(None), |event_id| self.get_pdu(&event_id)) - } - - /// Returns the state hash for this pdu. - #[tracing::instrument(skip(self))] - pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { - self.eventid_shorteventid - .get(event_id.as_bytes())? - .map_or(Ok(None), |shorteventid| { - self.shorteventid_shortstatehash - .get(&shorteventid)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database( - "Invalid shortstatehash bytes in shorteventid_shortstatehash", - ) - }) - }) - .transpose() - }) - } - - /// Returns the last state hash key added to the db for the given room. - #[tracing::instrument(skip(self))] - pub fn current_shortstatehash(&self, room_id: &RoomId) -> Result> { - self.roomid_shortstatehash - .get(room_id.as_bytes())? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomid_shortstatehash") - })?)) - }) - } - - /// This fetches auth events from the current state. - #[tracing::instrument(skip(self))] - pub fn get_auth_events( - &self, - room_id: &RoomId, - kind: &EventType, - sender: &UserId, - state_key: Option<&str>, - content: &serde_json::value::RawValue, - ) -> Result>> { - let shortstatehash = - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - current_shortstatehash - } else { - return Ok(HashMap::new()); - }; - - let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) - .expect("content is a valid JSON object"); - - let mut sauthevents = auth_events - .into_iter() - .filter_map(|(event_type, state_key)| { - self.get_shortstatekey(&event_type, &state_key) - .ok() - .flatten() - .map(|s| (s, (event_type, state_key))) - }) - .collect::>(); - - let full_state = self - .load_shortstatehash_info(shortstatehash)? - .pop() - .expect("there is always one layer") - .1; - - Ok(full_state - .into_iter() - .filter_map(|compressed| self.parse_compressed_state_event(compressed).ok()) - .filter_map(|(shortstatekey, event_id)| { - sauthevents.remove(&shortstatekey).map(|k| (k, event_id)) - }) - .filter_map(|(k, event_id)| self.get_pdu(&event_id).ok().flatten().map(|pdu| (k, pdu))) - .collect()) - } - - /// Generate a new StateHash. - /// - /// A unique hash made from hashing all PDU ids of the state joined with 0xff. - fn calculate_hash(&self, bytes_list: &[&[u8]]) -> StateHashId { - // We only hash the pdu's event ids, not the whole pdu - let bytes = bytes_list.join(&0xff); - let hash = digest::digest(&digest::SHA256, &bytes); - hash.as_ref().into() - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn exists(&self, room_id: &RoomId) -> Result { - let prefix = match self.get_shortroomid(room_id)? { - Some(b) => b.to_be_bytes().to_vec(), - None => return Ok(false), - }; - - // Look for PDUs in that room. - Ok(self - .pduid_pdu - .iter_from(&prefix, false) - .next() - .filter(|(k, _)| k.starts_with(&prefix)) - .is_some()) - } - - /// Checks if a room exists. - #[tracing::instrument(skip(self))] - pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Look for PDUs in that room. 
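The prefix scans in exists and first_pdu_in_room rely on the PduId layout noted earlier (PduId = ShortRoomId + Count): the 8-byte big-endian shortroomid followed by the big-endian global counter value. A sketch of building such an id, assuming an illustrative make_pdu_id helper:

    fn make_pdu_id(shortroomid: u64, count: u64) -> Vec<u8> {
        let mut pdu_id = shortroomid.to_be_bytes().to_vec();
        pdu_id.extend_from_slice(&count.to_be_bytes());
        pdu_id
    }

Scanning pduid_pdu from shortroomid.to_be_bytes() and keeping only keys that start with that prefix, as exists does above, then yields exactly the PDUs of one room.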
- self.pduid_pdu - .iter_from(&prefix, false) - .filter(|(k, _)| k.starts_with(&prefix)) - .map(|(_, pdu)| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid first PDU in db.")) - .map(Arc::new) - }) - .next() - .transpose() - } - - /// Force the creation of a new StateHash and insert it into the db. - /// - /// Whatever `state` is supplied to `force_state` becomes the new current room state snapshot. - #[tracing::instrument(skip(self, new_state_ids_compressed, db))] - pub fn force_state( - &self, - room_id: &RoomId, - new_state_ids_compressed: HashSet, - db: &Database, - ) -> Result<()> { - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &new_state_ids_compressed - .iter() - .map(|bytes| &bytes[..]) - .collect::>(), - ); - - let (new_shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, &db.globals)?; - - if Some(new_shortstatehash) == previous_shortstatehash { - return Ok(()); - } - - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() - { - let statediffnew: HashSet<_> = new_state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&new_state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (new_state_ids_compressed, HashSet::new()) - }; - - if !already_existed { - self.save_state_from_diff( - new_shortstatehash, - statediffnew.clone(), - statediffremoved, - 2, // every state change is 2 event changes on average - states_parents, - )?; - }; - - for event_id in statediffnew.into_iter().filter_map(|new| { - self.parse_compressed_state_event(new) - .ok() - .map(|(_, id)| id) - }) { - let pdu = match self.get_pdu_json(&event_id)? { - Some(pdu) => pdu, - None => continue, - }; - - if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { - continue; - } - - let pdu: PduEvent = match serde_json::from_str( - &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), - ) { - Ok(pdu) => pdu, - Err(_) => continue, - }; - - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - let membership = match serde_json::from_str::(pdu.content.get()) { - Ok(e) => e.membership, - Err(_) => continue, - }; - - let state_key = match pdu.state_key { - Some(k) => k, - None => continue, - }; - - let user_id = match UserId::try_from(state_key) { - Ok(id) => id, - Err(_) => continue, - }; - - self.update_membership(room_id, &user_id, membership, &pdu.sender, None, db, false)?; - } - - self.update_joined_count(room_id, db)?; - - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &new_shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. - #[tracing::instrument(skip(self))] - pub fn load_shortstatehash_info( - &self, - shortstatehash: u64, - ) -> Result< - Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - > { - if let Some(r) = self - .stateinfo_cache - .lock() - .unwrap() - .get_mut(&shortstatehash) - { - return Ok(r.clone()); - } - - let value = self - .shortstatehash_statediff - .get(&shortstatehash.to_be_bytes())? 
- .ok_or_else(|| Error::bad_database("State hash does not exist"))?; - let parent = - utils::u64_from_bytes(&value[0..size_of::()]).expect("bytes have right length"); - - let mut add_mode = true; - let mut added = HashSet::new(); - let mut removed = HashSet::new(); - - let mut i = size_of::(); - while let Some(v) = value.get(i..i + 2 * size_of::()) { - if add_mode && v.starts_with(&0_u64.to_be_bytes()) { - add_mode = false; - i += size_of::(); - continue; - } - if add_mode { - added.insert(v.try_into().expect("we checked the size above")); - } else { - removed.insert(v.try_into().expect("we checked the size above")); - } - i += 2 * size_of::(); - } - - if parent != 0_u64 { - let mut response = self.load_shortstatehash_info(parent)?; - let mut state = response.last().unwrap().1.clone(); - state.extend(added.iter().copied()); - for r in &removed { - state.remove(r); - } - - response.push((shortstatehash, state, added, removed)); - - Ok(response) - } else { - let response = vec![(shortstatehash, added.clone(), added, removed)]; - self.stateinfo_cache - .lock() - .unwrap() - .insert(shortstatehash, response.clone()); - Ok(response) - } - } - - #[tracing::instrument(skip(self, globals))] - pub fn compress_state_event( - &self, - shortstatekey: u64, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - let mut v = shortstatekey.to_be_bytes().to_vec(); - v.extend_from_slice( - &self - .get_or_create_shorteventid(event_id, globals)? - .to_be_bytes(), - ); - Ok(v.try_into().expect("we checked the size above")) - } - - /// Returns shortstatekey, event id - #[tracing::instrument(skip(self, compressed_event))] - pub fn parse_compressed_state_event( - &self, - compressed_event: CompressedStateEvent, - ) -> Result<(u64, Arc)> { - Ok(( - utils::u64_from_bytes(&compressed_event[0..size_of::()]) - .expect("bytes have right length"), - self.get_eventid_from_short( - utils::u64_from_bytes(&compressed_event[size_of::()..]) - .expect("bytes have right length"), - )?, - )) - } - - /// Creates a new shortstatehash that often is just a diff to an already existing - /// shortstatehash and therefore very efficient. - /// - /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer - /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 - /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's - /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. - /// - /// * `shortstatehash` - Shortstatehash of this state - /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid - /// * `statediffremoved` - Removed from base. 
Each vec is shortstatekey+shorteventid - /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer - /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer - #[tracing::instrument(skip( - self, - statediffnew, - statediffremoved, - diff_to_sibling, - parent_states - ))] - pub fn save_state_from_diff( - &self, - shortstatehash: u64, - statediffnew: HashSet, - statediffremoved: HashSet, - diff_to_sibling: usize, - mut parent_states: Vec<( - u64, // sstatehash - HashSet, // full state - HashSet, // added - HashSet, // removed - )>, - ) -> Result<()> { - let diffsum = statediffnew.len() + statediffremoved.len(); - - if parent_states.len() > 3 { - // Number of layers - // To many layers, we have to go deeper - let parent = parent_states.pop().unwrap(); - - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - - return Ok(()); - } - - if parent_states.is_empty() { - // There is no parent layer, create a new state - let mut value = 0_u64.to_be_bytes().to_vec(); // 0 means no parent - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - warn!("Tried to create new state with removals"); - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - - return Ok(()); - }; - - // Else we have two options. - // 1. We add the current diff on top of the parent layer. - // 2. We replace a layer above - - let parent = parent_states.pop().unwrap(); - let parent_diff = parent.2.len() + parent.3.len(); - - if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { - // Diff too big, we replace above layer(s) - let mut parent_new = parent.2; - let mut parent_removed = parent.3; - - for removed in statediffremoved { - if !parent_new.remove(&removed) { - // It was not added in the parent and we removed it - parent_removed.insert(removed); - } - // Else it was added in the parent and we removed it again. We can forget this change - } - - for new in statediffnew { - if !parent_removed.remove(&new) { - // It was not touched in the parent and we added it - parent_new.insert(new); - } - // Else it was removed in the parent and we added it again. 
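For reference, a minimal standalone sketch of the statediff value layout that the removed `save_state_from_diff` writes and `load_shortstatehash_info` parses: the parent shortstatehash as 8 big-endian bytes, then each added compressed state event (16 bytes: shortstatekey ++ shorteventid, both big-endian), then an 8-byte zero separator, then each removed compressed event. The helper names and free-standing functions below are hypothetical; the real code operates directly on the `shortstatehash_statediff` tree.

```rust
type CompressedStateEvent = [u8; 16]; // shortstatekey (8 bytes BE) ++ shorteventid (8 bytes BE)

fn encode_statediff(
    parent: u64,
    added: &[CompressedStateEvent],
    removed: &[CompressedStateEvent],
) -> Vec<u8> {
    let mut value = parent.to_be_bytes().to_vec(); // 0 means "no parent layer"
    for new in added {
        value.extend_from_slice(new);
    }
    if !removed.is_empty() {
        // Separator between the added and removed sections; shortstatekeys are assumed
        // here to come from a counter that starts above zero, so no real entry begins
        // with eight zero bytes.
        value.extend_from_slice(&0_u64.to_be_bytes());
        for r in removed {
            value.extend_from_slice(r);
        }
    }
    value
}

fn decode_statediff(value: &[u8]) -> (u64, Vec<CompressedStateEvent>, Vec<CompressedStateEvent>) {
    let parent = u64::from_be_bytes(value[0..8].try_into().expect("value starts with parent hash"));
    let (mut added, mut removed) = (Vec::new(), Vec::new());
    let mut add_mode = true;
    let mut i = 8;
    while let Some(v) = value.get(i..i + 16) {
        if add_mode && v.starts_with(&0_u64.to_be_bytes()) {
            add_mode = false;
            i += 8; // skip the separator
            continue;
        }
        let entry: CompressedStateEvent = v.try_into().expect("16-byte entry");
        if add_mode { added.push(entry) } else { removed.push(entry) }
        i += 16;
    }
    (parent, added, removed)
}
```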
We can forget this change - } - - self.save_state_from_diff( - shortstatehash, - parent_new, - parent_removed, - diffsum, - parent_states, - )?; - } else { - // Diff small enough, we add diff as layer on top of parent - let mut value = parent.0.to_be_bytes().to_vec(); - for new in &statediffnew { - value.extend_from_slice(&new[..]); - } - - if !statediffremoved.is_empty() { - value.extend_from_slice(&0_u64.to_be_bytes()); - for removed in &statediffremoved { - value.extend_from_slice(&removed[..]); - } - } - - self.shortstatehash_statediff - .insert(&shortstatehash.to_be_bytes(), &value)?; - } - - Ok(()) - } - - /// Returns (shortstatehash, already_existed) - #[tracing::instrument(skip(self, globals))] - fn get_or_create_shortstatehash( - &self, - state_hash: &StateHashId, - globals: &super::globals::Globals, - ) -> Result<(u64, bool)> { - Ok(match self.statehash_shortstatehash.get(state_hash)? { - Some(shortstatehash) => ( - utils::u64_from_bytes(&shortstatehash) - .map_err(|_| Error::bad_database("Invalid shortstatehash in db."))?, - true, - ), - None => { - let shortstatehash = globals.next_count()?; - self.statehash_shortstatehash - .insert(state_hash, &shortstatehash.to_be_bytes())?; - (shortstatehash, false) - } - }) - } - - #[tracing::instrument(skip(self, globals))] - pub fn get_or_create_shorteventid( - &self, - event_id: &EventId, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self.eventidshort_cache.lock().unwrap().get_mut(event_id) { - return Ok(*short); - } - - let short = match self.eventid_shorteventid.get(event_id.as_bytes())? { - Some(shorteventid) => utils::u64_from_bytes(&shorteventid) - .map_err(|_| Error::bad_database("Invalid shorteventid in db."))?, - None => { - let shorteventid = globals.next_count()?; - self.eventid_shorteventid - .insert(event_id.as_bytes(), &shorteventid.to_be_bytes())?; - self.shorteventid_eventid - .insert(&shorteventid.to_be_bytes(), event_id.as_bytes())?; - shorteventid - } - }; - - self.eventidshort_cache - .lock() - .unwrap() - .insert(event_id.clone(), short); - - Ok(short) - } - - #[tracing::instrument(skip(self))] - pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { - self.roomid_shortroomid - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid shortroomid in db.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn get_shortstatekey( - &self, - event_type: &EventType, - state_key: &str, - ) -> Result> { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(Some(*short)); - } - - let mut statekey = event_type.as_ref().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = self - .statekey_shortstatekey - .get(&statekey)? - .map(|shortstatekey| { - utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db.")) - }) - .transpose()?; - - if let Some(s) = short { - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), s); - } - - Ok(short) - } - - #[tracing::instrument(skip(self, globals))] - pub fn get_or_create_shortroomid( - &self, - room_id: &RoomId, - globals: &super::globals::Globals, - ) -> Result { - Ok(match self.roomid_shortroomid.get(room_id.as_bytes())? 
{ - Some(short) => utils::u64_from_bytes(&short) - .map_err(|_| Error::bad_database("Invalid shortroomid in db."))?, - None => { - let short = globals.next_count()?; - self.roomid_shortroomid - .insert(room_id.as_bytes(), &short.to_be_bytes())?; - short - } - }) - } - - #[tracing::instrument(skip(self, globals))] - pub fn get_or_create_shortstatekey( - &self, - event_type: &EventType, - state_key: &str, - globals: &super::globals::Globals, - ) -> Result { - if let Some(short) = self - .statekeyshort_cache - .lock() - .unwrap() - .get_mut(&(event_type.clone(), state_key.to_owned())) - { - return Ok(*short); - } - - let mut statekey = event_type.as_ref().as_bytes().to_vec(); - statekey.push(0xff); - statekey.extend_from_slice(state_key.as_bytes()); - - let short = match self.statekey_shortstatekey.get(&statekey)? { - Some(shortstatekey) => utils::u64_from_bytes(&shortstatekey) - .map_err(|_| Error::bad_database("Invalid shortstatekey in db."))?, - None => { - let shortstatekey = globals.next_count()?; - self.statekey_shortstatekey - .insert(&statekey, &shortstatekey.to_be_bytes())?; - self.shortstatekey_statekey - .insert(&shortstatekey.to_be_bytes(), &statekey)?; - shortstatekey - } - }; - - self.statekeyshort_cache - .lock() - .unwrap() - .insert((event_type.clone(), state_key.to_owned()), short); - - Ok(short) - } - - #[tracing::instrument(skip(self))] - pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { - if let Some(id) = self - .shorteventid_cache - .lock() - .unwrap() - .get_mut(&shorteventid) - { - return Ok(Arc::clone(id)); - } - - let bytes = self - .shorteventid_eventid - .get(&shorteventid.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shorteventid does not exist"))?; - - let event_id = Arc::new( - EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in shorteventid_eventid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in shorteventid_eventid is invalid."))?, - ); - - self.shorteventid_cache - .lock() - .unwrap() - .insert(shorteventid, Arc::clone(&event_id)); - - Ok(event_id) - } - - #[tracing::instrument(skip(self))] - pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(EventType, String)> { - if let Some(id) = self - .shortstatekey_cache - .lock() - .unwrap() - .get_mut(&shortstatekey) - { - return Ok(id.clone()); - } - - let bytes = self - .shortstatekey_statekey - .get(&shortstatekey.to_be_bytes())? - .ok_or_else(|| Error::bad_database("Shortstatekey does not exist"))?; - - let mut parts = bytes.splitn(2, |&b| b == 0xff); - let eventtype_bytes = parts.next().expect("split always returns one entry"); - let statekey_bytes = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid statekey in shortstatekey_statekey."))?; - - let event_type = - EventType::try_from(utils::string_from_bytes(eventtype_bytes).map_err(|_| { - Error::bad_database("Event type in shortstatekey_statekey is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("Event type in shortstatekey_statekey is invalid."))?; - - let state_key = utils::string_from_bytes(statekey_bytes).map_err(|_| { - Error::bad_database("Statekey in shortstatekey_statekey is invalid unicode.") - })?; - - let result = (event_type, state_key); - - self.shortstatekey_cache - .lock() - .unwrap() - .insert(shortstatekey, result.clone()); - - Ok(result) - } - - /// Returns the full room state. 
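The removed `get_or_create_short*` methods all follow one interning pattern: a long identifier (event ID, room ID, or `event_type ++ 0xff ++ state_key`) is mapped to a small `u64` taken from `globals.next_count()`, with one tree per direction plus an in-memory cache. A rough in-memory stand-in, with hypothetical names and `HashMap`s in place of the database trees and LRU caches:

```rust
use std::collections::HashMap;

struct Interner {
    forward: HashMap<Vec<u8>, u64>,  // e.g. eventid_shorteventid
    backward: HashMap<u64, Vec<u8>>, // e.g. shorteventid_eventid
    next: u64,                       // stand-in for globals.next_count()
}

impl Interner {
    fn new() -> Self {
        Self { forward: HashMap::new(), backward: HashMap::new(), next: 1 }
    }

    /// Return the existing short id for `key`, or allocate the next one.
    fn get_or_create(&mut self, key: &[u8]) -> u64 {
        if let Some(&short) = self.forward.get(key) {
            return short;
        }
        let short = self.next;
        self.next += 1;
        self.forward.insert(key.to_vec(), short);
        self.backward.insert(short, key.to_vec());
        short
    }

    /// Reverse lookup, as used by get_eventid_from_short / get_statekey_from_short.
    fn resolve(&self, short: u64) -> Option<&[u8]> {
        self.backward.get(&short).map(|v| v.as_slice())
    }
}
```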
- #[tracing::instrument(skip(self))] - pub fn room_state_full( - &self, - room_id: &RoomId, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_full(current_shortstatehash) - } else { - Ok(HashMap::new()) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get_id( - &self, - room_id: &RoomId, - event_type: &EventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get_id(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). - #[tracing::instrument(skip(self))] - pub fn room_state_get( - &self, - room_id: &RoomId, - event_type: &EventType, - state_key: &str, - ) -> Result>> { - if let Some(current_shortstatehash) = self.current_shortstatehash(room_id)? { - self.state_get(current_shortstatehash, event_type, state_key) - } else { - Ok(None) - } - } - - /// Returns the `count` of this pdu's id. - #[tracing::instrument(skip(self))] - pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { - utils::u64_from_bytes(&pdu_id[pdu_id.len() - size_of::()..]) - .map_err(|_| Error::bad_database("PDU has invalid count bytes.")) - } - - /// Returns the `count` of this pdu's id. - #[tracing::instrument(skip(self))] - pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pdu_id| self.pdu_count(&pdu_id)) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.pduid_pdu - .iter_from(&last_possible_key, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .next() - .map(|b| self.pdu_count(&b.0)) - .transpose() - .map(|op| op.unwrap_or_default()) - } - - /// Returns the json of a pdu. - #[tracing::instrument(skip(self))] - pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - #[tracing::instrument(skip(self))] - pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the json of a pdu. - #[tracing::instrument(skip(self))] - pub fn get_non_outlier_pdu_json( - &self, - event_id: &EventId, - ) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu's id. 
- #[tracing::instrument(skip(self))] - pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - self.eventid_pduid.get(event_id.as_bytes()) - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - #[tracing::instrument(skip(self))] - pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { - self.eventid_pduid - .get(event_id.as_bytes())? - .map(|pduid| { - self.pduid_pdu - .get(&pduid)? - .ok_or_else(|| Error::bad_database("Invalid pduid in eventid_pduid.")) - }) - .transpose()? - .map(|pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - .transpose() - } - - /// Returns the pdu. - /// - /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. - #[tracing::instrument(skip(self))] - pub fn get_pdu(&self, event_id: &EventId) -> Result>> { - if let Some(p) = self.pdu_cache.lock().unwrap().get_mut(event_id) { - return Ok(Some(Arc::clone(p))); - } - - if let Some(pdu) = self - .eventid_pduid - .get(event_id.as_bytes())? - .map_or_else( - || self.eventid_outlierpdu.get(event_id.as_bytes()), - |pduid| { - Ok(Some(self.pduid_pdu.get(&pduid)?.ok_or_else(|| { - Error::bad_database("Invalid pduid in eventid_pduid.") - })?)) - }, - )? - .map(|pdu| { - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db.")) - .map(Arc::new) - }) - .transpose()? - { - self.pdu_cache - .lock() - .unwrap() - .insert(event_id.clone(), Arc::clone(&pdu)); - Ok(Some(pdu)) - } else { - Ok(None) - } - } - - /// Returns the pdu. - /// - /// This does __NOT__ check the outliers `Tree`. - #[tracing::instrument(skip(self))] - pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Returns the pdu as a `BTreeMap`. - #[tracing::instrument(skip(self))] - pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { - self.pduid_pdu.get(pdu_id)?.map_or(Ok(None), |pdu| { - Ok(Some( - serde_json::from_slice(&pdu) - .map_err(|_| Error::bad_database("Invalid PDU in db."))?, - )) - }) - } - - /// Removes a pdu and creates a new one with the same id. - #[tracing::instrument(skip(self))] - fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { - if self.pduid_pdu.get(pdu_id)?.is_some() { - self.pduid_pdu.insert( - pdu_id, - &serde_json::to_vec(pdu).expect("PduEvent::to_vec always works"), - )?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "PDU does not exist.", - )) - } - } - - /// Returns the leaf pdus of a room. - #[tracing::instrument(skip(self))] - pub fn get_pdu_leaves(&self, room_id: &RoomId) -> Result> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomid_pduleaves - .scan_prefix(prefix) - .map(|(_, bytes)| { - EventId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("EventID in roomid_pduleaves is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("EventId in roomid_pduleaves is invalid.")) - }) - .collect() - } - - #[tracing::instrument(skip(self, room_id, event_ids))] - pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> { - for prev in event_ids { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(prev.as_bytes()); - self.referencedevents.insert(&key, &[])?; - } - - Ok(()) - } - - /// Replace the leaves of a room. 
- /// - /// The provided `event_ids` become the new leaves, this allows a room to have multiple - /// `prev_events`. - #[tracing::instrument(skip(self))] - pub fn replace_pdu_leaves(&self, room_id: &RoomId, event_ids: &[EventId]) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - for (key, _) in self.roomid_pduleaves.scan_prefix(prefix.clone()) { - self.roomid_pduleaves.remove(&key)?; - } - - for event_id in event_ids { - let mut key = prefix.to_owned(); - key.extend_from_slice(event_id.as_bytes()); - self.roomid_pduleaves.insert(&key, event_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.extend_from_slice(event_id.as_bytes()); - Ok(self.referencedevents.get(&key)?.is_some()) - } - - /// Returns the pdu from the outlier tree. - #[tracing::instrument(skip(self))] - pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { - self.eventid_outlierpdu - .get(event_id.as_bytes())? - .map_or(Ok(None), |pdu| { - serde_json::from_slice(&pdu).map_err(|_| Error::bad_database("Invalid PDU in db.")) - }) - } - - /// Append the PDU as an outlier. - /// - /// Any event given to this will be processed (state-res) on another thread. - #[tracing::instrument(skip(self, pdu))] - pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { - self.eventid_outlierpdu.insert( - event_id.as_bytes(), - &serde_json::to_vec(&pdu).expect("CanonicalJsonObject is valid"), - ) - } - - #[tracing::instrument(skip(self))] - pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { - self.softfailedeventids.insert(event_id.as_bytes(), &[]) - } - - #[tracing::instrument(skip(self))] - pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { - self.softfailedeventids - .get(event_id.as_bytes()) - .map(|o| o.is_some()) - } - - /// Creates a new persisted data unit and adds it to a room. - /// - /// By this point the incoming event should be fully authenticated, no auth happens - /// in `append_pdu`. - /// - /// Returns pdu id - #[tracing::instrument(skip(self, pdu, pdu_json, leaves, db))] - pub fn append_pdu( - &self, - pdu: &PduEvent, - mut pdu_json: CanonicalJsonObject, - leaves: &[EventId], - db: &Database, - ) -> Result> { - let shortroomid = self.get_shortroomid(&pdu.room_id)?.expect("room exists"); - - // Make unsigned fields correct. This is not properly documented in the spec, but state - // events need to have previous content in the unsigned field, so clients can easily - // interpret things like membership changes - if let Some(state_key) = &pdu.state_key { - if let CanonicalJsonValue::Object(unsigned) = pdu_json - .entry("unsigned".to_owned()) - .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) - { - if let Some(shortstatehash) = self.pdu_shortstatehash(&pdu.event_id).unwrap() { - if let Some(prev_state) = self - .state_get(shortstatehash, &pdu.kind, state_key) - .unwrap() - { - unsigned.insert( - "prev_content".to_owned(), - CanonicalJsonValue::Object( - utils::to_canonical_object(prev_state.content.clone()) - .expect("event is valid, we just created it"), - ), - ); - } - } - } else { - error!("Invalid unsigned type in pdu."); - } - } - - // We must keep track of all events that have been referenced. 
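As shown above, `append_pdu` copies the content of the state entry a new state event replaces into the event's `unsigned.prev_content`, since clients rely on it to render things like membership changes. A minimal sketch of that convention with plain `serde_json` (the function name is made up for illustration; the removed code works on `CanonicalJsonObject` rather than `serde_json::Value`):

```rust
use serde_json::{json, Value};

fn add_prev_content(pdu_json: &mut Value, prev_state_content: Value) {
    let unsigned = pdu_json
        .as_object_mut()
        .expect("a PDU is a JSON object")
        .entry("unsigned")
        .or_insert_with(|| json!({}));

    if let Value::Object(map) = unsigned {
        map.insert("prev_content".to_owned(), prev_state_content);
    }
}
```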
- self.mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - self.replace_pdu_leaves(&pdu.room_id, leaves)?; - - let mutex_insert = Arc::clone( - db.globals - .roomid_mutex_insert - .write() - .unwrap() - .entry(pdu.room_id.clone()) - .or_default(), - ); - let insert_lock = mutex_insert.lock().unwrap(); - - let count1 = db.globals.next_count()?; - // Mark as read first so the sending client doesn't get a notification even if appending - // fails - self.edus - .private_read_set(&pdu.room_id, &pdu.sender, count1, &db.globals)?; - self.reset_notification_counts(&pdu.sender, &pdu.room_id)?; - - let count2 = db.globals.next_count()?; - let mut pdu_id = shortroomid.to_be_bytes().to_vec(); - pdu_id.extend_from_slice(&count2.to_be_bytes()); - - // There's a brief moment of time here where the count is updated but the pdu does not - // exist. This could theoretically lead to dropped pdus, but it's extremely rare - // - // Update: We fixed this using insert_lock - - self.pduid_pdu.insert( - &pdu_id, - &serde_json::to_vec(&pdu_json).expect("CanonicalJsonObject is always a valid"), - )?; - - self.eventid_pduid - .insert(pdu.event_id.as_bytes(), &pdu_id)?; - self.eventid_outlierpdu.remove(pdu.event_id.as_bytes())?; - - drop(insert_lock); - - // See if the event matches any known pushers - let power_levels: RoomPowerLevelsEventContent = db - .rooms - .room_state_get(&pdu.room_id, &EventType::RoomPowerLevels, "")? - .map(|ev| { - serde_json::from_str(ev.content.get()) - .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) - }) - .transpose()? - .unwrap_or_default(); - - let sync_pdu = pdu.to_sync_room_event(); - - let mut notifies = Vec::new(); - let mut highlights = Vec::new(); - - for user in self.get_our_real_users(&pdu.room_id, db)?.iter() { - // Don't notify the user of their own events - if user == &pdu.sender { - continue; - } - - let rules_for_user = db - .account_data - .get(None, user, EventType::PushRules)? - .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| Ruleset::server_default(user)); - - let mut highlight = false; - let mut notify = false; - - for action in pusher::get_actions( - user, - &rules_for_user, - &power_levels, - &sync_pdu, - &pdu.room_id, - db, - )? 
{ - match action { - Action::DontNotify => notify = false, - // TODO: Implement proper support for coalesce - Action::Notify | Action::Coalesce => notify = true, - Action::SetTweak(Tweak::Highlight(true)) => { - highlight = true; - } - _ => {} - }; - } - - let mut userroom_id = user.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(pdu.room_id.as_bytes()); - - if notify { - notifies.push(userroom_id.clone()); - } - - if highlight { - highlights.push(userroom_id); - } - - for senderkey in db.pusher.get_pusher_senderkeys(user) { - db.sending.send_push_pdu(&*pdu_id, senderkey)?; - } - } - - self.userroomid_notificationcount - .increment_batch(&mut notifies.into_iter())?; - self.userroomid_highlightcount - .increment_batch(&mut highlights.into_iter())?; - - match pdu.kind { - EventType::RoomRedaction => { - if let Some(redact_id) = &pdu.redacts { - self.redact_pdu(redact_id, pdu)?; - } - } - EventType::RoomMember => { - if let Some(state_key) = &pdu.state_key { - #[derive(Deserialize)] - struct ExtractMembership { - membership: MembershipState, - } - - // if the state_key fails - let target_user_id = UserId::try_from(state_key.clone()) - .expect("This state_key was previously validated"); - - let content = serde_json::from_str::(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - let invite_state = match content.membership { - MembershipState::Invite => { - let state = self.calculate_invite_state(pdu)?; - Some(state) - } - _ => None, - }; - - // Update our membership info, we do this here incase a user is invited - // and immediately leaves we need the DB to record the invite event for auth - self.update_membership( - &pdu.room_id, - &target_user_id, - content.membership, - &pdu.sender, - invite_state, - db, - true, - )?; - } - } - EventType::RoomMessage => { - #[derive(Deserialize)] - struct ExtractBody<'a> { - #[serde(borrow)] - body: Option>, - } - - let content = serde_json::from_str::>(pdu.content.get()) - .map_err(|_| Error::bad_database("Invalid content in pdu."))?; - - if let Some(body) = content.body { - let mut batch = body - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .filter(|word| word.len() <= 50) - .map(str::to_lowercase) - .map(|word| { - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(word.as_bytes()); - key.push(0xff); - key.extend_from_slice(&pdu_id); - (key, Vec::new()) - }); - - self.tokenids.insert_batch(&mut batch)?; - - if body.starts_with(&format!("@conduit:{}: ", db.globals.server_name())) - && self - .id_from_alias( - &format!("#admins:{}", db.globals.server_name()) - .try_into() - .expect("#admins:server_name is a valid room alias"), - )? 
- .as_ref() - == Some(&pdu.room_id) - { - let mut lines = body.lines(); - let command_line = lines.next().expect("each string has at least one line"); - let body: Vec<_> = lines.collect(); - - let mut parts = command_line.split_whitespace().skip(1); - if let Some(command) = parts.next() { - let args: Vec<_> = parts.collect(); - - match command { - "register_appservice" => { - if body.len() > 2 - && body[0].trim() == "```" - && body.last().unwrap().trim() == "```" - { - let appservice_config = body[1..body.len() - 1].join("\n"); - let parsed_config = serde_yaml::from_str::( - &appservice_config, - ); - match parsed_config { - Ok(yaml) => { - db.admin - .send(AdminCommand::RegisterAppservice(yaml)); - } - Err(e) => { - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain(format!( - "Could not parse appservice config: {}", - e - )), - )); - } - } - } else { - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain( - "Expected code block in command body.", - ), - )); - } - } - "list_appservices" => { - db.admin.send(AdminCommand::ListAppservices); - } - "get_auth_chain" => { - if args.len() == 1 { - if let Ok(event_id) = EventId::try_from(args[0]) { - if let Some(event) = db.rooms.get_pdu_json(&event_id)? { - let room_id_str = event - .get("room_id") - .and_then(|val| val.as_str()) - .ok_or_else(|| { - Error::bad_database( - "Invalid event in database", - ) - })?; - - let room_id = RoomId::try_from(room_id_str) - .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - let start = Instant::now(); - let count = server_server::get_auth_chain( - &room_id, - vec![Arc::new(event_id)], - db, - )? - .count(); - let elapsed = start.elapsed(); - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain(format!( - "Loaded auth chain with length {} in {:?}", - count, elapsed - )), - )); - } - } - } - } - "parse_pdu" => { - if body.len() > 2 - && body[0].trim() == "```" - && body.last().unwrap().trim() == "```" - { - let string = body[1..body.len() - 1].join("\n"); - match serde_json::from_str(&string) { - Ok(value) => { - let event_id = EventId::try_from(&*format!( - "${}", - // Anything higher than version3 behaves the same - ruma::signatures::reference_hash( - &value, - &RoomVersionId::Version6 - ) - .expect("ruma can calculate reference hashes") - )) - .expect( - "ruma's reference hashes are valid event ids", - ); - - match serde_json::from_value::( - serde_json::to_value(value) - .expect("value is json"), - ) { - Ok(pdu) => { - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain( - format!( - "EventId: {:?}\n{:#?}", - event_id, pdu - ), - ), - )); - } - Err(e) => { - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain( - format!("EventId: {:?}\nCould not parse event: {}", event_id, e), - ), - )); - } - } - } - Err(e) => { - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain(format!( - "Invalid json in command body: {}", - e - )), - )); - } - } - } else { - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain( - "Expected code block in command body.", - ), - )); - } - } - "get_pdu" => { - if args.len() == 1 { - if let Ok(event_id) = EventId::try_from(args[0]) { - let mut outlier = false; - let mut pdu_json = - db.rooms.get_non_outlier_pdu_json(&event_id)?; - if pdu_json.is_none() { - outlier = true; - pdu_json = db.rooms.get_pdu_json(&event_id)?; - } - match pdu_json { - Some(json) => { - let 
json_text = - serde_json::to_string_pretty(&json) - .expect("canonical json is valid json"); - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_html( - format!("{}\n```json\n{}\n```", - if outlier { - "PDU is outlier" - } else { "PDU was accepted"}, json_text), - format!("<p>{}</p>\n<pre><code class=\"language-json\">{}\n</code></pre>
\n", - if outlier { - "PDU is outlier" - } else { "PDU was accepted"}, RawStr::new(&json_text).html_escape()) - ), - )); - } - None => { - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain( - "PDU not found.", - ), - )); - } - } - } else { - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain( - "Event ID could not be parsed.", - ), - )); - } - } else { - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain( - "Usage: get_pdu ", - ), - )); - } - } - _ => { - db.admin.send(AdminCommand::SendMessage( - RoomMessageEventContent::text_plain(format!( - "Unrecognized command: {}", - command - )), - )); - } - } - } - } - } - } - _ => {} - } - - Ok(pdu_id) - } - - #[tracing::instrument(skip(self))] - pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - self.userroomid_highlightcount - .insert(&userroom_id, &0_u64.to_be_bytes())?; - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_notificationcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid notification count in db.")) - }) - .unwrap_or(Ok(0)) - } - - #[tracing::instrument(skip(self))] - pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - self.userroomid_highlightcount - .get(&userroom_id)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid highlight count in db.")) - }) - .unwrap_or(Ok(0)) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. 
- #[tracing::instrument(skip(self, state_ids_compressed, globals))] - pub fn set_event_state( - &self, - event_id: &EventId, - room_id: &RoomId, - state_ids_compressed: HashSet, - globals: &super::globals::Globals, - ) -> Result<()> { - let shorteventid = self.get_or_create_shorteventid(event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(room_id)?; - - let state_hash = self.calculate_hash( - &state_ids_compressed - .iter() - .map(|s| &s[..]) - .collect::>(), - ); - - let (shortstatehash, already_existed) = - self.get_or_create_shortstatehash(&state_hash, globals)?; - - if !already_existed { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let (statediffnew, statediffremoved) = - if let Some(parent_stateinfo) = states_parents.last() { - let statediffnew: HashSet<_> = state_ids_compressed - .difference(&parent_stateinfo.1) - .copied() - .collect(); - - let statediffremoved: HashSet<_> = parent_stateinfo - .1 - .difference(&state_ids_compressed) - .copied() - .collect(); - - (statediffnew, statediffremoved) - } else { - (state_ids_compressed, HashSet::new()) - }; - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 1_000_000, // high number because no state will be based on this one - states_parents, - )?; - } - - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - /// Generates a new StateHash and associates it with the incoming event. - /// - /// This adds all current state events (not including the incoming event) - /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. - #[tracing::instrument(skip(self, new_pdu, globals))] - pub fn append_to_state( - &self, - new_pdu: &PduEvent, - globals: &super::globals::Globals, - ) -> Result { - let shorteventid = self.get_or_create_shorteventid(&new_pdu.event_id, globals)?; - - let previous_shortstatehash = self.current_shortstatehash(&new_pdu.room_id)?; - - if let Some(p) = previous_shortstatehash { - self.shorteventid_shortstatehash - .insert(&shorteventid.to_be_bytes(), &p.to_be_bytes())?; - } - - if let Some(state_key) = &new_pdu.state_key { - let states_parents = previous_shortstatehash - .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; - - let shortstatekey = - self.get_or_create_shortstatekey(&new_pdu.kind, state_key, globals)?; - - let new = self.compress_state_event(shortstatekey, &new_pdu.event_id, globals)?; - - let replaces = states_parents - .last() - .map(|info| { - info.1 - .iter() - .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) - }) - .unwrap_or_default(); - - if Some(&new) == replaces { - return Ok(previous_shortstatehash.expect("must exist")); - } - - // TODO: statehash with deterministic inputs - let shortstatehash = globals.next_count()?; - - let mut statediffnew = HashSet::new(); - statediffnew.insert(new); - - let mut statediffremoved = HashSet::new(); - if let Some(replaces) = replaces { - statediffremoved.insert(*replaces); - } - - self.save_state_from_diff( - shortstatehash, - statediffnew, - statediffremoved, - 2, - states_parents, - )?; - - Ok(shortstatehash) - } else { - Ok(previous_shortstatehash.expect("first event in room must be a state event")) - } - } - - #[tracing::instrument(skip(self, invite_event))] - pub fn calculate_invite_state( - &self, - invite_event: &PduEvent, - ) -> Result>> { - let mut state = Vec::new(); - // Add recommended events - if 
let Some(e) = self.room_state_get(&invite_event.room_id, &EventType::RoomCreate, "")? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &EventType::RoomJoinRules, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = - self.room_state_get(&invite_event.room_id, &EventType::RoomCanonicalAlias, "")? - { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get(&invite_event.room_id, &EventType::RoomAvatar, "")? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get(&invite_event.room_id, &EventType::RoomName, "")? { - state.push(e.to_stripped_state_event()); - } - if let Some(e) = self.room_state_get( - &invite_event.room_id, - &EventType::RoomMember, - invite_event.sender.as_str(), - )? { - state.push(e.to_stripped_state_event()); - } - - state.push(invite_event.to_stripped_state_event()); - Ok(state) - } - - #[tracing::instrument(skip(self))] - pub fn set_room_state(&self, room_id: &RoomId, shortstatehash: u64) -> Result<()> { - self.roomid_shortstatehash - .insert(room_id.as_bytes(), &shortstatehash.to_be_bytes())?; - - Ok(()) - } - - pub fn associate_token_shortstatehash( - &self, - room_id: &RoomId, - token: u64, - shortstatehash: u64, - ) -> Result<()> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .insert(&key, &shortstatehash.to_be_bytes()) - } - - pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { - let shortroomid = self.get_shortroomid(room_id)?.expect("room exists"); - - let mut key = shortroomid.to_be_bytes().to_vec(); - key.extend_from_slice(&token.to_be_bytes()); - - self.roomsynctoken_shortstatehash - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid shortstatehash in roomsynctoken_shortstatehash") - }) - }) - .transpose() - } - - /// Creates a new persisted data unit and adds it to a room. - #[tracing::instrument(skip(self, db, _mutex_lock))] - pub fn build_and_append_pdu( - &self, - pdu_builder: PduBuilder, - sender: &UserId, - room_id: &RoomId, - db: &Database, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex - ) -> Result { - let PduBuilder { - event_type, - content, - unsigned, - state_key, - redacts, - } = pdu_builder; - - let prev_events = self - .get_pdu_leaves(room_id)? 
- .into_iter() - .take(20) - .collect::>(); - - let create_event = self.room_state_get(room_id, &EventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - let create_prev_event = if prev_events.len() == 1 - && Some(&prev_events[0]) == create_event.as_ref().map(|c| &c.event_id) - { - create_event - } else { - None - }; - - // If there was no create event yet, assume we are creating a version 6 room right now - let room_version_id = create_event_content - .map_or(RoomVersionId::Version6, |create_event| { - create_event.room_version - }); - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - - let auth_events = - self.get_auth_events(room_id, &event_type, sender, state_key.as_deref(), &content)?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(self.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = unsigned.unwrap_or_default(); - if let Some(state_key) = &state_key { - if let Some(prev_pdu) = self.room_state_get(room_id, &event_type, state_key)? { - unsigned.insert( - "prev_content".to_owned(), - serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), - ); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), - ); - } - } - - let mut pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater"), - room_id: room_id.clone(), - sender: sender.clone(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind: event_type, - content, - state_key, - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - create_prev_event, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_ref().to_owned()), - ); - - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut pdu_json, - &room_version_id, - ) - .expect("event is valid, we just created it"); - - // Generate event id - pdu.event_id = EventId::try_from(&*format!( - "${}", - ruma::signatures::reference_hash(&pdu_json, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - pdu_json.insert( - "event_id".to_owned(), - 
CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), - ); - - // Generate short event id - let _shorteventid = self.get_or_create_shorteventid(&pdu.event_id, &db.globals)?; - - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. - let statehashid = self.append_to_state(&pdu, &db.globals)?; - - let pdu_id = self.append_pdu( - &pdu, - pdu_json, - // Since this PDU references all pdu_leaves we can update the leaves - // of the room - &[pdu.event_id.clone()], - db, - )?; - - // We set the room state after inserting the pdu, so that we never have a moment in time - // where events in the current room state do not exist - self.set_room_state(room_id, statehashid)?; - - let servers = self - .room_servers(room_id) - .filter_map(|r| r.ok()) - .filter(|server| &**server != db.globals.server_name()); - - db.sending.send_pdu(servers, &pdu_id)?; - - for appservice in db.appservice.all()? { - if self.appservice_in_room(room_id, &appservice, db)? { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == EventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - self.room_aliases(room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - Ok(pdu.event_id) - } - - /// Returns an iterator over all PDUs in a room. - #[tracing::instrument(skip(self))] - pub fn all_pdus<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result, PduEvent)>> + 'a> { - self.pdus_since(user_id, room_id, 0) - } - - /// Returns an iterator over all events in a room that happened after the event with id `since` - /// in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_since<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - since: u64, - ) -> Result, PduEvent)>> + 'a> { - let prefix = self - .get_shortroomid(room_id)? 
- .expect("room exists") - .to_be_bytes() - .to_vec(); - - // Skip the first pdu if it's exactly at since, because we sent that last time - let mut first_pdu_id = prefix.clone(); - first_pdu_id.extend_from_slice(&(since + 1).to_be_bytes()); - - let user_id = user_id.clone(); - - Ok(self - .pduid_pdu - .iter_from(&first_pdu_id, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their tokens in a room that happened before the - /// event with id `until` in reverse-chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_until<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - until: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(until.saturating_sub(1)).to_be_bytes()); // -1 because we don't want event at `until` - - let current: &[u8] = ¤t; - - let user_id = user_id.clone(); - - Ok(self - .pduid_pdu - .iter_from(current, true) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Returns an iterator over all events and their token in a room that happened after the event - /// with id `from` in chronological order. - #[tracing::instrument(skip(self))] - pub fn pdus_after<'a>( - &'a self, - user_id: &UserId, - room_id: &RoomId, - from: u64, - ) -> Result, PduEvent)>> + 'a> { - // Create the first part of the full pdu id - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - - let mut current = prefix.clone(); - current.extend_from_slice(&(from + 1).to_be_bytes()); // +1 so we don't send the base event - - let current: &[u8] = ¤t; - - let user_id = user_id.clone(); - - Ok(self - .pduid_pdu - .iter_from(current, false) - .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pdu_id, v)| { - let mut pdu = serde_json::from_slice::(&v) - .map_err(|_| Error::bad_database("PDU in db is invalid."))?; - if pdu.sender != user_id { - pdu.remove_transaction_id()?; - } - Ok((pdu_id, pdu)) - })) - } - - /// Replace a PDU with the redacted form. - #[tracing::instrument(skip(self, reason))] - pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { - if let Some(pdu_id) = self.get_pdu_id(event_id)? { - let mut pdu = self - .get_pdu_from_id(&pdu_id)? - .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; - pdu.redact(reason)?; - self.replace_pdu(&pdu_id, &pdu)?; - Ok(()) - } else { - Err(Error::BadRequest( - ErrorKind::NotFound, - "Event ID does not exist.", - )) - } - } - - /// Update current membership data. 
- #[tracing::instrument(skip(self, last_state, db))] - pub fn update_membership( - &self, - room_id: &RoomId, - user_id: &UserId, - membership: MembershipState, - sender: &UserId, - last_state: Option>>, - db: &Database, - update_joined_count: bool, - ) -> Result<()> { - // Keep track what remote users exist by adding them as "deactivated" users - if user_id.server_name() != db.globals.server_name() { - db.users.create(user_id, None)?; - // TODO: displayname, avatar url - } - - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(user_id.server_name().as_bytes()); - - let mut serverroom_id = user_id.server_name().as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - match &membership { - MembershipState::Join => { - // Check if the user never joined this room - if !self.once_joined(user_id, room_id)? { - // Add the user ID to the join list then - self.roomuseroncejoinedids.insert(&userroom_id, &[])?; - - // Check if the room has a predecessor - if let Some(predecessor) = self - .room_state_get(room_id, &EventType::RoomCreate, "")? - .and_then(|create| serde_json::from_str(create.content.get()).ok()) - .and_then(|content: RoomCreateEventContent| content.predecessor) - { - // Copy user settings from predecessor to the current room: - // - Push rules - // - // TODO: finish this once push rules are implemented. - // - // let mut push_rules_event_content: PushRulesEvent = account_data - // .get( - // None, - // user_id, - // EventType::PushRules, - // )?; - // - // NOTE: find where `predecessor.room_id` match - // and update to `room_id`. - // - // account_data - // .update( - // None, - // user_id, - // EventType::PushRules, - // &push_rules_event_content, - // globals, - // ) - // .ok(); - - // Copy old tags to new room - if let Some(tag_event) = db.account_data.get::( - Some(&predecessor.room_id), - user_id, - EventType::Tag, - )? { - db.account_data - .update( - Some(room_id), - user_id, - EventType::Tag, - &tag_event, - &db.globals, - ) - .ok(); - }; - - // Copy direct chat flag - if let Some(mut direct_event) = - db.account_data - .get::(None, user_id, EventType::Direct)? 
- { - let mut room_ids_updated = false; - - for room_ids in direct_event.content.0.values_mut() { - if room_ids.iter().any(|r| r == &predecessor.room_id) { - room_ids.push(room_id.clone()); - room_ids_updated = true; - } - } - - if room_ids_updated { - db.account_data.update( - None, - user_id, - EventType::Direct, - &direct_event, - &db.globals, - )?; - } - }; - } - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_joined.insert(&userroom_id, &[])?; - self.roomuserid_joined.insert(&roomuser_id, &[])?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Invite => { - // We want to know if the sender is ignored by the receiver - let is_ignored = db - .account_data - .get::( - None, // Ignored users are in global account data - user_id, // Receiver - EventType::IgnoredUserList, - )? - .map_or(false, |ignored| { - ignored.content.ignored_users.contains(sender) - }); - - if is_ignored { - return Ok(()); - } - - if update_joined_count { - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - self.userroomid_invitestate.insert( - &userroom_id, - &serde_json::to_vec(&last_state.unwrap_or_default()) - .expect("state to bytes always works"), - )?; - self.roomuserid_invitecount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - } - MembershipState::Leave | MembershipState::Ban => { - if update_joined_count - && self - .room_members(room_id) - .chain(self.room_members_invited(room_id)) - .filter_map(|r| r.ok()) - .all(|u| u.server_name() != user_id.server_name()) - { - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - self.userroomid_leftstate.insert( - &userroom_id, - &serde_json::to_vec(&Vec::>::new()).unwrap(), - )?; // TODO - self.roomuserid_leftcount - .insert(&roomuser_id, &db.globals.next_count()?.to_be_bytes())?; - self.userroomid_joined.remove(&userroom_id)?; - self.roomuserid_joined.remove(&roomuser_id)?; - self.userroomid_invitestate.remove(&userroom_id)?; - self.roomuserid_invitecount.remove(&roomuser_id)?; - } - _ => {} - } - - if update_joined_count { - self.update_joined_count(room_id, db)?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn update_joined_count(&self, room_id: &RoomId, db: &Database) -> Result<()> { - let mut joinedcount = 0_u64; - let mut invitedcount = 0_u64; - let mut joined_servers = HashSet::new(); - let mut real_users = HashSet::new(); - - for joined in self.room_members(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(joined.server_name().to_owned()); - if joined.server_name() == db.globals.server_name() - && !db.users.is_deactivated(&joined).unwrap_or(true) - { - real_users.insert(joined); - } - joinedcount += 1; - } - - for invited in self.room_members_invited(room_id).filter_map(|r| r.ok()) { - joined_servers.insert(invited.server_name().to_owned()); - invitedcount += 1; - } - - self.roomid_joinedcount - .insert(room_id.as_bytes(), &joinedcount.to_be_bytes())?; - - self.roomid_invitedcount - .insert(room_id.as_bytes(), 
&invitedcount.to_be_bytes())?; - - self.our_real_users_cache - .write() - .unwrap() - .insert(room_id.clone(), Arc::new(real_users)); - - for old_joined_server in self.room_servers(room_id).filter_map(|r| r.ok()) { - if !joined_servers.remove(&old_joined_server) { - // Server not in room anymore - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(old_joined_server.as_bytes()); - - let mut serverroom_id = old_joined_server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.remove(&roomserver_id)?; - self.serverroomids.remove(&serverroom_id)?; - } - } - - // Now only new servers are in joined_servers anymore - for server in joined_servers { - let mut roomserver_id = room_id.as_bytes().to_vec(); - roomserver_id.push(0xff); - roomserver_id.extend_from_slice(server.as_bytes()); - - let mut serverroom_id = server.as_bytes().to_vec(); - serverroom_id.push(0xff); - serverroom_id.extend_from_slice(room_id.as_bytes()); - - self.roomserverids.insert(&roomserver_id, &[])?; - self.serverroomids.insert(&serverroom_id, &[])?; - } - - self.appservice_in_room_cache - .write() - .unwrap() - .remove(room_id); - - Ok(()) - } - - #[tracing::instrument(skip(self, room_id, db))] - pub fn get_our_real_users( - &self, - room_id: &RoomId, - db: &Database, - ) -> Result>> { - let maybe = self - .our_real_users_cache - .read() - .unwrap() - .get(room_id) - .cloned(); - if let Some(users) = maybe { - Ok(users) - } else { - self.update_joined_count(room_id, db)?; - Ok(Arc::clone( - self.our_real_users_cache - .read() - .unwrap() - .get(room_id) - .unwrap(), - )) - } - } - - #[tracing::instrument(skip(self, room_id, appservice, db))] - pub fn appservice_in_room( - &self, - room_id: &RoomId, - appservice: &(String, serde_yaml::Value), - db: &Database, - ) -> Result { - let maybe = self - .appservice_in_room_cache - .read() - .unwrap() - .get(room_id) - .and_then(|map| map.get(&appservice.0)) - .copied(); - - if let Some(b) = maybe { - Ok(b) - } else if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - - let bridge_user_id = appservice - .1 - .get("sender_localpart") - .and_then(|string| string.as_str()) - .and_then(|string| { - UserId::parse_with_server_name(string, db.globals.server_name()).ok() - }); - - let in_room = bridge_user_id - .map_or(false, |id| self.is_joined(&id, room_id).unwrap_or(false)) - || self.room_members(room_id).any(|userid| { - userid.map_or(false, |userid| { - users.iter().any(|r| r.is_match(userid.as_str())) - }) - }); - - self.appservice_in_room_cache - .write() - .unwrap() - .entry(room_id.clone()) - .or_default() - .insert(appservice.0.clone(), in_room); - - Ok(in_room) - } else { - Ok(false) - } - } - - #[tracing::instrument(skip(self, db))] - pub async fn leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - // Ask a remote server if we don't have this room - if !self.exists(room_id)? && room_id.server_name() != db.globals.server_name() { - if let Err(e) = self.remote_leave_room(user_id, room_id, db).await { - warn!("Failed to leave room {} remotely: {}", user_id, e); - // Don't tell the client about this error - } - - let last_state = self - .invite_state(user_id, room_id)? 
- .map_or_else(|| self.left_state(user_id, room_id), |s| Ok(Some(s)))?; - - // We always drop the invite, we can't rely on other servers - self.update_membership( - room_id, - user_id, - MembershipState::Leave, - user_id, - last_state, - db, - true, - )?; - } else { - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.clone()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - let mut event: RoomMemberEventContent = serde_json::from_str( - self.room_state_get(room_id, &EventType::RoomMember, &user_id.to_string())? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "Cannot leave a room you are not a member of.", - ))? - .content - .get(), - ) - .map_err(|_| Error::bad_database("Invalid member event in database."))?; - - event.membership = MembershipState::Leave; - - self.build_and_append_pdu( - PduBuilder { - event_type: EventType::RoomMember, - content: to_raw_value(&event).expect("event is valid, we just created it"), - unsigned: None, - state_key: Some(user_id.to_string()), - redacts: None, - }, - user_id, - room_id, - db, - &state_lock, - )?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self, db))] - async fn remote_leave_room( - &self, - user_id: &UserId, - room_id: &RoomId, - db: &Database, - ) -> Result<()> { - let mut make_leave_response_and_server = Err(Error::BadServerResponse( - "No server available to assist in leaving.", - )); - - let invite_state = db - .rooms - .invite_state(user_id, room_id)? - .ok_or(Error::BadRequest( - ErrorKind::BadState, - "User is not invited.", - ))?; - - let servers: HashSet<_> = invite_state - .iter() - .filter_map(|event| serde_json::from_str(event.json().get()).ok()) - .filter_map(|event: serde_json::Value| event.get("sender").cloned()) - .filter_map(|sender| sender.as_str().map(|s| s.to_owned())) - .filter_map(|sender| UserId::try_from(sender).ok()) - .map(|user| user.server_name().to_owned()) - .collect(); - - for remote_server in servers { - let make_leave_response = db - .sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::get_leave_event::v1::Request { room_id, user_id }, - ) - .await; - - make_leave_response_and_server = make_leave_response.map(|r| (r, remote_server)); - - if make_leave_response_and_server.is_ok() { - break; - } - } - - let (make_leave_response, remote_server) = make_leave_response_and_server?; - - let room_version_id = match make_leave_response.room_version { - Some(version) - if version == RoomVersionId::Version5 || version == RoomVersionId::Version6 => - { - version - } - _ => return Err(Error::BadServerResponse("Room version is not supported")), - }; - - let mut leave_event_stub = - serde_json::from_str::(make_leave_response.event.get()).map_err( - |_| Error::BadServerResponse("Invalid make_leave event json received from server."), - )?; - - // TODO: Is origin needed? 
- leave_event_stub.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - leave_event_stub.insert( - "origin_server_ts".to_owned(), - CanonicalJsonValue::Integer( - utils::millis_since_unix_epoch() - .try_into() - .expect("Timestamp is valid js_int value"), - ), - ); - // We don't leave the event id in the pdu because that's only allowed in v1 or v2 rooms - leave_event_stub.remove("event_id"); - - // In order to create a compatible ref hash (EventID) the `hashes` field needs to be present - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut leave_event_stub, - &room_version_id, - ) - .expect("event is valid, we just created it"); - - // Generate event id - let event_id = EventId::try_from(&*format!( - "${}", - ruma::signatures::reference_hash(&leave_event_stub, &room_version_id) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - // Add event_id back - leave_event_stub.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - - // It has enough fields to be called a proper event now - let leave_event = leave_event_stub; - - db.sending - .send_federation_request( - &db.globals, - &remote_server, - federation::membership::create_leave_event::v2::Request { - room_id, - event_id: &event_id, - pdu: &PduEvent::convert_to_outgoing_federation_event(leave_event.clone()), - }, - ) - .await?; - - Ok(()) - } - - /// Makes a user forget a room. - #[tracing::instrument(skip(self))] - pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - let mut roomuser_id = room_id.as_bytes().to_vec(); - roomuser_id.push(0xff); - roomuser_id.extend_from_slice(user_id.as_bytes()); - - self.userroomid_leftstate.remove(&userroom_id)?; - self.roomuserid_leftcount.remove(&roomuser_id)?; - - Ok(()) - } - - #[tracing::instrument(skip(self, globals))] - pub fn set_alias( - &self, - alias: &RoomAliasId, - room_id: Option<&RoomId>, - globals: &super::globals::Globals, - ) -> Result<()> { - if let Some(room_id) = room_id { - // New alias - self.alias_roomid - .insert(alias.alias().as_bytes(), room_id.as_bytes())?; - let mut aliasid = room_id.as_bytes().to_vec(); - aliasid.push(0xff); - aliasid.extend_from_slice(&globals.next_count()?.to_be_bytes()); - self.aliasid_alias.insert(&aliasid, &*alias.as_bytes())?; - } else { - // room_id=None means remove alias - if let Some(room_id) = self.alias_roomid.get(alias.alias().as_bytes())? { - let mut prefix = room_id.to_vec(); - prefix.push(0xff); - - for (key, _) in self.aliasid_alias.scan_prefix(prefix) { - self.aliasid_alias.remove(&key)?; - } - self.alias_roomid.remove(alias.alias().as_bytes())?; - } else { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Alias does not exist.", - )); - } - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn id_from_alias(&self, alias: &RoomAliasId) -> Result> { - self.alias_roomid - .get(alias.alias().as_bytes())? - .map(|bytes| { - RoomId::try_from(utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in alias_roomid is invalid unicode.") - })?) 
- .map_err(|_| Error::bad_database("Room ID in alias_roomid is invalid.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_aliases<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.aliasid_alias.scan_prefix(prefix).map(|(_, bytes)| { - utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid alias bytes in aliasid_alias."))? - .try_into() - .map_err(|_| Error::bad_database("Invalid alias in aliasid_alias.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn set_public(&self, room_id: &RoomId, public: bool) -> Result<()> { - if public { - self.publicroomids.insert(room_id.as_bytes(), &[])?; - } else { - self.publicroomids.remove(room_id.as_bytes())?; - } - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn is_public_room(&self, room_id: &RoomId) -> Result { - Ok(self.publicroomids.get(room_id.as_bytes())?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn public_rooms(&self) -> impl Iterator> + '_ { - self.publicroomids.iter().map(|(bytes, _)| { - RoomId::try_from( - utils::string_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Room ID in publicroomids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in publicroomids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn search_pdus<'a>( - &'a self, - room_id: &RoomId, - search_string: &str, - ) -> Result<(impl Iterator> + 'a, Vec)> { - let prefix = self - .get_shortroomid(room_id)? - .expect("room exists") - .to_be_bytes() - .to_vec(); - let prefix_clone = prefix.clone(); - - let words: Vec<_> = search_string - .split_terminator(|c: char| !c.is_alphanumeric()) - .filter(|s| !s.is_empty()) - .map(str::to_lowercase) - .collect(); - - let iterators = words.clone().into_iter().map(move |word| { - let mut prefix2 = prefix.clone(); - prefix2.extend_from_slice(word.as_bytes()); - prefix2.push(0xff); - - let mut last_possible_id = prefix2.clone(); - last_possible_id.extend_from_slice(&u64::MAX.to_be_bytes()); - - self.tokenids - .iter_from(&last_possible_id, true) // Newest pdus first - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(|(key, _)| key[key.len() - size_of::()..].to_vec()) - }); - - Ok(( - utils::common_elements(iterators, |a, b| { - // We compare b with a because we reversed the iterator earlier - b.cmp(a) - }) - .unwrap() - .map(move |id| { - let mut pduid = prefix_clone.clone(); - pduid.extend_from_slice(&id); - pduid - }), - words, - )) - } - - #[tracing::instrument(skip(self))] - pub fn get_shared_rooms<'a>( - &'a self, - users: Vec, - ) -> Result> + 'a> { - let iterators = users.into_iter().map(move |user_id| { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_joined - .scan_prefix(prefix) - .map(|(key, _)| { - let roomid_index = key - .iter() - .enumerate() - .find(|(_, &b)| b == 0xff) - .ok_or_else(|| Error::bad_database("Invalid userroomid_joined in db."))? - .0 - + 1; // +1 because the room id starts AFTER the separator - - let room_id = key[roomid_index..].to_vec(); - - Ok::<_, Error>(room_id) - }) - .filter_map(|r| r.ok()) - }); - - // We use the default compare function because keys are sorted correctly (not reversed) - Ok(utils::common_elements(iterators, Ord::cmp) - .expect("users is not empty") - .map(|bytes| { - RoomId::try_from(utils::string_from_bytes(&*bytes).map_err(|_| { - Error::bad_database("Invalid RoomId bytes in userroomid_joined") - })?) 
- .map_err(|_| Error::bad_database("Invalid RoomId in userroomid_joined.")) - })) - } - - /// Returns an iterator of all servers participating in this room. - #[tracing::instrument(skip(self))] - pub fn room_servers<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator>> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomserverids.scan_prefix(prefix).map(|(key, _)| { - Box::::try_from( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Server name in roomserverids is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Server name in roomserverids is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.serverroomids.get(&key).map(|o| o.is_some()) - } - - /// Returns an iterator of all rooms a server participates in (as far as we know). - #[tracing::instrument(skip(self))] - pub fn server_rooms<'a>( - &'a self, - server: &ServerName, - ) -> impl Iterator> + 'a { - let mut prefix = server.as_bytes().to_vec(); - prefix.push(0xff); - - self.serverroomids.scan_prefix(prefix).map(|(key, _)| { - RoomId::try_from( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid unicode."))?, - ) - .map_err(|_| Error::bad_database("RoomId in serverroomids is invalid.")) - }) - } - - /// Returns an iterator over all joined members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_joined.scan_prefix(prefix).map(|(key, _)| { - UserId::try_from( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_joined is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { - self.roomid_joinedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { - self.roomid_invitedcount - .get(room_id.as_bytes())? - .map(|b| { - utils::u64_from_bytes(&b) - .map_err(|_| Error::bad_database("Invalid joinedcount in db.")) - }) - .transpose() - } - - /// Returns an iterator over all User IDs who ever joined a room. 
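
// The iterators above (room_servers, room_members, ...) all parse composite
// keys of the form `room_id ++ 0xff ++ member`. A minimal standalone sketch of
// that convention with plain strings instead of the real ruma types (function
// names here are illustrative only):
fn make_member_key(room_id: &str, member: &str) -> Vec<u8> {
    let mut key = room_id.as_bytes().to_vec();
    key.push(0xff); // 0xff never appears in valid UTF-8, so it is a safe separator
    key.extend_from_slice(member.as_bytes());
    key
}

fn member_from_key(key: &[u8]) -> Option<String> {
    // Same trick as `rsplit(|&b| b == 0xff).next()` above: the last
    // 0xff-separated segment is the member id.
    let segment = key.rsplit(|&b| b == 0xff).next()?;
    String::from_utf8(segment.to_vec()).ok()
}

fn demo_member_key() {
    let key = make_member_key("!room:example.org", "@alice:example.org");
    assert_eq!(member_from_key(&key).as_deref(), Some("@alice:example.org"));
}
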
- #[tracing::instrument(skip(self))] - pub fn room_useroncejoined<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuseroncejoinedids - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::try_from( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in room_useroncejoined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in room_useroncejoined is invalid.")) - }) - } - - /// Returns an iterator over all invited members of a room. - #[tracing::instrument(skip(self))] - pub fn room_members_invited<'a>( - &'a self, - room_id: &RoomId, - ) -> impl Iterator> + 'a { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.roomuserid_invitecount - .scan_prefix(prefix) - .map(|(key, _)| { - UserId::try_from( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("User ID in roomuserid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("User ID in roomuserid_invited is invalid.")) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_invitecount - .get(&key)? - .map_or(Ok(None), |bytes| { - Ok(Some(utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid invitecount in db.") - })?)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_leftcount - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid leftcount in db.")) - }) - .transpose() - } - - /// Returns an iterator over all rooms this user joined. - #[tracing::instrument(skip(self))] - pub fn rooms_joined<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator> + 'a { - self.userroomid_joined - .scan_prefix(user_id.as_bytes().to_vec()) - .map(|(key, _)| { - RoomId::try_from( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_joined is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_joined is invalid.")) - }) - } - - /// Returns an iterator over all rooms a user was invited to. 
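
// A quick aside on the encoding used by these counters: the joined/invited/left
// counts above are stored with u64::to_be_bytes and decoded via
// utils::u64_from_bytes. A minimal standalone check (std only) that the
// encoding round-trips and that big-endian keeps byte order consistent with
// numeric order, which also makes such counters safe to embed in range-scanned
// keys:
fn demo_be_counts() {
    let (a, b): (u64, u64) = (2, 300);
    assert!(a.to_be_bytes() < b.to_be_bytes()); // byte order matches numeric order
    assert_eq!(u64::from_be_bytes(a.to_be_bytes()), a); // lossless round-trip
}
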
- #[tracing::instrument(skip(self))] - pub fn rooms_invited<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_invitestate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::try_from( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn invite_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_invitestate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_invitestate."))?; - - Ok(state) - }) - .transpose() - } - - #[tracing::instrument(skip(self))] - pub fn left_state( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result>>> { - let mut key = user_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(room_id.as_bytes()); - - self.userroomid_leftstate - .get(&key)? - .map(|state| { - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok(state) - }) - .transpose() - } - - /// Returns an iterator over all rooms a user left. - #[tracing::instrument(skip(self))] - pub fn rooms_left<'a>( - &'a self, - user_id: &UserId, - ) -> impl Iterator>)>> + 'a { - let mut prefix = user_id.as_bytes().to_vec(); - prefix.push(0xff); - - self.userroomid_leftstate - .scan_prefix(prefix) - .map(|(key, state)| { - let room_id = RoomId::try_from( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| { - Error::bad_database("Room ID in userroomid_invited is invalid unicode.") - })?, - ) - .map_err(|_| Error::bad_database("Room ID in userroomid_invited is invalid."))?; - - let state = serde_json::from_slice(&state) - .map_err(|_| Error::bad_database("Invalid state in userroomid_leftstate."))?; - - Ok((room_id, state)) - }) - } - - #[tracing::instrument(skip(self))] - pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.roomuseroncejoinedids.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_joined.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_invitestate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn is_left(&self, user_id: &UserId, 
room_id: &RoomId) -> Result { - let mut userroom_id = user_id.as_bytes().to_vec(); - userroom_id.push(0xff); - userroom_id.extend_from_slice(room_id.as_bytes()); - - Ok(self.userroomid_leftstate.get(&userroom_id)?.is_some()) - } - - #[tracing::instrument(skip(self))] - pub fn get_auth_chain_from_cache<'a>( - &'a self, - key: &[u64], - ) -> Result>>> { - // Check RAM cache - if let Some(result) = self.auth_chain_cache.lock().unwrap().get_mut(key) { - return Ok(Some(Arc::clone(result))); - } - - // Check DB cache - if key.len() == 1 { - if let Some(chain) = - self.shorteventid_authchain - .get(&key[0].to_be_bytes())? - .map(|chain| { - chain - .chunks_exact(size_of::()) - .map(|chunk| { - utils::u64_from_bytes(chunk).expect("byte length is correct") - }) - .collect() - }) - { - let chain = Arc::new(chain); - - // Cache in RAM - self.auth_chain_cache - .lock() - .unwrap() - .insert(vec![key[0]], Arc::clone(&chain)); - - return Ok(Some(chain)); - } - } - - Ok(None) - } - - #[tracing::instrument(skip(self))] - pub fn cache_auth_chain(&self, key: Vec, chain: Arc>) -> Result<()> { - // Persist in db - if key.len() == 1 { - self.shorteventid_authchain.insert( - &key[0].to_be_bytes(), - &chain - .iter() - .map(|s| s.to_be_bytes().to_vec()) - .flatten() - .collect::>(), - )?; - } - - // Cache in RAM - self.auth_chain_cache.lock().unwrap().insert(key, chain); - - Ok(()) - } -} diff --git a/src/database/rooms/edus.rs b/src/database/rooms/edus.rs deleted file mode 100644 index 9a27e437..00000000 --- a/src/database/rooms/edus.rs +++ /dev/null @@ -1,549 +0,0 @@ -use crate::{database::abstraction::Tree, utils, Error, Result}; -use ruma::{ - events::{ - presence::{PresenceEvent, PresenceEventContent}, - AnyEphemeralRoomEvent, SyncEphemeralRoomEvent, - }, - presence::PresenceState, - serde::Raw, - signatures::CanonicalJsonObject, - RoomId, UInt, UserId, -}; -use std::{ - collections::{HashMap, HashSet}, - convert::{TryFrom, TryInto}, - mem, - sync::Arc, -}; - -pub struct RoomEdus { - pub(in super::super) readreceiptid_readreceipt: Arc, // ReadReceiptId = RoomId + Count + UserId - pub(in super::super) roomuserid_privateread: Arc, // RoomUserId = Room + User, PrivateRead = Count - pub(in super::super) roomuserid_lastprivatereadupdate: Arc, // LastPrivateReadUpdate = Count - pub(in super::super) typingid_userid: Arc, // TypingId = RoomId + TimeoutTime + Count - pub(in super::super) roomid_lasttypingupdate: Arc, // LastRoomTypingUpdate = Count - pub(in super::super) presenceid_presence: Arc, // PresenceId = RoomId + Count + UserId - pub(in super::super) userid_lastpresenceupdate: Arc, // LastPresenceUpdate = Count -} - -impl RoomEdus { - /// Adds an event which will be saved until a new event replaces it (e.g. read receipt). 
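
// get_auth_chain_from_cache / cache_auth_chain above form a two-tier cache:
// an in-memory map is consulted first, then (for single-event keys) the
// persistent shorteventid_authchain tree, and a DB hit repopulates the memory
// tier. A minimal standalone sketch of that pattern, with plain HashMaps
// standing in for the LRU cache and the key-value tree (types and names here
// are illustrative only):
use std::collections::HashMap;
use std::sync::{Arc, Mutex};

struct ChainCache {
    ram: Mutex<HashMap<Vec<u64>, Arc<Vec<u64>>>>, // fast tier
    db: Mutex<HashMap<u64, Vec<u64>>>,            // stands in for shorteventid_authchain
}

impl ChainCache {
    fn get(&self, key: &[u64]) -> Option<Arc<Vec<u64>>> {
        if let Some(hit) = self.ram.lock().unwrap().get(key) {
            return Some(Arc::clone(hit));
        }
        if key.len() == 1 {
            if let Some(chain) = self.db.lock().unwrap().get(&key[0]).cloned() {
                let chain = Arc::new(chain);
                // repopulate the fast tier so the next lookup skips the DB
                self.ram.lock().unwrap().insert(key.to_vec(), Arc::clone(&chain));
                return Some(chain);
            }
        }
        None
    }
}
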
- pub fn readreceipt_update( - &self, - user_id: &UserId, - room_id: &RoomId, - event: AnyEphemeralRoomEvent, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut last_possible_key = prefix.clone(); - last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); - - // Remove old entry - if let Some((old, _)) = self - .readreceiptid_readreceipt - .iter_from(&last_possible_key, true) - .take_while(|(key, _)| key.starts_with(&prefix)) - .find(|(key, _)| { - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element") - == user_id.as_bytes() - }) - { - // This is the old room_latest - self.readreceiptid_readreceipt.remove(&old)?; - } - - let mut room_latest_id = prefix; - room_latest_id.extend_from_slice(&globals.next_count()?.to_be_bytes()); - room_latest_id.push(0xff); - room_latest_id.extend_from_slice(user_id.as_bytes()); - - self.readreceiptid_readreceipt.insert( - &room_latest_id, - &serde_json::to_vec(&event).expect("EduEvent::to_string always works"), - )?; - - Ok(()) - } - - /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. - #[tracing::instrument(skip(self))] - pub fn readreceipts_since<'a>( - &'a self, - room_id: &RoomId, - since: u64, - ) -> impl Iterator)>> + 'a - { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - let prefix2 = prefix.clone(); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - - self.readreceiptid_readreceipt - .iter_from(&first_possible_edu, false) - .take_while(move |(k, _)| k.starts_with(&prefix2)) - .map(move |(k, v)| { - let count = - utils::u64_from_bytes(&k[prefix.len()..prefix.len() + mem::size_of::()]) - .map_err(|_| Error::bad_database("Invalid readreceiptid count in db."))?; - let user_id = UserId::try_from( - utils::string_from_bytes(&k[prefix.len() + mem::size_of::() + 1..]) - .map_err(|_| { - Error::bad_database("Invalid readreceiptid userid bytes in db.") - })?, - ) - .map_err(|_| Error::bad_database("Invalid readreceiptid userid in db."))?; - - let mut json = serde_json::from_slice::(&v).map_err(|_| { - Error::bad_database("Read receipt in roomlatestid_roomlatest is invalid json.") - })?; - json.remove("room_id"); - - Ok(( - user_id, - count, - Raw::from_json( - serde_json::value::to_raw_value(&json).expect("json is valid raw value"), - ), - )) - }) - } - - /// Sets a private read marker at `count`. - #[tracing::instrument(skip(self, globals))] - pub fn private_read_set( - &self, - room_id: &RoomId, - user_id: &UserId, - count: u64, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - .insert(&key, &count.to_be_bytes())?; - - self.roomuserid_lastprivatereadupdate - .insert(&key, &globals.next_count()?.to_be_bytes())?; - - Ok(()) - } - - /// Returns the private read marker. - #[tracing::instrument(skip(self))] - pub fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - self.roomuserid_privateread - .get(&key)? 
- .map_or(Ok(None), |v| { - Ok(Some(utils::u64_from_bytes(&v).map_err(|_| { - Error::bad_database("Invalid private read marker bytes") - })?)) - }) - } - - /// Returns the count of the last typing update in this room. - pub fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { - let mut key = room_id.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(user_id.as_bytes()); - - Ok(self - .roomuserid_lastprivatereadupdate - .get(&key)? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomuserid_lastprivatereadupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } - - /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is - /// called. - pub fn typing_add( - &self, - user_id: &UserId, - room_id: &RoomId, - timeout: u64, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let count = globals.next_count()?.to_be_bytes(); - - let mut room_typing_id = prefix; - room_typing_id.extend_from_slice(&timeout.to_be_bytes()); - room_typing_id.push(0xff); - room_typing_id.extend_from_slice(&count); - - self.typingid_userid - .insert(&room_typing_id, &*user_id.as_bytes())?; - - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &count)?; - - Ok(()) - } - - /// Removes a user from typing before the timeout is reached. - pub fn typing_remove( - &self, - user_id: &UserId, - room_id: &RoomId, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let user_id = user_id.to_string(); - - let mut found_outdated = false; - - // Maybe there are multiple ones from calling roomtyping_add multiple times - for outdated_edu in self - .typingid_userid - .scan_prefix(prefix) - .filter(|(_, v)| &**v == user_id.as_bytes()) - { - self.typingid_userid.remove(&outdated_edu.0)?; - found_outdated = true; - } - - if found_outdated { - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - } - - Ok(()) - } - - /// Makes sure that typing events with old timestamps get removed. - fn typings_maintain( - &self, - room_id: &RoomId, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let current_timestamp = utils::millis_since_unix_epoch(); - - let mut found_outdated = false; - - // Find all outdated edus before inserting a new one - for outdated_edu in self - .typingid_userid - .scan_prefix(prefix) - .map(|(key, _)| { - Ok::<_, Error>(( - key.clone(), - utils::u64_from_bytes( - &key.splitn(2, |&b| b == 0xff).nth(1).ok_or_else(|| { - Error::bad_database("RoomTyping has invalid timestamp or delimiters.") - })?[0..mem::size_of::()], - ) - .map_err(|_| Error::bad_database("RoomTyping has invalid timestamp bytes."))?, - )) - }) - .filter_map(|r| r.ok()) - .take_while(|&(_, timestamp)| timestamp < current_timestamp) - { - // This is an outdated edu (time > timestamp) - self.typingid_userid.remove(&outdated_edu.0)?; - found_outdated = true; - } - - if found_outdated { - self.roomid_lasttypingupdate - .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; - } - - Ok(()) - } - - /// Returns the count of the last typing update in this room. 
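
// typings_maintain above relies on the typing key embedding the timeout
// timestamp, so expired entries can be found by a prefix scan plus a simple
// `timestamp < now` comparison. The same sweep as a standalone sketch over a
// BTreeMap keyed by (timeout_millis, count) (names illustrative only):
use std::collections::BTreeMap;

fn sweep_expired_typing(
    typing: &mut BTreeMap<(u64, u64), String>, // (timeout_millis, count) -> user id
    now_millis: u64,
) -> bool {
    let expired: Vec<(u64, u64)> = typing
        .range(..(now_millis, 0)) // every entry whose timeout is already in the past
        .map(|(k, _)| *k)
        .collect();
    for key in &expired {
        typing.remove(key);
    }
    // callers bump the room's "last typing update" counter when this returns true
    !expired.is_empty()
}
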
- #[tracing::instrument(skip(self, globals))] - pub fn last_typing_update( - &self, - room_id: &RoomId, - globals: &super::super::globals::Globals, - ) -> Result { - self.typings_maintain(room_id, globals)?; - - Ok(self - .roomid_lasttypingupdate - .get(room_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Count in roomid_lastroomactiveupdate is invalid.") - }) - }) - .transpose()? - .unwrap_or(0)) - } - - pub fn typings_all( - &self, - room_id: &RoomId, - ) -> Result> { - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut user_ids = HashSet::new(); - - for user_id in self - .typingid_userid - .scan_prefix(prefix) - .map(|(_, user_id)| { - UserId::try_from(utils::string_from_bytes(&user_id).map_err(|_| { - Error::bad_database("User ID in typingid_userid is invalid unicode.") - })?) - .map_err(|_| Error::bad_database("User ID in typingid_userid is invalid.")) - }) - { - user_ids.insert(user_id?); - } - - Ok(SyncEphemeralRoomEvent { - content: ruma::events::typing::TypingEventContent { - user_ids: user_ids.into_iter().collect(), - }, - }) - } - - /// Adds a presence event which will be saved until a new event replaces it. - /// - /// Note: This method takes a RoomId because presence updates are always bound to rooms to - /// make sure users outside these rooms can't see them. - pub fn update_presence( - &self, - user_id: &UserId, - room_id: &RoomId, - presence: PresenceEvent, - globals: &super::super::globals::Globals, - ) -> Result<()> { - // TODO: Remove old entry? Or maybe just wipe completely from time to time? - - let count = globals.next_count()?.to_be_bytes(); - - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count); - presence_id.push(0xff); - presence_id.extend_from_slice(presence.sender.as_bytes()); - - self.presenceid_presence.insert( - &presence_id, - &serde_json::to_vec(&presence).expect("PresenceEvent can be serialized"), - )?; - - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - /// Resets the presence timeout, so the user will stay in their current presence state. - #[tracing::instrument(skip(self))] - pub fn ping_presence(&self, user_id: &UserId) -> Result<()> { - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - - Ok(()) - } - - /// Returns the timestamp of the last presence update of this user in millis since the unix epoch. - pub fn last_presence_update(&self, user_id: &UserId) -> Result> { - self.userid_lastpresenceupdate - .get(user_id.as_bytes())? - .map(|bytes| { - utils::u64_from_bytes(&bytes).map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) - }) - .transpose() - } - - pub fn get_last_presence_event( - &self, - user_id: &UserId, - room_id: &RoomId, - ) -> Result> { - let last_update = match self.last_presence_update(user_id)? { - Some(last) => last, - None => return Ok(None), - }; - - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&last_update.to_be_bytes()); - presence_id.push(0xff); - presence_id.extend_from_slice(user_id.as_bytes()); - - self.presenceid_presence - .get(&presence_id)? 
- .map(|value| { - let mut presence: PresenceEvent = serde_json::from_slice(&value) - .map_err(|_| Error::bad_database("Invalid presence event in db."))?; - let current_timestamp: UInt = utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"); - - if presence.content.presence == PresenceState::Online { - // Don't set last_active_ago when the user is online - presence.content.last_active_ago = None; - } else { - // Convert from timestamp to duration - presence.content.last_active_ago = presence - .content - .last_active_ago - .map(|timestamp| current_timestamp - timestamp); - } - - Ok(presence) - }) - .transpose() - } - - /// Sets all users to offline who have been quiet for too long. - fn _presence_maintain( - &self, - rooms: &super::Rooms, - globals: &super::super::globals::Globals, - ) -> Result<()> { - let current_timestamp = utils::millis_since_unix_epoch(); - - for (user_id_bytes, last_timestamp) in self - .userid_lastpresenceupdate - .iter() - .filter_map(|(k, bytes)| { - Some(( - k, - utils::u64_from_bytes(&bytes) - .map_err(|_| { - Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") - }) - .ok()?, - )) - }) - .take_while(|(_, timestamp)| current_timestamp.saturating_sub(*timestamp) > 5 * 60_000) - // 5 Minutes - { - // Send new presence events to set the user offline - let count = globals.next_count()?.to_be_bytes(); - let user_id = utils::string_from_bytes(&user_id_bytes) - .map_err(|_| { - Error::bad_database("Invalid UserId bytes in userid_lastpresenceupdate.") - })? - .try_into() - .map_err(|_| Error::bad_database("Invalid UserId in userid_lastpresenceupdate."))?; - for room_id in rooms.rooms_joined(&user_id).filter_map(|r| r.ok()) { - let mut presence_id = room_id.as_bytes().to_vec(); - presence_id.push(0xff); - presence_id.extend_from_slice(&count); - presence_id.push(0xff); - presence_id.extend_from_slice(&user_id_bytes); - - self.presenceid_presence.insert( - &presence_id, - &serde_json::to_vec(&PresenceEvent { - content: PresenceEventContent { - avatar_url: None, - currently_active: None, - displayname: None, - last_active_ago: Some( - last_timestamp.try_into().expect("time is valid"), - ), - presence: PresenceState::Offline, - status_msg: None, - }, - sender: user_id.clone(), - }) - .expect("PresenceEvent can be serialized"), - )?; - } - - self.userid_lastpresenceupdate.insert( - user_id.as_bytes(), - &utils::millis_since_unix_epoch().to_be_bytes(), - )?; - } - - Ok(()) - } - - /// Returns an iterator over the most recent presence updates that happened after the event with id `since`. 
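
// get_last_presence_event and presence_since above both store an absolute
// "last active" timestamp and turn it into a relative last_active_ago at read
// time, clearing the field entirely while the user is online. The same
// conversion as a standalone helper (millisecond timestamps and a plain
// `online` flag instead of the full PresenceState):
fn last_active_ago(stored_millis: Option<u64>, now_millis: u64, online: bool) -> Option<u64> {
    if online {
        None // online users don't report last_active_ago
    } else {
        stored_millis.map(|ts| now_millis.saturating_sub(ts))
    }
}
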
- #[tracing::instrument(skip(self, since, _rooms, _globals))] - pub fn presence_since( - &self, - room_id: &RoomId, - since: u64, - _rooms: &super::Rooms, - _globals: &super::super::globals::Globals, - ) -> Result> { - //self.presence_maintain(rooms, globals)?; - - let mut prefix = room_id.as_bytes().to_vec(); - prefix.push(0xff); - - let mut first_possible_edu = prefix.clone(); - first_possible_edu.extend_from_slice(&(since + 1).to_be_bytes()); // +1 so we don't send the event at since - let mut hashmap = HashMap::new(); - - for (key, value) in self - .presenceid_presence - .iter_from(&*first_possible_edu, false) - .take_while(|(key, _)| key.starts_with(&prefix)) - { - let user_id = UserId::try_from( - utils::string_from_bytes( - key.rsplit(|&b| b == 0xff) - .next() - .expect("rsplit always returns an element"), - ) - .map_err(|_| Error::bad_database("Invalid UserId bytes in presenceid_presence."))?, - ) - .map_err(|_| Error::bad_database("Invalid UserId in presenceid_presence."))?; - - let mut presence: PresenceEvent = serde_json::from_slice(&value) - .map_err(|_| Error::bad_database("Invalid presence event in db."))?; - - let current_timestamp: UInt = utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"); - - if presence.content.presence == PresenceState::Online { - // Don't set last_active_ago when the user is online - presence.content.last_active_ago = None; - } else { - // Convert from timestamp to duration - presence.content.last_active_ago = presence - .content - .last_active_ago - .map(|timestamp| current_timestamp - timestamp); - } - - hashmap.insert(user_id, presence); - } - - Ok(hashmap) - } -} diff --git a/src/database/sending.rs b/src/database/sending.rs deleted file mode 100644 index bf0cc2c1..00000000 --- a/src/database/sending.rs +++ /dev/null @@ -1,819 +0,0 @@ -use std::{ - collections::{BTreeMap, HashMap, HashSet}, - convert::{TryFrom, TryInto}, - fmt::Debug, - sync::Arc, - time::{Duration, Instant}, -}; - -use crate::{ - appservice_server, database::pusher, server_server, utils, Database, Error, PduEvent, Result, -}; -use federation::transactions::send_transaction_message; -use ring::digest; -use rocket::futures::{ - channel::mpsc, - stream::{FuturesUnordered, StreamExt}, -}; -use ruma::{ - api::{ - appservice, - federation::{ - self, - transactions::edu::{ - DeviceListUpdateContent, Edu, ReceiptContent, ReceiptData, ReceiptMap, - }, - }, - OutgoingRequest, - }, - device_id, - events::{push_rules::PushRulesEvent, AnySyncEphemeralRoomEvent, EventType}, - push, - receipt::ReceiptType, - uint, MilliSecondsSinceUnixEpoch, ServerName, UInt, UserId, -}; -use tokio::{ - select, - sync::{RwLock, Semaphore}, -}; -use tracing::{error, warn}; - -use super::abstraction::Tree; - -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub enum OutgoingKind { - Appservice(Box), - Push(Vec, Vec), // user and pushkey - Normal(Box), -} - -impl OutgoingKind { - #[tracing::instrument(skip(self))] - pub fn get_prefix(&self) -> Vec { - let mut prefix = match self { - OutgoingKind::Appservice(server) => { - let mut p = b"+".to_vec(); - p.extend_from_slice(server.as_bytes()); - p - } - OutgoingKind::Push(user, pushkey) => { - let mut p = b"$".to_vec(); - p.extend_from_slice(user); - p.push(0xff); - p.extend_from_slice(pushkey); - p - } - OutgoingKind::Normal(server) => { - let mut p = Vec::new(); - p.extend_from_slice(server.as_bytes()); - p - } - }; - prefix.push(0xff); - - prefix - } -} - -#[derive(Clone, Debug, PartialEq, Eq, Hash)] -pub enum SendingEventType { - Pdu(Vec), - 
Edu(Vec), -} - -pub struct Sending { - /// The state for a given state hash. - pub(super) servername_educount: Arc, // EduCount: Count of last EDU sync - pub(super) servernameevent_data: Arc, // ServernameEvent = (+ / $)SenderKey / ServerName / UserId + PduId / Id (for edus), Data = EDU content - pub(super) servercurrentevent_data: Arc, // ServerCurrentEvents = (+ / $)ServerName / UserId + PduId / Id (for edus), Data = EDU content - pub(super) maximum_requests: Arc, - pub sender: mpsc::UnboundedSender<(Vec, Vec)>, -} - -enum TransactionStatus { - Running, - Failed(u32, Instant), // number of times failed, time of last failure - Retrying(u32), // number of times failed -} - -impl Sending { - pub fn start_handler( - &self, - db: Arc>, - mut receiver: mpsc::UnboundedReceiver<(Vec, Vec)>, - ) { - tokio::spawn(async move { - let mut futures = FuturesUnordered::new(); - - let mut current_transaction_status = HashMap::, TransactionStatus>::new(); - - // Retry requests we could not finish yet - let mut initial_transactions = HashMap::>::new(); - - let guard = db.read().await; - - for (key, outgoing_kind, event) in guard - .sending - .servercurrentevent_data - .iter() - .filter_map(|(key, v)| { - Self::parse_servercurrentevent(&key, v) - .ok() - .map(|(k, e)| (key, k, e)) - }) - { - let entry = initial_transactions - .entry(outgoing_kind.clone()) - .or_insert_with(Vec::new); - - if entry.len() > 30 { - warn!( - "Dropping some current events: {:?} {:?} {:?}", - key, outgoing_kind, event - ); - guard.sending.servercurrentevent_data.remove(&key).unwrap(); - continue; - } - - entry.push(event); - } - - drop(guard); - - for (outgoing_kind, events) in initial_transactions { - current_transaction_status - .insert(outgoing_kind.get_prefix(), TransactionStatus::Running); - futures.push(Self::handle_events( - outgoing_kind.clone(), - events, - Arc::clone(&db), - )); - } - - loop { - select! 
{ - Some(response) = futures.next() => { - match response { - Ok(outgoing_kind) => { - let guard = db.read().await; - - let prefix = outgoing_kind.get_prefix(); - for (key, _) in guard.sending.servercurrentevent_data - .scan_prefix(prefix.clone()) - { - guard.sending.servercurrentevent_data.remove(&key).unwrap(); - } - - // Find events that have been added since starting the last request - let new_events: Vec<_> = guard.sending.servernameevent_data - .scan_prefix(prefix.clone()) - .filter_map(|(k, v)| { - Self::parse_servercurrentevent(&k, v).ok().map(|ev| (ev, k)) - }) - .take(30) - .collect::<>(); - - // TODO: find edus - - if !new_events.is_empty() { - // Insert pdus we found - for (e, key) in &new_events { - let value = if let SendingEventType::Edu(value) = &e.1 { &**value } else { &[] }; - guard.sending.servercurrentevent_data.insert(key, value).unwrap(); - guard.sending.servernameevent_data.remove(key).unwrap(); - } - - drop(guard); - - futures.push( - Self::handle_events( - outgoing_kind.clone(), - new_events.into_iter().map(|(event, _)| event.1).collect(), - Arc::clone(&db), - ) - ); - } else { - current_transaction_status.remove(&prefix); - } - } - Err((outgoing_kind, _)) => { - current_transaction_status.entry(outgoing_kind.get_prefix()).and_modify(|e| *e = match e { - TransactionStatus::Running => TransactionStatus::Failed(1, Instant::now()), - TransactionStatus::Retrying(n) => TransactionStatus::Failed(*n+1, Instant::now()), - TransactionStatus::Failed(_, _) => { - error!("Request that was not even running failed?!"); - return - }, - }); - } - }; - }, - Some((key, value)) = receiver.next() => { - if let Ok((outgoing_kind, event)) = Self::parse_servercurrentevent(&key, value) { - let guard = db.read().await; - - if let Ok(Some(events)) = Self::select_events( - &outgoing_kind, - vec![(event, key)], - &mut current_transaction_status, - &guard - ) { - futures.push(Self::handle_events(outgoing_kind, events, Arc::clone(&db))); - } - } - } - } - } - }); - } - - #[tracing::instrument(skip(outgoing_kind, new_events, current_transaction_status, db))] - fn select_events( - outgoing_kind: &OutgoingKind, - new_events: Vec<(SendingEventType, Vec)>, // Events we want to send: event and full key - current_transaction_status: &mut HashMap, TransactionStatus>, - db: &Database, - ) -> Result>> { - let mut retry = false; - let mut allow = true; - - let prefix = outgoing_kind.get_prefix(); - let entry = current_transaction_status.entry(prefix.clone()); - - entry - .and_modify(|e| match e { - TransactionStatus::Running | TransactionStatus::Retrying(_) => { - allow = false; // already running - } - TransactionStatus::Failed(tries, time) => { - // Fail if a request has failed recently (exponential backoff) - let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); - if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { - min_elapsed_duration = Duration::from_secs(60 * 60 * 24); - } - - if time.elapsed() < min_elapsed_duration { - allow = false; - } else { - retry = true; - *e = TransactionStatus::Retrying(*tries); - } - } - }) - .or_insert(TransactionStatus::Running); - - if !allow { - return Ok(None); - } - - let mut events = Vec::new(); - - if retry { - // We retry the previous transaction - for (key, value) in db.sending.servercurrentevent_data.scan_prefix(prefix) { - if let Ok((_, e)) = Self::parse_servercurrentevent(&key, value) { - events.push(e); - } - } - } else { - for (e, full_key) in new_events { - let value = if let SendingEventType::Edu(value) = &e { - 
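
// select_events above applies exponential backoff to destinations that keep
// failing: a retry is only attempted once 30s * tries^2 have elapsed, capped
// at 24 hours. The same calculation as a standalone helper (function names
// illustrative only):
use std::time::Duration;

fn min_retry_delay(tries: u32) -> Duration {
    let delay = Duration::from_secs(30) * tries * tries;
    delay.min(Duration::from_secs(60 * 60 * 24)) // never wait longer than a day
}

fn may_retry(tries: u32, elapsed: Duration) -> bool {
    elapsed >= min_retry_delay(tries)
}
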
&**value - } else { - &[][..] - }; - db.sending - .servercurrentevent_data - .insert(&full_key, value)?; - - // If it was a PDU we have to unqueue it - // TODO: don't try to unqueue EDUs - db.sending.servernameevent_data.remove(&full_key)?; - - events.push(e); - } - - if let OutgoingKind::Normal(server_name) = outgoing_kind { - if let Ok((select_edus, last_count)) = Self::select_edus(db, server_name) { - events.extend(select_edus.into_iter().map(SendingEventType::Edu)); - - db.sending - .servername_educount - .insert(server_name.as_bytes(), &last_count.to_be_bytes())?; - } - } - } - - Ok(Some(events)) - } - - #[tracing::instrument(skip(db, server))] - pub fn select_edus(db: &Database, server: &ServerName) -> Result<(Vec>, u64)> { - // u64: count of last edu - let since = db - .sending - .servername_educount - .get(server.as_bytes())? - .map_or(Ok(0), |bytes| { - utils::u64_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid u64 in servername_educount.")) - })?; - let mut events = Vec::new(); - let mut max_edu_count = since; - let mut device_list_changes = HashSet::new(); - - 'outer: for room_id in db.rooms.server_rooms(server) { - let room_id = room_id?; - // Look for device list updates in this room - device_list_changes.extend( - db.users - .keys_changed(&room_id.to_string(), since, None) - .filter_map(|r| r.ok()) - .filter(|user_id| user_id.server_name() == db.globals.server_name()), - ); - - // Look for read receipts in this room - for r in db.rooms.edus.readreceipts_since(&room_id, since) { - let (user_id, count, read_receipt) = r?; - - if count > max_edu_count { - max_edu_count = count; - } - - if user_id.server_name() != db.globals.server_name() { - continue; - } - - let event: AnySyncEphemeralRoomEvent = - serde_json::from_str(read_receipt.json().get()) - .map_err(|_| Error::bad_database("Invalid edu event in read_receipts."))?; - let federation_event = match event { - AnySyncEphemeralRoomEvent::Receipt(r) => { - let mut read = BTreeMap::new(); - - let (event_id, mut receipt) = r - .content - .0 - .into_iter() - .next() - .expect("we only use one event per read receipt"); - let receipt = receipt - .remove(&ReceiptType::Read) - .expect("our read receipts always set this") - .remove(&user_id) - .expect("our read receipts always have the user here"); - - read.insert( - user_id, - ReceiptData { - data: receipt.clone(), - event_ids: vec![event_id.clone()], - }, - ); - - let receipt_map = ReceiptMap { read }; - - let mut receipts = BTreeMap::new(); - receipts.insert(room_id.clone(), receipt_map); - - Edu::Receipt(ReceiptContent { receipts }) - } - _ => { - Error::bad_database("Invalid event type in read_receipts"); - continue; - } - }; - - events.push(serde_json::to_vec(&federation_event).expect("json can be serialized")); - - if events.len() >= 20 { - break 'outer; - } - } - } - - for user_id in device_list_changes { - // Empty prev id forces synapse to resync: https://github.com/matrix-org/synapse/blob/98aec1cc9da2bd6b8e34ffb282c85abf9b8b42ca/synapse/handlers/device.py#L767 - // Because synapse resyncs, we can just insert dummy data - let edu = Edu::DeviceListUpdate(DeviceListUpdateContent { - user_id, - device_id: device_id!("dummy"), - device_display_name: Some("Dummy".to_owned()), - stream_id: uint!(1), - prev_id: Vec::new(), - deleted: None, - keys: None, - }); - - events.push(serde_json::to_vec(&edu).expect("json can be serialized")); - } - - Ok((events, max_edu_count)) - } - - #[tracing::instrument(skip(self, pdu_id, senderkey))] - pub fn send_push_pdu(&self, pdu_id: 
&[u8], senderkey: Vec) -> Result<()> { - let mut key = b"$".to_vec(); - key.extend_from_slice(&senderkey); - key.push(0xff); - key.extend_from_slice(pdu_id); - self.servernameevent_data.insert(&key, &[])?; - self.sender.unbounded_send((key, vec![])).unwrap(); - - Ok(()) - } - - #[tracing::instrument(skip(self, servers, pdu_id))] - pub fn send_pdu>>( - &self, - servers: I, - pdu_id: &[u8], - ) -> Result<()> { - let mut batch = servers.map(|server| { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(pdu_id); - - self.sender.unbounded_send((key.clone(), vec![])).unwrap(); - - (key, Vec::new()) - }); - - self.servernameevent_data.insert_batch(&mut batch)?; - - Ok(()) - } - - #[tracing::instrument(skip(self, server, serialized))] - pub fn send_reliable_edu( - &self, - server: &ServerName, - serialized: Vec, - id: u64, - ) -> Result<()> { - let mut key = server.as_bytes().to_vec(); - key.push(0xff); - key.extend_from_slice(&id.to_be_bytes()); - self.servernameevent_data.insert(&key, &serialized)?; - self.sender.unbounded_send((key, serialized)).unwrap(); - - Ok(()) - } - - #[tracing::instrument(skip(self))] - pub fn send_pdu_appservice(&self, appservice_id: &str, pdu_id: &[u8]) -> Result<()> { - let mut key = b"+".to_vec(); - key.extend_from_slice(appservice_id.as_bytes()); - key.push(0xff); - key.extend_from_slice(pdu_id); - self.servernameevent_data.insert(&key, &[])?; - self.sender.unbounded_send((key, vec![])).unwrap(); - - Ok(()) - } - - #[tracing::instrument(skip(keys))] - fn calculate_hash(keys: &[&[u8]]) -> Vec { - // We only hash the pdu's event ids, not the whole pdu - let bytes = keys.join(&0xff); - let hash = digest::digest(&digest::SHA256, &bytes); - hash.as_ref().to_owned() - } - - #[tracing::instrument(skip(db, events, kind))] - async fn handle_events( - kind: OutgoingKind, - events: Vec, - db: Arc>, - ) -> Result { - let db = db.read().await; - - match &kind { - OutgoingKind::Appservice(server) => { - let mut pdu_jsons = Vec::new(); - - for event in &events { - match event { - SendingEventType::Pdu(pdu_id) => { - pdu_jsons.push(db.rooms - .get_pdu_from_id(pdu_id) - .map_err(|e| (kind.clone(), e))? - .ok_or_else(|| { - ( - kind.clone(), - Error::bad_database( - "[Appservice] Event in servernameevent_data not found in db.", - ), - ) - })? - .to_room_event()) - } - SendingEventType::Edu(_) => { - // Appservices don't need EDUs (?) - } - } - } - - let permit = db.sending.maximum_requests.acquire().await; - - let response = appservice_server::send_request( - &db.globals, - db.appservice - .get_registration(server.as_str()) - .unwrap() - .unwrap(), // TODO: handle error - appservice::event::push_events::v1::Request { - events: &pdu_jsons, - txn_id: &base64::encode_config( - Self::calculate_hash( - &events - .iter() - .map(|e| match e { - SendingEventType::Edu(b) | SendingEventType::Pdu(b) => &**b, - }) - .collect::>(), - ), - base64::URL_SAFE_NO_PAD, - ), - }, - ) - .await - .map(|_response| kind.clone()) - .map_err(|e| (kind, e)); - - drop(permit); - - response - } - OutgoingKind::Push(user, pushkey) => { - let mut pdus = Vec::new(); - - for event in &events { - match event { - SendingEventType::Pdu(pdu_id) => { - pdus.push( - db.rooms - .get_pdu_from_id(pdu_id) - .map_err(|e| (kind.clone(), e))? - .ok_or_else(|| { - ( - kind.clone(), - Error::bad_database( - "[Push] Event in servernamevent_datas not found in db.", - ), - ) - })?, - ); - } - SendingEventType::Edu(_) => { - // Push gateways don't need EDUs (?) 
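
// calculate_hash above gives each outgoing batch a deterministic transaction
// id: the event keys are joined with 0xff, hashed with SHA-256, and encoded as
// URL-safe unpadded base64 wherever a txn_id is needed. A condensed sketch of
// that derivation (assumes the same `ring` and `base64` crates used above; the
// function name is illustrative only):
fn batch_txn_id(keys: &[&[u8]]) -> String {
    let bytes = keys.join(&0xff);
    let hash = ring::digest::digest(&ring::digest::SHA256, &bytes);
    base64::encode_config(hash.as_ref(), base64::URL_SAFE_NO_PAD)
}
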
- } - } - } - - for pdu in pdus { - // Redacted events are not notification targets (we don't send push for them) - if let Some(unsigned) = &pdu.unsigned { - if let Ok(unsigned) = - serde_json::from_str::(unsigned.get()) - { - if unsigned.get("redacted_because").is_some() { - continue; - } - } - } - - let userid = - UserId::try_from(utils::string_from_bytes(user).map_err(|_| { - ( - kind.clone(), - Error::bad_database("Invalid push user string in db."), - ) - })?) - .map_err(|_| { - ( - kind.clone(), - Error::bad_database("Invalid push user id in db."), - ) - })?; - - let mut senderkey = user.clone(); - senderkey.push(0xff); - senderkey.extend_from_slice(pushkey); - - let pusher = match db - .pusher - .get_pusher(&senderkey) - .map_err(|e| (OutgoingKind::Push(user.clone(), pushkey.clone()), e))? - { - Some(pusher) => pusher, - None => continue, - }; - - let rules_for_user = db - .account_data - .get(None, &userid, EventType::PushRules) - .unwrap_or_default() - .map(|ev: PushRulesEvent| ev.content.global) - .unwrap_or_else(|| push::Ruleset::server_default(&userid)); - - let unread: UInt = db - .rooms - .notification_count(&userid, &pdu.room_id) - .map_err(|e| (kind.clone(), e))? - .try_into() - .expect("notifiation count can't go that high"); - - let permit = db.sending.maximum_requests.acquire().await; - - let _response = pusher::send_push_notice( - &userid, - unread, - &pusher, - rules_for_user, - &pdu, - &db, - ) - .await - .map(|_response| kind.clone()) - .map_err(|e| (kind.clone(), e)); - - drop(permit); - } - Ok(OutgoingKind::Push(user.clone(), pushkey.clone())) - } - OutgoingKind::Normal(server) => { - let mut edu_jsons = Vec::new(); - let mut pdu_jsons = Vec::new(); - - for event in &events { - match event { - SendingEventType::Pdu(pdu_id) => { - // TODO: check room version and remove event_id if needed - let raw = PduEvent::convert_to_outgoing_federation_event( - db.rooms - .get_pdu_json_from_id(pdu_id) - .map_err(|e| (OutgoingKind::Normal(server.clone()), e))? 
- .ok_or_else(|| { - ( - OutgoingKind::Normal(server.clone()), - Error::bad_database( - "[Normal] Event in servernamevent_datas not found in db.", - ), - ) - })?, - ); - pdu_jsons.push(raw); - } - SendingEventType::Edu(edu) => { - if let Ok(raw) = serde_json::from_slice(edu) { - edu_jsons.push(raw); - } - } - } - } - - let permit = db.sending.maximum_requests.acquire().await; - - let response = server_server::send_request( - &db.globals, - &*server, - send_transaction_message::v1::Request { - origin: db.globals.server_name(), - pdus: &pdu_jsons, - edus: &edu_jsons, - origin_server_ts: MilliSecondsSinceUnixEpoch::now(), - transaction_id: &base64::encode_config( - Self::calculate_hash( - &events - .iter() - .map(|e| match e { - SendingEventType::Edu(b) | SendingEventType::Pdu(b) => &**b, - }) - .collect::>(), - ), - base64::URL_SAFE_NO_PAD, - ), - }, - ) - .await - .map(|response| { - for pdu in response.pdus { - if pdu.1.is_err() { - warn!("Failed to send to {}: {:?}", server, pdu); - } - } - kind.clone() - }) - .map_err(|e| (kind, e)); - - drop(permit); - - response - } - } - } - - #[tracing::instrument(skip(key))] - fn parse_servercurrentevent( - key: &[u8], - value: Vec, - ) -> Result<(OutgoingKind, SendingEventType)> { - // Appservices start with a plus - Ok::<_, Error>(if key.starts_with(b"+") { - let mut parts = key[1..].splitn(2, |&b| b == 0xff); - - let server = parts.next().expect("splitn always returns one element"); - let event = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; - let server = utils::string_from_bytes(server).map_err(|_| { - Error::bad_database("Invalid server bytes in server_currenttransaction") - })?; - - ( - OutgoingKind::Appservice(Box::::try_from(server).map_err(|_| { - Error::bad_database("Invalid server string in server_currenttransaction") - })?), - if value.is_empty() { - SendingEventType::Pdu(event.to_vec()) - } else { - SendingEventType::Edu(value) - }, - ) - } else if key.starts_with(b"$") { - let mut parts = key[1..].splitn(3, |&b| b == 0xff); - - let user = parts.next().expect("splitn always returns one element"); - let pushkey = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; - let event = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; - ( - OutgoingKind::Push(user.to_vec(), pushkey.to_vec()), - if value.is_empty() { - SendingEventType::Pdu(event.to_vec()) - } else { - SendingEventType::Edu(value) - }, - ) - } else { - let mut parts = key.splitn(2, |&b| b == 0xff); - - let server = parts.next().expect("splitn always returns one element"); - let event = parts - .next() - .ok_or_else(|| Error::bad_database("Invalid bytes in servercurrentpdus."))?; - let server = utils::string_from_bytes(server).map_err(|_| { - Error::bad_database("Invalid server bytes in server_currenttransaction") - })?; - - ( - OutgoingKind::Normal(Box::::try_from(server).map_err(|_| { - Error::bad_database("Invalid server string in server_currenttransaction") - })?), - if value.is_empty() { - SendingEventType::Pdu(event.to_vec()) - } else { - SendingEventType::Edu(value) - }, - ) - }) - } - - #[tracing::instrument(skip(self, globals, destination, request))] - pub async fn send_federation_request( - &self, - globals: &crate::database::globals::Globals, - destination: &ServerName, - request: T, - ) -> Result - where - T: Debug, - { - let permit = self.maximum_requests.acquire().await; - let response = server_server::send_request(globals, 
destination, request).await; - drop(permit); - - response - } - - #[tracing::instrument(skip(self, globals, registration, request))] - pub async fn send_appservice_request( - &self, - globals: &crate::database::globals::Globals, - registration: serde_yaml::Value, - request: T, - ) -> Result - where - T: Debug, - { - let permit = self.maximum_requests.acquire().await; - let response = appservice_server::send_request(globals, registration, request).await; - drop(permit); - - response - } -} diff --git a/src/database/uiaa.rs b/src/database/uiaa.rs deleted file mode 100644 index 1c0fb566..00000000 --- a/src/database/uiaa.rs +++ /dev/null @@ -1,237 +0,0 @@ -use std::sync::Arc; - -use crate::{client_server::SESSION_ID_LENGTH, utils, Error, Result}; -use ruma::{ - api::client::{ - error::ErrorKind, - r0::uiaa::{ - AuthType, IncomingAuthData, IncomingPassword, IncomingUserIdentifier::MatrixId, - UiaaInfo, - }, - }, - signatures::CanonicalJsonValue, - DeviceId, UserId, -}; -use tracing::error; - -use super::abstraction::Tree; - -pub struct Uiaa { - pub(super) userdevicesessionid_uiaainfo: Arc, // User-interactive authentication - pub(super) userdevicesessionid_uiaarequest: Arc, // UiaaRequest = canonical json value -} - -impl Uiaa { - /// Creates a new Uiaa session. Make sure the session token is unique. - pub fn create( - &self, - user_id: &UserId, - device_id: &DeviceId, - uiaainfo: &UiaaInfo, - json_body: &CanonicalJsonValue, - ) -> Result<()> { - self.set_uiaa_request( - user_id, - device_id, - uiaainfo.session.as_ref().expect("session should be set"), // TODO: better session error handling (why is it optional in ruma?) - json_body, - )?; - self.update_uiaa_session( - user_id, - device_id, - uiaainfo.session.as_ref().expect("session should be set"), - Some(uiaainfo), - ) - } - - pub fn try_auth( - &self, - user_id: &UserId, - device_id: &DeviceId, - auth: &IncomingAuthData, - uiaainfo: &UiaaInfo, - users: &super::users::Users, - globals: &super::globals::Globals, - ) -> Result<(bool, UiaaInfo)> { - let mut uiaainfo = auth - .session() - .map(|session| self.get_uiaa_session(user_id, device_id, session)) - .unwrap_or_else(|| Ok(uiaainfo.clone()))?; - - if uiaainfo.session.is_none() { - uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); - } - - match auth { - // Find out what the user completed - IncomingAuthData::Password(IncomingPassword { - identifier, - password, - .. - }) => { - let username = match identifier { - MatrixId(username) => username, - _ => { - return Err(Error::BadRequest( - ErrorKind::Unrecognized, - "Identifier type not recognized.", - )) - } - }; - - let user_id = - UserId::parse_with_server_name(username.clone(), globals.server_name()) - .map_err(|_| { - Error::BadRequest(ErrorKind::InvalidParam, "User ID is invalid.") - })?; - - // Check if password is correct - if let Some(hash) = users.password_hash(&user_id)? { - let hash_matches = - argon2::verify_encoded(&hash, password.as_bytes()).unwrap_or(false); - - if !hash_matches { - uiaainfo.auth_error = Some(ruma::api::client::error::ErrorBody { - kind: ErrorKind::Forbidden, - message: "Invalid username or password.".to_owned(), - }); - return Ok((false, uiaainfo)); - } - } - - // Password was correct! 
Let's add it to `completed` - uiaainfo.completed.push(AuthType::Password); - } - IncomingAuthData::Dummy(_) => { - uiaainfo.completed.push(AuthType::Dummy); - } - k => error!("type not supported: {:?}", k), - } - - // Check if a flow now succeeds - let mut completed = false; - 'flows: for flow in &mut uiaainfo.flows { - for stage in &flow.stages { - if !uiaainfo.completed.contains(stage) { - continue 'flows; - } - } - // We didn't break, so this flow succeeded! - completed = true; - } - - if !completed { - self.update_uiaa_session( - user_id, - device_id, - uiaainfo.session.as_ref().expect("session is always set"), - Some(&uiaainfo), - )?; - return Ok((false, uiaainfo)); - } - - // UIAA was successful! Remove this session and return true - self.update_uiaa_session( - user_id, - device_id, - uiaainfo.session.as_ref().expect("session is always set"), - None, - )?; - Ok((true, uiaainfo)) - } - - fn set_uiaa_request( - &self, - user_id: &UserId, - device_id: &DeviceId, - session: &str, - request: &CanonicalJsonValue, - ) -> Result<()> { - let mut userdevicesessionid = user_id.as_bytes().to_vec(); - userdevicesessionid.push(0xff); - userdevicesessionid.extend_from_slice(device_id.as_bytes()); - userdevicesessionid.push(0xff); - userdevicesessionid.extend_from_slice(session.as_bytes()); - - self.userdevicesessionid_uiaarequest.insert( - &userdevicesessionid, - &serde_json::to_vec(request).expect("json value to vec always works"), - )?; - - Ok(()) - } - - pub fn get_uiaa_request( - &self, - user_id: &UserId, - device_id: &DeviceId, - session: &str, - ) -> Result> { - let mut userdevicesessionid = user_id.as_bytes().to_vec(); - userdevicesessionid.push(0xff); - userdevicesessionid.extend_from_slice(device_id.as_bytes()); - userdevicesessionid.push(0xff); - userdevicesessionid.extend_from_slice(session.as_bytes()); - - self.userdevicesessionid_uiaarequest - .get(&userdevicesessionid)? - .map(|bytes| { - serde_json::from_str::( - &utils::string_from_bytes(&bytes) - .map_err(|_| Error::bad_database("Invalid uiaa request bytes in db."))?, - ) - .map_err(|_| Error::bad_database("Invalid uiaa request in db.")) - }) - .transpose() - } - - fn update_uiaa_session( - &self, - user_id: &UserId, - device_id: &DeviceId, - session: &str, - uiaainfo: Option<&UiaaInfo>, - ) -> Result<()> { - let mut userdevicesessionid = user_id.as_bytes().to_vec(); - userdevicesessionid.push(0xff); - userdevicesessionid.extend_from_slice(device_id.as_bytes()); - userdevicesessionid.push(0xff); - userdevicesessionid.extend_from_slice(session.as_bytes()); - - if let Some(uiaainfo) = uiaainfo { - self.userdevicesessionid_uiaainfo.insert( - &userdevicesessionid, - &serde_json::to_vec(&uiaainfo).expect("UiaaInfo::to_vec always works"), - )?; - } else { - self.userdevicesessionid_uiaainfo - .remove(&userdevicesessionid)?; - } - - Ok(()) - } - - fn get_uiaa_session( - &self, - user_id: &UserId, - device_id: &DeviceId, - session: &str, - ) -> Result { - let mut userdevicesessionid = user_id.as_bytes().to_vec(); - userdevicesessionid.push(0xff); - userdevicesessionid.extend_from_slice(device_id.as_bytes()); - userdevicesessionid.push(0xff); - userdevicesessionid.extend_from_slice(session.as_bytes()); - - serde_json::from_slice( - &self - .userdevicesessionid_uiaainfo - .get(&userdevicesessionid)? 
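
// try_auth above treats UIAA as finished once every stage of at least one
// advertised flow appears in the `completed` list. The same check as a
// standalone helper over plain stage names instead of ruma's AuthType (names
// illustrative only):
fn uiaa_completed(flows: &[Vec<&str>], completed: &[&str]) -> bool {
    flows
        .iter()
        .any(|flow| flow.iter().all(|stage| completed.contains(stage)))
}

fn demo_uiaa() {
    let flows = vec![vec!["m.login.password"], vec!["m.login.sso", "m.login.dummy"]];
    assert!(uiaa_completed(&flows, &["m.login.password"]));
    assert!(!uiaa_completed(&flows, &["m.login.dummy"]));
}
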
- .ok_or(Error::BadRequest( - ErrorKind::Forbidden, - "UIAA session does not exist.", - ))?, - ) - .map_err(|_| Error::bad_database("UiaaInfo in userdeviceid_uiaainfo is invalid.")) - } -} diff --git a/src/lib.rs b/src/lib.rs index 82b8f340..3d7f7ae9 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -7,29 +7,25 @@ #![allow(clippy::suspicious_else_formatting)] #![deny(clippy::dbg_macro)] -pub mod appservice_server; -pub mod client_server; +pub mod api; +mod config; mod database; -mod error; -mod pdu; -mod ruma_wrapper; -pub mod server_server; +mod service; mod utils; -pub use database::{Config, Database}; -pub use error::{Error, Result}; -pub use pdu::PduEvent; -pub use rocket::Config as RocketConfig; -pub use ruma_wrapper::{ConduitResult, Ruma, RumaResponse}; -use std::ops::Deref; +use std::sync::RwLock; -pub struct State<'r, T: Send + Sync + 'static>(pub &'r T); +pub use api::ruma_wrapper::{Ruma, RumaResponse}; +pub use config::Config; +pub use database::KeyValueDatabase; +pub use service::{pdu::PduEvent, Services}; +pub use utils::error::{Error, Result}; -impl<'r, T: Send + Sync + 'static> Deref for State<'r, T> { - type Target = T; +pub static SERVICES: RwLock> = RwLock::new(None); - #[inline(always)] - fn deref(&self) -> &T { - self.0 - } +pub fn services<'a>() -> &'static Services { + SERVICES + .read() + .unwrap() + .expect("SERVICES should be initialized when this is called") } diff --git a/src/main.rs b/src/main.rs index 84dfb1fc..626de3ae 100644 --- a/src/main.rs +++ b/src/main.rs @@ -7,193 +7,51 @@ #![allow(clippy::suspicious_else_formatting)] #![deny(clippy::dbg_macro)] -pub mod appservice_server; -pub mod client_server; -pub mod server_server; - -mod database; -mod error; -mod pdu; -mod ruma_wrapper; -mod utils; - -use std::sync::Arc; - -use database::Config; -pub use database::Database; -pub use error::{Error, Result}; +use std::{future::Future, io, net::SocketAddr, time::Duration}; + +use axum::{ + extract::{FromRequest, MatchedPath}, + handler::Handler, + response::IntoResponse, + routing::{get, on, MethodFilter}, + Router, +}; +use axum_server::{bind, bind_rustls, tls_rustls::RustlsConfig, Handle as ServerHandle}; +use conduit::api::{client_server, server_server}; +use figment::{ + providers::{Env, Format, Toml}, + Figment, +}; +use http::{ + header::{self, HeaderName}, + Method, Uri, +}; use opentelemetry::trace::{FutureExt, Tracer}; -pub use pdu::PduEvent; -pub use rocket::State; -use ruma::api::client::error::ErrorKind; -pub use ruma_wrapper::{ConduitResult, Ruma, RumaResponse}; - -use rocket::{ - catch, catchers, - figment::{ - providers::{Env, Format, Toml}, - Figment, - }, - routes, Request, +use ruma::api::{client::error::ErrorKind, IncomingRequest}; +use tokio::signal; +use tower::ServiceBuilder; +use tower_http::{ + cors::{self, CorsLayer}, + trace::TraceLayer, + ServiceBuilderExt as _, }; -use tokio::sync::RwLock; +use tracing::{info, warn}; use tracing_subscriber::{prelude::*, EnvFilter}; -fn setup_rocket(config: Figment, data: Arc>) -> rocket::Rocket { - rocket::custom(config) - .manage(data) - .mount( - "/", - routes![ - client_server::get_supported_versions_route, - client_server::get_register_available_route, - client_server::register_route, - client_server::get_login_types_route, - client_server::login_route, - client_server::whoami_route, - client_server::logout_route, - client_server::logout_all_route, - client_server::change_password_route, - client_server::deactivate_route, - client_server::third_party_route, - client_server::get_capabilities_route, - 
client_server::get_pushrules_all_route, - client_server::set_pushrule_route, - client_server::get_pushrule_route, - client_server::set_pushrule_enabled_route, - client_server::get_pushrule_enabled_route, - client_server::get_pushrule_actions_route, - client_server::set_pushrule_actions_route, - client_server::delete_pushrule_route, - client_server::get_room_event_route, - client_server::get_room_aliases_route, - client_server::get_filter_route, - client_server::create_filter_route, - client_server::set_global_account_data_route, - client_server::set_room_account_data_route, - client_server::get_global_account_data_route, - client_server::get_room_account_data_route, - client_server::set_displayname_route, - client_server::get_displayname_route, - client_server::set_avatar_url_route, - client_server::get_avatar_url_route, - client_server::get_profile_route, - client_server::set_presence_route, - client_server::get_presence_route, - client_server::upload_keys_route, - client_server::get_keys_route, - client_server::claim_keys_route, - client_server::create_backup_route, - client_server::update_backup_route, - client_server::delete_backup_route, - client_server::get_latest_backup_route, - client_server::get_backup_route, - client_server::add_backup_key_sessions_route, - client_server::add_backup_keys_route, - client_server::delete_backup_key_session_route, - client_server::delete_backup_key_sessions_route, - client_server::delete_backup_keys_route, - client_server::get_backup_key_session_route, - client_server::get_backup_key_sessions_route, - client_server::get_backup_keys_route, - client_server::set_read_marker_route, - client_server::create_receipt_route, - client_server::create_typing_event_route, - client_server::create_room_route, - client_server::redact_event_route, - client_server::create_alias_route, - client_server::delete_alias_route, - client_server::get_alias_route, - client_server::join_room_by_id_route, - client_server::join_room_by_id_or_alias_route, - client_server::joined_members_route, - client_server::leave_room_route, - client_server::forget_room_route, - client_server::joined_rooms_route, - client_server::kick_user_route, - client_server::ban_user_route, - client_server::unban_user_route, - client_server::invite_user_route, - client_server::set_room_visibility_route, - client_server::get_room_visibility_route, - client_server::get_public_rooms_route, - client_server::get_public_rooms_filtered_route, - client_server::search_users_route, - client_server::get_member_events_route, - client_server::get_protocols_route, - client_server::send_message_event_route, - client_server::send_state_event_for_key_route, - client_server::send_state_event_for_empty_key_route, - client_server::get_state_events_route, - client_server::get_state_events_for_key_route, - client_server::get_state_events_for_empty_key_route, - client_server::sync_events_route, - client_server::get_context_route, - client_server::get_message_events_route, - client_server::search_events_route, - client_server::turn_server_route, - client_server::send_event_to_device_route, - client_server::get_media_config_route, - client_server::create_content_route, - client_server::get_content_route, - client_server::get_content_thumbnail_route, - client_server::get_devices_route, - client_server::get_device_route, - client_server::update_device_route, - client_server::delete_device_route, - client_server::delete_devices_route, - client_server::get_tags_route, - client_server::update_tag_route, - 
client_server::delete_tag_route, - client_server::options_route, - client_server::upload_signing_keys_route, - client_server::upload_signatures_route, - client_server::get_key_changes_route, - client_server::get_pushers_route, - client_server::set_pushers_route, - // client_server::third_party_route, - client_server::upgrade_room_route, - server_server::get_server_version_route, - server_server::get_server_keys_route, - server_server::get_server_keys_deprecated_route, - server_server::get_public_rooms_route, - server_server::get_public_rooms_filtered_route, - server_server::send_transaction_message_route, - server_server::get_event_route, - server_server::get_missing_events_route, - server_server::get_event_authorization_route, - server_server::get_room_state_route, - server_server::get_room_state_ids_route, - server_server::create_join_event_template_route, - server_server::create_join_event_v1_route, - server_server::create_join_event_v2_route, - server_server::create_invite_route, - server_server::get_devices_route, - server_server::get_room_information_route, - server_server::get_profile_information_route, - server_server::get_keys_route, - server_server::claim_keys_route, - ], - ) - .register( - "/", - catchers![ - not_found_catcher, - forbidden_catcher, - unknown_token_catcher, - missing_token_catcher, - bad_json_catcher - ], - ) -} +pub use conduit::*; // Re-export everything from the library crate -#[rocket::main] -async fn main() { - // Force log level off, so we can use our own logger - std::env::set_var("CONDUIT_LOG_LEVEL", "off"); +#[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] +use tikv_jemallocator::Jemalloc; +#[cfg(all(not(target_env = "msvc"), feature = "jemalloc"))] +#[global_allocator] +static GLOBAL: Jemalloc = Jemalloc; + +#[tokio::main] +async fn main() { + // Initialize DB let raw_config = - Figment::from(default_config()) + Figment::new() .merge( Toml::file(Env::var("CONDUIT_CONFIG").expect( "The CONDUIT_CONFIG env var needs to be set. Example: /etc/conduit.toml", @@ -202,8 +60,6 @@ async fn main() { ) .merge(Env::prefixed("CONDUIT_").global()); - std::env::set_var("RUST_LOG", "warn"); - let config = match raw_config.extract::() { Ok(s) => s, Err(e) => { @@ -212,33 +68,25 @@ async fn main() { } }; - let start = async { - config.warn_deprecated(); - - let db = match Database::load_or_create(&config).await { - Ok(db) => db, - Err(e) => { - eprintln!( - "The database couldn't be loaded or created. The following error occured: {}", - e - ); - std::process::exit(1); - } - }; + config.warn_deprecated(); - let rocket = setup_rocket(raw_config, Arc::clone(&db)) - .ignite() - .await - .unwrap(); + if let Err(e) = KeyValueDatabase::load_or_create(config).await { + eprintln!( + "The database couldn't be loaded or created. 
The following error occured: {}", + e + ); + std::process::exit(1); + }; - Database::start_on_shutdown_tasks(db, rocket.shutdown()).await; + let config = &services().globals.config; - rocket.launch().await.unwrap(); + let start = async { + run_server().await.unwrap(); }; if config.allow_jaeger { opentelemetry::global::set_text_map_propagator(opentelemetry_jaeger::Propagator::new()); - let tracer = opentelemetry_jaeger::new_pipeline() + let tracer = opentelemetry_jaeger::new_agent_pipeline() .install_batch(opentelemetry::runtime::Tokio) .unwrap(); @@ -249,8 +97,6 @@ async fn main() { println!("exporting"); opentelemetry::global::shutdown_tracer_provider(); } else { - std::env::set_var("RUST_LOG", &config.log); - let registry = tracing_subscriber::Registry::default(); if config.tracing_flame { let (flame_layer, _guard) = @@ -264,9 +110,13 @@ async fn main() { start.await; } else { let fmt_layer = tracing_subscriber::fmt::Layer::new(); - let filter_layer = EnvFilter::try_from_default_env() - .or_else(|_| EnvFilter::try_new("info")) - .unwrap(); + let filter_layer = match EnvFilter::try_new(&config.log) { + Ok(s) => s, + Err(e) => { + eprintln!("It looks like your log config is invalid. The following error occurred: {}", e); + EnvFilter::try_new("warn").unwrap() + } + }; let subscriber = registry.with(filter_layer).with(fmt_layer); tracing::subscriber::set_global_default(subscriber).unwrap(); @@ -275,57 +125,371 @@ async fn main() { } } -#[catch(404)] -fn not_found_catcher(_: &Request<'_>) -> String { - "404 Not Found".to_owned() +async fn run_server() -> io::Result<()> { + let config = &services().globals.config; + let addr = SocketAddr::from((config.address, config.port)); + + let x_requested_with = HeaderName::from_static("x-requested-with"); + + let middlewares = ServiceBuilder::new() + .sensitive_headers([header::AUTHORIZATION]) + .layer( + TraceLayer::new_for_http().make_span_with(|request: &http::Request<_>| { + let path = if let Some(path) = request.extensions().get::() { + path.as_str() + } else { + request.uri().path() + }; + + tracing::info_span!("http_request", %path) + }), + ) + .compression() + .layer(axum::middleware::from_fn(unrecognized_method)) + .layer( + CorsLayer::new() + .allow_origin(cors::Any) + .allow_methods([ + Method::GET, + Method::POST, + Method::PUT, + Method::DELETE, + Method::OPTIONS, + ]) + .allow_headers([ + header::ORIGIN, + x_requested_with, + header::CONTENT_TYPE, + header::ACCEPT, + header::AUTHORIZATION, + ]) + .max_age(Duration::from_secs(86400)), + ); + + let app = routes().layer(middlewares).into_make_service(); + let handle = ServerHandle::new(); + + tokio::spawn(shutdown_signal(handle.clone())); + + match &config.tls { + Some(tls) => { + let conf = RustlsConfig::from_pem_file(&tls.certs, &tls.key).await?; + bind_rustls(addr, conf).handle(handle).serve(app).await?; + } + None => { + bind(addr).handle(handle).serve(app).await?; + } + } + + // On shutdown + info!(target: "shutdown-sync", "Received shutdown notification, notifying sync helpers..."); + services().globals.rotate.fire(); + + Ok(()) } -#[catch(580)] -fn forbidden_catcher() -> Result<()> { - Err(Error::BadRequest(ErrorKind::Forbidden, "Forbidden.")) +async fn unrecognized_method( + req: axum::http::Request, + next: axum::middleware::Next, +) -> std::result::Result { + let method = req.method().clone(); + let uri = req.uri().clone(); + let inner = next.run(req).await; + if inner.status() == axum::http::StatusCode::METHOD_NOT_ALLOWED { + warn!("Method not allowed: {method} {uri}"); + return 
Ok( + Error::BadRequest(ErrorKind::Unrecognized, "Unrecognized request").into_response(), + ); + } + Ok(inner) } -#[catch(581)] -fn unknown_token_catcher() -> Result<()> { - Err(Error::BadRequest( - ErrorKind::UnknownToken { soft_logout: false }, - "Unknown token.", - )) +fn routes() -> Router { + Router::new() + .ruma_route(client_server::get_supported_versions_route) + .ruma_route(client_server::get_register_available_route) + .ruma_route(client_server::register_route) + .ruma_route(client_server::get_login_types_route) + .ruma_route(client_server::login_route) + .ruma_route(client_server::whoami_route) + .ruma_route(client_server::logout_route) + .ruma_route(client_server::logout_all_route) + .ruma_route(client_server::change_password_route) + .ruma_route(client_server::deactivate_route) + .ruma_route(client_server::third_party_route) + .ruma_route(client_server::get_capabilities_route) + .ruma_route(client_server::get_pushrules_all_route) + .ruma_route(client_server::set_pushrule_route) + .ruma_route(client_server::get_pushrule_route) + .ruma_route(client_server::set_pushrule_enabled_route) + .ruma_route(client_server::get_pushrule_enabled_route) + .ruma_route(client_server::get_pushrule_actions_route) + .ruma_route(client_server::set_pushrule_actions_route) + .ruma_route(client_server::delete_pushrule_route) + .ruma_route(client_server::get_room_event_route) + .ruma_route(client_server::get_room_aliases_route) + .ruma_route(client_server::get_filter_route) + .ruma_route(client_server::create_filter_route) + .ruma_route(client_server::set_global_account_data_route) + .ruma_route(client_server::set_room_account_data_route) + .ruma_route(client_server::get_global_account_data_route) + .ruma_route(client_server::get_room_account_data_route) + .ruma_route(client_server::set_displayname_route) + .ruma_route(client_server::get_displayname_route) + .ruma_route(client_server::set_avatar_url_route) + .ruma_route(client_server::get_avatar_url_route) + .ruma_route(client_server::get_profile_route) + .ruma_route(client_server::set_presence_route) + .ruma_route(client_server::get_presence_route) + .ruma_route(client_server::upload_keys_route) + .ruma_route(client_server::get_keys_route) + .ruma_route(client_server::claim_keys_route) + .ruma_route(client_server::create_backup_version_route) + .ruma_route(client_server::update_backup_version_route) + .ruma_route(client_server::delete_backup_version_route) + .ruma_route(client_server::get_latest_backup_info_route) + .ruma_route(client_server::get_backup_info_route) + .ruma_route(client_server::add_backup_keys_route) + .ruma_route(client_server::add_backup_keys_for_room_route) + .ruma_route(client_server::add_backup_keys_for_session_route) + .ruma_route(client_server::delete_backup_keys_for_room_route) + .ruma_route(client_server::delete_backup_keys_for_session_route) + .ruma_route(client_server::delete_backup_keys_route) + .ruma_route(client_server::get_backup_keys_for_room_route) + .ruma_route(client_server::get_backup_keys_for_session_route) + .ruma_route(client_server::get_backup_keys_route) + .ruma_route(client_server::set_read_marker_route) + .ruma_route(client_server::create_receipt_route) + .ruma_route(client_server::create_typing_event_route) + .ruma_route(client_server::create_room_route) + .ruma_route(client_server::redact_event_route) + .ruma_route(client_server::report_event_route) + .ruma_route(client_server::create_alias_route) + .ruma_route(client_server::delete_alias_route) + .ruma_route(client_server::get_alias_route) + 
.ruma_route(client_server::join_room_by_id_route) + .ruma_route(client_server::join_room_by_id_or_alias_route) + .ruma_route(client_server::joined_members_route) + .ruma_route(client_server::leave_room_route) + .ruma_route(client_server::forget_room_route) + .ruma_route(client_server::joined_rooms_route) + .ruma_route(client_server::kick_user_route) + .ruma_route(client_server::ban_user_route) + .ruma_route(client_server::unban_user_route) + .ruma_route(client_server::invite_user_route) + .ruma_route(client_server::set_room_visibility_route) + .ruma_route(client_server::get_room_visibility_route) + .ruma_route(client_server::get_public_rooms_route) + .ruma_route(client_server::get_public_rooms_filtered_route) + .ruma_route(client_server::search_users_route) + .ruma_route(client_server::get_member_events_route) + .ruma_route(client_server::get_protocols_route) + .ruma_route(client_server::send_message_event_route) + .ruma_route(client_server::send_state_event_for_key_route) + .ruma_route(client_server::get_state_events_route) + .ruma_route(client_server::get_state_events_for_key_route) + // Ruma doesn't have support for multiple paths for a single endpoint yet, and these routes + // share one Ruma request / response type pair with {get,send}_state_event_for_key_route + .route( + "/_matrix/client/r0/rooms/:room_id/state/:event_type", + get(client_server::get_state_events_for_empty_key_route) + .put(client_server::send_state_event_for_empty_key_route), + ) + .route( + "/_matrix/client/v3/rooms/:room_id/state/:event_type", + get(client_server::get_state_events_for_empty_key_route) + .put(client_server::send_state_event_for_empty_key_route), + ) + // These two endpoints allow trailing slashes + .route( + "/_matrix/client/r0/rooms/:room_id/state/:event_type/", + get(client_server::get_state_events_for_empty_key_route) + .put(client_server::send_state_event_for_empty_key_route), + ) + .route( + "/_matrix/client/v3/rooms/:room_id/state/:event_type/", + get(client_server::get_state_events_for_empty_key_route) + .put(client_server::send_state_event_for_empty_key_route), + ) + .ruma_route(client_server::sync_events_route) + .ruma_route(client_server::get_context_route) + .ruma_route(client_server::get_message_events_route) + .ruma_route(client_server::search_events_route) + .ruma_route(client_server::turn_server_route) + .ruma_route(client_server::send_event_to_device_route) + .ruma_route(client_server::get_media_config_route) + .ruma_route(client_server::create_content_route) + .ruma_route(client_server::get_content_route) + .ruma_route(client_server::get_content_as_filename_route) + .ruma_route(client_server::get_content_thumbnail_route) + .ruma_route(client_server::get_devices_route) + .ruma_route(client_server::get_device_route) + .ruma_route(client_server::update_device_route) + .ruma_route(client_server::delete_device_route) + .ruma_route(client_server::delete_devices_route) + .ruma_route(client_server::get_tags_route) + .ruma_route(client_server::update_tag_route) + .ruma_route(client_server::delete_tag_route) + .ruma_route(client_server::upload_signing_keys_route) + .ruma_route(client_server::upload_signatures_route) + .ruma_route(client_server::get_key_changes_route) + .ruma_route(client_server::get_pushers_route) + .ruma_route(client_server::set_pushers_route) + // .ruma_route(client_server::third_party_route) + .ruma_route(client_server::upgrade_room_route) + .ruma_route(server_server::get_server_version_route) + .route( + "/_matrix/key/v2/server", + 
get(server_server::get_server_keys_route), + ) + .route( + "/_matrix/key/v2/server/:key_id", + get(server_server::get_server_keys_deprecated_route), + ) + .ruma_route(server_server::get_public_rooms_route) + .ruma_route(server_server::get_public_rooms_filtered_route) + .ruma_route(server_server::send_transaction_message_route) + .ruma_route(server_server::get_event_route) + .ruma_route(server_server::get_missing_events_route) + .ruma_route(server_server::get_event_authorization_route) + .ruma_route(server_server::get_room_state_route) + .ruma_route(server_server::get_room_state_ids_route) + .ruma_route(server_server::create_join_event_template_route) + .ruma_route(server_server::create_join_event_v1_route) + .ruma_route(server_server::create_join_event_v2_route) + .ruma_route(server_server::create_invite_route) + .ruma_route(server_server::get_devices_route) + .ruma_route(server_server::get_room_information_route) + .ruma_route(server_server::get_profile_information_route) + .ruma_route(server_server::get_keys_route) + .ruma_route(server_server::claim_keys_route) + .route( + "/_matrix/client/r0/rooms/:room_id/initialSync", + get(initial_sync), + ) + .route( + "/_matrix/client/v3/rooms/:room_id/initialSync", + get(initial_sync), + ) + .fallback(not_found.into_service()) +} + +async fn shutdown_signal(handle: ServerHandle) { + let ctrl_c = async { + signal::ctrl_c() + .await + .expect("failed to install Ctrl+C handler"); + }; + + #[cfg(unix)] + let terminate = async { + signal::unix::signal(signal::unix::SignalKind::terminate()) + .expect("failed to install signal handler") + .recv() + .await; + }; + + #[cfg(not(unix))] + let terminate = std::future::pending::<()>(); + + let sig: &str; + + tokio::select! { + _ = ctrl_c => { sig = "Ctrl+C"; }, + _ = terminate => { sig = "SIGTERM"; }, + } + + warn!("Received {}, shutting down...", sig); + handle.graceful_shutdown(Some(Duration::from_secs(30))); } -#[catch(582)] -fn missing_token_catcher() -> Result<()> { - Err(Error::BadRequest(ErrorKind::MissingToken, "Missing token.")) +async fn not_found(uri: Uri) -> impl IntoResponse { + warn!("Not found: {uri}"); + Error::BadRequest(ErrorKind::Unrecognized, "Unrecognized request") } -#[catch(583)] -fn bad_json_catcher() -> Result<()> { - Err(Error::BadRequest(ErrorKind::BadJson, "Bad json.")) +async fn initial_sync(_uri: Uri) -> impl IntoResponse { + Error::BadRequest( + ErrorKind::GuestAccessForbidden, + "Guest access not implemented", + ) } -fn default_config() -> rocket::Config { - let mut config = rocket::Config::release_default(); +trait RouterExt { + fn ruma_route(self, handler: H) -> Self + where + H: RumaHandler, + T: 'static; +} +impl RouterExt for Router { + fn ruma_route(self, handler: H) -> Self + where + H: RumaHandler, + T: 'static, { - let mut shutdown = &mut config.shutdown; + handler.add_to_router(self) + } +} + +pub trait RumaHandler { + // Can't transform to a handler without boxing or relying on the nightly-only + // impl-trait-in-traits feature. Moving a small amount of extra logic into the trait + // allows bypassing both. + fn add_to_router(self, router: Router) -> Router; +} - #[cfg(unix)] +macro_rules! impl_ruma_handler { + ( $($ty:ident),* $(,)? 
) => { + #[axum::async_trait] + #[allow(non_snake_case)] + impl RumaHandler<($($ty,)* Ruma,)> for F + where + Req: IncomingRequest + Send + 'static, + F: FnOnce($($ty,)* Ruma) -> Fut + Clone + Send + 'static, + Fut: Future> + + Send, + E: IntoResponse, + $( $ty: FromRequest + Send + 'static, )* { - use rocket::config::Sig; + fn add_to_router(self, mut router: Router) -> Router { + let meta = Req::METADATA; + let method_filter = method_to_filter(meta.method); - shutdown.signals.insert(Sig::Term); - shutdown.signals.insert(Sig::Int); - } + for path in IntoIterator::into_iter([meta.unstable_path, meta.r0_path, meta.stable_path]).flatten() { + let handler = self.clone(); - // Once shutdown is triggered, this is the amount of seconds before rocket - // will forcefully start shutting down connections, this gives enough time to /sync - // requests and the like (which havent gotten the memo, somehow) to still complete gracefully. - shutdown.grace = 35; + router = router.route(path, on(method_filter, |$( $ty: $ty, )* req| async move { + handler($($ty,)* req).await.map(RumaResponse) + })) + } - // After the grace period, rocket starts shutting down connections, and waits at least this - // many seconds before forcefully shutting all of them down. - shutdown.mercy = 10; - } + router + } + } + }; +} - config +impl_ruma_handler!(); +impl_ruma_handler!(T1); +impl_ruma_handler!(T1, T2); +impl_ruma_handler!(T1, T2, T3); +impl_ruma_handler!(T1, T2, T3, T4); +impl_ruma_handler!(T1, T2, T3, T4, T5); +impl_ruma_handler!(T1, T2, T3, T4, T5, T6); +impl_ruma_handler!(T1, T2, T3, T4, T5, T6, T7); +impl_ruma_handler!(T1, T2, T3, T4, T5, T6, T7, T8); + +fn method_to_filter(method: Method) -> MethodFilter { + match method { + Method::DELETE => MethodFilter::DELETE, + Method::GET => MethodFilter::GET, + Method::HEAD => MethodFilter::HEAD, + Method::OPTIONS => MethodFilter::OPTIONS, + Method::PATCH => MethodFilter::PATCH, + Method::POST => MethodFilter::POST, + Method::PUT => MethodFilter::PUT, + Method::TRACE => MethodFilter::TRACE, + m => panic!("Unsupported HTTP method: {:?}", m), + } } diff --git a/src/ruma_wrapper.rs b/src/ruma_wrapper.rs deleted file mode 100644 index 03c115cd..00000000 --- a/src/ruma_wrapper.rs +++ /dev/null @@ -1,405 +0,0 @@ -use crate::{database::DatabaseGuard, Error}; -use ruma::{ - api::{client::r0::uiaa::UiaaResponse, OutgoingResponse}, - identifiers::{DeviceId, UserId}, - signatures::CanonicalJsonValue, - Outgoing, ServerName, -}; -use std::ops::Deref; - -#[cfg(feature = "conduit_bin")] -use { - crate::server_server, - rocket::{ - data::{self, ByteUnit, Data, FromData}, - http::Status, - outcome::Outcome::*, - response::{self, Responder}, - tokio::io::AsyncReadExt, - Request, - }, - ruma::api::{AuthScheme, IncomingRequest}, - std::collections::BTreeMap, - std::convert::TryFrom, - std::io::Cursor, - tracing::{debug, warn}, -}; - -/// This struct converts rocket requests into ruma structs by converting them into http requests -/// first. 
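// A dependency-free sketch of the pattern `impl_ruma_handler!` relies on above:
// the macro is expanded once per argument count, so ordinary functions of
// different arities can all share one trait without boxing. The names `Handler`
// and `arity` are illustrative only, not part of Conduit.
trait Handler<Args> {
    fn arity(&self) -> usize;
}

macro_rules! impl_handler {
    ( $($ty:ident),* $(,)? ) => {
        #[allow(unused_mut)]
        impl<F, Res, $($ty,)*> Handler<($($ty,)*)> for F
        where
            F: Fn($($ty),*) -> Res,
        {
            fn arity(&self) -> usize {
                // Count the macro's type parameters; each expansion handles one arity.
                let mut n = 0;
                $( let _ = stringify!($ty); n += 1; )*
                n
            }
        }
    };
}

impl_handler!();
impl_handler!(T1);
impl_handler!(T1, T2);

fn whoami() -> &'static str { "conduit" }
fn login(_user: String, _password: String) -> bool { true }

fn main() {
    assert_eq!(whoami.arity(), 0);
    assert_eq!(login.arity(), 2);
}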
-pub struct Ruma { - pub body: T::Incoming, - pub sender_user: Option, - pub sender_device: Option>, - pub sender_servername: Option>, - // This is None when body is not a valid string - pub json_body: Option, - pub from_appservice: bool, -} - -#[cfg(feature = "conduit_bin")] -#[rocket::async_trait] -impl<'a, T: Outgoing> FromData<'a> for Ruma -where - T::Incoming: IncomingRequest, -{ - type Error = (); - - #[tracing::instrument(skip(request, data))] - async fn from_data( - request: &'a Request<'_>, - data: Data<'a>, - ) -> data::Outcome<'a, Self, Self::Error> { - let metadata = T::Incoming::METADATA; - let db = request - .guard::() - .await - .expect("database was loaded"); - - // Get token from header or query value - let token = request - .headers() - .get_one("Authorization") - .and_then(|s| s.get(7..)) // Split off "Bearer " - .or_else(|| request.query_value("access_token").and_then(|r| r.ok())); - - let limit = db.globals.max_request_size(); - let mut handle = data.open(ByteUnit::Byte(limit.into())); - let mut body = Vec::new(); - if handle.read_to_end(&mut body).await.is_err() { - // Client disconnected - // Missing Token - return Failure((Status::new(582), ())); - } - - let mut json_body = serde_json::from_slice::(&body).ok(); - - let (sender_user, sender_device, sender_servername, from_appservice) = if let Some(( - _id, - registration, - )) = db - .appservice - .all() - .unwrap() - .iter() - .find(|(_id, registration)| { - registration - .get("as_token") - .and_then(|as_token| as_token.as_str()) - .map_or(false, |as_token| token.as_deref() == Some(as_token)) - }) { - match metadata.authentication { - AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { - let user_id = request.query_value::("user_id").map_or_else( - || { - UserId::parse_with_server_name( - registration - .get("sender_localpart") - .unwrap() - .as_str() - .unwrap(), - db.globals.server_name(), - ) - .unwrap() - }, - |string| { - UserId::try_from(string.expect("parsing to string always works")) - .unwrap() - }, - ); - - if !db.users.exists(&user_id).unwrap() { - // Forbidden - return Failure((Status::new(580), ())); - } - - // TODO: Check if appservice is allowed to be that user - (Some(user_id), None, None, true) - } - AuthScheme::ServerSignatures => (None, None, None, true), - AuthScheme::None => (None, None, None, true), - } - } else { - match metadata.authentication { - AuthScheme::AccessToken | AuthScheme::QueryOnlyAccessToken => { - if let Some(token) = token { - match db.users.find_from_token(token).unwrap() { - // Unknown Token - None => return Failure((Status::new(581), ())), - Some((user_id, device_id)) => ( - Some(user_id), - Some(Box::::from(device_id)), - None, - false, - ), - } - } else { - // Missing Token - return Failure((Status::new(582), ())); - } - } - AuthScheme::ServerSignatures => { - // Get origin from header - let x_matrix = match request - .headers() - .get_one("Authorization") - .and_then(|s| s.get(9..)) // Split off "X-Matrix " and parse the rest - .map(|s| { - s.split_terminator(',') - .map(|field| { - let mut splits = field.splitn(2, '='); - (splits.next(), splits.next().map(|s| s.trim_matches('"'))) - }) - .collect::>() - }) { - Some(t) => t, - None => { - warn!("No Authorization header"); - - // Forbidden - return Failure((Status::new(580), ())); - } - }; - - let origin_str = match x_matrix.get(&Some("origin")) { - Some(Some(o)) => *o, - _ => { - warn!("Invalid X-Matrix header origin field: {:?}", x_matrix); - - // Forbidden - return Failure((Status::new(580), ())); - } - }; 
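// Standalone sketch of the X-Matrix parsing done just above: after the
// "X-Matrix " scheme prefix is split off, the remaining header value
// (origin=...,key="...",sig="...") is split on commas, each field on the first
// '=', and surrounding quotes are trimmed. Types here are simplified.
use std::collections::BTreeMap;

fn parse_x_matrix(value: &str) -> BTreeMap<&str, &str> {
    value
        .split_terminator(',')
        .filter_map(|field| {
            let mut splits = field.splitn(2, '=');
            match (splits.next(), splits.next()) {
                (Some(name), Some(val)) => Some((name.trim(), val.trim_matches('"'))),
                _ => None, // a field without '=' is ignored in this sketch
            }
        })
        .collect()
}

fn main() {
    let fields = parse_x_matrix(r#"origin=other.server,key="ed25519:1",sig="ABCdef""#);
    assert_eq!(fields["origin"], "other.server");
    assert_eq!(fields["key"], "ed25519:1");
    assert_eq!(fields["sig"], "ABCdef");
}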
- - let origin = match Box::::try_from(origin_str) { - Ok(s) => s, - _ => { - warn!( - "Invalid server name in X-Matrix header origin field: {:?}", - x_matrix - ); - - // Forbidden - return Failure((Status::new(580), ())); - } - }; - - let key = match x_matrix.get(&Some("key")) { - Some(Some(k)) => *k, - _ => { - warn!("Invalid X-Matrix header key field: {:?}", x_matrix); - - // Forbidden - return Failure((Status::new(580), ())); - } - }; - - let sig = match x_matrix.get(&Some("sig")) { - Some(Some(s)) => *s, - _ => { - warn!("Invalid X-Matrix header sig field: {:?}", x_matrix); - - // Forbidden - return Failure((Status::new(580), ())); - } - }; - - let mut request_map = BTreeMap::::new(); - - if let Some(json_body) = &json_body { - request_map.insert("content".to_owned(), json_body.clone()); - }; - - request_map.insert( - "method".to_owned(), - CanonicalJsonValue::String(request.method().to_string()), - ); - request_map.insert( - "uri".to_owned(), - CanonicalJsonValue::String(request.uri().to_string()), - ); - request_map.insert( - "origin".to_owned(), - CanonicalJsonValue::String(origin.as_str().to_owned()), - ); - request_map.insert( - "destination".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - - let mut origin_signatures = BTreeMap::new(); - origin_signatures - .insert(key.to_owned(), CanonicalJsonValue::String(sig.to_owned())); - - let mut signatures = BTreeMap::new(); - signatures.insert( - origin.as_str().to_owned(), - CanonicalJsonValue::Object(origin_signatures), - ); - - request_map.insert( - "signatures".to_owned(), - CanonicalJsonValue::Object(signatures), - ); - - let keys = - match server_server::fetch_signing_keys(&db, &origin, vec![key.to_owned()]) - .await - { - Ok(b) => b, - Err(e) => { - warn!("Failed to fetch signing keys: {}", e); - - // Forbidden - return Failure((Status::new(580), ())); - } - }; - - let mut pub_key_map = BTreeMap::new(); - pub_key_map.insert(origin.as_str().to_owned(), keys); - - match ruma::signatures::verify_json(&pub_key_map, &request_map) { - Ok(()) => (None, None, Some(origin), false), - Err(e) => { - warn!( - "Failed to verify json request from {}: {}\n{:?}", - origin, e, request_map - ); - - if request.uri().to_string().contains('@') { - warn!("Request uri contained '@' character. 
Make sure your reverse proxy gives Conduit the raw uri (apache: use nocanon)"); - } - - // Forbidden - return Failure((Status::new(580), ())); - } - } - } - AuthScheme::None => (None, None, None, false), - } - }; - - let mut http_request = http::Request::builder() - .uri(request.uri().to_string()) - .method(&*request.method().to_string()); - for header in request.headers().iter() { - http_request = http_request.header(header.name.as_str(), &*header.value); - } - - if let Some(json_body) = json_body.as_mut().and_then(|val| val.as_object_mut()) { - let user_id = sender_user.clone().unwrap_or_else(|| { - UserId::parse_with_server_name("", db.globals.server_name()) - .expect("we know this is valid") - }); - - if let Some(CanonicalJsonValue::Object(initial_request)) = json_body - .get("auth") - .and_then(|auth| auth.as_object()) - .and_then(|auth| auth.get("session")) - .and_then(|session| session.as_str()) - .and_then(|session| { - db.uiaa - .get_uiaa_request( - &user_id, - &sender_device.clone().unwrap_or_else(|| "".into()), - session, - ) - .ok() - .flatten() - }) - { - for (key, value) in initial_request { - json_body.entry(key).or_insert(value); - } - } - body = serde_json::to_vec(json_body).expect("value to bytes can't fail"); - } - - let http_request = http_request.body(&*body).unwrap(); - debug!("{:?}", http_request); - match ::try_from_http_request(http_request) { - Ok(t) => Success(Ruma { - body: t, - sender_user, - sender_device, - sender_servername, - from_appservice, - json_body, - }), - Err(e) => { - warn!("{:?}", e); - // Bad Json - Failure((Status::new(583), ())) - } - } - } -} - -impl Deref for Ruma { - type Target = T::Incoming; - - fn deref(&self) -> &Self::Target { - &self.body - } -} - -/// This struct converts ruma responses into rocket http responses. 
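// Minimal sketch of the UIAA body-merging step above, using plain serde_json
// maps in place of CanonicalJsonValue: fields from the cached initial request
// are copied into the retried request unless the client sent them again.
use serde_json::{json, Map, Value};

fn merge_initial_request(current: &mut Map<String, Value>, initial: Map<String, Value>) {
    for (key, value) in initial {
        // `or_insert` keeps whatever the retried request already contains.
        current.entry(key).or_insert(value);
    }
}

fn main() {
    let mut current = json!({ "auth": { "type": "m.login.password" } })
        .as_object()
        .cloned()
        .unwrap();
    let initial = json!({ "displayname": "alice", "password": "hunter2" })
        .as_object()
        .cloned()
        .unwrap();
    merge_initial_request(&mut current, initial);
    assert_eq!(current["displayname"], "alice");
    assert!(current.contains_key("auth")); // original auth object untouched
}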
-pub type ConduitResult = Result, Error>; - -pub fn response(response: RumaResponse) -> response::Result<'static> { - let http_response = response - .0 - .try_into_http_response::>() - .map_err(|_| Status::InternalServerError)?; - - let mut response = rocket::response::Response::build(); - - let status = http_response.status(); - response.status(Status::new(status.as_u16())); - - for header in http_response.headers() { - response.raw_header(header.0.to_string(), header.1.to_str().unwrap().to_owned()); - } - - let http_body = http_response.into_body(); - - response.sized_body(http_body.len(), Cursor::new(http_body)); - - response.raw_header("Access-Control-Allow-Origin", "*"); - response.raw_header( - "Access-Control-Allow-Methods", - "GET, POST, PUT, DELETE, OPTIONS", - ); - response.raw_header( - "Access-Control-Allow-Headers", - "Origin, X-Requested-With, Content-Type, Accept, Authorization", - ); - response.raw_header("Access-Control-Max-Age", "86400"); - response.ok() -} - -#[derive(Clone)] -pub struct RumaResponse(pub T); - -impl From for RumaResponse { - fn from(t: T) -> Self { - Self(t) - } -} - -impl From for RumaResponse { - fn from(t: Error) -> Self { - t.to_response() - } -} - -#[cfg(feature = "conduit_bin")] -impl<'r, 'o, T> Responder<'r, 'o> for RumaResponse -where - 'o: 'r, - T: OutgoingResponse, -{ - fn respond_to(self, _: &'r Request<'_>) -> response::Result<'o> { - response(self) - } -} diff --git a/src/server_server.rs b/src/server_server.rs deleted file mode 100644 index 68e262b4..00000000 --- a/src/server_server.rs +++ /dev/null @@ -1,3500 +0,0 @@ -use crate::{ - client_server::{self, claim_keys_helper, get_keys_helper}, - database::{rooms::CompressedStateEvent, DatabaseGuard}, - pdu::EventHash, - utils, ConduitResult, Database, Error, PduEvent, Result, Ruma, -}; -use get_profile_information::v1::ProfileField; -use http::header::{HeaderValue, AUTHORIZATION}; -use regex::Regex; -use rocket::{ - futures::{prelude::*, stream::FuturesUnordered}, - response::content::Json, -}; -use ruma::{ - api::{ - client::error::{Error as RumaError, ErrorKind}, - federation::{ - authorization::get_event_authorization, - device::get_devices::{self, v1::UserDevice}, - directory::{get_public_rooms, get_public_rooms_filtered}, - discovery::{ - get_remote_server_keys, get_remote_server_keys_batch, - get_remote_server_keys_batch::v2::QueryCriteria, get_server_keys, - get_server_version, ServerSigningKeys, VerifyKey, - }, - event::{get_event, get_missing_events, get_room_state, get_room_state_ids}, - keys::{claim_keys, get_keys}, - membership::{ - create_invite, - create_join_event::{self, RoomState}, - create_join_event_template, - }, - query::{get_profile_information, get_room_information}, - transactions::{ - edu::{DeviceListUpdateContent, DirectDeviceContent, Edu}, - send_transaction_message, - }, - }, - EndpointError, IncomingResponse, OutgoingRequest, OutgoingResponse, SendAccessToken, - }, - directory::{IncomingFilter, IncomingRoomNetwork}, - events::{ - receipt::{ReceiptEvent, ReceiptEventContent}, - room::{ - create::RoomCreateEventContent, - member::{MembershipState, RoomMemberEventContent}, - }, - AnyEphemeralRoomEvent, EventType, - }, - int, - receipt::ReceiptType, - serde::JsonObject, - signatures::{CanonicalJsonObject, CanonicalJsonValue}, - state_res::{self, RoomVersion, StateMap}, - to_device::DeviceIdOrAllDevices, - uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, ServerName, - ServerSigningKeyId, -}; -use serde_json::value::{to_raw_value, RawValue as 
RawJsonValue}; -use std::{ - collections::{btree_map, hash_map, BTreeMap, BTreeSet, HashMap, HashSet}, - convert::{TryFrom, TryInto}, - fmt::Debug, - future::Future, - mem, - net::{IpAddr, SocketAddr}, - pin::Pin, - sync::{Arc, RwLock, RwLockWriteGuard}, - time::{Duration, Instant, SystemTime}, -}; -use tokio::sync::{MutexGuard, Semaphore}; -use tracing::{debug, error, info, trace, warn}; - -#[cfg(feature = "conduit_bin")] -use rocket::{get, post, put}; - -/// Wraps either an literal IP address plus port, or a hostname plus complement -/// (colon-plus-port if it was specified). -/// -/// Note: A `FedDest::Named` might contain an IP address in string form if there -/// was no port specified to construct a SocketAddr with. -/// -/// # Examples: -/// ```rust,ignore -/// FedDest::Literal("198.51.100.3:8448".parse()?); -/// FedDest::Literal("[2001:db8::4:5]:443".parse()?); -/// FedDest::Named("matrix.example.org".to_owned(), "".to_owned()); -/// FedDest::Named("matrix.example.org".to_owned(), ":8448".to_owned()); -/// FedDest::Named("198.51.100.5".to_owned(), "".to_owned()); -/// ``` -#[derive(Clone, Debug, PartialEq)] -pub enum FedDest { - Literal(SocketAddr), - Named(String, String), -} - -impl FedDest { - fn into_https_string(self) -> String { - match self { - Self::Literal(addr) => format!("https://{}", addr), - Self::Named(host, port) => format!("https://{}{}", host, port), - } - } - - fn into_uri_string(self) -> String { - match self { - Self::Literal(addr) => addr.to_string(), - Self::Named(host, ref port) => host + port, - } - } - - fn hostname(&self) -> String { - match &self { - Self::Literal(addr) => addr.ip().to_string(), - Self::Named(host, _) => host.clone(), - } - } - - fn port(&self) -> Option { - match &self { - Self::Literal(addr) => Some(addr.port()), - Self::Named(_, port) => port[1..].parse().ok(), - } - } -} - -#[tracing::instrument(skip(globals, request))] -pub(crate) async fn send_request( - globals: &crate::database::globals::Globals, - destination: &ServerName, - request: T, -) -> Result -where - T: Debug, -{ - if !globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let mut write_destination_to_cache = false; - - let cached_result = globals - .actual_destination_cache - .read() - .unwrap() - .get(destination) - .cloned(); - - let (actual_destination, host) = if let Some(result) = cached_result { - result - } else { - write_destination_to_cache = true; - - let result = find_actual_destination(globals, destination).await; - - (result.0, result.1.into_uri_string()) - }; - - let actual_destination_str = actual_destination.clone().into_https_string(); - - let mut http_request = request - .try_into_http_request::>(&actual_destination_str, SendAccessToken::IfRequired("")) - .map_err(|e| { - warn!( - "Failed to find destination {}: {}", - actual_destination_str, e - ); - Error::BadServerResponse("Invalid destination") - })?; - - let mut request_map = serde_json::Map::new(); - - if !http_request.body().is_empty() { - request_map.insert( - "content".to_owned(), - serde_json::from_slice(http_request.body()) - .expect("body is valid json, we just created it"), - ); - }; - - request_map.insert("method".to_owned(), T::METADATA.method.to_string().into()); - request_map.insert( - "uri".to_owned(), - http_request - .uri() - .path_and_query() - .expect("all requests have a path") - .to_string() - .into(), - ); - request_map.insert("origin".to_owned(), globals.server_name().as_str().into()); - request_map.insert("destination".to_owned(), 
destination.as_str().into()); - - let mut request_json = - serde_json::from_value(request_map.into()).expect("valid JSON is valid BTreeMap"); - - ruma::signatures::sign_json( - globals.server_name().as_str(), - globals.keypair(), - &mut request_json, - ) - .expect("our request json is what ruma expects"); - - let request_json: serde_json::Map = - serde_json::from_slice(&serde_json::to_vec(&request_json).unwrap()).unwrap(); - - let signatures = request_json["signatures"] - .as_object() - .unwrap() - .values() - .map(|v| { - v.as_object() - .unwrap() - .iter() - .map(|(k, v)| (k, v.as_str().unwrap())) - }); - - for signature_server in signatures { - for s in signature_server { - http_request.headers_mut().insert( - AUTHORIZATION, - HeaderValue::from_str(&format!( - "X-Matrix origin={},key=\"{}\",sig=\"{}\"", - globals.server_name(), - s.0, - s.1 - )) - .unwrap(), - ); - } - } - - let reqwest_request = reqwest::Request::try_from(http_request) - .expect("all http requests are valid reqwest requests"); - - let url = reqwest_request.url().clone(); - - let mut client = globals.reqwest_client()?; - if let Some((override_name, port)) = globals - .tls_name_override - .read() - .unwrap() - .get(&actual_destination.hostname()) - { - client = client.resolve( - &actual_destination.hostname(), - SocketAddr::new(override_name[0], *port), - ); - // port will be ignored - } - - let response = client.build()?.execute(reqwest_request).await; - - match response { - Ok(mut response) => { - // reqwest::Response -> http::Response conversion - let status = response.status(); - let mut http_response_builder = http::Response::builder() - .status(status) - .version(response.version()); - mem::swap( - response.headers_mut(), - http_response_builder - .headers_mut() - .expect("http::response::Builder is usable"), - ); - - let body = response.bytes().await.unwrap_or_else(|e| { - warn!("server error {}", e); - Vec::new().into() - }); // TODO: handle timeout - - if status != 200 { - warn!( - "{} {}: {}", - url, - status, - String::from_utf8_lossy(&body) - .lines() - .collect::>() - .join(" ") - ); - } - - let http_response = http_response_builder - .body(body) - .expect("reqwest body is valid http body"); - - if status == 200 { - let response = T::IncomingResponse::try_from_http_response(http_response); - if response.is_ok() && write_destination_to_cache { - globals.actual_destination_cache.write().unwrap().insert( - Box::::from(destination), - (actual_destination, host), - ); - } - - response.map_err(|e| { - warn!( - "Invalid 200 response from {} on: {} {}", - &destination, url, e - ); - Error::BadServerResponse("Server returned bad 200 response.") - }) - } else { - Err(Error::FederationError( - destination.to_owned(), - RumaError::try_from_http_response(http_response).map_err(|e| { - warn!( - "Invalid {} response from {} on: {} {}", - status, &destination, url, e - ); - Error::BadServerResponse("Server returned bad error response.") - })?, - )) - } - } - Err(e) => Err(e.into()), - } -} - -#[tracing::instrument] -fn get_ip_with_port(destination_str: &str) -> Option { - if let Ok(destination) = destination_str.parse::() { - Some(FedDest::Literal(destination)) - } else if let Ok(ip_addr) = destination_str.parse::() { - Some(FedDest::Literal(SocketAddr::new(ip_addr, 8448))) - } else { - None - } -} - -#[tracing::instrument] -fn add_port_to_hostname(destination_str: &str) -> FedDest { - let (host, port) = match destination_str.find(':') { - None => (destination_str, ":8448"), - Some(pos) => destination_str.split_at(pos), - 
}; - FedDest::Named(host.to_owned(), port.to_owned()) -} - -/// Returns: actual_destination, host header -/// Implemented according to the specification at https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names -/// Numbers in comments below refer to bullet points in linked section of specification -#[tracing::instrument(skip(globals))] -async fn find_actual_destination( - globals: &crate::database::globals::Globals, - destination: &'_ ServerName, -) -> (FedDest, FedDest) { - let destination_str = destination.as_str().to_owned(); - let mut hostname = destination_str.clone(); - let actual_destination = match get_ip_with_port(&destination_str) { - Some(host_port) => { - // 1: IP literal with provided or default port - host_port - } - None => { - if let Some(pos) = destination_str.find(':') { - // 2: Hostname with included port - let (host, port) = destination_str.split_at(pos); - FedDest::Named(host.to_owned(), port.to_owned()) - } else { - match request_well_known(globals, destination.as_str()).await { - // 3: A .well-known file is available - Some(delegated_hostname) => { - hostname = add_port_to_hostname(&delegated_hostname).into_uri_string(); - match get_ip_with_port(&delegated_hostname) { - Some(host_and_port) => host_and_port, // 3.1: IP literal in .well-known file - None => { - if let Some(pos) = delegated_hostname.find(':') { - // 3.2: Hostname with port in .well-known file - let (host, port) = delegated_hostname.split_at(pos); - FedDest::Named(host.to_owned(), port.to_owned()) - } else { - // Delegated hostname has no port in this branch - if let Some(hostname_override) = - query_srv_record(globals, &delegated_hostname).await - { - // 3.3: SRV lookup successful - let force_port = hostname_override.port(); - - if let Ok(override_ip) = globals - .dns_resolver() - .lookup_ip(hostname_override.hostname()) - .await - { - globals.tls_name_override.write().unwrap().insert( - delegated_hostname.clone(), - ( - override_ip.iter().collect(), - force_port.unwrap_or(8448), - ), - ); - } else { - warn!("Using SRV record, but could not resolve to IP"); - } - - if let Some(port) = force_port { - FedDest::Named( - delegated_hostname, - format!(":{}", port.to_string()), - ) - } else { - add_port_to_hostname(&delegated_hostname) - } - } else { - // 3.4: No SRV records, just use the hostname from .well-known - add_port_to_hostname(&delegated_hostname) - } - } - } - } - } - // 4: No .well-known or an error occured - None => { - match query_srv_record(globals, &destination_str).await { - // 4: SRV record found - Some(hostname_override) => { - let force_port = hostname_override.port(); - - if let Ok(override_ip) = globals - .dns_resolver() - .lookup_ip(hostname_override.hostname()) - .await - { - globals.tls_name_override.write().unwrap().insert( - hostname.clone(), - (override_ip.iter().collect(), force_port.unwrap_or(8448)), - ); - } else { - warn!("Using SRV record, but could not resolve to IP"); - } - - if let Some(port) = force_port { - FedDest::Named( - hostname.clone(), - format!(":{}", port.to_string()), - ) - } else { - add_port_to_hostname(&hostname) - } - } - // 5: No SRV record found - None => add_port_to_hostname(&destination_str), - } - } - } - } - } - }; - - // Can't use get_ip_with_port here because we don't want to add a port - // to an IP address if it wasn't specified - let hostname = if let Ok(addr) = hostname.parse::() { - FedDest::Literal(addr) - } else if let Ok(addr) = hostname.parse::() { - FedDest::Named(addr.to_string(), ":8448".to_owned()) - } else if let 
Some(pos) = hostname.find(':') { - let (host, port) = hostname.split_at(pos); - FedDest::Named(host.to_owned(), port.to_owned()) - } else { - FedDest::Named(hostname, ":8448".to_owned()) - }; - (actual_destination, hostname) -} - -#[tracing::instrument(skip(globals))] -async fn query_srv_record( - globals: &crate::database::globals::Globals, - hostname: &'_ str, -) -> Option { - if let Ok(Some(host_port)) = globals - .dns_resolver() - .srv_lookup(format!("_matrix._tcp.{}", hostname)) - .await - .map(|srv| { - srv.iter().next().map(|result| { - FedDest::Named( - result.target().to_string().trim_end_matches('.').to_owned(), - format!(":{}", result.port()), - ) - }) - }) - { - Some(host_port) - } else { - None - } -} - -#[tracing::instrument(skip(globals))] -async fn request_well_known( - globals: &crate::database::globals::Globals, - destination: &str, -) -> Option { - let body: serde_json::Value = serde_json::from_str( - &globals - .reqwest_client() - .ok()? - .build() - .ok()? - .get(&format!( - "https://{}/.well-known/matrix/server", - destination - )) - .send() - .await - .ok()? - .text() - .await - .ok()?, - ) - .ok()?; - Some(body.get("m.server")?.as_str()?.to_owned()) -} - -/// # `GET /_matrix/federation/v1/version` -/// -/// Get version information on this server. -#[cfg_attr(feature = "conduit_bin", get("/_matrix/federation/v1/version"))] -#[tracing::instrument(skip(db))] -pub fn get_server_version_route( - db: DatabaseGuard, -) -> ConduitResult { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - Ok(get_server_version::v1::Response { - server: Some(get_server_version::v1::Server { - name: Some("Conduit".to_owned()), - version: Some(env!("CARGO_PKG_VERSION").to_owned()), - }), - } - .into()) -} - -/// # `GET /_matrix/key/v2/server` -/// -/// Gets the public signing keys of this server. -/// -/// - Matrix does not support invalidating public keys, so the key returned by this will be valid -/// forever. -// Response type for this endpoint is Json because we need to calculate a signature for the response -#[cfg_attr(feature = "conduit_bin", get("/_matrix/key/v2/server"))] -#[tracing::instrument(skip(db))] -pub fn get_server_keys_route(db: DatabaseGuard) -> Json { - if !db.globals.allow_federation() { - // TODO: Use proper types - return Json("Federation is disabled.".to_owned()); - } - - let mut verify_keys = BTreeMap::new(); - verify_keys.insert( - ServerSigningKeyId::try_from( - format!("ed25519:{}", db.globals.keypair().version()).as_str(), - ) - .expect("found invalid server signing keys in DB"), - VerifyKey { - key: base64::encode_config(db.globals.keypair().public_key(), base64::STANDARD_NO_PAD), - }, - ); - let mut response = serde_json::from_slice( - get_server_keys::v2::Response { - server_key: ServerSigningKeys { - server_name: db.globals.server_name().to_owned(), - verify_keys, - old_verify_keys: BTreeMap::new(), - signatures: BTreeMap::new(), - valid_until_ts: MilliSecondsSinceUnixEpoch::from_system_time( - SystemTime::now() + Duration::from_secs(86400 * 7), - ) - .expect("time is valid"), - }, - } - .try_into_http_response::>() - .unwrap() - .body(), - ) - .unwrap(); - - ruma::signatures::sign_json( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut response, - ) - .unwrap(); - - Json(serde_json::to_string(&response).expect("JSON is canonical")) -} - -/// # `GET /_matrix/key/v2/server/{keyId}` -/// -/// Gets the public signing keys of this server. 
-/// -/// - Matrix does not support invalidating public keys, so the key returned by this will be valid -/// forever. -#[cfg_attr(feature = "conduit_bin", get("/_matrix/key/v2/server/<_>"))] -#[tracing::instrument(skip(db))] -pub fn get_server_keys_deprecated_route(db: DatabaseGuard) -> Json { - get_server_keys_route(db) -} - -/// # `POST /_matrix/federation/v1/publicRooms` -/// -/// Lists the public rooms on this server. -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/federation/v1/publicRooms", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn get_public_rooms_filtered_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let response = client_server::get_public_rooms_filtered_helper( - &db, - None, - body.limit, - body.since.as_deref(), - &body.filter, - &body.room_network, - ) - .await? - .0; - - Ok(get_public_rooms_filtered::v1::Response { - chunk: response - .chunk - .into_iter() - .map(|c| { - // Convert ruma::api::federation::directory::get_public_rooms::v1::PublicRoomsChunk - // to ruma::api::client::r0::directory::PublicRoomsChunk - serde_json::from_str( - &serde_json::to_string(&c).expect("PublicRoomsChunk::to_string always works"), - ) - .expect("federation and client-server PublicRoomsChunk are the same type") - }) - .collect(), - prev_batch: response.prev_batch, - next_batch: response.next_batch, - total_room_count_estimate: response.total_room_count_estimate, - } - .into()) -} - -/// # `GET /_matrix/federation/v1/publicRooms` -/// -/// Lists the public rooms on this server. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/federation/v1/publicRooms", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn get_public_rooms_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let response = client_server::get_public_rooms_filtered_helper( - &db, - None, - body.limit, - body.since.as_deref(), - &IncomingFilter::default(), - &IncomingRoomNetwork::Matrix, - ) - .await? - .0; - - Ok(get_public_rooms::v1::Response { - chunk: response - .chunk - .into_iter() - .map(|c| { - // Convert ruma::api::federation::directory::get_public_rooms::v1::PublicRoomsChunk - // to ruma::api::client::r0::directory::PublicRoomsChunk - serde_json::from_str( - &serde_json::to_string(&c).expect("PublicRoomsChunk::to_string always works"), - ) - .expect("federation and client-server PublicRoomsChunk are the same type") - }) - .collect(), - prev_batch: response.prev_batch, - next_batch: response.next_batch, - total_room_count_estimate: response.total_room_count_estimate, - } - .into()) -} - -/// # `PUT /_matrix/federation/v1/send/{txnId}` -/// -/// Push EDUs and PDUs to this server. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/federation/v1/send/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn send_transaction_message_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let mut resolved_map = BTreeMap::new(); - - let pub_key_map = RwLock::new(BTreeMap::new()); - - // This is all the auth_events that have been recursively fetched so they don't have to be - // deserialized over and over again. - // TODO: make this persist across requests but not in a DB Tree (in globals?) 
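// Sketch of the per-room locking used a bit further below (not Conduit's actual
// types): one `tokio::sync::Mutex` per room id, kept in a map behind a std
// `RwLock`, so incoming PDUs for the same room are handled one at a time while
// different rooms can proceed in parallel.
use std::{
    collections::HashMap,
    sync::{Arc, RwLock},
};
use tokio::sync::Mutex;

#[derive(Default)]
struct RoomLocks {
    map: RwLock<HashMap<String, Arc<Mutex<()>>>>,
}

impl RoomLocks {
    fn for_room(&self, room_id: &str) -> Arc<Mutex<()>> {
        Arc::clone(
            self.map
                .write()
                .unwrap()
                .entry(room_id.to_owned())
                .or_default(),
        )
    }
}

#[tokio::main]
async fn main() {
    let locks = RoomLocks::default();
    let lock = locks.for_room("!room:example.org");
    let _guard = lock.lock().await; // held while one PDU for this room is processed
}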
- // TODO: This could potentially also be some sort of trie (suffix tree) like structure so - // that once an auth event is known it would know (using indexes maybe) all of the auth - // events that it references. - // let mut auth_cache = EventMap::new(); - - for pdu in &body.pdus { - // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu) { - Ok(t) => t, - Err(_) => { - // Event could not be converted to canonical json - continue; - } - }; - - // 0. Check the server is in the room - let room_id = match value - .get("room_id") - .and_then(|id| RoomId::try_from(id.as_str()?).ok()) - { - Some(id) => id, - None => { - // Event is invalid - resolved_map.insert(event_id, Err("Event needs a valid RoomId.".to_owned())); - continue; - } - }; - - let mutex = Arc::clone( - db.globals - .roomid_mutex_federation - .write() - .unwrap() - .entry(room_id.clone()) - .or_default(), - ); - let mutex_lock = mutex.lock().await; - let start_time = Instant::now(); - resolved_map.insert( - event_id.clone(), - handle_incoming_pdu( - &body.origin, - &event_id, - &room_id, - value, - true, - &db, - &pub_key_map, - ) - .await - .map(|_| ()), - ); - drop(mutex_lock); - - let elapsed = start_time.elapsed(); - warn!( - "Handling transaction of event {} took {}m{}s", - event_id, - elapsed.as_secs() / 60, - elapsed.as_secs() % 60 - ); - } - - for pdu in &resolved_map { - if let Err(e) = pdu.1 { - if e != "Room is unknown to this server." { - warn!("Incoming PDU failed {:?}", pdu); - } - } - } - - for edu in body - .edus - .iter() - .filter_map(|edu| serde_json::from_str::(edu.json().get()).ok()) - { - match edu { - Edu::Presence(_) => {} - Edu::Receipt(receipt) => { - for (room_id, room_updates) in receipt.receipts { - for (user_id, user_updates) in room_updates.read { - if let Some((event_id, _)) = user_updates - .event_ids - .iter() - .filter_map(|id| { - db.rooms.get_pdu_count(id).ok().flatten().map(|r| (id, r)) - }) - .max_by_key(|(_, count)| *count) - { - let mut user_receipts = BTreeMap::new(); - user_receipts.insert(user_id.clone(), user_updates.data); - - let mut receipts = BTreeMap::new(); - receipts.insert(ReceiptType::Read, user_receipts); - - let mut receipt_content = BTreeMap::new(); - receipt_content.insert(event_id.to_owned(), receipts); - - let event = AnyEphemeralRoomEvent::Receipt(ReceiptEvent { - content: ReceiptEventContent(receipt_content), - room_id: room_id.clone(), - }); - db.rooms.edus.readreceipt_update( - &user_id, - &room_id, - event, - &db.globals, - )?; - } else { - // TODO fetch missing events - debug!("No known event ids in read receipt: {:?}", user_updates); - } - } - } - } - Edu::Typing(typing) => { - if typing.typing { - db.rooms.edus.typing_add( - &typing.user_id, - &typing.room_id, - 3000 + utils::millis_since_unix_epoch(), - &db.globals, - )?; - } else { - db.rooms - .edus - .typing_remove(&typing.user_id, &typing.room_id, &db.globals)?; - } - } - Edu::DeviceListUpdate(DeviceListUpdateContent { user_id, .. }) => { - db.users - .mark_device_key_update(&user_id, &db.rooms, &db.globals)?; - } - Edu::DirectToDevice(DirectDeviceContent { - sender, - ev_type, - message_id, - messages, - }) => { - // Check if this is a new transaction id - if db - .transaction_ids - .existing_txnid(&sender, None, &message_id)? 
- .is_some() - { - continue; - } - - for (target_user_id, map) in &messages { - for (target_device_id_maybe, event) in map { - match target_device_id_maybe { - DeviceIdOrAllDevices::DeviceId(target_device_id) => { - db.users.add_to_device_event( - &sender, - target_user_id, - target_device_id, - &ev_type.to_string(), - event.deserialize_as().map_err(|_| { - Error::BadRequest( - ErrorKind::InvalidParam, - "Event is invalid", - ) - })?, - &db.globals, - )? - } - - DeviceIdOrAllDevices::AllDevices => { - for target_device_id in db.users.all_device_ids(target_user_id) { - db.users.add_to_device_event( - &sender, - target_user_id, - &target_device_id?, - &ev_type.to_string(), - event.deserialize_as().map_err(|_| { - Error::BadRequest( - ErrorKind::InvalidParam, - "Event is invalid", - ) - })?, - &db.globals, - )?; - } - } - } - } - } - - // Save transaction id with empty data - db.transaction_ids - .add_txnid(&sender, None, &message_id, &[])?; - } - Edu::_Custom(_) => {} - } - } - - db.flush()?; - - Ok(send_transaction_message::v1::Response { pdus: resolved_map }.into()) -} - -/// An async function that can recursively call itself. -type AsyncRecursiveType<'a, T> = Pin + 'a + Send>>; - -/// When receiving an event one needs to: -/// 0. Check the server is in the room -/// 1. Skip the PDU if we already know about it -/// 2. Check signatures, otherwise drop -/// 3. Check content hash, redact if doesn't match -/// 4. Fetch any missing auth events doing all checks listed here starting at 1. These are not -/// timeline events -/// 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are -/// also rejected "due to auth events" -/// 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events -/// 7. Persist this event as an outlier -/// 8. If not timeline event: stop -/// 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline -/// events -/// 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities -/// doing all the checks in this list starting at 1. These are not timeline events -/// 11. Check the auth of the event passes based on the state of the event -/// 12. Ensure that the state is derived from the previous current state (i.e. we calculated by -/// doing state res where one of the inputs was a previously trusted set of state, don't just -/// trust a set of state we got from a remote) -/// 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" -/// it -/// 14. Use state resolution to find new room state -// We use some AsyncRecursiveType hacks here so we can call this async funtion recursively -#[tracing::instrument(skip(value, is_timeline_event, db, pub_key_map))] -pub(crate) async fn handle_incoming_pdu<'a>( - origin: &'a ServerName, - event_id: &'a EventId, - room_id: &'a RoomId, - value: BTreeMap, - is_timeline_event: bool, - db: &'a Database, - pub_key_map: &'a RwLock>>, -) -> Result>, String> { - match db.rooms.exists(room_id) { - Ok(true) => {} - _ => { - return Err("Room is unknown to this server.".to_owned()); - } - } - - // 1. Skip the PDU if we already have it as a timeline event - if let Ok(Some(pdu_id)) = db.rooms.get_pdu_id(event_id) { - return Ok(Some(pdu_id.to_vec())); - } - - let create_event = db - .rooms - .room_state_get(room_id, &EventType::RoomCreate, "") - .map_err(|_| "Failed to ask database for event.".to_owned())? 
- .ok_or_else(|| "Failed to find create event in db.".to_owned())?; - - let first_pdu_in_room = db - .rooms - .first_pdu_in_room(room_id) - .map_err(|_| "Error loading first room event.".to_owned())? - .expect("Room exists"); - - let (incoming_pdu, val) = handle_outlier_pdu( - origin, - &create_event, - event_id, - room_id, - value, - db, - pub_key_map, - ) - .await?; - - // 8. if not timeline event: stop - if !is_timeline_event { - return Ok(None); - } - - if incoming_pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { - return Ok(None); - } - - // 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline events - let mut graph = HashMap::new(); - let mut eventid_info = HashMap::new(); - let mut todo_outlier_stack: Vec<_> = incoming_pdu - .prev_events - .iter() - .cloned() - .map(Arc::new) - .collect(); - - let mut amount = 0; - - while let Some(prev_event_id) = todo_outlier_stack.pop() { - if let Some((pdu, json_opt)) = fetch_and_handle_outliers( - db, - origin, - &[prev_event_id.clone()], - &create_event, - room_id, - pub_key_map, - ) - .await - .pop() - { - if amount > 100 { - // Max limit reached - warn!("Max prev event limit reached!"); - graph.insert((*prev_event_id).clone(), HashSet::new()); - continue; - } - - if let Some(json) = - json_opt.or_else(|| db.rooms.get_outlier_pdu_json(&prev_event_id).ok().flatten()) - { - if pdu.origin_server_ts > first_pdu_in_room.origin_server_ts { - amount += 1; - for prev_prev in &pdu.prev_events { - if !graph.contains_key(prev_prev) { - todo_outlier_stack.push(dbg!(Arc::new(prev_prev.clone()))); - } - } - - graph.insert( - (*prev_event_id).clone(), - pdu.prev_events.iter().cloned().collect(), - ); - } else { - // Time based check failed - graph.insert((*prev_event_id).clone(), HashSet::new()); - } - - eventid_info.insert(prev_event_id.clone(), (pdu, json)); - } else { - // Get json failed - graph.insert((*prev_event_id).clone(), HashSet::new()); - } - } else { - // Fetch and handle failed - graph.insert((*prev_event_id).clone(), HashSet::new()); - } - } - - let sorted = state_res::lexicographical_topological_sort(dbg!(&graph), |event_id| { - // This return value is the key used for sorting events, - // events are then sorted by power level, time, - // and lexically by event_id. 
- println!("{}", event_id); - Ok(( - int!(0), - MilliSecondsSinceUnixEpoch( - eventid_info - .get(event_id) - .map_or_else(|| uint!(0), |info| info.0.origin_server_ts), - ), - ruma::event_id!("$notimportant"), - )) - }) - .map_err(|_| "Error sorting prev events".to_owned())?; - - let mut errors = 0; - for prev_id in dbg!(sorted) { - if errors >= 5 { - break; - } - if let Some((pdu, json)) = eventid_info.remove(&prev_id) { - if pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { - continue; - } - - let start_time = Instant::now(); - let event_id = pdu.event_id.clone(); - if let Err(e) = upgrade_outlier_to_timeline_pdu( - pdu, - json, - &create_event, - origin, - db, - room_id, - pub_key_map, - ) - .await - { - errors += 1; - warn!("Prev event {} failed: {}", event_id, e); - } - let elapsed = start_time.elapsed(); - warn!( - "Handling prev event {} took {}m{}s", - event_id, - elapsed.as_secs() / 60, - elapsed.as_secs() % 60 - ); - } - } - - upgrade_outlier_to_timeline_pdu( - incoming_pdu, - val, - &create_event, - origin, - db, - room_id, - pub_key_map, - ) - .await -} - -#[tracing::instrument(skip(origin, create_event, event_id, room_id, value, db, pub_key_map))] -fn handle_outlier_pdu<'a>( - origin: &'a ServerName, - create_event: &'a PduEvent, - event_id: &'a EventId, - room_id: &'a RoomId, - value: BTreeMap, - db: &'a Database, - pub_key_map: &'a RwLock>>, -) -> AsyncRecursiveType<'a, Result<(Arc, BTreeMap), String>> { - Box::pin(async move { - // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json - - // We go through all the signatures we see on the value and fetch the corresponding signing - // keys - fetch_required_signing_keys(&value, pub_key_map, db) - .await - .map_err(|e| e.to_string())?; - - // 2. Check signatures, otherwise drop - // 3. check content hash, redact if doesn't match - - let create_event_content: RoomCreateEventContent = - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - "Invalid create event in db.".to_owned() - })?; - - let room_version_id = &create_event_content.room_version; - let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); - - let mut val = match ruma::signatures::verify_event( - &*pub_key_map.read().map_err(|_| "RwLock is poisoned.")?, - &value, - room_version_id, - ) { - Err(e) => { - // Drop - warn!("Dropping bad event {}: {}", event_id, e); - return Err("Signature verification failed".to_owned()); - } - Ok(ruma::signatures::Verified::Signatures) => { - // Redact - warn!("Calculated hash does not match: {}", event_id); - match ruma::signatures::redact(&value, room_version_id) { - Ok(obj) => obj, - Err(_) => return Err("Redaction failed".to_owned()), - } - } - Ok(ruma::signatures::Verified::All) => value, - }; - - // Now that we have checked the signature and hashes we can add the eventID and convert - // to our PduEvent type - val.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.as_str().to_owned()), - ); - let incoming_pdu = serde_json::from_value::( - serde_json::to_value(&val).expect("CanonicalJsonObj is a valid JsonValue"), - ) - .map_err(|_| "Event is not a valid PDU.".to_owned())?; - - // 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events - // 5. 
Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" - // EDIT: Step 5 is not applied anymore because it failed too often - warn!("Fetching auth events for {}", incoming_pdu.event_id); - fetch_and_handle_outliers( - db, - origin, - &incoming_pdu - .auth_events - .iter() - .cloned() - .map(Arc::new) - .collect::>(), - create_event, - room_id, - pub_key_map, - ) - .await; - - // 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events - debug!( - "Auth check for {} based on auth events", - incoming_pdu.event_id - ); - - // Build map of auth events - let mut auth_events = HashMap::new(); - for id in &incoming_pdu.auth_events { - let auth_event = match db.rooms.get_pdu(id).map_err(|e| e.to_string())? { - Some(e) => e, - None => { - warn!("Could not find auth event {}", id); - continue; - } - }; - - match auth_events.entry(( - auth_event.kind.clone(), - auth_event - .state_key - .clone() - .expect("all auth events have state keys"), - )) { - hash_map::Entry::Vacant(v) => { - v.insert(auth_event); - } - hash_map::Entry::Occupied(_) => { - return Err( - "Auth event's type and state_key combination exists multiple times." - .to_owned(), - ) - } - } - } - - // The original create event must be in the auth events - if auth_events - .get(&(EventType::RoomCreate, "".to_owned())) - .map(|a| a.as_ref()) - != Some(create_event) - { - return Err("Incoming event refers to wrong create event.".to_owned()); - } - - // If the previous event was the create event special rules apply - let previous_create = if incoming_pdu.auth_events.len() == 1 - && incoming_pdu.prev_events == incoming_pdu.auth_events - { - db.rooms - .get_pdu(&incoming_pdu.auth_events[0]) - .map_err(|e| e.to_string())? - .filter(|maybe_create| **maybe_create == *create_event) - } else { - None - }; - - if !state_res::event_auth::auth_check( - &room_version, - &incoming_pdu, - previous_create.as_ref(), - None::, // TODO: third party invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|_e| "Auth check failed".to_owned())? - { - return Err("Event has failed auth check with auth events.".to_owned()); - } - - debug!("Validation successful."); - - // 7. Persist the event as an outlier. - db.rooms - .add_pdu_outlier(&incoming_pdu.event_id, &val) - .map_err(|_| "Failed to add pdu as outlier.".to_owned())?; - debug!("Added pdu as outlier."); - - Ok((Arc::new(incoming_pdu), val)) - }) -} - -#[tracing::instrument(skip(incoming_pdu, val, create_event, origin, db, room_id, pub_key_map))] -async fn upgrade_outlier_to_timeline_pdu( - incoming_pdu: Arc, - val: BTreeMap, - create_event: &PduEvent, - origin: &ServerName, - db: &Database, - room_id: &RoomId, - pub_key_map: &RwLock>>, -) -> Result>, String> { - if let Ok(Some(pduid)) = db.rooms.get_pdu_id(&incoming_pdu.event_id) { - return Ok(Some(pduid)); - } - - if db - .rooms - .is_event_soft_failed(&incoming_pdu.event_id) - .map_err(|_| "Failed to ask db for soft fail".to_owned())? - { - return Err("Event has been soft failed".into()); - } - - let create_event_content: RoomCreateEventContent = - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - "Invalid create event in db.".to_owned() - })?; - - let room_version_id = &create_event_content.room_version; - let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); - - // 10. 
Fetch missing state and auth chain events by calling /state_ids at backwards extremities - // doing all the checks in this list starting at 1. These are not timeline events. - - // TODO: if we know the prev_events of the incoming event we can avoid the request and build - // the state from a known point and resolve if > 1 prev_event - - debug!("Requesting state at event."); - let mut state_at_incoming_event = None; - - if incoming_pdu.prev_events.len() == 1 { - let prev_event = &incoming_pdu.prev_events[0]; - let prev_event_sstatehash = db - .rooms - .pdu_shortstatehash(prev_event) - .map_err(|_| "Failed talking to db".to_owned())?; - - let state = - prev_event_sstatehash.map(|shortstatehash| db.rooms.state_full_ids(shortstatehash)); - - if let Some(Ok(mut state)) = state { - warn!("Using cached state"); - let prev_pdu = - db.rooms.get_pdu(prev_event).ok().flatten().ok_or_else(|| { - "Could not find prev event, but we know the state.".to_owned() - })?; - - if let Some(state_key) = &prev_pdu.state_key { - let shortstatekey = db - .rooms - .get_or_create_shortstatekey(&prev_pdu.kind, state_key, &db.globals) - .map_err(|_| "Failed to create shortstatekey.".to_owned())?; - - state.insert(shortstatekey, Arc::new(prev_event.clone())); - // Now it's the state after the pdu - } - - state_at_incoming_event = Some(state); - } - } else { - warn!("Calculating state at event using state res"); - let mut extremity_sstatehashes = HashMap::new(); - - let mut okay = true; - for prev_eventid in &incoming_pdu.prev_events { - let prev_event = if let Ok(Some(pdu)) = db.rooms.get_pdu(prev_eventid) { - pdu - } else { - okay = false; - break; - }; - - let sstatehash = if let Ok(Some(s)) = db.rooms.pdu_shortstatehash(prev_eventid) { - s - } else { - okay = false; - break; - }; - - extremity_sstatehashes.insert(sstatehash, prev_event); - } - - if okay { - let mut fork_states = Vec::with_capacity(extremity_sstatehashes.len()); - let mut auth_chain_sets = Vec::with_capacity(extremity_sstatehashes.len()); - - for (sstatehash, prev_event) in extremity_sstatehashes { - let mut leaf_state: BTreeMap<_, _> = db - .rooms - .state_full_ids(sstatehash) - .map_err(|_| "Failed to ask db for room state.".to_owned())?; - - if let Some(state_key) = &prev_event.state_key { - let shortstatekey = db - .rooms - .get_or_create_shortstatekey(&prev_event.kind, state_key, &db.globals) - .map_err(|_| "Failed to create shortstatekey.".to_owned())?; - leaf_state.insert(shortstatekey, Arc::new(prev_event.event_id.clone())); - // Now it's the state after the pdu - } - - let mut state = StateMap::with_capacity(leaf_state.len()); - let mut starting_events = Vec::with_capacity(leaf_state.len()); - - for (k, id) in leaf_state { - let k = db - .rooms - .get_statekey_from_short(k) - .map_err(|_| "Failed to get_statekey_from_short.".to_owned())?; - - state.insert(k, (*id).clone()); - starting_events.push(id); - } - - auth_chain_sets.push( - get_auth_chain(room_id, starting_events, db) - .map_err(|_| "Failed to load auth chain.".to_owned())? 
- .map(|event_id| (*event_id).clone()) - .collect(), - ); - - fork_states.push(state); - } - - state_at_incoming_event = match state_res::resolve( - room_version_id, - &fork_states, - auth_chain_sets, - |id| { - let res = db.rooms.get_pdu(id); - if let Err(e) = &res { - error!("LOOK AT ME Failed to fetch event: {}", e); - } - res.ok().flatten() - }, - ) { - Ok(new_state) => Some( - new_state - .into_iter() - .map(|((event_type, state_key), event_id)| { - let shortstatekey = db - .rooms - .get_or_create_shortstatekey(&event_type, &state_key, &db.globals) - .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; - Ok((shortstatekey, Arc::new(event_id))) - }) - .collect::>()?, - ), - Err(e) => { - warn!("State resolution on prev events failed, either an event could not be found or deserialization: {}", e); - None - } - }; - } - } - - if state_at_incoming_event.is_none() { - warn!("Calling /state_ids"); - // Call /state_ids to find out what the state at this pdu is. We trust the server's - // response to some extend, but we still do a lot of checks on the events - match db - .sending - .send_federation_request( - &db.globals, - origin, - get_room_state_ids::v1::Request { - room_id, - event_id: &incoming_pdu.event_id, - }, - ) - .await - { - Ok(res) => { - warn!("Fetching state events at event."); - let state_vec = fetch_and_handle_outliers( - db, - origin, - &res.pdu_ids - .iter() - .cloned() - .map(Arc::new) - .collect::>(), - create_event, - room_id, - pub_key_map, - ) - .await; - - let mut state = BTreeMap::new(); - for (pdu, _) in state_vec { - let state_key = pdu - .state_key - .clone() - .ok_or_else(|| "Found non-state pdu in state events.".to_owned())?; - - let shortstatekey = db - .rooms - .get_or_create_shortstatekey(&pdu.kind, &state_key, &db.globals) - .map_err(|_| "Failed to create shortstatekey.".to_owned())?; - - match state.entry(shortstatekey) { - btree_map::Entry::Vacant(v) => { - v.insert(Arc::new(pdu.event_id.clone())); - } - btree_map::Entry::Occupied(_) => return Err( - "State event's type and state_key combination exists multiple times." - .to_owned(), - ), - } - } - - // The original create event must still be in the state - let create_shortstatekey = db - .rooms - .get_shortstatekey(&EventType::RoomCreate, "") - .map_err(|_| "Failed to talk to db.")? - .expect("Room exists"); - - if state.get(&create_shortstatekey).map(|id| id.as_ref()) - != Some(&create_event.event_id) - { - return Err("Incoming event refers to wrong create event.".to_owned()); - } - - state_at_incoming_event = Some(state); - } - Err(e) => { - warn!("Fetching state for event failed: {}", e); - return Err("Fetching state for event failed".into()); - } - }; - } - - let state_at_incoming_event = - state_at_incoming_event.expect("we always set this to some above"); - - // 11. Check the auth of the event passes based on the state of the event - // If the previous event was the create event special rules apply - let previous_create = if incoming_pdu.auth_events.len() == 1 - && incoming_pdu.prev_events == incoming_pdu.auth_events - { - db.rooms - .get_pdu(&incoming_pdu.auth_events[0]) - .map_err(|e| e.to_string())? 
- .filter(|maybe_create| **maybe_create == *create_event) - } else { - None - }; - - let check_result = state_res::event_auth::auth_check( - &room_version, - &incoming_pdu, - previous_create.as_ref(), - None::, // TODO: third party invite - |k, s| { - db.rooms - .get_shortstatekey(k, s) - .ok() - .flatten() - .and_then(|shortstatekey| state_at_incoming_event.get(&shortstatekey)) - .and_then(|event_id| db.rooms.get_pdu(event_id).ok().flatten()) - }, - ) - .map_err(|_e| "Auth check failed.".to_owned())?; - - if !check_result { - return Err("Event has failed auth check with state at the event.".into()); - } - debug!("Auth check succeeded."); - - // We start looking at current room state now, so lets lock the room - - let mutex_state = Arc::clone( - db.globals - .roomid_mutex_state - .write() - .unwrap() - .entry(room_id.clone()) - .or_default(), - ); - let state_lock = mutex_state.lock().await; - - // Now we calculate the set of extremities this room has after the incoming event has been - // applied. We start with the previous extremities (aka leaves) - let mut extremities = db - .rooms - .get_pdu_leaves(room_id) - .map_err(|_| "Failed to load room leaves".to_owned())?; - - // Remove any forward extremities that are referenced by this incoming event's prev_events - for prev_event in &incoming_pdu.prev_events { - if extremities.contains(prev_event) { - extremities.remove(prev_event); - } - } - - // Only keep those extremities were not referenced yet - extremities.retain(|id| !matches!(db.rooms.is_event_referenced(room_id, id), Ok(true))); - - let current_sstatehash = db - .rooms - .current_shortstatehash(room_id) - .map_err(|_| "Failed to load current state hash.".to_owned())? - .expect("every room has state"); - - let current_state_ids = db - .rooms - .state_full_ids(current_sstatehash) - .map_err(|_| "Failed to load room state.")?; - - let auth_events = db - .rooms - .get_auth_events( - room_id, - &incoming_pdu.kind, - &incoming_pdu.sender, - incoming_pdu.state_key.as_deref(), - &incoming_pdu.content, - ) - .map_err(|_| "Failed to get_auth_events.".to_owned())?; - - let state_ids_compressed = state_at_incoming_event - .iter() - .map(|(shortstatekey, id)| { - db.rooms - .compress_state_event(*shortstatekey, id, &db.globals) - .map_err(|_| "Failed to compress_state_event".to_owned()) - }) - .collect::>()?; - - // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it - debug!("starting soft fail auth check"); - - let soft_fail = !state_res::event_auth::auth_check( - &room_version, - &incoming_pdu, - previous_create.as_ref(), - None::, - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|_e| "Auth check failed.".to_owned())?; - - if soft_fail { - append_incoming_pdu( - db, - &incoming_pdu, - val, - extremities, - state_ids_compressed, - soft_fail, - &state_lock, - ) - .map_err(|_| "Failed to add pdu to db.".to_owned())?; - - // Soft fail, we keep the event as an outlier but don't add it to the timeline - warn!("Event was soft failed: {:?}", incoming_pdu); - db.rooms - .mark_event_soft_failed(&incoming_pdu.event_id) - .map_err(|_| "Failed to set soft failed flag".to_owned())?; - return Err("Event has been soft failed".into()); - } - - if incoming_pdu.state_key.is_some() { - let mut extremity_sstatehashes = HashMap::new(); - - for id in dbg!(&extremities) { - match db - .rooms - .get_pdu(id) - .map_err(|_| "Failed to ask db for pdu.".to_owned())? 
- { - Some(leaf_pdu) => { - extremity_sstatehashes.insert( - db.rooms - .pdu_shortstatehash(&leaf_pdu.event_id) - .map_err(|_| "Failed to ask db for pdu state hash.".to_owned())? - .ok_or_else(|| { - error!( - "Found extremity pdu with no statehash in db: {:?}", - leaf_pdu - ); - "Found pdu with no statehash in db.".to_owned() - })?, - leaf_pdu, - ); - } - _ => { - error!("Missing state snapshot for {:?}", id); - return Err("Missing state snapshot.".to_owned()); - } - } - } - - let mut fork_states = Vec::new(); - - // 12. Ensure that the state is derived from the previous current state (i.e. we calculated - // by doing state res where one of the inputs was a previously trusted set of state, - // don't just trust a set of state we got from a remote). - - // We do this by adding the current state to the list of fork states - extremity_sstatehashes.remove(¤t_sstatehash); - fork_states.push(current_state_ids); - dbg!(&extremity_sstatehashes); - - for (sstatehash, leaf_pdu) in extremity_sstatehashes { - let mut leaf_state = db - .rooms - .state_full_ids(sstatehash) - .map_err(|_| "Failed to ask db for room state.".to_owned())?; - - if let Some(state_key) = &leaf_pdu.state_key { - let shortstatekey = db - .rooms - .get_or_create_shortstatekey(&leaf_pdu.kind, state_key, &db.globals) - .map_err(|_| "Failed to create shortstatekey.".to_owned())?; - leaf_state.insert(shortstatekey, Arc::new(leaf_pdu.event_id.clone())); - // Now it's the state after the pdu - } - - fork_states.push(leaf_state); - } - - // We also add state after incoming event to the fork states - let mut state_after = state_at_incoming_event.clone(); - if let Some(state_key) = &incoming_pdu.state_key { - let shortstatekey = db - .rooms - .get_or_create_shortstatekey(&incoming_pdu.kind, state_key, &db.globals) - .map_err(|_| "Failed to create shortstatekey.".to_owned())?; - - state_after.insert(shortstatekey, Arc::new(incoming_pdu.event_id.clone())); - } - fork_states.push(state_after); - - let mut update_state = false; - // 14. Use state resolution to find new room state - let new_room_state = if fork_states.is_empty() { - return Err("State is empty.".to_owned()); - } else if fork_states.iter().skip(1).all(|f| &fork_states[0] == f) { - // There was only one state, so it has to be the room's current state (because that is - // always included) - fork_states[0] - .iter() - .map(|(k, id)| { - db.rooms - .compress_state_event(*k, id, &db.globals) - .map_err(|_| "Failed to compress_state_event.".to_owned()) - }) - .collect::>()? - } else { - // We do need to force an update to this room's state - update_state = true; - - let mut auth_chain_sets = Vec::new(); - for state in &fork_states { - auth_chain_sets.push( - get_auth_chain( - room_id, - state.iter().map(|(_, id)| id.clone()).collect(), - db, - ) - .map_err(|_| "Failed to load auth chain.".to_owned())? 
- .map(|event_id| (*event_id).clone()) - .collect(), - ); - } - - let fork_states: Vec<_> = fork_states - .into_iter() - .map(|map| { - map.into_iter() - .map(|(k, id)| { - db.rooms - .get_statekey_from_short(k) - .map(|k| (k, (*id).clone())) - }) - .collect::>>() - }) - .collect::>() - .map_err(|_| "Failed to get_statekey_from_short.".to_owned())?; - - let state = match state_res::resolve( - room_version_id, - &fork_states, - auth_chain_sets, - |id| { - let res = db.rooms.get_pdu(id); - if let Err(e) = &res { - error!("LOOK AT ME Failed to fetch event: {}", e); - } - res.ok().flatten() - }, - ) { - Ok(new_state) => new_state, - Err(_) => { - return Err("State resolution failed, either an event could not be found or deserialization".into()); - } - }; - - state - .into_iter() - .map(|((event_type, state_key), event_id)| { - let shortstatekey = db - .rooms - .get_or_create_shortstatekey(&event_type, &state_key, &db.globals) - .map_err(|_| "Failed to get_or_create_shortstatekey".to_owned())?; - db.rooms - .compress_state_event(shortstatekey, &event_id, &db.globals) - .map_err(|_| "Failed to compress state event".to_owned()) - }) - .collect::>()? - }; - - // Set the new room state to the resolved state - if update_state { - db.rooms - .force_state(room_id, new_room_state, db) - .map_err(|_| "Failed to set new room state.".to_owned())?; - } - debug!("Updated resolved state"); - } - - extremities.insert(incoming_pdu.event_id.clone()); - - // Now that the event has passed all auth it is added into the timeline. - // We use the `state_at_event` instead of `state_after` so we accurately - // represent the state for this event. - - let pdu_id = append_incoming_pdu( - db, - &incoming_pdu, - val, - extremities, - state_ids_compressed, - soft_fail, - &state_lock, - ) - .map_err(|_| "Failed to add pdu to db.".to_owned())?; - - debug!("Appended incoming pdu."); - - // Event has passed all auth/stateres checks - drop(state_lock); - Ok(pdu_id) -} - -/// Find the event and auth it. Once the event is validated (steps 1 - 8) -/// it is appended to the outliers Tree. -/// -/// Returns pdu and if we fetched it over federation the raw json. -/// -/// a. Look in the main timeline (pduid_pdu tree) -/// b. Look at outlier pdu tree -/// c. Ask origin server over federation -/// d. TODO: Ask other servers over federation? -#[tracing::instrument(skip(db, origin, events, create_event, room_id, pub_key_map))] -pub(crate) fn fetch_and_handle_outliers<'a>( - db: &'a Database, - origin: &'a ServerName, - events: &'a [Arc], - create_event: &'a PduEvent, - room_id: &'a RoomId, - pub_key_map: &'a RwLock>>, -) -> AsyncRecursiveType<'a, Vec<(Arc, Option>)>> { - Box::pin(async move { - let back_off = |id| match db.globals.bad_event_ratelimiter.write().unwrap().entry(id) { - hash_map::Entry::Vacant(e) => { - e.insert((Instant::now(), 1)); - } - hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), - }; - - let mut pdus = vec![]; - for id in events { - if let Some((time, tries)) = db.globals.bad_event_ratelimiter.read().unwrap().get(id) { - // Exponential backoff - let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); - if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { - min_elapsed_duration = Duration::from_secs(60 * 60 * 24); - } - - if time.elapsed() < min_elapsed_duration { - info!("Backing off from {}", id); - continue; - } - } - - // a. Look in the main timeline (pduid_pdu tree) - // b. 
Look at outlier pdu tree - // (get_pdu_json checks both) - let local_pdu = db.rooms.get_pdu(id); - let pdu = match local_pdu { - Ok(Some(pdu)) => { - trace!("Found {} in db", id); - (pdu, None) - } - Ok(None) => { - // c. Ask origin server over federation - warn!("Fetching {} over federation.", id); - match db - .sending - .send_federation_request( - &db.globals, - origin, - get_event::v1::Request { event_id: id }, - ) - .await - { - Ok(res) => { - warn!("Got {} over federation", id); - let (calculated_event_id, value) = - match crate::pdu::gen_event_id_canonical_json(&res.pdu) { - Ok(t) => t, - Err(_) => { - back_off((**id).clone()); - continue; - } - }; - - if calculated_event_id != **id { - warn!("Server didn't return event id we requested: requested: {}, we got {}. Event: {:?}", - id, calculated_event_id, &res.pdu); - } - - // This will also fetch the auth chain - match handle_outlier_pdu( - origin, - create_event, - id, - room_id, - value.clone(), - db, - pub_key_map, - ) - .await - { - Ok((pdu, json)) => (pdu, Some(json)), - Err(e) => { - warn!("Authentication of event {} failed: {:?}", id, e); - back_off((**id).clone()); - continue; - } - } - } - Err(_) => { - warn!("Failed to fetch event: {}", id); - back_off((**id).clone()); - continue; - } - } - } - Err(e) => { - warn!("Error loading {}: {}", id, e); - continue; - } - }; - pdus.push(pdu); - } - pdus - }) -} - -/// Search the DB for the signing keys of the given server, if we don't have them -/// fetch them from the server and save to our DB. -#[tracing::instrument(skip(db, origin, signature_ids))] -pub(crate) async fn fetch_signing_keys( - db: &Database, - origin: &ServerName, - signature_ids: Vec, -) -> Result> { - let contains_all_ids = - |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); - - let permit = db - .globals - .servername_ratelimiter - .read() - .unwrap() - .get(origin) - .map(|s| Arc::clone(s).acquire_owned()); - - let permit = match permit { - Some(p) => p, - None => { - let mut write = db.globals.servername_ratelimiter.write().unwrap(); - let s = Arc::clone( - write - .entry(origin.to_owned()) - .or_insert_with(|| Arc::new(Semaphore::new(1))), - ); - - s.acquire_owned() - } - } - .await; - - let back_off = |id| match db - .globals - .bad_signature_ratelimiter - .write() - .unwrap() - .entry(id) - { - hash_map::Entry::Vacant(e) => { - e.insert((Instant::now(), 1)); - } - hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), - }; - - if let Some((time, tries)) = db - .globals - .bad_signature_ratelimiter - .read() - .unwrap() - .get(&signature_ids) - { - // Exponential backoff - let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); - if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { - min_elapsed_duration = Duration::from_secs(60 * 60 * 24); - } - - if time.elapsed() < min_elapsed_duration { - debug!("Backing off from {:?}", signature_ids); - return Err(Error::BadServerResponse("bad signature, still backing off")); - } - } - - trace!("Loading signing keys for {}", origin); - - let mut result: BTreeMap<_, _> = db - .globals - .signing_keys_for(origin)? 
- .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect(); - - if contains_all_ids(&result) { - return Ok(result); - } - - debug!("Fetching signing keys for {} over federation", origin); - - if let Ok(get_keys_response) = db - .sending - .send_federation_request(&db.globals, origin, get_server_keys::v2::Request::new()) - .await - { - db.globals - .add_signing_key(origin, get_keys_response.server_key.clone())?; - - result.extend( - get_keys_response - .server_key - .verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - result.extend( - get_keys_response - .server_key - .old_verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - - if contains_all_ids(&result) { - return Ok(result); - } - } - - for server in db.globals.trusted_servers() { - debug!("Asking {} for {}'s signing key", server, origin); - if let Ok(keys) = db - .sending - .send_federation_request( - &db.globals, - server, - get_remote_server_keys::v2::Request::new( - origin, - MilliSecondsSinceUnixEpoch::from_system_time( - SystemTime::now() - .checked_add(Duration::from_secs(3600)) - .expect("SystemTime to large"), - ) - .expect("time is valid"), - ), - ) - .await - { - trace!("Got signing keys: {:?}", keys); - for k in keys.server_keys { - db.globals.add_signing_key(origin, k.clone())?; - result.extend( - k.verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - result.extend( - k.old_verify_keys - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)), - ); - } - - if contains_all_ids(&result) { - return Ok(result); - } - } - } - - drop(permit); - - back_off(signature_ids); - - warn!("Failed to find public key for server: {}", origin); - Err(Error::BadServerResponse( - "Failed to find public key for server", - )) -} - -/// Append the incoming event setting the state snapshot to the state from the -/// server that sent the event. -#[tracing::instrument(skip(db, pdu, pdu_json, new_room_leaves, state_ids_compressed, _mutex_lock))] -fn append_incoming_pdu( - db: &Database, - pdu: &PduEvent, - pdu_json: CanonicalJsonObject, - new_room_leaves: HashSet, - state_ids_compressed: HashSet, - soft_fail: bool, - _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room mutex -) -> Result>> { - // We append to state before appending the pdu, so we don't have a moment in time with the - // pdu without it's state. This is okay because append_pdu can't fail. - db.rooms.set_event_state( - &pdu.event_id, - &pdu.room_id, - state_ids_compressed, - &db.globals, - )?; - - if soft_fail { - db.rooms - .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; - db.rooms.replace_pdu_leaves( - &pdu.room_id, - &new_room_leaves.into_iter().collect::>(), - )?; - return Ok(None); - } - - let pdu_id = db.rooms.append_pdu( - pdu, - pdu_json, - &new_room_leaves.into_iter().collect::>(), - db, - )?; - - for appservice in db.appservice.all()? { - if db.rooms.appservice_in_room(&pdu.room_id, &appservice, db)? 
{ - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - continue; - } - - if let Some(namespaces) = appservice.1.get("namespaces") { - let users = namespaces - .get("users") - .and_then(|users| users.as_sequence()) - .map_or_else(Vec::new, |users| { - users - .iter() - .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let aliases = namespaces - .get("aliases") - .and_then(|aliases| aliases.as_sequence()) - .map_or_else(Vec::new, |aliases| { - aliases - .iter() - .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) - .collect::>() - }); - let rooms = namespaces - .get("rooms") - .and_then(|rooms| rooms.as_sequence()); - - let matching_users = |users: &Regex| { - users.is_match(pdu.sender.as_str()) - || pdu.kind == EventType::RoomMember - && pdu - .state_key - .as_ref() - .map_or(false, |state_key| users.is_match(state_key)) - }; - let matching_aliases = |aliases: &Regex| { - db.rooms - .room_aliases(&pdu.room_id) - .filter_map(|r| r.ok()) - .any(|room_alias| aliases.is_match(room_alias.as_str())) - }; - - if aliases.iter().any(matching_aliases) - || rooms.map_or(false, |rooms| rooms.contains(&pdu.room_id.as_str().into())) - || users.iter().any(matching_users) - { - db.sending.send_pdu_appservice(&appservice.0, &pdu_id)?; - } - } - } - - Ok(Some(pdu_id)) -} - -#[tracing::instrument(skip(starting_events, db))] -pub(crate) fn get_auth_chain<'a>( - room_id: &RoomId, - starting_events: Vec>, - db: &'a Database, -) -> Result> + 'a> { - const NUM_BUCKETS: usize = 50; - - let mut buckets = vec![BTreeSet::new(); NUM_BUCKETS]; - - for id in starting_events { - let short = db.rooms.get_or_create_shorteventid(&id, &db.globals)?; - let bucket_id = (short % NUM_BUCKETS as u64) as usize; - buckets[bucket_id].insert((short, id.clone())); - } - - let mut full_auth_chain = HashSet::new(); - - let mut hits = 0; - let mut misses = 0; - for chunk in buckets { - if chunk.is_empty() { - continue; - } - - let chunk_key: Vec = chunk.iter().map(|(short, _)| short).copied().collect(); - if let Some(cached) = db.rooms.get_auth_chain_from_cache(&chunk_key)? { - hits += 1; - full_auth_chain.extend(cached.iter().copied()); - continue; - } - misses += 1; - - let mut chunk_cache = HashSet::new(); - let mut hits2 = 0; - let mut misses2 = 0; - for (sevent_id, event_id) in chunk { - if let Some(cached) = db.rooms.get_auth_chain_from_cache(&[sevent_id])? 
{ - hits2 += 1; - chunk_cache.extend(cached.iter().copied()); - } else { - misses2 += 1; - let auth_chain = Arc::new(get_auth_chain_inner(room_id, &event_id, db)?); - db.rooms - .cache_auth_chain(vec![sevent_id], Arc::clone(&auth_chain))?; - println!( - "cache missed event {} with auth chain len {}", - event_id, - auth_chain.len() - ); - chunk_cache.extend(auth_chain.iter()); - }; - } - println!( - "chunk missed with len {}, event hits2: {}, misses2: {}", - chunk_cache.len(), - hits2, - misses2 - ); - let chunk_cache = Arc::new(chunk_cache); - db.rooms - .cache_auth_chain(chunk_key, Arc::clone(&chunk_cache))?; - full_auth_chain.extend(chunk_cache.iter()); - } - - println!( - "total: {}, chunk hits: {}, misses: {}", - full_auth_chain.len(), - hits, - misses - ); - - Ok(full_auth_chain - .into_iter() - .filter_map(move |sid| db.rooms.get_eventid_from_short(sid).ok())) -} - -#[tracing::instrument(skip(event_id, db))] -fn get_auth_chain_inner( - room_id: &RoomId, - event_id: &EventId, - db: &Database, -) -> Result> { - let mut todo = vec![event_id.clone()]; - let mut found = HashSet::new(); - - while let Some(event_id) = todo.pop() { - match db.rooms.get_pdu(&event_id) { - Ok(Some(pdu)) => { - if &pdu.room_id != room_id { - return Err(Error::BadRequest(ErrorKind::Forbidden, "Evil event in db")); - } - for auth_event in &pdu.auth_events { - let sauthevent = db - .rooms - .get_or_create_shorteventid(auth_event, &db.globals)?; - - if !found.contains(&sauthevent) { - found.insert(sauthevent); - todo.push(auth_event.clone()); - } - } - } - Ok(None) => { - warn!("Could not find pdu mentioned in auth events: {}", event_id); - } - Err(e) => { - warn!("Could not load event in auth chain: {} {}", event_id, e); - } - } - } - - Ok(found) -} - -/// # `GET /_matrix/federation/v1/event/{eventId}` -/// -/// Retrieves a single event from the server. -/// -/// - Only works if a user of this server is currently invited or joined the room -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/federation/v1/event/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub fn get_event_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); - - let event = db - .rooms - .get_pdu_json(&body.event_id)? - .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; - - let room_id_str = event - .get("room_id") - .and_then(|val| val.as_str()) - .ok_or_else(|| Error::bad_database("Invalid event in database"))?; - - let room_id = RoomId::try_from(room_id_str) - .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - - if !db.rooms.server_in_room(sender_servername, &room_id)? { - return Err(Error::BadRequest(ErrorKind::NotFound, "Event not found.")); - } - - Ok(get_event::v1::Response { - origin: db.globals.server_name().to_owned(), - origin_server_ts: MilliSecondsSinceUnixEpoch::now(), - pdu: PduEvent::convert_to_outgoing_federation_event(event), - } - .into()) -} - -/// # `POST /_matrix/federation/v1/get_missing_events/{roomId}` -/// -/// Retrieves events that the sender is missing. 
-#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/federation/v1/get_missing_events/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub fn get_missing_events_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); - - if !db.rooms.server_in_room(sender_servername, &body.room_id)? { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Server is not in room", - )); - } - - let mut queued_events = body.latest_events.clone(); - let mut events = Vec::new(); - - let mut i = 0; - while i < queued_events.len() && events.len() < u64::from(body.limit) as usize { - if let Some(pdu) = db.rooms.get_pdu_json(&queued_events[i])? { - let room_id_str = pdu - .get("room_id") - .and_then(|val| val.as_str()) - .ok_or_else(|| Error::bad_database("Invalid event in database"))?; - - let event_room_id = RoomId::try_from(room_id_str) - .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - - if event_room_id != body.room_id { - warn!( - "Evil event detected: Event {} found while searching in room {}", - queued_events[i], body.room_id - ); - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Evil event detected", - )); - } - - if body.earliest_events.contains(&queued_events[i]) { - i += 1; - continue; - } - queued_events.extend_from_slice( - &serde_json::from_value::>( - serde_json::to_value(pdu.get("prev_events").cloned().ok_or_else(|| { - Error::bad_database("Event in db has no prev_events field.") - })?) - .expect("canonical json is valid json value"), - ) - .map_err(|_| Error::bad_database("Invalid prev_events content in pdu in db."))?, - ); - events.push(PduEvent::convert_to_outgoing_federation_event(pdu)); - } - i += 1; - } - - Ok(get_missing_events::v1::Response { events }.into()) -} - -/// # `GET /_matrix/federation/v1/event_auth/{roomId}/{eventId}` -/// -/// Retrieves the auth chain for a given event. -/// -/// - This does not include the event itself -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/federation/v1/event_auth/<_>/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub fn get_event_authorization_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); - - let event = db - .rooms - .get_pdu_json(&body.event_id)? - .ok_or(Error::BadRequest(ErrorKind::NotFound, "Event not found."))?; - - let room_id_str = event - .get("room_id") - .and_then(|val| val.as_str()) - .ok_or_else(|| Error::bad_database("Invalid event in database"))?; - - let room_id = RoomId::try_from(room_id_str) - .map_err(|_| Error::bad_database("Invalid room id field in event in database"))?; - - if !db.rooms.server_in_room(sender_servername, &room_id)? { - return Err(Error::BadRequest(ErrorKind::NotFound, "Event not found.")); - } - - let auth_chain_ids = get_auth_chain(&room_id, vec![Arc::new(body.event_id.clone())], &db)?; - - Ok(get_event_authorization::v1::Response { - auth_chain: auth_chain_ids - .filter_map(|id| db.rooms.get_pdu_json(&id).ok()?) 
- .map(PduEvent::convert_to_outgoing_federation_event) - .collect(), - } - .into()) -} - -/// # `GET /_matrix/federation/v1/state/{roomId}` -/// -/// Retrieves the current state of the room. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/federation/v1/state/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub fn get_room_state_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); - - if !db.rooms.server_in_room(sender_servername, &body.room_id)? { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Server is not in room.", - )); - } - - let shortstatehash = db - .rooms - .pdu_shortstatehash(&body.event_id)? - .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Pdu state not found.", - ))?; - - let pdus = db - .rooms - .state_full_ids(shortstatehash)? - .into_iter() - .map(|(_, id)| { - PduEvent::convert_to_outgoing_federation_event( - db.rooms.get_pdu_json(&id).unwrap().unwrap(), - ) - }) - .collect(); - - let auth_chain_ids = get_auth_chain(&body.room_id, vec![Arc::new(body.event_id.clone())], &db)?; - - Ok(get_room_state::v1::Response { - auth_chain: auth_chain_ids - .map(|id| { - db.rooms.get_pdu_json(&id).map(|maybe_json| { - PduEvent::convert_to_outgoing_federation_event(maybe_json.unwrap()) - }) - }) - .filter_map(|r| r.ok()) - .collect(), - pdus, - } - .into()) -} - -/// # `GET /_matrix/federation/v1/state_ids/{roomId}` -/// -/// Retrieves the current state of the room. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/federation/v1/state_ids/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub fn get_room_state_ids_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let sender_servername = body - .sender_servername - .as_ref() - .expect("server is authenticated"); - - if !db.rooms.server_in_room(sender_servername, &body.room_id)? { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Server is not in room.", - )); - } - - let shortstatehash = db - .rooms - .pdu_shortstatehash(&body.event_id)? - .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Pdu state not found.", - ))?; - - let pdu_ids = db - .rooms - .state_full_ids(shortstatehash)? - .into_iter() - .map(|(_, id)| (*id).clone()) - .collect(); - - let auth_chain_ids = get_auth_chain(&body.room_id, vec![Arc::new(body.event_id.clone())], &db)?; - - Ok(get_room_state_ids::v1::Response { - auth_chain_ids: auth_chain_ids.map(|id| (*id).clone()).collect(), - pdu_ids, - } - .into()) -} - -/// # `GET /_matrix/federation/v1/make_join/{roomId}/{userId}` -/// -/// Creates a join template. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/federation/v1/make_join/<_>/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub fn create_join_event_template_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - if !db.rooms.exists(&body.room_id)? { - return Err(Error::BadRequest( - ErrorKind::NotFound, - "Server is not in room.", - )); - } - - let prev_events: Vec<_> = db - .rooms - .get_pdu_leaves(&body.room_id)? 
- .into_iter() - .take(20) - .collect(); - - let create_event = db - .rooms - .room_state_get(&body.room_id, &EventType::RoomCreate, "")?; - - let create_event_content: Option = create_event - .as_ref() - .map(|create_event| { - serde_json::from_str(create_event.content.get()).map_err(|e| { - warn!("Invalid create event: {}", e); - Error::bad_database("Invalid create event in db.") - }) - }) - .transpose()?; - - let create_prev_event = if prev_events.len() == 1 - && Some(&prev_events[0]) == create_event.as_ref().map(|c| &c.event_id) - { - create_event - } else { - None - }; - - // If there was no create event yet, assume we are creating a version 6 room right now - let room_version_id = create_event_content.map_or(RoomVersionId::Version6, |create_event| { - create_event.room_version - }); - let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); - - if !body.ver.contains(&room_version_id) { - return Err(Error::BadRequest( - ErrorKind::IncompatibleRoomVersion { - room_version: room_version_id, - }, - "Room version not supported.", - )); - } - - let content = to_raw_value(&RoomMemberEventContent { - avatar_url: None, - blurhash: None, - displayname: None, - is_direct: None, - membership: MembershipState::Join, - third_party_invite: None, - reason: None, - }) - .expect("member event is valid value"); - - let state_key = body.user_id.to_string(); - let kind = EventType::RoomMember; - - let auth_events = db.rooms.get_auth_events( - &body.room_id, - &kind, - &body.user_id, - Some(&state_key), - &content, - )?; - - // Our depth is the maximum depth of prev_events + 1 - let depth = prev_events - .iter() - .filter_map(|event_id| Some(db.rooms.get_pdu(event_id).ok()??.depth)) - .max() - .unwrap_or_else(|| uint!(0)) - + uint!(1); - - let mut unsigned = BTreeMap::new(); - - if let Some(prev_pdu) = db.rooms.room_state_get(&body.room_id, &kind, &state_key)? 
{ - unsigned.insert("prev_content".to_owned(), prev_pdu.content.clone()); - unsigned.insert( - "prev_sender".to_owned(), - serde_json::from_str(prev_pdu.sender.as_str()).expect("UserId is valid string"), - ); - } - - let pdu = PduEvent { - event_id: ruma::event_id!("$thiswillbefilledinlater"), - room_id: body.room_id.clone(), - sender: body.user_id.clone(), - origin_server_ts: utils::millis_since_unix_epoch() - .try_into() - .expect("time is valid"), - kind, - content, - state_key: Some(state_key), - prev_events, - depth, - auth_events: auth_events - .iter() - .map(|(_, pdu)| pdu.event_id.clone()) - .collect(), - redacts: None, - unsigned: if unsigned.is_empty() { - None - } else { - Some(to_raw_value(&unsigned).expect("to_raw_value always works")) - }, - hashes: EventHash { - sha256: "aaa".to_owned(), - }, - signatures: None, - }; - - let auth_check = state_res::auth_check( - &room_version, - &pdu, - create_prev_event, - None::, // TODO: third_party_invite - |k, s| auth_events.get(&(k.clone(), s.to_owned())), - ) - .map_err(|e| { - error!("{:?}", e); - Error::bad_database("Auth check failed.") - })?; - - if !auth_check { - return Err(Error::BadRequest( - ErrorKind::Forbidden, - "Event is not authorized.", - )); - } - - // Hash and sign - let mut pdu_json = - utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); - - pdu_json.remove("event_id"); - - // Add origin because synapse likes that (and it's required in the spec) - pdu_json.insert( - "origin".to_owned(), - CanonicalJsonValue::String(db.globals.server_name().as_str().to_owned()), - ); - - Ok(create_join_event_template::v1::Response { - room_version: Some(room_version_id), - event: to_raw_value(&pdu_json).expect("CanonicalJson can be serialized to JSON"), - } - .into()) -} - -async fn create_join_event( - db: &DatabaseGuard, - room_id: &RoomId, - pdu: &RawJsonValue, -) -> Result { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - // We need to return the state prior to joining, let's keep a reference to that here - let shortstatehash = db - .rooms - .current_shortstatehash(room_id)? - .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Pdu state not found.", - ))?; - - let pub_key_map = RwLock::new(BTreeMap::new()); - // let mut auth_cache = EventMap::new(); - - // We do not add the event_id field to the pdu here because of signature and hashes checks - let (event_id, value) = match crate::pdu::gen_event_id_canonical_json(pdu) { - Ok(t) => t, - Err(_) => { - // Event could not be converted to canonical json - return Err(Error::BadRequest( - ErrorKind::InvalidParam, - "Could not convert event to canonical json.", - )); - } - }; - - let origin: Box = serde_json::from_value( - serde_json::to_value(value.get("origin").ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Event needs an origin field.", - ))?) - .expect("CanonicalJson is valid json value"), - ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Origin field is invalid."))?; - - let mutex = Arc::clone( - db.globals - .roomid_mutex_federation - .write() - .unwrap() - .entry(room_id.clone()) - .or_default(), - ); - let mutex_lock = mutex.lock().await; - let pdu_id = handle_incoming_pdu(&origin, &event_id, room_id, value, true, db, &pub_key_map) - .await - .map_err(|e| { - warn!("Error while handling incoming send join PDU: {}", e); - Error::BadRequest( - ErrorKind::InvalidParam, - "Error while handling incoming PDU.", - ) - })? 
- .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Could not accept incoming PDU as timeline event.", - ))?; - drop(mutex_lock); - - let state_ids = db.rooms.state_full_ids(shortstatehash)?; - let auth_chain_ids = get_auth_chain( - room_id, - state_ids.iter().map(|(_, id)| id.clone()).collect(), - db, - )?; - - let servers = db - .rooms - .room_servers(room_id) - .filter_map(|r| r.ok()) - .filter(|server| &**server != db.globals.server_name()); - - db.sending.send_pdu(servers, &pdu_id)?; - - db.flush()?; - - Ok(RoomState { - auth_chain: auth_chain_ids - .filter_map(|id| db.rooms.get_pdu_json(&id).ok().flatten()) - .map(PduEvent::convert_to_outgoing_federation_event) - .collect(), - state: state_ids - .iter() - .filter_map(|(_, id)| db.rooms.get_pdu_json(id).ok().flatten()) - .map(PduEvent::convert_to_outgoing_federation_event) - .collect(), - }) -} - -/// # `PUT /_matrix/federation/v1/send_join/{roomId}/{eventId}` -/// -/// Submits a signed join event. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/federation/v1/send_join/<_>/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn create_join_event_v1_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - let room_state = create_join_event(&db, &body.room_id, &body.pdu).await?; - - Ok(create_join_event::v1::Response { room_state }.into()) -} - -/// # `PUT /_matrix/federation/v2/send_join/{roomId}/{eventId}` -/// -/// Submits a signed join event. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/federation/v2/send_join/<_>/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn create_join_event_v2_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - let room_state = create_join_event(&db, &body.room_id, &body.pdu).await?; - - Ok(create_join_event::v2::Response { room_state }.into()) -} - -/// # `PUT /_matrix/federation/v2/invite/{roomId}/{eventId}` -/// -/// Invites a remote user to a room. -#[cfg_attr( - feature = "conduit_bin", - put("/_matrix/federation/v2/invite/<_>/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn create_invite_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - if body.room_version != RoomVersionId::Version5 && body.room_version != RoomVersionId::Version6 - { - return Err(Error::BadRequest( - ErrorKind::IncompatibleRoomVersion { - room_version: body.room_version.clone(), - }, - "Server does not support this room version.", - )); - } - - let mut signed_event = utils::to_canonical_object(&body.event) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invite event is invalid."))?; - - ruma::signatures::hash_and_sign_event( - db.globals.server_name().as_str(), - db.globals.keypair(), - &mut signed_event, - &body.room_version, - ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Failed to sign event."))?; - - // Generate event id - let event_id = EventId::try_from(&*format!( - "${}", - ruma::signatures::reference_hash(&signed_event, &body.room_version) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - // Add event_id back - signed_event.insert( - "event_id".to_owned(), - CanonicalJsonValue::String(event_id.into()), - ); - - let sender = serde_json::from_value( - signed_event - .get("sender") - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Event had no sender field.", - ))? 
- .clone() - .into(), - ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "sender is not a user id."))?; - - let invited_user = serde_json::from_value( - signed_event - .get("state_key") - .ok_or(Error::BadRequest( - ErrorKind::InvalidParam, - "Event had no state_key field.", - ))? - .clone() - .into(), - ) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "state_key is not a user id."))?; - - let mut invite_state = body.invite_room_state.clone(); - - let mut event: JsonObject = serde_json::from_str(body.event.get()) - .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event bytes."))?; - - event.insert("event_id".to_owned(), "$dummy".into()); - - let pdu: PduEvent = serde_json::from_value(event.into()).map_err(|e| { - warn!("Invalid invite event: {}", e); - Error::BadRequest(ErrorKind::InvalidParam, "Invalid invite event.") - })?; - - invite_state.push(pdu.to_stripped_state_event()); - - // If the room already exists, the remote server will notify us about the join via /send - if !db.rooms.exists(&pdu.room_id)? { - db.rooms.update_membership( - &body.room_id, - &invited_user, - MembershipState::Invite, - &sender, - Some(invite_state), - &db, - true, - )?; - } - - db.flush()?; - - Ok(create_invite::v2::Response { - event: PduEvent::convert_to_outgoing_federation_event(signed_event), - } - .into()) -} - -/// # `GET /_matrix/federation/v1/user/devices/{userId}` -/// -/// Gets information on all devices of the user. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/federation/v1/user/devices/<_>", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub fn get_devices_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - Ok(get_devices::v1::Response { - user_id: body.user_id.clone(), - stream_id: db - .users - .get_devicelist_version(&body.user_id)? - .unwrap_or(0) - .try_into() - .expect("version will not grow that large"), - devices: db - .users - .all_devices_metadata(&body.user_id) - .filter_map(|r| r.ok()) - .filter_map(|metadata| { - Some(UserDevice { - keys: db - .users - .get_device_keys(&body.user_id, &metadata.device_id) - .ok()??, - device_id: metadata.device_id, - device_display_name: metadata.display_name, - }) - }) - .collect(), - } - .into()) -} - -/// # `GET /_matrix/federation/v1/query/directory` -/// -/// Resolve a room alias to a room id. -#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/federation/v1/query/directory", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub fn get_room_information_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let room_id = db - .rooms - .id_from_alias(&body.room_alias)? - .ok_or(Error::BadRequest( - ErrorKind::NotFound, - "Room alias not found.", - ))?; - - Ok(get_room_information::v1::Response { - room_id, - servers: vec![db.globals.server_name().to_owned()], - } - .into()) -} - -/// # `GET /_matrix/federation/v1/query/profile` -/// -/// Gets information on a profile. 
-#[cfg_attr( - feature = "conduit_bin", - get("/_matrix/federation/v1/query/profile", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub fn get_profile_information_route( - db: DatabaseGuard, - body: Ruma>, -) -> ConduitResult { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let mut displayname = None; - let mut avatar_url = None; - let mut blurhash = None; - - match &body.field { - Some(ProfileField::DisplayName) => displayname = db.users.displayname(&body.user_id)?, - Some(ProfileField::AvatarUrl) => { - avatar_url = db.users.avatar_url(&body.user_id)?; - blurhash = db.users.blurhash(&body.user_id)? - } - // TODO: what to do with custom - Some(_) => {} - None => { - displayname = db.users.displayname(&body.user_id)?; - avatar_url = db.users.avatar_url(&body.user_id)?; - blurhash = db.users.blurhash(&body.user_id)?; - } - } - - Ok(get_profile_information::v1::Response { - blurhash, - displayname, - avatar_url, - } - .into()) -} - -/// # `POST /_matrix/federation/v1/user/keys/query` -/// -/// Gets devices and identity keys for the given users. -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/federation/v1/user/keys/query", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn get_keys_route( - db: DatabaseGuard, - body: Ruma, -) -> ConduitResult { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let result = get_keys_helper( - None, - &body.device_keys, - |u| Some(u.server_name()) == body.sender_servername.as_deref(), - &db, - ) - .await?; - - db.flush()?; - - Ok(get_keys::v1::Response { - device_keys: result.device_keys, - master_keys: result.master_keys, - self_signing_keys: result.self_signing_keys, - } - .into()) -} - -/// # `POST /_matrix/federation/v1/user/keys/claim` -/// -/// Claims one-time keys. -#[cfg_attr( - feature = "conduit_bin", - post("/_matrix/federation/v1/user/keys/claim", data = "") -)] -#[tracing::instrument(skip(db, body))] -pub async fn claim_keys_route( - db: DatabaseGuard, - body: Ruma, -) -> ConduitResult { - if !db.globals.allow_federation() { - return Err(Error::bad_config("Federation is disabled.")); - } - - let result = claim_keys_helper(&body.one_time_keys, &db).await?; - - db.flush()?; - - Ok(claim_keys::v1::Response { - one_time_keys: result.one_time_keys, - } - .into()) -} - -#[tracing::instrument(skip(event, pub_key_map, db))] -pub(crate) async fn fetch_required_signing_keys( - event: &BTreeMap, - pub_key_map: &RwLock>>, - db: &Database, -) -> Result<()> { - let signatures = event - .get("signatures") - .ok_or(Error::BadServerResponse( - "No signatures in server response pdu.", - ))? 
- .as_object() - .ok_or(Error::BadServerResponse( - "Invalid signatures object in server response pdu.", - ))?; - - // We go through all the signatures we see on the value and fetch the corresponding signing - // keys - for (signature_server, signature) in signatures { - let signature_object = signature.as_object().ok_or(Error::BadServerResponse( - "Invalid signatures content object in server response pdu.", - ))?; - - let signature_ids = signature_object.keys().cloned().collect::>(); - - let fetch_res = fetch_signing_keys( - db, - &Box::::try_from(&**signature_server).map_err(|_| { - Error::BadServerResponse("Invalid servername in signatures of server response pdu.") - })?, - signature_ids, - ) - .await; - - let keys = match fetch_res { - Ok(keys) => keys, - Err(_) => { - warn!("Signature verification failed: Could not fetch signing key.",); - continue; - } - }; - - pub_key_map - .write() - .map_err(|_| Error::bad_database("RwLock is poisoned."))? - .insert(signature_server.clone(), keys); - } - - Ok(()) -} - -// Gets a list of servers for which we don't have the signing key yet. We go over -// the PDUs and either cache the key or add it to the list that needs to be retrieved. -fn get_server_keys_from_cache( - pdu: &RawJsonValue, - servers: &mut BTreeMap, BTreeMap>, - room_version: &RoomVersionId, - pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap>>, - db: &Database, -) -> Result<()> { - let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { - error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); - Error::BadServerResponse("Invalid PDU in server response") - })?; - - let event_id = EventId::try_from(&*format!( - "${}", - ruma::signatures::reference_hash(&value, room_version) - .expect("ruma can calculate reference hashes") - )) - .expect("ruma's reference hashes are valid event ids"); - - if let Some((time, tries)) = db - .globals - .bad_event_ratelimiter - .read() - .unwrap() - .get(&event_id) - { - // Exponential backoff - let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); - if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { - min_elapsed_duration = Duration::from_secs(60 * 60 * 24); - } - - if time.elapsed() < min_elapsed_duration { - debug!("Backing off from {}", event_id); - return Err(Error::BadServerResponse("bad event, still backing off")); - } - } - - let signatures = value - .get("signatures") - .ok_or(Error::BadServerResponse( - "No signatures in server response pdu.", - ))? - .as_object() - .ok_or(Error::BadServerResponse( - "Invalid signatures object in server response pdu.", - ))?; - - for (signature_server, signature) in signatures { - let signature_object = signature.as_object().ok_or(Error::BadServerResponse( - "Invalid signatures content object in server response pdu.", - ))?; - - let signature_ids = signature_object.keys().cloned().collect::>(); - - let contains_all_ids = - |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); - - let origin = &Box::::try_from(&**signature_server).map_err(|_| { - Error::BadServerResponse("Invalid servername in signatures of server response pdu.") - })?; - - if servers.contains_key(origin) || pub_key_map.contains_key(origin.as_str()) { - continue; - } - - trace!("Loading signing keys for {}", origin); - - let result: BTreeMap<_, _> = db - .globals - .signing_keys_for(origin)? 
- .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect(); - - if !contains_all_ids(&result) { - trace!("Signing key not loaded for {}", origin); - servers.insert(origin.clone(), BTreeMap::new()); - } - - pub_key_map.insert(origin.to_string(), result); - } - - Ok(()) -} - -pub(crate) async fn fetch_join_signing_keys( - event: &create_join_event::v2::Response, - room_version: &RoomVersionId, - pub_key_map: &RwLock>>, - db: &Database, -) -> Result<()> { - let mut servers: BTreeMap, BTreeMap> = - BTreeMap::new(); - - { - let mut pkm = pub_key_map - .write() - .map_err(|_| Error::bad_database("RwLock is poisoned."))?; - - // Try to fetch keys, failure is okay - // Servers we couldn't find in the cache will be added to `servers` - for pdu in &event.room_state.state { - let _ = get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm, db); - } - for pdu in &event.room_state.auth_chain { - let _ = get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm, db); - } - - drop(pkm); - } - - if servers.is_empty() { - // We had all keys locally - return Ok(()); - } - - for server in db.globals.trusted_servers() { - trace!("Asking batch signing keys from trusted server {}", server); - if let Ok(keys) = db - .sending - .send_federation_request( - &db.globals, - server, - get_remote_server_keys_batch::v2::Request { - server_keys: servers.clone(), - minimum_valid_until_ts: MilliSecondsSinceUnixEpoch::from_system_time( - SystemTime::now() + Duration::from_secs(60), - ) - .expect("time is valid"), - }, - ) - .await - { - trace!("Got signing keys: {:?}", keys); - let mut pkm = pub_key_map - .write() - .map_err(|_| Error::bad_database("RwLock is poisoned."))?; - for k in keys.server_keys { - // TODO: Check signature from trusted server? - servers.remove(&k.server_name); - - let result = db - .globals - .add_signing_key(&k.server_name, k.clone())? - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect::>(); - - pkm.insert(k.server_name.to_string(), result); - } - } - - if servers.is_empty() { - return Ok(()); - } - } - - let mut futures: FuturesUnordered<_> = servers - .into_iter() - .map(|(server, _)| async move { - ( - db.sending - .send_federation_request( - &db.globals, - &server, - get_server_keys::v2::Request::new(), - ) - .await, - server, - ) - }) - .collect(); - - while let Some(result) = futures.next().await { - if let (Ok(get_keys_response), origin) = result { - let result: BTreeMap<_, _> = db - .globals - .add_signing_key(&origin, get_keys_response.server_key.clone())? - .into_iter() - .map(|(k, v)| (k.to_string(), v.key)) - .collect(); - - pub_key_map - .write() - .map_err(|_| Error::bad_database("RwLock is poisoned."))? 
- .insert(origin.to_string(), result); - } - } - - Ok(()) -} - -#[cfg(test)] -mod tests { - use super::{add_port_to_hostname, get_ip_with_port, FedDest}; - - #[test] - fn ips_get_default_ports() { - assert_eq!( - get_ip_with_port("1.1.1.1"), - Some(FedDest::Literal("1.1.1.1:8448".parse().unwrap())) - ); - assert_eq!( - get_ip_with_port("dead:beef::"), - Some(FedDest::Literal("[dead:beef::]:8448".parse().unwrap())) - ); - } - - #[test] - fn ips_keep_custom_ports() { - assert_eq!( - get_ip_with_port("1.1.1.1:1234"), - Some(FedDest::Literal("1.1.1.1:1234".parse().unwrap())) - ); - assert_eq!( - get_ip_with_port("[dead::beef]:8933"), - Some(FedDest::Literal("[dead::beef]:8933".parse().unwrap())) - ); - } - - #[test] - fn hostnames_get_default_ports() { - assert_eq!( - add_port_to_hostname("example.com"), - FedDest::Named(String::from("example.com"), String::from(":8448")) - ) - } - - #[test] - fn hostnames_keep_custom_ports() { - assert_eq!( - add_port_to_hostname("example.com:1337"), - FedDest::Named(String::from("example.com"), String::from(":1337")) - ) - } -} diff --git a/src/service/account_data/data.rs b/src/service/account_data/data.rs new file mode 100644 index 00000000..c7c92981 --- /dev/null +++ b/src/service/account_data/data.rs @@ -0,0 +1,35 @@ +use std::collections::HashMap; + +use crate::Result; +use ruma::{ + events::{AnyEphemeralRoomEvent, RoomAccountDataEventType}, + serde::Raw, + RoomId, UserId, +}; + +pub trait Data: Send + Sync { + /// Places one event in the account data of the user and removes the previous entry. + fn update( + &self, + room_id: Option<&RoomId>, + user_id: &UserId, + event_type: RoomAccountDataEventType, + data: &serde_json::Value, + ) -> Result<()>; + + /// Searches the account data for a specific kind. + fn get( + &self, + room_id: Option<&RoomId>, + user_id: &UserId, + kind: RoomAccountDataEventType, + ) -> Result>>; + + /// Returns all changes to the account data that happened after `since`. + fn changes_since( + &self, + room_id: Option<&RoomId>, + user_id: &UserId, + since: u64, + ) -> Result>>; +} diff --git a/src/service/account_data/mod.rs b/src/service/account_data/mod.rs new file mode 100644 index 00000000..f9c49b1a --- /dev/null +++ b/src/service/account_data/mod.rs @@ -0,0 +1,53 @@ +mod data; + +pub use data::Data; + +use ruma::{ + events::{AnyEphemeralRoomEvent, RoomAccountDataEventType}, + serde::Raw, + RoomId, UserId, +}; + +use std::collections::HashMap; + +use crate::Result; + +pub struct Service { + pub db: &'static dyn Data, +} + +impl Service { + /// Places one event in the account data of the user and removes the previous entry. + #[tracing::instrument(skip(self, room_id, user_id, event_type, data))] + pub fn update( + &self, + room_id: Option<&RoomId>, + user_id: &UserId, + event_type: RoomAccountDataEventType, + data: &serde_json::Value, + ) -> Result<()> { + self.db.update(room_id, user_id, event_type, data) + } + + /// Searches the account data for a specific kind. + #[tracing::instrument(skip(self, room_id, user_id, event_type))] + pub fn get( + &self, + room_id: Option<&RoomId>, + user_id: &UserId, + event_type: RoomAccountDataEventType, + ) -> Result>> { + self.db.get(room_id, user_id, event_type) + } + + /// Returns all changes to the account data that happened after `since`. 
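+    /// A minimal usage sketch (hypothetical caller; assumes the global `services()`
+    /// accessor used elsewhere in this diff and a `since` count taken from the sync token):
+    ///
+    /// ```ignore
+    /// // Gather every account data event the user changed after `since`.
+    /// let changed = services()
+    ///     .account_data
+    ///     .changes_since(None, &user_id, since)?;
+    /// for (event_type, _raw_event) in changed {
+    ///     // Each entry is the latest raw event of that type newer than `since`.
+    ///     tracing::debug!("account data changed: {:?}", event_type);
+    /// }
+    /// ```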
+ #[tracing::instrument(skip(self, room_id, user_id, since))] + pub fn changes_since( + &self, + room_id: Option<&RoomId>, + user_id: &UserId, + since: u64, + ) -> Result>> { + self.db.changes_since(room_id, user_id, since) + } +} diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs new file mode 100644 index 00000000..5766b2f2 --- /dev/null +++ b/src/service/admin/mod.rs @@ -0,0 +1,1194 @@ +use std::{ + collections::BTreeMap, + convert::{TryFrom, TryInto}, + sync::Arc, + time::Instant, +}; + +use clap::Parser; +use regex::Regex; +use ruma::{ + events::{ + room::{ + canonical_alias::RoomCanonicalAliasEventContent, + create::RoomCreateEventContent, + guest_access::{GuestAccess, RoomGuestAccessEventContent}, + history_visibility::{HistoryVisibility, RoomHistoryVisibilityEventContent}, + join_rules::{JoinRule, RoomJoinRulesEventContent}, + member::{MembershipState, RoomMemberEventContent}, + message::RoomMessageEventContent, + name::RoomNameEventContent, + power_levels::RoomPowerLevelsEventContent, + topic::RoomTopicEventContent, + }, + RoomEventType, + }, + EventId, OwnedRoomAliasId, RoomAliasId, RoomId, RoomVersionId, ServerName, UserId, +}; +use serde_json::value::to_raw_value; +use tokio::sync::{mpsc, Mutex, MutexGuard}; + +use crate::{ + api::client_server::{leave_all_rooms, AUTO_GEN_PASSWORD_LENGTH}, + services, + utils::{self, HtmlEscape}, + Error, PduEvent, Result, +}; + +use super::pdu::PduBuilder; + +#[cfg_attr(test, derive(Debug))] +#[derive(Parser)] +#[command(name = "@conduit:server.name:", version = env!("CARGO_PKG_VERSION"))] +enum AdminCommand { + #[command(verbatim_doc_comment)] + /// Register an appservice using its registration YAML + /// + /// This command needs a YAML generated by an appservice (such as a bridge), + /// which must be provided in a Markdown code-block below the command. + /// + /// Registering a new bridge using the ID of an existing bridge will replace + /// the old one. + /// + /// [commandbody] + /// # ``` + /// # yaml content here + /// # ``` + RegisterAppservice, + + /// Unregister an appservice using its ID + /// + /// You can find the ID using the `list-appservices` command. + UnregisterAppservice { + /// The appservice to unregister + appservice_identifier: String, + }, + + /// List all the currently registered appservices + ListAppservices, + + /// List all rooms the server knows about + ListRooms, + + /// List users in the database + ListLocalUsers, + + /// List all rooms we are currently handling an incoming pdu from + IncomingFederation, + + /// Deactivate a user + /// + /// User will not be removed from all rooms by default. + /// Use --leave-rooms to force the user to leave all rooms + DeactivateUser { + #[arg(short, long)] + leave_rooms: bool, + user_id: Box, + }, + + #[command(verbatim_doc_comment)] + /// Deactivate a list of users + /// + /// Recommended to use in conjunction with list-local-users. + /// + /// Users will not be removed from joined rooms by default. + /// Can be overridden with --leave-rooms flag. + /// Removing a mass amount of users from a room may cause a significant amount of leave events. + /// The time to leave rooms may depend significantly on joined rooms and servers. 
+ /// + /// [commandbody] + /// # ``` + /// # User list here + /// # ``` + DeactivateAll { + #[arg(short, long)] + /// Remove users from their joined rooms + leave_rooms: bool, + #[arg(short, long)] + /// Also deactivate admin accounts + force: bool, + }, + + /// Get the auth_chain of a PDU + GetAuthChain { + /// An event ID (the $ character followed by the base64 reference hash) + event_id: Box, + }, + + #[command(verbatim_doc_comment)] + /// Parse and print a PDU from a JSON + /// + /// The PDU event is only checked for validity and is not added to the + /// database. + /// + /// [commandbody] + /// # ``` + /// # PDU json content here + /// # ``` + ParsePdu, + + /// Retrieve and print a PDU by ID from the Conduit database + GetPdu { + /// An event ID (a $ followed by the base64 reference hash) + event_id: Box, + }, + + /// Print database memory usage statistics + DatabaseMemoryUsage, + + /// Show configuration values + ShowConfig, + + /// Reset user password + ResetPassword { + /// Username of the user for whom the password should be reset + username: String, + }, + + /// Create a new user + CreateUser { + /// Username of the new user + username: String, + /// Password of the new user, if unspecified one is generated + password: Option, + }, + + /// Disables incoming federation handling for a room. + DisableRoom { room_id: Box }, + /// Enables incoming federation handling for a room again. + EnableRoom { room_id: Box }, +} + +#[derive(Debug)] +pub enum AdminRoomEvent { + ProcessMessage(String), + SendMessage(RoomMessageEventContent), +} + +pub struct Service { + pub sender: mpsc::UnboundedSender, + receiver: Mutex>, +} + +impl Service { + pub fn build() -> Arc { + let (sender, receiver) = mpsc::unbounded_channel(); + Arc::new(Self { + sender, + receiver: Mutex::new(receiver), + }) + } + + pub fn start_handler(self: &Arc) { + let self2 = Arc::clone(self); + tokio::spawn(async move { + self2.handler().await; + }); + } + + async fn handler(&self) { + let mut receiver = self.receiver.lock().await; + // TODO: Use futures when we have long admin commands + //let mut futures = FuturesUnordered::new(); + + let conduit_user = UserId::parse(format!("@conduit:{}", services().globals.server_name())) + .expect("@conduit:server_name is valid"); + + let conduit_room = services() + .rooms + .alias + .resolve_local_alias( + format!("#admins:{}", services().globals.server_name()) + .as_str() + .try_into() + .expect("#admins:server_name is a valid room alias"), + ) + .expect("Database data for admin room alias must be valid") + .expect("Admin room must exist"); + + let send_message = |message: RoomMessageEventContent, mutex_lock: &MutexGuard<'_, ()>| { + services() + .rooms + .timeline + .build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomMessage, + content: to_raw_value(&message) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: None, + redacts: None, + }, + &conduit_user, + &conduit_room, + mutex_lock, + ) + .unwrap(); + }; + + loop { + tokio::select! 
{ + Some(event) = receiver.recv() => { + let message_content = match event { + AdminRoomEvent::SendMessage(content) => content, + AdminRoomEvent::ProcessMessage(room_message) => self.process_admin_message(room_message).await + }; + + let mutex_state = Arc::clone( + services().globals + .roomid_mutex_state + .write() + .unwrap() + .entry(conduit_room.to_owned()) + .or_default(), + ); + + let state_lock = mutex_state.lock().await; + + send_message(message_content, &state_lock); + + drop(state_lock); + } + } + } + } + + pub fn process_message(&self, room_message: String) { + self.sender + .send(AdminRoomEvent::ProcessMessage(room_message)) + .unwrap(); + } + + pub fn send_message(&self, message_content: RoomMessageEventContent) { + self.sender + .send(AdminRoomEvent::SendMessage(message_content)) + .unwrap(); + } + + // Parse and process a message from the admin room + async fn process_admin_message(&self, room_message: String) -> RoomMessageEventContent { + let mut lines = room_message.lines(); + let command_line = lines.next().expect("each string has at least one line"); + let body: Vec<_> = lines.collect(); + + let admin_command = match self.parse_admin_command(command_line) { + Ok(command) => command, + Err(error) => { + let server_name = services().globals.server_name(); + let message = error.replace("server.name", server_name.as_str()); + let html_message = self.usage_to_html(&message, server_name); + + return RoomMessageEventContent::text_html(message, html_message); + } + }; + + match self.process_admin_command(admin_command, body).await { + Ok(reply_message) => reply_message, + Err(error) => { + let markdown_message = format!( + "Encountered an error while handling the command:\n\ + ```\n{}\n```", + error, + ); + let html_message = format!( + "Encountered an error while handling the command:\n\ +
<pre>\n{}\n</pre>
", + error, + ); + + RoomMessageEventContent::text_html(markdown_message, html_message) + } + } + } + + // Parse chat messages from the admin room into an AdminCommand object + fn parse_admin_command(&self, command_line: &str) -> std::result::Result { + // Note: argv[0] is `@conduit:servername:`, which is treated as the main command + let mut argv: Vec<_> = command_line.split_whitespace().collect(); + + // Replace `help command` with `command --help` + // Clap has a help subcommand, but it omits the long help description. + if argv.len() > 1 && argv[1] == "help" { + argv.remove(1); + argv.push("--help"); + } + + // Backwards compatibility with `register_appservice`-style commands + let command_with_dashes; + if argv.len() > 1 && argv[1].contains('_') { + command_with_dashes = argv[1].replace('_', "-"); + argv[1] = &command_with_dashes; + } + + AdminCommand::try_parse_from(argv).map_err(|error| error.to_string()) + } + + async fn process_admin_command( + &self, + command: AdminCommand, + body: Vec<&str>, + ) -> Result { + let reply_message_content = match command { + AdminCommand::RegisterAppservice => { + if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" + { + let appservice_config = body[1..body.len() - 1].join("\n"); + let parsed_config = + serde_yaml::from_str::(&appservice_config); + match parsed_config { + Ok(yaml) => match services().appservice.register_appservice(yaml) { + Ok(id) => RoomMessageEventContent::text_plain(format!( + "Appservice registered with ID: {}.", + id + )), + Err(e) => RoomMessageEventContent::text_plain(format!( + "Failed to register appservice: {}", + e + )), + }, + Err(e) => RoomMessageEventContent::text_plain(format!( + "Could not parse appservice config: {}", + e + )), + } + } else { + RoomMessageEventContent::text_plain( + "Expected code block in command body. 
Add --help for details.", + ) + } + } + AdminCommand::UnregisterAppservice { + appservice_identifier, + } => match services() + .appservice + .unregister_appservice(&appservice_identifier) + { + Ok(()) => RoomMessageEventContent::text_plain("Appservice unregistered."), + Err(e) => RoomMessageEventContent::text_plain(format!( + "Failed to unregister appservice: {}", + e + )), + }, + AdminCommand::ListAppservices => { + if let Ok(appservices) = services() + .appservice + .iter_ids() + .map(|ids| ids.collect::>()) + { + let count = appservices.len(); + let output = format!( + "Appservices ({}): {}", + count, + appservices + .into_iter() + .filter_map(|r| r.ok()) + .collect::>() + .join(", ") + ); + RoomMessageEventContent::text_plain(output) + } else { + RoomMessageEventContent::text_plain("Failed to get appservices.") + } + } + AdminCommand::ListRooms => { + let room_ids = services().rooms.metadata.iter_ids(); + let output = format!( + "Rooms:\n{}", + room_ids + .filter_map(|r| r.ok()) + .map(|id| id.to_string() + + "\tMembers: " + + &services() + .rooms + .state_cache + .room_joined_count(&id) + .ok() + .flatten() + .unwrap_or(0) + .to_string()) + .collect::>() + .join("\n") + ); + RoomMessageEventContent::text_plain(output) + } + AdminCommand::ListLocalUsers => match services().users.list_local_users() { + Ok(users) => { + let mut msg: String = format!("Found {} local user account(s):\n", users.len()); + msg += &users.join("\n"); + RoomMessageEventContent::text_plain(&msg) + } + Err(e) => RoomMessageEventContent::text_plain(e.to_string()), + }, + AdminCommand::IncomingFederation => { + let map = services() + .globals + .roomid_federationhandletime + .read() + .unwrap(); + let mut msg: String = format!("Handling {} incoming pdus:\n", map.len()); + + for (r, (e, i)) in map.iter() { + let elapsed = i.elapsed(); + msg += &format!( + "{} {}: {}m{}s\n", + r, + e, + elapsed.as_secs() / 60, + elapsed.as_secs() % 60 + ); + } + RoomMessageEventContent::text_plain(&msg) + } + AdminCommand::GetAuthChain { event_id } => { + let event_id = Arc::::from(event_id); + if let Some(event) = services().rooms.timeline.get_pdu_json(&event_id)? { + let room_id_str = event + .get("room_id") + .and_then(|val| val.as_str()) + .ok_or_else(|| Error::bad_database("Invalid event in database"))?; + + let room_id = <&RoomId>::try_from(room_id_str).map_err(|_| { + Error::bad_database("Invalid room id field in event in database") + })?; + let start = Instant::now(); + let count = services() + .rooms + .auth_chain + .get_auth_chain(room_id, vec![event_id]) + .await? 
+ .count(); + let elapsed = start.elapsed(); + RoomMessageEventContent::text_plain(format!( + "Loaded auth chain with length {} in {:?}", + count, elapsed + )) + } else { + RoomMessageEventContent::text_plain("Event not found.") + } + } + AdminCommand::ParsePdu => { + if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" + { + let string = body[1..body.len() - 1].join("\n"); + match serde_json::from_str(&string) { + Ok(value) => { + match ruma::signatures::reference_hash(&value, &RoomVersionId::V6) { + Ok(hash) => { + let event_id = EventId::parse(format!("${}", hash)); + + match serde_json::from_value::( + serde_json::to_value(value).expect("value is json"), + ) { + Ok(pdu) => RoomMessageEventContent::text_plain(format!( + "EventId: {:?}\n{:#?}", + event_id, pdu + )), + Err(e) => RoomMessageEventContent::text_plain(format!( + "EventId: {:?}\nCould not parse event: {}", + event_id, e + )), + } + } + Err(e) => RoomMessageEventContent::text_plain(format!( + "Could not parse PDU JSON: {:?}", + e + )), + } + } + Err(e) => RoomMessageEventContent::text_plain(format!( + "Invalid json in command body: {}", + e + )), + } + } else { + RoomMessageEventContent::text_plain("Expected code block in command body.") + } + } + AdminCommand::GetPdu { event_id } => { + let mut outlier = false; + let mut pdu_json = services() + .rooms + .timeline + .get_non_outlier_pdu_json(&event_id)?; + if pdu_json.is_none() { + outlier = true; + pdu_json = services().rooms.timeline.get_pdu_json(&event_id)?; + } + match pdu_json { + Some(json) => { + let json_text = serde_json::to_string_pretty(&json) + .expect("canonical json is valid json"); + RoomMessageEventContent::text_html( + format!( + "{}\n```json\n{}\n```", + if outlier { + "PDU is outlier" + } else { + "PDU was accepted" + }, + json_text + ), + format!( + "

<p>{}</p>\n<pre><code class=\"language-json\">{}\n</code></pre>
\n", + if outlier { + "PDU is outlier" + } else { + "PDU was accepted" + }, + HtmlEscape(&json_text) + ), + ) + } + None => RoomMessageEventContent::text_plain("PDU not found."), + } + } + AdminCommand::DatabaseMemoryUsage => match services().globals.db.memory_usage() { + Ok(response) => RoomMessageEventContent::text_plain(response), + Err(e) => RoomMessageEventContent::text_plain(format!( + "Failed to get database memory usage: {}", + e + )), + }, + AdminCommand::ShowConfig => { + // Construct and send the response + RoomMessageEventContent::text_plain(format!("{}", services().globals.config)) + } + AdminCommand::ResetPassword { username } => { + let user_id = match UserId::parse_with_server_name( + username.as_str().to_lowercase(), + services().globals.server_name(), + ) { + Ok(id) => id, + Err(e) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "The supplied username is not a valid username: {}", + e + ))) + } + }; + + // Check if the specified user is valid + if !services().users.exists(&user_id)? + || services().users.is_deactivated(&user_id)? + || user_id + == UserId::parse_with_server_name( + "conduit", + services().globals.server_name(), + ) + .expect("conduit user exists") + { + return Ok(RoomMessageEventContent::text_plain( + "The specified user does not exist or is deactivated!", + )); + } + + let new_password = utils::random_string(AUTO_GEN_PASSWORD_LENGTH); + + match services() + .users + .set_password(&user_id, Some(new_password.as_str())) + { + Ok(()) => RoomMessageEventContent::text_plain(format!( + "Successfully reset the password for user {}: {}", + user_id, new_password + )), + Err(e) => RoomMessageEventContent::text_plain(format!( + "Couldn't reset the password for user {}: {}", + user_id, e + )), + } + } + AdminCommand::CreateUser { username, password } => { + let password = password.unwrap_or(utils::random_string(AUTO_GEN_PASSWORD_LENGTH)); + // Validate user id + let user_id = match UserId::parse_with_server_name( + username.as_str().to_lowercase(), + services().globals.server_name(), + ) { + Ok(id) => id, + Err(e) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "The supplied username is not a valid username: {}", + e + ))) + } + }; + if user_id.is_historical() { + return Ok(RoomMessageEventContent::text_plain(format!( + "userid {user_id} is not allowed due to historical" + ))); + } + if services().users.exists(&user_id)? 
{ + return Ok(RoomMessageEventContent::text_plain(format!( + "userid {user_id} already exists" + ))); + } + // Create user + services().users.create(&user_id, Some(password.as_str()))?; + + // Default to pretty displayname + let mut displayname = user_id.localpart().to_owned(); + + // If enabled append lightning bolt to display name (default true) + if services().globals.enable_lightning_bolt() { + displayname.push_str(" ⚡️"); + } + + services() + .users + .set_displayname(&user_id, Some(displayname))?; + + // Initial account data + services().account_data.update( + None, + &user_id, + ruma::events::GlobalAccountDataEventType::PushRules + .to_string() + .into(), + &serde_json::to_value(ruma::events::push_rules::PushRulesEvent { + content: ruma::events::push_rules::PushRulesEventContent { + global: ruma::push::Ruleset::server_default(&user_id), + }, + }) + .expect("to json value always works"), + )?; + + // we dont add a device since we're not the user, just the creator + + // Inhibit login does not work for guests + RoomMessageEventContent::text_plain(format!( + "Created user with user_id: {user_id} and password: {password}" + )) + } + AdminCommand::DisableRoom { room_id } => { + services().rooms.metadata.disable_room(&room_id, true)?; + RoomMessageEventContent::text_plain("Room disabled.") + } + AdminCommand::EnableRoom { room_id } => { + services().rooms.metadata.disable_room(&room_id, false)?; + RoomMessageEventContent::text_plain("Room enabled.") + } + AdminCommand::DeactivateUser { + leave_rooms, + user_id, + } => { + let user_id = Arc::::from(user_id); + if services().users.exists(&user_id)? { + RoomMessageEventContent::text_plain(format!( + "Making {} leave all rooms before deactivation...", + user_id + )); + + services().users.deactivate_account(&user_id)?; + + if leave_rooms { + leave_all_rooms(&user_id).await?; + } + + RoomMessageEventContent::text_plain(format!( + "User {} has been deactivated", + user_id + )) + } else { + RoomMessageEventContent::text_plain(format!( + "User {} doesn't exist on this server", + user_id + )) + } + } + AdminCommand::DeactivateAll { leave_rooms, force } => { + if body.len() > 2 && body[0].trim() == "```" && body.last().unwrap().trim() == "```" + { + let usernames = body.clone().drain(1..body.len() - 1).collect::>(); + + let mut user_ids: Vec<&UserId> = Vec::new(); + + for &username in &usernames { + match <&UserId>::try_from(username) { + Ok(user_id) => user_ids.push(user_id), + Err(_) => { + return Ok(RoomMessageEventContent::text_plain(format!( + "{} is not a valid username", + username + ))) + } + } + } + + let mut deactivation_count = 0; + let mut admins = Vec::new(); + + if !force { + user_ids.retain(|&user_id| match services().users.is_admin(user_id) { + Ok(is_admin) => match is_admin { + true => { + admins.push(user_id.localpart()); + false + } + false => true, + }, + Err(_) => false, + }) + } + + for &user_id in &user_ids { + match services().users.deactivate_account(user_id) { + Ok(_) => deactivation_count += 1, + Err(_) => {} + } + } + + if leave_rooms { + for &user_id in &user_ids { + let _ = leave_all_rooms(user_id).await; + } + } + + if admins.is_empty() { + RoomMessageEventContent::text_plain(format!( + "Deactivated {} accounts.", + deactivation_count + )) + } else { + RoomMessageEventContent::text_plain(format!("Deactivated {} accounts.\nSkipped admin accounts: {:?}. 
Use --force to deactivate admin accounts", deactivation_count, admins.join(", "))) + } + } else { + RoomMessageEventContent::text_plain( + "Expected code block in command body. Add --help for details.", + ) + } + } + }; + + Ok(reply_message_content) + } + + // Utility to turn clap's `--help` text to HTML. + fn usage_to_html(&self, text: &str, server_name: &ServerName) -> String { + // Replace `@conduit:servername:-subcmdname` with `@conduit:servername: subcmdname` + let text = text.replace( + &format!("@conduit:{}:-", server_name), + &format!("@conduit:{}: ", server_name), + ); + + // For the conduit admin room, subcommands become main commands + let text = text.replace("SUBCOMMAND", "COMMAND"); + let text = text.replace("subcommand", "command"); + + // Escape option names (e.g. ``) since they look like HTML tags + let text = text.replace('<', "<").replace('>', ">"); + + // Italicize the first line (command name and version text) + let re = Regex::new("^(.*?)\n").expect("Regex compilation should not fail"); + let text = re.replace_all(&text, "$1\n"); + + // Unmerge wrapped lines + let text = text.replace("\n ", " "); + + // Wrap option names in backticks. The lines look like: + // -V, --version Prints version information + // And are converted to: + // -V, --version: Prints version information + // (?m) enables multi-line mode for ^ and $ + let re = Regex::new("(?m)^ (([a-zA-Z_&;-]+(, )?)+) +(.*)$") + .expect("Regex compilation should not fail"); + let text = re.replace_all(&text, "$1: $4"); + + // Look for a `[commandbody]` tag. If it exists, use all lines below it that + // start with a `#` in the USAGE section. + let mut text_lines: Vec<&str> = text.lines().collect(); + let mut command_body = String::new(); + + if let Some(line_index) = text_lines.iter().position(|line| *line == "[commandbody]") { + text_lines.remove(line_index); + + while text_lines + .get(line_index) + .map(|line| line.starts_with('#')) + .unwrap_or(false) + { + command_body += if text_lines[line_index].starts_with("# ") { + &text_lines[line_index][2..] + } else { + &text_lines[line_index][1..] + }; + command_body += "[nobr]\n"; + text_lines.remove(line_index); + } + } + + let text = text_lines.join("\n"); + + // Improve the usage section + let text = if command_body.is_empty() { + // Wrap the usage line in code tags + let re = Regex::new("(?m)^USAGE:\n (@conduit:.*)$") + .expect("Regex compilation should not fail"); + re.replace_all(&text, "USAGE:\n$1").to_string() + } else { + // Wrap the usage line in a code block, and add a yaml block example + // This makes the usage of e.g. `register-appservice` more accurate + let re = Regex::new("(?m)^USAGE:\n (.*?)\n\n") + .expect("Regex compilation should not fail"); + re.replace_all(&text, "USAGE:\n
<pre>$1[nobr]\n[commandbodyblock]</pre>
") + .replace("[commandbodyblock]", &command_body) + }; + + // Add HTML line-breaks + + text.replace("\n\n\n", "\n\n") + .replace('\n', "
\n") + .replace("[nobr]
", "") + } + + /// Create the admin room. + /// + /// Users in this room are considered admins by conduit, and the room can be + /// used to issue admin commands by talking to the server user inside it. + pub(crate) async fn create_admin_room(&self) -> Result<()> { + let room_id = RoomId::new(services().globals.server_name()); + + services().rooms.short.get_or_create_shortroomid(&room_id)?; + + let mutex_state = Arc::clone( + services() + .globals + .roomid_mutex_state + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + // Create a user for the server + let conduit_user = + UserId::parse_with_server_name("conduit", services().globals.server_name()) + .expect("@conduit:server_name is valid"); + + services().users.create(&conduit_user, None)?; + + let mut content = RoomCreateEventContent::new(conduit_user.clone()); + content.federate = true; + content.predecessor = None; + content.room_version = services().globals.default_room_version(); + + // 1. The room create event + services().rooms.timeline.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomCreate, + content: to_raw_value(&content).expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; + + // 2. Make conduit bot join + services().rooms.timeline.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Join, + displayname: None, + avatar_url: None, + is_direct: None, + third_party_invite: None, + blurhash: None, + reason: None, + join_authorized_via_users_server: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(conduit_user.to_string()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; + + // 3. 
Power levels + let mut users = BTreeMap::new(); + users.insert(conduit_user.clone(), 100.into()); + + services().rooms.timeline.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomPowerLevels, + content: to_raw_value(&RoomPowerLevelsEventContent { + users, + ..Default::default() + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; + + // 4.1 Join Rules + services().rooms.timeline.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomJoinRules, + content: to_raw_value(&RoomJoinRulesEventContent::new(JoinRule::Invite)) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; + + // 4.2 History Visibility + services().rooms.timeline.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomHistoryVisibility, + content: to_raw_value(&RoomHistoryVisibilityEventContent::new( + HistoryVisibility::Shared, + )) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; + + // 4.3 Guest Access + services().rooms.timeline.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomGuestAccess, + content: to_raw_value(&RoomGuestAccessEventContent::new(GuestAccess::Forbidden)) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; + + // 5. Events implied by name and topic + let room_name = format!("{} Admin Room", services().globals.server_name()); + services().rooms.timeline.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomName, + content: to_raw_value(&RoomNameEventContent::new(Some(room_name))) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; + + services().rooms.timeline.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomTopic, + content: to_raw_value(&RoomTopicEventContent { + topic: format!("Manage {}", services().globals.server_name()), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; + + // 6. Room alias + let alias: OwnedRoomAliasId = format!("#admins:{}", services().globals.server_name()) + .try_into() + .expect("#admins:server_name is a valid alias name"); + + services().rooms.timeline.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomCanonicalAlias, + content: to_raw_value(&RoomCanonicalAliasEventContent { + alias: Some(alias.clone()), + alt_aliases: Vec::new(), + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; + + services().rooms.alias.set_alias(&alias, &room_id)?; + + Ok(()) + } + + /// Invite the user to the conduit admin room. + /// + /// In conduit, this is equivalent to granting admin privileges. 
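+    ///
+    /// Hypothetical call site (e.g. right after the first local account registers),
+    /// assuming the admin service is reachable through `services()`:
+    ///
+    /// ```ignore
+    /// // Joining the admin room is what actually confers admin powers.
+    /// services()
+    ///     .admin
+    ///     .make_user_admin(&user_id, user_id.localpart().to_owned())
+    ///     .await?;
+    /// ```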
+ pub(crate) async fn make_user_admin( + &self, + user_id: &UserId, + displayname: String, + ) -> Result<()> { + let admin_room_alias: Box = + format!("#admins:{}", services().globals.server_name()) + .try_into() + .expect("#admins:server_name is a valid alias name"); + let room_id = services() + .rooms + .alias + .resolve_local_alias(&admin_room_alias)? + .expect("Admin room must exist"); + + let mutex_state = Arc::clone( + services() + .globals + .roomid_mutex_state + .write() + .unwrap() + .entry(room_id.clone()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + // Use the server user to grant the new admin's power level + let conduit_user = + UserId::parse_with_server_name("conduit", services().globals.server_name()) + .expect("@conduit:server_name is valid"); + + // Invite and join the real user + services().rooms.timeline.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Invite, + displayname: None, + avatar_url: None, + is_direct: None, + third_party_invite: None, + blurhash: None, + reason: None, + join_authorized_via_users_server: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; + services().rooms.timeline.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomMember, + content: to_raw_value(&RoomMemberEventContent { + membership: MembershipState::Join, + displayname: Some(displayname), + avatar_url: None, + is_direct: None, + third_party_invite: None, + blurhash: None, + reason: None, + join_authorized_via_users_server: None, + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some(user_id.to_string()), + redacts: None, + }, + user_id, + &room_id, + &state_lock, + )?; + + // Set power level + let mut users = BTreeMap::new(); + users.insert(conduit_user.to_owned(), 100.into()); + users.insert(user_id.to_owned(), 100.into()); + + services().rooms.timeline.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomPowerLevels, + content: to_raw_value(&RoomPowerLevelsEventContent { + users, + ..Default::default() + }) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: Some("".to_owned()), + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; + + // Send welcome message + services().rooms.timeline.build_and_append_pdu( + PduBuilder { + event_type: RoomEventType::RoomMessage, + content: to_raw_value(&RoomMessageEventContent::text_html( + format!("## Thank you for trying out Conduit!\n\nConduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.\n\nHelpful links:\n> Website: https://conduit.rs\n> Git and Documentation: https://gitlab.com/famedly/conduit\n> Report issues: https://gitlab.com/famedly/conduit/-/issues\n\nFor a list of available commands, send the following message in this room: `@conduit:{}: --help`\n\nHere are some rooms you can join (by typing the command):\n\nConduit room (Ask questions and get notified on updates):\n`/join #conduit:fachschaften.org`\n\nConduit lounge (Off-topic, only Conduit users are allowed to join)\n`/join #conduit-lounge:conduit.rs`", services().globals.server_name()), + format!("

<h2>Thank you for trying out Conduit!</h2>\n<p>Conduit is currently in Beta. This means you can join and participate in most Matrix rooms, but not all features are supported and you might run into bugs from time to time.</p>\n<p>Helpful links:</p>\n<blockquote>\n<p>Website: https://conduit.rs<br>Git and Documentation: https://gitlab.com/famedly/conduit<br>Report issues: https://gitlab.com/famedly/conduit/-/issues</p>\n</blockquote>\n<p>For a list of available commands, send the following message in this room: <code>@conduit:{}: --help</code></p>\n<p>Here are some rooms you can join (by typing the command):</p>\n<p>Conduit room (Ask questions and get notified on updates):<br><code>/join #conduit:fachschaften.org</code></p>\n<p>Conduit lounge (Off-topic, only Conduit users are allowed to join)<br><code>/join #conduit-lounge:conduit.rs</code></p>
\n", services().globals.server_name()), + )) + .expect("event is valid, we just created it"), + unsigned: None, + state_key: None, + redacts: None, + }, + &conduit_user, + &room_id, + &state_lock, + )?; + + Ok(()) + } +} + +#[cfg(test)] +mod test { + use super::*; + + #[test] + fn get_help_short() { + get_help_inner("-h"); + } + + #[test] + fn get_help_long() { + get_help_inner("--help"); + } + + #[test] + fn get_help_subcommand() { + get_help_inner("help"); + } + + fn get_help_inner(input: &str) { + let error = AdminCommand::try_parse_from(["argv[0] doesn't matter", input]) + .unwrap_err() + .to_string(); + + // Search for a handful of keywords that suggest the help printed properly + assert!(error.contains("Usage:")); + assert!(error.contains("Commands:")); + assert!(error.contains("Options:")); + } +} diff --git a/src/service/appservice/data.rs b/src/service/appservice/data.rs new file mode 100644 index 00000000..744f0f94 --- /dev/null +++ b/src/service/appservice/data.rs @@ -0,0 +1,19 @@ +use crate::Result; + +pub trait Data: Send + Sync { + /// Registers an appservice and returns the ID to the caller + fn register_appservice(&self, yaml: serde_yaml::Value) -> Result; + + /// Remove an appservice registration + /// + /// # Arguments + /// + /// * `service_name` - the name you send to register the service previously + fn unregister_appservice(&self, service_name: &str) -> Result<()>; + + fn get_registration(&self, id: &str) -> Result>; + + fn iter_ids<'a>(&'a self) -> Result> + 'a>>; + + fn all(&self) -> Result>; +} diff --git a/src/service/appservice/mod.rs b/src/service/appservice/mod.rs new file mode 100644 index 00000000..3052964d --- /dev/null +++ b/src/service/appservice/mod.rs @@ -0,0 +1,37 @@ +mod data; + +pub use data::Data; + +use crate::Result; + +pub struct Service { + pub db: &'static dyn Data, +} + +impl Service { + /// Registers an appservice and returns the ID to the caller + pub fn register_appservice(&self, yaml: serde_yaml::Value) -> Result { + self.db.register_appservice(yaml) + } + + /// Remove an appservice registration + /// + /// # Arguments + /// + /// * `service_name` - the name you send to register the service previously + pub fn unregister_appservice(&self, service_name: &str) -> Result<()> { + self.db.unregister_appservice(service_name) + } + + pub fn get_registration(&self, id: &str) -> Result> { + self.db.get_registration(id) + } + + pub fn iter_ids(&self) -> Result> + '_> { + self.db.iter_ids() + } + + pub fn all(&self) -> Result> { + self.db.all() + } +} diff --git a/src/service/globals/data.rs b/src/service/globals/data.rs new file mode 100644 index 00000000..04371a0a --- /dev/null +++ b/src/service/globals/data.rs @@ -0,0 +1,34 @@ +use std::collections::BTreeMap; + +use async_trait::async_trait; +use ruma::{ + api::federation::discovery::{ServerSigningKeys, VerifyKey}, + signatures::Ed25519KeyPair, + DeviceId, OwnedServerSigningKeyId, ServerName, UserId, +}; + +use crate::Result; + +#[async_trait] +pub trait Data: Send + Sync { + fn next_count(&self) -> Result; + fn current_count(&self) -> Result; + async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()>; + fn cleanup(&self) -> Result<()>; + fn memory_usage(&self) -> Result; + fn load_keypair(&self) -> Result; + fn remove_keypair(&self) -> Result<()>; + fn add_signing_key( + &self, + origin: &ServerName, + new_keys: ServerSigningKeys, + ) -> Result>; + + /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server. 
+ fn signing_keys_for( + &self, + origin: &ServerName, + ) -> Result>; + fn database_version(&self) -> Result; + fn bump_database_version(&self, new_version: u64) -> Result<()>; +} diff --git a/src/service/globals/mod.rs b/src/service/globals/mod.rs new file mode 100644 index 00000000..affc0516 --- /dev/null +++ b/src/service/globals/mod.rs @@ -0,0 +1,352 @@ +mod data; +pub use data::Data; +use ruma::{ + OwnedDeviceId, OwnedEventId, OwnedRoomId, OwnedServerName, OwnedServerSigningKeyId, OwnedUserId, +}; + +use crate::api::server_server::FedDest; + +use crate::{Config, Error, Result}; +use ruma::{ + api::{ + client::sync::sync_events, + federation::discovery::{ServerSigningKeys, VerifyKey}, + }, + DeviceId, RoomVersionId, ServerName, UserId, +}; +use std::{ + collections::{BTreeMap, HashMap}, + fs, + future::Future, + net::{IpAddr, SocketAddr}, + path::PathBuf, + sync::{Arc, Mutex, RwLock}, + time::{Duration, Instant}, +}; +use tokio::sync::{broadcast, watch::Receiver, Mutex as TokioMutex, Semaphore}; +use tracing::error; +use trust_dns_resolver::TokioAsyncResolver; + +type WellKnownMap = HashMap; +type TlsNameMap = HashMap, u16)>; +type RateLimitState = (Instant, u32); // Time if last failed try, number of failed tries +type SyncHandle = ( + Option, // since + Receiver>>, // rx +); + +pub struct Service { + pub db: &'static dyn Data, + + pub actual_destination_cache: Arc>, // actual_destination, host + pub tls_name_override: Arc>, + pub config: Config, + keypair: Arc, + dns_resolver: TokioAsyncResolver, + jwt_decoding_key: Option, + federation_client: reqwest::Client, + default_client: reqwest::Client, + pub stable_room_versions: Vec, + pub unstable_room_versions: Vec, + pub bad_event_ratelimiter: Arc>>, + pub bad_signature_ratelimiter: Arc, RateLimitState>>>, + pub servername_ratelimiter: Arc>>>, + pub sync_receivers: RwLock>, + pub roomid_mutex_insert: RwLock>>>, + pub roomid_mutex_state: RwLock>>>, + pub roomid_mutex_federation: RwLock>>>, // this lock will be held longer + pub roomid_federationhandletime: RwLock>, + pub stateres_mutex: Arc>, + pub rotate: RotationHandler, +} + +/// Handles "rotation" of long-polling requests. "Rotation" in this context is similar to "rotation" of log files and the like. +/// +/// This is utilized to have sync workers return early and release read locks on the database. +pub struct RotationHandler(broadcast::Sender<()>, broadcast::Receiver<()>); + +impl RotationHandler { + pub fn new() -> Self { + let (s, r) = broadcast::channel(1); + Self(s, r) + } + + pub fn watch(&self) -> impl Future { + let mut r = self.0.subscribe(); + + async move { + let _ = r.recv().await; + } + } + + pub fn fire(&self) { + let _ = self.0.send(()); + } +} + +impl Default for RotationHandler { + fn default() -> Self { + Self::new() + } +} + +impl Service { + pub fn load(db: &'static dyn Data, config: Config) -> Result { + let keypair = db.load_keypair(); + + let keypair = match keypair { + Ok(k) => k, + Err(e) => { + error!("Keypair invalid. Deleting..."); + db.remove_keypair()?; + return Err(e); + } + }; + + let tls_name_override = Arc::new(RwLock::new(TlsNameMap::new())); + + let jwt_decoding_key = config + .jwt_secret + .as_ref() + .map(|secret| jsonwebtoken::DecodingKey::from_secret(secret.as_bytes())); + + let default_client = reqwest_client_builder(&config)?.build()?; + let name_override = Arc::clone(&tls_name_override); + let federation_client = reqwest_client_builder(&config)? 
+ .resolve_fn(move |domain| { + let read_guard = name_override.read().unwrap(); + let (override_name, port) = read_guard.get(&domain)?; + let first_name = override_name.get(0)?; + Some(SocketAddr::new(*first_name, *port)) + }) + .build()?; + + // Supported and stable room versions + let stable_room_versions = vec![ + RoomVersionId::V6, + RoomVersionId::V7, + RoomVersionId::V8, + RoomVersionId::V9, + RoomVersionId::V10, + ]; + // Experimental, partially supported room versions + let unstable_room_versions = vec![RoomVersionId::V3, RoomVersionId::V4, RoomVersionId::V5]; + + let mut s = Self { + db, + config, + keypair: Arc::new(keypair), + dns_resolver: TokioAsyncResolver::tokio_from_system_conf().map_err(|e| { + error!( + "Failed to set up trust dns resolver with system config: {}", + e + ); + Error::bad_config("Failed to set up trust dns resolver with system config.") + })?, + actual_destination_cache: Arc::new(RwLock::new(WellKnownMap::new())), + tls_name_override, + federation_client, + default_client, + jwt_decoding_key, + stable_room_versions, + unstable_room_versions, + bad_event_ratelimiter: Arc::new(RwLock::new(HashMap::new())), + bad_signature_ratelimiter: Arc::new(RwLock::new(HashMap::new())), + servername_ratelimiter: Arc::new(RwLock::new(HashMap::new())), + roomid_mutex_state: RwLock::new(HashMap::new()), + roomid_mutex_insert: RwLock::new(HashMap::new()), + roomid_mutex_federation: RwLock::new(HashMap::new()), + roomid_federationhandletime: RwLock::new(HashMap::new()), + stateres_mutex: Arc::new(Mutex::new(())), + sync_receivers: RwLock::new(HashMap::new()), + rotate: RotationHandler::new(), + }; + + fs::create_dir_all(s.get_media_folder())?; + + if !s + .supported_room_versions() + .contains(&s.config.default_room_version) + { + error!("Room version in config isn't supported, falling back to default version"); + s.config.default_room_version = crate::config::default_default_room_version(); + }; + + Ok(s) + } + + /// Returns this server's keypair. 
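+    ///
+    /// Illustrative sketch: this mirrors how the keypair is already used when signing
+    /// outgoing federation events (the PDU object and room version are assumed):
+    ///
+    /// ```ignore
+    /// ruma::signatures::hash_and_sign_event(
+    ///     services().globals.server_name().as_str(),
+    ///     services().globals.keypair(),
+    ///     &mut pdu_json, // a ruma::CanonicalJsonObject
+    ///     &room_version,
+    /// )
+    /// .expect("our own events are signable");
+    /// ```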
+ pub fn keypair(&self) -> &ruma::signatures::Ed25519KeyPair { + &self.keypair + } + + /// Returns a reqwest client which can be used to send requests + pub fn default_client(&self) -> reqwest::Client { + // Client is cheap to clone (Arc wrapper) and avoids lifetime issues + self.default_client.clone() + } + + /// Returns a client used for resolving .well-knowns + pub fn federation_client(&self) -> reqwest::Client { + // Client is cheap to clone (Arc wrapper) and avoids lifetime issues + self.federation_client.clone() + } + + #[tracing::instrument(skip(self))] + pub fn next_count(&self) -> Result { + self.db.next_count() + } + + #[tracing::instrument(skip(self))] + pub fn current_count(&self) -> Result { + self.db.current_count() + } + + pub async fn watch(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { + self.db.watch(user_id, device_id).await + } + + pub fn cleanup(&self) -> Result<()> { + self.db.cleanup() + } + + pub fn memory_usage(&self) -> Result { + self.db.memory_usage() + } + + pub fn server_name(&self) -> &ServerName { + self.config.server_name.as_ref() + } + + pub fn max_request_size(&self) -> u32 { + self.config.max_request_size + } + + pub fn allow_registration(&self) -> bool { + self.config.allow_registration + } + + pub fn allow_encryption(&self) -> bool { + self.config.allow_encryption + } + + pub fn allow_federation(&self) -> bool { + self.config.allow_federation + } + + pub fn allow_room_creation(&self) -> bool { + self.config.allow_room_creation + } + + pub fn allow_unstable_room_versions(&self) -> bool { + self.config.allow_unstable_room_versions + } + + pub fn default_room_version(&self) -> RoomVersionId { + self.config.default_room_version.clone() + } + + pub fn enable_lightning_bolt(&self) -> bool { + self.config.enable_lightning_bolt + } + + pub fn trusted_servers(&self) -> &[OwnedServerName] { + &self.config.trusted_servers + } + + pub fn dns_resolver(&self) -> &TokioAsyncResolver { + &self.dns_resolver + } + + pub fn jwt_decoding_key(&self) -> Option<&jsonwebtoken::DecodingKey> { + self.jwt_decoding_key.as_ref() + } + + pub fn turn_password(&self) -> &String { + &self.config.turn_password + } + + pub fn turn_ttl(&self) -> u64 { + self.config.turn_ttl + } + + pub fn turn_uris(&self) -> &[String] { + &self.config.turn_uris + } + + pub fn turn_username(&self) -> &String { + &self.config.turn_username + } + + pub fn turn_secret(&self) -> &String { + &self.config.turn_secret + } + + pub fn emergency_password(&self) -> &Option { + &self.config.emergency_password + } + + pub fn supported_room_versions(&self) -> Vec { + let mut room_versions: Vec = vec![]; + room_versions.extend(self.stable_room_versions.clone()); + if self.allow_unstable_room_versions() { + room_versions.extend(self.unstable_room_versions.clone()); + }; + room_versions + } + + /// TODO: the key valid until timestamp is only honored in room version > 4 + /// Remove the outdated keys and insert the new ones. + /// + /// This doesn't actually check that the keys provided are newer than the old set. + pub fn add_signing_key( + &self, + origin: &ServerName, + new_keys: ServerSigningKeys, + ) -> Result> { + self.db.add_signing_key(origin, new_keys) + } + + /// This returns an empty `Ok(BTreeMap<..>)` when there are no keys found for the server. 
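+    ///
+    /// Sketch of the cache-first pattern used during signature verification (assumed
+    /// caller; `required_key_ids` would be the key IDs named in the PDU's signatures):
+    ///
+    /// ```ignore
+    /// let cached = services().globals.signing_keys_for(&origin)?;
+    /// if !required_key_ids.iter().all(|id| cached.contains_key(id)) {
+    ///     // Not all keys are cached yet: fetch them from the origin
+    ///     // or from a trusted key server before verifying.
+    /// }
+    /// ```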
+ pub fn signing_keys_for( + &self, + origin: &ServerName, + ) -> Result> { + self.db.signing_keys_for(origin) + } + + pub fn database_version(&self) -> Result { + self.db.database_version() + } + + pub fn bump_database_version(&self, new_version: u64) -> Result<()> { + self.db.bump_database_version(new_version) + } + + pub fn get_media_folder(&self) -> PathBuf { + let mut r = PathBuf::new(); + r.push(self.config.database_path.clone()); + r.push("media"); + r + } + + pub fn get_media_file(&self, key: &[u8]) -> PathBuf { + let mut r = PathBuf::new(); + r.push(self.config.database_path.clone()); + r.push("media"); + r.push(base64::encode_config(key, base64::URL_SAFE_NO_PAD)); + r + } +} + +fn reqwest_client_builder(config: &Config) -> Result { + let mut reqwest_client_builder = reqwest::Client::builder() + .connect_timeout(Duration::from_secs(30)) + .timeout(Duration::from_secs(60 * 3)); + + if let Some(proxy) = config.proxy.to_proxy()? { + reqwest_client_builder = reqwest_client_builder.proxy(proxy); + } + + Ok(reqwest_client_builder) +} diff --git a/src/service/key_backups/data.rs b/src/service/key_backups/data.rs new file mode 100644 index 00000000..bf640015 --- /dev/null +++ b/src/service/key_backups/data.rs @@ -0,0 +1,78 @@ +use std::collections::BTreeMap; + +use crate::Result; +use ruma::{ + api::client::backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, + serde::Raw, + OwnedRoomId, RoomId, UserId, +}; + +pub trait Data: Send + Sync { + fn create_backup( + &self, + user_id: &UserId, + backup_metadata: &Raw, + ) -> Result; + + fn delete_backup(&self, user_id: &UserId, version: &str) -> Result<()>; + + fn update_backup( + &self, + user_id: &UserId, + version: &str, + backup_metadata: &Raw, + ) -> Result; + + fn get_latest_backup_version(&self, user_id: &UserId) -> Result>; + + fn get_latest_backup(&self, user_id: &UserId) + -> Result)>>; + + fn get_backup(&self, user_id: &UserId, version: &str) -> Result>>; + + fn add_key( + &self, + user_id: &UserId, + version: &str, + room_id: &RoomId, + session_id: &str, + key_data: &Raw, + ) -> Result<()>; + + fn count_keys(&self, user_id: &UserId, version: &str) -> Result; + + fn get_etag(&self, user_id: &UserId, version: &str) -> Result; + + fn get_all( + &self, + user_id: &UserId, + version: &str, + ) -> Result>; + + fn get_room( + &self, + user_id: &UserId, + version: &str, + room_id: &RoomId, + ) -> Result>>; + + fn get_session( + &self, + user_id: &UserId, + version: &str, + room_id: &RoomId, + session_id: &str, + ) -> Result>>; + + fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()>; + + fn delete_room_keys(&self, user_id: &UserId, version: &str, room_id: &RoomId) -> Result<()>; + + fn delete_room_key( + &self, + user_id: &UserId, + version: &str, + room_id: &RoomId, + session_id: &str, + ) -> Result<()>; +} diff --git a/src/service/key_backups/mod.rs b/src/service/key_backups/mod.rs new file mode 100644 index 00000000..5fc52ced --- /dev/null +++ b/src/service/key_backups/mod.rs @@ -0,0 +1,127 @@ +mod data; +pub use data::Data; + +use crate::Result; +use ruma::{ + api::client::backup::{BackupAlgorithm, KeyBackupData, RoomKeyBackup}, + serde::Raw, + OwnedRoomId, RoomId, UserId, +}; +use std::collections::BTreeMap; + +pub struct Service { + pub db: &'static dyn Data, +} + +impl Service { + pub fn create_backup( + &self, + user_id: &UserId, + backup_metadata: &Raw, + ) -> Result { + self.db.create_backup(user_id, backup_metadata) + } + + pub fn delete_backup(&self, user_id: &UserId, version: &str) -> Result<()> { + 
self.db.delete_backup(user_id, version) + } + + pub fn update_backup( + &self, + user_id: &UserId, + version: &str, + backup_metadata: &Raw, + ) -> Result { + self.db.update_backup(user_id, version, backup_metadata) + } + + pub fn get_latest_backup_version(&self, user_id: &UserId) -> Result> { + self.db.get_latest_backup_version(user_id) + } + + pub fn get_latest_backup( + &self, + user_id: &UserId, + ) -> Result)>> { + self.db.get_latest_backup(user_id) + } + + pub fn get_backup( + &self, + user_id: &UserId, + version: &str, + ) -> Result>> { + self.db.get_backup(user_id, version) + } + + pub fn add_key( + &self, + user_id: &UserId, + version: &str, + room_id: &RoomId, + session_id: &str, + key_data: &Raw, + ) -> Result<()> { + self.db + .add_key(user_id, version, room_id, session_id, key_data) + } + + pub fn count_keys(&self, user_id: &UserId, version: &str) -> Result { + self.db.count_keys(user_id, version) + } + + pub fn get_etag(&self, user_id: &UserId, version: &str) -> Result { + self.db.get_etag(user_id, version) + } + + pub fn get_all( + &self, + user_id: &UserId, + version: &str, + ) -> Result> { + self.db.get_all(user_id, version) + } + + pub fn get_room( + &self, + user_id: &UserId, + version: &str, + room_id: &RoomId, + ) -> Result>> { + self.db.get_room(user_id, version, room_id) + } + + pub fn get_session( + &self, + user_id: &UserId, + version: &str, + room_id: &RoomId, + session_id: &str, + ) -> Result>> { + self.db.get_session(user_id, version, room_id, session_id) + } + + pub fn delete_all_keys(&self, user_id: &UserId, version: &str) -> Result<()> { + self.db.delete_all_keys(user_id, version) + } + + pub fn delete_room_keys( + &self, + user_id: &UserId, + version: &str, + room_id: &RoomId, + ) -> Result<()> { + self.db.delete_room_keys(user_id, version, room_id) + } + + pub fn delete_room_key( + &self, + user_id: &UserId, + version: &str, + room_id: &RoomId, + session_id: &str, + ) -> Result<()> { + self.db + .delete_room_key(user_id, version, room_id, session_id) + } +} diff --git a/src/service/media/data.rs b/src/service/media/data.rs new file mode 100644 index 00000000..75a682cb --- /dev/null +++ b/src/service/media/data.rs @@ -0,0 +1,20 @@ +use crate::Result; + +pub trait Data: Send + Sync { + fn create_file_metadata( + &self, + mxc: String, + width: u32, + height: u32, + content_disposition: Option<&str>, + content_type: Option<&str>, + ) -> Result>; + + /// Returns content_disposition, content_type and the metadata key. + fn search_file_metadata( + &self, + mxc: String, + width: u32, + height: u32, + ) -> Result<(Option, Option, Vec)>; +} diff --git a/src/service/media/mod.rs b/src/service/media/mod.rs new file mode 100644 index 00000000..93937533 --- /dev/null +++ b/src/service/media/mod.rs @@ -0,0 +1,226 @@ +mod data; +use std::io::Cursor; + +pub use data::Data; + +use crate::{services, Result}; +use image::imageops::FilterType; + +use tokio::{ + fs::File, + io::{AsyncReadExt, AsyncWriteExt}, +}; + +pub struct FileMeta { + pub content_disposition: Option, + pub content_type: Option, + pub file: Vec, +} + +pub struct Service { + pub db: &'static dyn Data, +} + +impl Service { + /// Uploads a file. 
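// Usage sketch for this media service (hypothetical caller such as an upload or
// download route; `mxc` and `body` are stand-ins, not names from this diff):
//
//     services()
//         .media
//         .create(mxc.clone(), None, Some("image/png"), &body)
//         .await?;
//     let meta = services().media.get(mxc).await?; // -> Option<FileMeta>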
+ pub async fn create( + &self, + mxc: String, + content_disposition: Option<&str>, + content_type: Option<&str>, + file: &[u8], + ) -> Result<()> { + // Width, Height = 0 if it's not a thumbnail + let key = self + .db + .create_file_metadata(mxc, 0, 0, content_disposition, content_type)?; + + let path = services().globals.get_media_file(&key); + let mut f = File::create(path).await?; + f.write_all(file).await?; + Ok(()) + } + + /// Uploads or replaces a file thumbnail. + #[allow(clippy::too_many_arguments)] + pub async fn upload_thumbnail( + &self, + mxc: String, + content_disposition: Option<&str>, + content_type: Option<&str>, + width: u32, + height: u32, + file: &[u8], + ) -> Result<()> { + let key = + self.db + .create_file_metadata(mxc, width, height, content_disposition, content_type)?; + + let path = services().globals.get_media_file(&key); + let mut f = File::create(path).await?; + f.write_all(file).await?; + + Ok(()) + } + + /// Downloads a file. + pub async fn get(&self, mxc: String) -> Result> { + if let Ok((content_disposition, content_type, key)) = + self.db.search_file_metadata(mxc, 0, 0) + { + let path = services().globals.get_media_file(&key); + let mut file = Vec::new(); + File::open(path).await?.read_to_end(&mut file).await?; + + Ok(Some(FileMeta { + content_disposition, + content_type, + file, + })) + } else { + Ok(None) + } + } + + /// Returns width, height of the thumbnail and whether it should be cropped. Returns None when + /// the server should send the original file. + pub fn thumbnail_properties(&self, width: u32, height: u32) -> Option<(u32, u32, bool)> { + match (width, height) { + (0..=32, 0..=32) => Some((32, 32, true)), + (0..=96, 0..=96) => Some((96, 96, true)), + (0..=320, 0..=240) => Some((320, 240, false)), + (0..=640, 0..=480) => Some((640, 480, false)), + (0..=800, 0..=600) => Some((800, 600, false)), + _ => None, + } + } + + /// Downloads a file's thumbnail. + /// + /// Here's an example on how it works: + /// + /// - Client requests an image with width=567, height=567 + /// - Server rounds that up to (800, 600), so it doesn't have to save too many thumbnails + /// - Server rounds that up again to (958, 600) to fix the aspect ratio (only for width,height>96) + /// - Server creates the thumbnail and sends it to the user + /// + /// For width,height <= 96 the server uses another thumbnailing algorithm which crops the image afterwards. 
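// Worked examples for `thumbnail_properties` above (these follow directly from
// its match arms):
//
//     thumbnail_properties(567, 567)   == Some((800, 600, false)) // the doc example above
//     thumbnail_properties(20, 20)     == Some((32, 32, true))    // small avatar, cropped
//     thumbnail_properties(1920, 1080) == None                    // serve the original file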
+ pub async fn get_thumbnail( + &self, + mxc: String, + width: u32, + height: u32, + ) -> Result> { + let (width, height, crop) = self + .thumbnail_properties(width, height) + .unwrap_or((0, 0, false)); // 0, 0 because that's the original file + + if let Ok((content_disposition, content_type, key)) = + self.db.search_file_metadata(mxc.clone(), width, height) + { + // Using saved thumbnail + let path = services().globals.get_media_file(&key); + let mut file = Vec::new(); + File::open(path).await?.read_to_end(&mut file).await?; + + Ok(Some(FileMeta { + content_disposition, + content_type, + file: file.to_vec(), + })) + } else if let Ok((content_disposition, content_type, key)) = + self.db.search_file_metadata(mxc.clone(), 0, 0) + { + // Generate a thumbnail + let path = services().globals.get_media_file(&key); + let mut file = Vec::new(); + File::open(path).await?.read_to_end(&mut file).await?; + + if let Ok(image) = image::load_from_memory(&file) { + let original_width = image.width(); + let original_height = image.height(); + if width > original_width || height > original_height { + return Ok(Some(FileMeta { + content_disposition, + content_type, + file: file.to_vec(), + })); + } + + let thumbnail = if crop { + image.resize_to_fill(width, height, FilterType::CatmullRom) + } else { + let (exact_width, exact_height) = { + // Copied from image::dynimage::resize_dimensions + let ratio = u64::from(original_width) * u64::from(height); + let nratio = u64::from(width) * u64::from(original_height); + + let use_width = nratio <= ratio; + let intermediate = if use_width { + u64::from(original_height) * u64::from(width) + / u64::from(original_width) + } else { + u64::from(original_width) * u64::from(height) + / u64::from(original_height) + }; + if use_width { + if intermediate <= u64::from(::std::u32::MAX) { + (width, intermediate as u32) + } else { + ( + (u64::from(width) * u64::from(::std::u32::MAX) / intermediate) + as u32, + ::std::u32::MAX, + ) + } + } else if intermediate <= u64::from(::std::u32::MAX) { + (intermediate as u32, height) + } else { + ( + ::std::u32::MAX, + (u64::from(height) * u64::from(::std::u32::MAX) / intermediate) + as u32, + ) + } + }; + + image.thumbnail_exact(exact_width, exact_height) + }; + + let mut thumbnail_bytes = Vec::new(); + thumbnail.write_to( + &mut Cursor::new(&mut thumbnail_bytes), + image::ImageOutputFormat::Png, + )?; + + // Save thumbnail in database so we don't have to generate it again next time + let thumbnail_key = self.db.create_file_metadata( + mxc, + width, + height, + content_disposition.as_deref(), + content_type.as_deref(), + )?; + + let path = services().globals.get_media_file(&thumbnail_key); + let mut f = File::create(path).await?; + f.write_all(&thumbnail_bytes).await?; + + Ok(Some(FileMeta { + content_disposition, + content_type, + file: thumbnail_bytes.to_vec(), + })) + } else { + // Couldn't parse file to generate thumbnail, send original + Ok(Some(FileMeta { + content_disposition, + content_type, + file: file.to_vec(), + })) + } + } else { + Ok(None) + } + } +} diff --git a/src/service/mod.rs b/src/service/mod.rs new file mode 100644 index 00000000..385dcc69 --- /dev/null +++ b/src/service/mod.rs @@ -0,0 +1,106 @@ +use std::{ + collections::HashMap, + sync::{Arc, Mutex}, +}; + +use lru_cache::LruCache; + +use crate::{Config, Result}; + +pub mod account_data; +pub mod admin; +pub mod appservice; +pub mod globals; +pub mod key_backups; +pub mod media; +pub mod pdu; +pub mod pusher; +pub mod rooms; +pub mod sending; +pub mod transaction_ids; 
+pub mod uiaa; +pub mod users; + +pub struct Services { + pub appservice: appservice::Service, + pub pusher: pusher::Service, + pub rooms: rooms::Service, + pub transaction_ids: transaction_ids::Service, + pub uiaa: uiaa::Service, + pub users: users::Service, + pub account_data: account_data::Service, + pub admin: Arc, + pub globals: globals::Service, + pub key_backups: key_backups::Service, + pub media: media::Service, + pub sending: Arc, +} + +impl Services { + pub fn build< + D: appservice::Data + + pusher::Data + + rooms::Data + + transaction_ids::Data + + uiaa::Data + + users::Data + + account_data::Data + + globals::Data + + key_backups::Data + + media::Data + + sending::Data + + 'static, + >( + db: &'static D, + config: Config, + ) -> Result { + Ok(Self { + appservice: appservice::Service { db }, + pusher: pusher::Service { db }, + rooms: rooms::Service { + alias: rooms::alias::Service { db }, + auth_chain: rooms::auth_chain::Service { db }, + directory: rooms::directory::Service { db }, + edus: rooms::edus::Service { + presence: rooms::edus::presence::Service { db }, + read_receipt: rooms::edus::read_receipt::Service { db }, + typing: rooms::edus::typing::Service { db }, + }, + event_handler: rooms::event_handler::Service, + lazy_loading: rooms::lazy_loading::Service { + db, + lazy_load_waiting: Mutex::new(HashMap::new()), + }, + metadata: rooms::metadata::Service { db }, + outlier: rooms::outlier::Service { db }, + pdu_metadata: rooms::pdu_metadata::Service { db }, + search: rooms::search::Service { db }, + short: rooms::short::Service { db }, + state: rooms::state::Service { db }, + state_accessor: rooms::state_accessor::Service { db }, + state_cache: rooms::state_cache::Service { db }, + state_compressor: rooms::state_compressor::Service { + db, + stateinfo_cache: Mutex::new(LruCache::new( + (100.0 * config.conduit_cache_capacity_modifier) as usize, + )), + }, + timeline: rooms::timeline::Service { + db, + lasttimelinecount_cache: Mutex::new(HashMap::new()), + }, + user: rooms::user::Service { db }, + }, + transaction_ids: transaction_ids::Service { db }, + uiaa: uiaa::Service { db }, + users: users::Service { db }, + account_data: account_data::Service { db }, + admin: admin::Service::build(), + key_backups: key_backups::Service { db }, + media: media::Service { db }, + sending: sending::Service::build(db, &config), + + globals: globals::Service::load(db, config)?, + }) + } +} diff --git a/src/pdu.rs b/src/service/pdu.rs similarity index 83% rename from src/pdu.rs rename to src/service/pdu.rs index 0f99f43b..593a687b 100644 --- a/src/pdu.rs +++ b/src/service/pdu.rs @@ -1,19 +1,20 @@ -use crate::Error; +use crate::{services, Error}; use ruma::{ events::{ - room::member::RoomMemberEventContent, AnyEphemeralRoomEvent, AnyInitialStateEvent, - AnyRoomEvent, AnyStateEvent, AnyStrippedStateEvent, AnySyncRoomEvent, AnySyncStateEvent, - EventType, StateEvent, + room::member::RoomMemberEventContent, AnyEphemeralRoomEvent, AnyStateEvent, + AnyStrippedStateEvent, AnySyncStateEvent, AnySyncTimelineEvent, AnyTimelineEvent, + RoomEventType, StateEvent, }, - serde::{CanonicalJsonObject, CanonicalJsonValue, Raw}, - state_res, EventId, MilliSecondsSinceUnixEpoch, RoomId, RoomVersionId, UInt, UserId, + serde::Raw, + state_res, CanonicalJsonObject, CanonicalJsonValue, EventId, MilliSecondsSinceUnixEpoch, + OwnedEventId, OwnedRoomId, OwnedUserId, RoomId, UInt, UserId, }; use serde::{Deserialize, Serialize}; use serde_json::{ json, value::{to_raw_value, RawValue as RawJsonValue}, }; -use 
std::{cmp::Ordering, collections::BTreeMap, convert::TryFrom}; +use std::{cmp::Ordering, collections::BTreeMap, sync::Arc}; use tracing::warn; /// Content hashes of a PDU. @@ -25,20 +26,20 @@ pub struct EventHash { #[derive(Clone, Deserialize, Serialize, Debug)] pub struct PduEvent { - pub event_id: EventId, - pub room_id: RoomId, - pub sender: UserId, + pub event_id: Arc, + pub room_id: OwnedRoomId, + pub sender: OwnedUserId, pub origin_server_ts: UInt, #[serde(rename = "type")] - pub kind: EventType, + pub kind: RoomEventType, pub content: Box, #[serde(skip_serializing_if = "Option::is_none")] pub state_key: Option, - pub prev_events: Vec, + pub prev_events: Vec>, pub depth: UInt, - pub auth_events: Vec, + pub auth_events: Vec>, #[serde(skip_serializing_if = "Option::is_none")] - pub redacts: Option, + pub redacts: Option>, #[serde(default, skip_serializing_if = "Option::is_none")] pub unsigned: Option>, pub hashes: EventHash, @@ -52,10 +53,10 @@ impl PduEvent { self.unsigned = None; let allowed: &[&str] = match self.kind { - EventType::RoomMember => &["membership"], - EventType::RoomCreate => &["creator"], - EventType::RoomJoinRules => &["join_rule"], - EventType::RoomPowerLevels => &[ + RoomEventType::RoomMember => &["join_authorised_via_users_server", "membership"], + RoomEventType::RoomCreate => &["creator"], + RoomEventType::RoomJoinRules => &["join_rule"], + RoomEventType::RoomPowerLevels => &[ "ban", "events", "events_default", @@ -65,7 +66,7 @@ impl PduEvent { "users", "users_default", ], - EventType::RoomHistoryVisibility => &["history_visibility"], + RoomEventType::RoomHistoryVisibility => &["history_visibility"], _ => &[], }; @@ -103,7 +104,7 @@ impl PduEvent { } #[tracing::instrument(skip(self))] - pub fn to_sync_room_event(&self) -> Raw { + pub fn to_sync_room_event(&self) -> Raw { let mut json = json!({ "content": self.content, "type": self.kind, @@ -147,7 +148,7 @@ impl PduEvent { } #[tracing::instrument(skip(self))] - pub fn to_room_event(&self) -> Raw { + pub fn to_room_event(&self) -> Raw { let mut json = json!({ "content": self.content, "type": self.kind, @@ -266,7 +267,9 @@ impl PduEvent { } impl state_res::Event for PduEvent { - fn event_id(&self) -> &EventId { + type Id = Arc; + + fn event_id(&self) -> &Self::Id { &self.event_id } @@ -278,7 +281,7 @@ impl state_res::Event for PduEvent { &self.sender } - fn event_type(&self) -> &EventType { + fn event_type(&self) -> &RoomEventType { &self.kind } @@ -294,15 +297,15 @@ impl state_res::Event for PduEvent { self.state_key.as_deref() } - fn prev_events(&self) -> Box + '_> { + fn prev_events(&self) -> Box + '_> { Box::new(self.prev_events.iter()) } - fn auth_events(&self) -> Box + '_> { + fn auth_events(&self) -> Box + '_> { Box::new(self.auth_events.iter()) } - fn redacts(&self) -> Option<&EventId> { + fn redacts(&self) -> Option<&Self::Id> { self.redacts.as_ref() } } @@ -331,44 +334,38 @@ impl Ord for PduEvent { /// Returns a tuple of the new `EventId` and the PDU as a `BTreeMap`. 
pub(crate) fn gen_event_id_canonical_json( pdu: &RawJsonValue, -) -> crate::Result<(EventId, CanonicalJsonObject)> { - let value = serde_json::from_str(pdu.get()).map_err(|e| { +) -> crate::Result<(OwnedEventId, CanonicalJsonObject)> { + let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { warn!("Error parsing incoming event {:?}: {:?}", pdu, e); Error::BadServerResponse("Invalid PDU in server response") })?; - let event_id = EventId::try_from(&*format!( + let room_id = value + .get("room_id") + .and_then(|id| RoomId::parse(id.as_str()?).ok()) + .ok_or_else(|| Error::bad_database("PDU in db has invalid room_id."))?; + + let room_version_id = services().rooms.state.get_room_version(&room_id); + + let event_id = format!( "${}", // Anything higher than version3 behaves the same - ruma::signatures::reference_hash(&value, &RoomVersionId::Version6) + ruma::signatures::reference_hash(&value, &room_version_id?) .expect("ruma can calculate reference hashes") - )) + ) + .try_into() .expect("ruma's reference hashes are valid event ids"); Ok((event_id, value)) } -/// Build the start of a PDU in order to add it to the `Database`. +/// Build the start of a PDU in order to add it to the Database. #[derive(Debug, Deserialize)] pub struct PduBuilder { #[serde(rename = "type")] - pub event_type: EventType, + pub event_type: RoomEventType, pub content: Box, pub unsigned: Option>, pub state_key: Option, - pub redacts: Option, -} - -/// Direct conversion prevents loss of the empty `state_key` that ruma requires. -impl From for PduBuilder { - fn from(event: AnyInitialStateEvent) -> Self { - Self { - event_type: EventType::from(event.event_type()), - content: to_raw_value(&event.content()) - .expect("AnyStateEventContent came from JSON and can thus turn back into JSON."), - unsigned: None, - state_key: Some(event.state_key().to_owned()), - redacts: None, - } - } + pub redacts: Option>, } diff --git a/src/service/pusher/data.rs b/src/service/pusher/data.rs new file mode 100644 index 00000000..e3171210 --- /dev/null +++ b/src/service/pusher/data.rs @@ -0,0 +1,17 @@ +use crate::Result; +use ruma::{ + api::client::push::{get_pushers, set_pusher}, + UserId, +}; + +pub trait Data: Send + Sync { + fn set_pusher(&self, sender: &UserId, pusher: set_pusher::v3::Pusher) -> Result<()>; + + fn get_pusher(&self, sender: &UserId, pushkey: &str) + -> Result>; + + fn get_pushers(&self, sender: &UserId) -> Result>; + + fn get_pushkeys<'a>(&'a self, sender: &UserId) + -> Box> + 'a>; +} diff --git a/src/service/pusher/mod.rs b/src/service/pusher/mod.rs new file mode 100644 index 00000000..7fee276b --- /dev/null +++ b/src/service/pusher/mod.rs @@ -0,0 +1,311 @@ +mod data; +pub use data::Data; +use ruma::events::AnySyncTimelineEvent; + +use crate::{services, Error, PduEvent, Result}; +use bytes::BytesMut; +use ruma::{ + api::{ + client::push::{get_pushers, set_pusher, PusherKind}, + push_gateway::send_event_notification::{ + self, + v1::{Device, Notification, NotificationCounts, NotificationPriority}, + }, + IncomingResponse, MatrixVersion, OutgoingRequest, SendAccessToken, + }, + events::{ + room::{name::RoomNameEventContent, power_levels::RoomPowerLevelsEventContent}, + RoomEventType, StateEventType, + }, + push::{Action, PushConditionRoomCtx, PushFormat, Ruleset, Tweak}, + serde::Raw, + uint, RoomId, UInt, UserId, +}; + +use std::{fmt::Debug, mem}; +use tracing::{error, info, warn}; + +pub struct Service { + pub db: &'static dyn Data, +} + +impl Service { + pub fn set_pusher(&self, sender: &UserId, 
pusher: set_pusher::v3::Pusher) -> Result<()> { + self.db.set_pusher(sender, pusher) + } + + pub fn get_pusher( + &self, + sender: &UserId, + pushkey: &str, + ) -> Result> { + self.db.get_pusher(sender, pushkey) + } + + pub fn get_pushers(&self, sender: &UserId) -> Result> { + self.db.get_pushers(sender) + } + + pub fn get_pushkeys<'a>(&'a self, sender: &UserId) -> Box>> { + self.db.get_pushkeys(sender) + } + + #[tracing::instrument(skip(self, destination, request))] + pub async fn send_request( + &self, + destination: &str, + request: T, + ) -> Result + where + T: Debug, + { + let destination = destination.replace("/_matrix/push/v1/notify", ""); + + let http_request = request + .try_into_http_request::( + &destination, + SendAccessToken::IfRequired(""), + &[MatrixVersion::V1_0], + ) + .map_err(|e| { + warn!("Failed to find destination {}: {}", destination, e); + Error::BadServerResponse("Invalid destination") + })? + .map(|body| body.freeze()); + + let reqwest_request = reqwest::Request::try_from(http_request) + .expect("all http requests are valid reqwest requests"); + + // TODO: we could keep this very short and let expo backoff do it's thing... + //*reqwest_request.timeout_mut() = Some(Duration::from_secs(5)); + + let url = reqwest_request.url().clone(); + let response = services() + .globals + .default_client() + .execute(reqwest_request) + .await; + + match response { + Ok(mut response) => { + // reqwest::Response -> http::Response conversion + let status = response.status(); + let mut http_response_builder = http::Response::builder() + .status(status) + .version(response.version()); + mem::swap( + response.headers_mut(), + http_response_builder + .headers_mut() + .expect("http::response::Builder is usable"), + ); + + let body = response.bytes().await.unwrap_or_else(|e| { + warn!("server error {}", e); + Vec::new().into() + }); // TODO: handle timeout + + if status != 200 { + info!( + "Push gateway returned bad response {} {}\n{}\n{:?}", + destination, + status, + url, + crate::utils::string_from_bytes(&body) + ); + } + + let response = T::IncomingResponse::try_from_http_response( + http_response_builder + .body(body) + .expect("reqwest body is valid http body"), + ); + response.map_err(|_| { + info!( + "Push gateway returned invalid response bytes {}\n{}", + destination, url + ); + Error::BadServerResponse("Push gateway returned bad response.") + }) + } + Err(e) => { + warn!("Could not send request to pusher {}: {}", destination, e); + Err(e.into()) + } + } + } + + #[tracing::instrument(skip(self, user, unread, pusher, ruleset, pdu))] + pub async fn send_push_notice( + &self, + user: &UserId, + unread: UInt, + pusher: &get_pushers::v3::Pusher, + ruleset: Ruleset, + pdu: &PduEvent, + ) -> Result<()> { + let mut notify = None; + let mut tweaks = Vec::new(); + + let power_levels: RoomPowerLevelsEventContent = services() + .rooms + .state_accessor + .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? + .map(|ev| { + serde_json::from_str(ev.content.get()) + .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) + }) + .transpose()? + .unwrap_or_default(); + + for action in self.get_actions( + user, + &ruleset, + &power_levels, + &pdu.to_sync_room_event(), + &pdu.room_id, + )? 
{ + let n = match action { + Action::DontNotify => false, + // TODO: Implement proper support for coalesce + Action::Notify | Action::Coalesce => true, + Action::SetTweak(tweak) => { + tweaks.push(tweak.clone()); + continue; + } + }; + + if notify.is_some() { + return Err(Error::bad_database( + r#"Malformed pushrule contains more than one of these actions: ["dont_notify", "notify", "coalesce"]"#, + )); + } + + notify = Some(n); + } + + if notify == Some(true) { + self.send_notice(unread, pusher, tweaks, pdu).await?; + } + // Else the event triggered no actions + + Ok(()) + } + + #[tracing::instrument(skip(self, user, ruleset, pdu))] + pub fn get_actions<'a>( + &self, + user: &UserId, + ruleset: &'a Ruleset, + power_levels: &RoomPowerLevelsEventContent, + pdu: &Raw, + room_id: &RoomId, + ) -> Result<&'a [Action]> { + let ctx = PushConditionRoomCtx { + room_id: room_id.to_owned(), + member_count: 10_u32.into(), // TODO: get member count efficiently + user_id: user.to_owned(), + user_display_name: services() + .users + .displayname(user)? + .unwrap_or_else(|| user.localpart().to_owned()), + users_power_levels: power_levels.users.clone(), + default_power_level: power_levels.users_default, + notification_power_levels: power_levels.notifications.clone(), + }; + + Ok(ruleset.get_actions(pdu, &ctx)) + } + + #[tracing::instrument(skip(self, unread, pusher, tweaks, event))] + async fn send_notice( + &self, + unread: UInt, + pusher: &get_pushers::v3::Pusher, + tweaks: Vec, + event: &PduEvent, + ) -> Result<()> { + // TODO: email + if pusher.kind == PusherKind::Email { + return Ok(()); + } + + // TODO: + // Two problems with this + // 1. if "event_id_only" is the only format kind it seems we should never add more info + // 2. can pusher/devices have conflicting formats + let event_id_only = pusher.data.format == Some(PushFormat::EventIdOnly); + let url = if let Some(url) = &pusher.data.url { + url + } else { + error!("Http Pusher must have URL specified."); + return Ok(()); + }; + + let mut device = Device::new(pusher.app_id.clone(), pusher.pushkey.clone()); + let mut data_minus_url = pusher.data.clone(); + // The url must be stripped off according to spec + data_minus_url.url = None; + device.data = data_minus_url.into(); + + // Tweaks are only added if the format is NOT event_id_only + if !event_id_only { + device.tweaks = tweaks.clone(); + } + + let d = &[device]; + let mut notifi = Notification::new(d); + + notifi.prio = NotificationPriority::Low; + notifi.event_id = Some(&event.event_id); + notifi.room_id = Some(&event.room_id); + // TODO: missed calls + notifi.counts = NotificationCounts::new(unread, uint!(0)); + + if event.kind == RoomEventType::RoomEncrypted + || tweaks + .iter() + .any(|t| matches!(t, Tweak::Highlight(true) | Tweak::Sound(_))) + { + notifi.prio = NotificationPriority::High + } + + if event_id_only { + self.send_request(url, send_event_notification::v1::Request::new(notifi)) + .await?; + } else { + notifi.sender = Some(&event.sender); + notifi.event_type = Some(&event.kind); + let content = serde_json::value::to_raw_value(&event.content).ok(); + notifi.content = content.as_deref(); + + if event.kind == RoomEventType::RoomMember { + notifi.user_is_target = event.state_key.as_deref() == Some(event.sender.as_str()); + } + + let user_name = services().users.displayname(&event.sender)?; + notifi.sender_display_name = user_name.as_deref(); + + let room_name = if let Some(room_name_pdu) = services() + .rooms + .state_accessor + .room_state_get(&event.room_id, 
&StateEventType::RoomName, "")? + { + serde_json::from_str::(room_name_pdu.content.get()) + .map_err(|_| Error::bad_database("Invalid room name event in database."))? + .name + } else { + None + }; + + notifi.room_name = room_name.as_deref(); + + self.send_request(url, send_event_notification::v1::Request::new(notifi)) + .await?; + } + + // TODO: email + + Ok(()) + } +} diff --git a/src/service/rooms/alias/data.rs b/src/service/rooms/alias/data.rs new file mode 100644 index 00000000..629b1ee1 --- /dev/null +++ b/src/service/rooms/alias/data.rs @@ -0,0 +1,19 @@ +use crate::Result; +use ruma::{OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId}; + +pub trait Data: Send + Sync { + /// Creates or updates the alias to the given room id. + fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()>; + + /// Forgets about an alias. Returns an error if the alias did not exist. + fn remove_alias(&self, alias: &RoomAliasId) -> Result<()>; + + /// Looks up the roomid for the given alias. + fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result>; + + /// Returns all local aliases that point to the given room + fn local_aliases_for_room<'a>( + &'a self, + room_id: &RoomId, + ) -> Box> + 'a>; +} diff --git a/src/service/rooms/alias/mod.rs b/src/service/rooms/alias/mod.rs new file mode 100644 index 00000000..d26030c0 --- /dev/null +++ b/src/service/rooms/alias/mod.rs @@ -0,0 +1,35 @@ +mod data; + +pub use data::Data; + +use crate::Result; +use ruma::{OwnedRoomAliasId, OwnedRoomId, RoomAliasId, RoomId}; + +pub struct Service { + pub db: &'static dyn Data, +} + +impl Service { + #[tracing::instrument(skip(self))] + pub fn set_alias(&self, alias: &RoomAliasId, room_id: &RoomId) -> Result<()> { + self.db.set_alias(alias, room_id) + } + + #[tracing::instrument(skip(self))] + pub fn remove_alias(&self, alias: &RoomAliasId) -> Result<()> { + self.db.remove_alias(alias) + } + + #[tracing::instrument(skip(self))] + pub fn resolve_local_alias(&self, alias: &RoomAliasId) -> Result> { + self.db.resolve_local_alias(alias) + } + + #[tracing::instrument(skip(self))] + pub fn local_aliases_for_room<'a>( + &'a self, + room_id: &RoomId, + ) -> Box> + 'a> { + self.db.local_aliases_for_room(room_id) + } +} diff --git a/src/service/rooms/auth_chain/data.rs b/src/service/rooms/auth_chain/data.rs new file mode 100644 index 00000000..e8c379fc --- /dev/null +++ b/src/service/rooms/auth_chain/data.rs @@ -0,0 +1,11 @@ +use crate::Result; +use std::{collections::HashSet, sync::Arc}; + +pub trait Data: Send + Sync { + fn get_cached_eventid_authchain( + &self, + shorteventid: &[u64], + ) -> Result>>>; + fn cache_auth_chain(&self, shorteventid: Vec, auth_chain: Arc>) + -> Result<()>; +} diff --git a/src/service/rooms/auth_chain/mod.rs b/src/service/rooms/auth_chain/mod.rs new file mode 100644 index 00000000..d3b6e401 --- /dev/null +++ b/src/service/rooms/auth_chain/mod.rs @@ -0,0 +1,165 @@ +mod data; +use std::{ + collections::{BTreeSet, HashSet}, + sync::Arc, +}; + +pub use data::Data; +use ruma::{api::client::error::ErrorKind, EventId, RoomId}; +use tracing::log::warn; + +use crate::{services, Error, Result}; + +pub struct Service { + pub db: &'static dyn Data, +} + +impl Service { + #[tracing::instrument(skip(self))] + pub fn get_cached_eventid_authchain<'a>( + &'a self, + key: &[u64], + ) -> Result>>> { + self.db.get_cached_eventid_authchain(key) + } + + #[tracing::instrument(skip(self))] + pub fn cache_auth_chain(&self, key: Vec, auth_chain: Arc>) -> Result<()> { + self.db.cache_auth_chain(key, auth_chain) + } 
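// Overview of the caching strategy used by `get_auth_chain` below: starting
// events are grouped into NUM_BUCKETS (50) buckets by `shorteventid % 50`. Each
// bucket's combined auth chain is cached under the list of its shorteventids,
// and every individual event's auth chain is additionally cached under a
// single-element key, so later requests can be served from either cache before
// falling back to `get_auth_chain_inner`. The returned iterator resolves the
// collected shorteventids back into `EventId`s.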
+ + #[tracing::instrument(skip(self, starting_events))] + pub async fn get_auth_chain<'a>( + &self, + room_id: &RoomId, + starting_events: Vec>, + ) -> Result> + 'a> { + const NUM_BUCKETS: usize = 50; + + let mut buckets = vec![BTreeSet::new(); NUM_BUCKETS]; + + let mut i = 0; + for id in starting_events { + let short = services().rooms.short.get_or_create_shorteventid(&id)?; + let bucket_id = (short % NUM_BUCKETS as u64) as usize; + buckets[bucket_id].insert((short, id.clone())); + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + } + + let mut full_auth_chain = HashSet::new(); + + let mut hits = 0; + let mut misses = 0; + for chunk in buckets { + if chunk.is_empty() { + continue; + } + + let chunk_key: Vec = chunk.iter().map(|(short, _)| short).copied().collect(); + if let Some(cached) = services() + .rooms + .auth_chain + .get_cached_eventid_authchain(&chunk_key)? + { + hits += 1; + full_auth_chain.extend(cached.iter().copied()); + continue; + } + misses += 1; + + let mut chunk_cache = HashSet::new(); + let mut hits2 = 0; + let mut misses2 = 0; + let mut i = 0; + for (sevent_id, event_id) in chunk { + if let Some(cached) = services() + .rooms + .auth_chain + .get_cached_eventid_authchain(&[sevent_id])? + { + hits2 += 1; + chunk_cache.extend(cached.iter().copied()); + } else { + misses2 += 1; + let auth_chain = Arc::new(self.get_auth_chain_inner(room_id, &event_id)?); + services() + .rooms + .auth_chain + .cache_auth_chain(vec![sevent_id], Arc::clone(&auth_chain))?; + println!( + "cache missed event {} with auth chain len {}", + event_id, + auth_chain.len() + ); + chunk_cache.extend(auth_chain.iter()); + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + }; + } + println!( + "chunk missed with len {}, event hits2: {}, misses2: {}", + chunk_cache.len(), + hits2, + misses2 + ); + let chunk_cache = Arc::new(chunk_cache); + services() + .rooms + .auth_chain + .cache_auth_chain(chunk_key, Arc::clone(&chunk_cache))?; + full_auth_chain.extend(chunk_cache.iter()); + } + + println!( + "total: {}, chunk hits: {}, misses: {}", + full_auth_chain.len(), + hits, + misses + ); + + Ok(full_auth_chain + .into_iter() + .filter_map(move |sid| services().rooms.short.get_eventid_from_short(sid).ok())) + } + + #[tracing::instrument(skip(self, event_id))] + fn get_auth_chain_inner(&self, room_id: &RoomId, event_id: &EventId) -> Result> { + let mut todo = vec![Arc::from(event_id)]; + let mut found = HashSet::new(); + + while let Some(event_id) = todo.pop() { + match services().rooms.timeline.get_pdu(&event_id) { + Ok(Some(pdu)) => { + if pdu.room_id != room_id { + return Err(Error::BadRequest(ErrorKind::Forbidden, "Evil event in db")); + } + for auth_event in &pdu.auth_events { + let sauthevent = services() + .rooms + .short + .get_or_create_shorteventid(auth_event)?; + + if !found.contains(&sauthevent) { + found.insert(sauthevent); + todo.push(auth_event.clone()); + } + } + } + Ok(None) => { + warn!("Could not find pdu mentioned in auth events: {}", event_id); + } + Err(e) => { + warn!("Could not load event in auth chain: {} {}", event_id, e); + } + } + } + + Ok(found) + } +} diff --git a/src/service/rooms/directory/data.rs b/src/service/rooms/directory/data.rs new file mode 100644 index 00000000..aca731ce --- /dev/null +++ b/src/service/rooms/directory/data.rs @@ -0,0 +1,16 @@ +use crate::Result; +use ruma::{OwnedRoomId, RoomId}; + +pub trait Data: Send + Sync { + /// Adds the room to the public room directory + fn set_public(&self, room_id: &RoomId) -> Result<()>; + + /// 
Removes the room from the public room directory. + fn set_not_public(&self, room_id: &RoomId) -> Result<()>; + + /// Returns true if the room is in the public room directory. + fn is_public_room(&self, room_id: &RoomId) -> Result; + + /// Returns the unsorted public room directory + fn public_rooms<'a>(&'a self) -> Box> + 'a>; +} diff --git a/src/service/rooms/directory/mod.rs b/src/service/rooms/directory/mod.rs new file mode 100644 index 00000000..10f782bb --- /dev/null +++ b/src/service/rooms/directory/mod.rs @@ -0,0 +1,32 @@ +mod data; + +pub use data::Data; +use ruma::{OwnedRoomId, RoomId}; + +use crate::Result; + +pub struct Service { + pub db: &'static dyn Data, +} + +impl Service { + #[tracing::instrument(skip(self))] + pub fn set_public(&self, room_id: &RoomId) -> Result<()> { + self.db.set_public(room_id) + } + + #[tracing::instrument(skip(self))] + pub fn set_not_public(&self, room_id: &RoomId) -> Result<()> { + self.db.set_not_public(room_id) + } + + #[tracing::instrument(skip(self))] + pub fn is_public_room(&self, room_id: &RoomId) -> Result { + self.db.is_public_room(room_id) + } + + #[tracing::instrument(skip(self))] + pub fn public_rooms(&self) -> impl Iterator> + '_ { + self.db.public_rooms() + } +} diff --git a/src/service/rooms/edus/mod.rs b/src/service/rooms/edus/mod.rs new file mode 100644 index 00000000..cf7a3591 --- /dev/null +++ b/src/service/rooms/edus/mod.rs @@ -0,0 +1,11 @@ +pub mod presence; +pub mod read_receipt; +pub mod typing; + +pub trait Data: presence::Data + read_receipt::Data + typing::Data + 'static {} + +pub struct Service { + pub presence: presence::Service, + pub read_receipt: read_receipt::Service, + pub typing: typing::Service, +} diff --git a/src/service/rooms/edus/presence/data.rs b/src/service/rooms/edus/presence/data.rs new file mode 100644 index 00000000..53329e08 --- /dev/null +++ b/src/service/rooms/edus/presence/data.rs @@ -0,0 +1,38 @@ +use std::collections::HashMap; + +use crate::Result; +use ruma::{events::presence::PresenceEvent, OwnedUserId, RoomId, UserId}; + +pub trait Data: Send + Sync { + /// Adds a presence event which will be saved until a new event replaces it. + /// + /// Note: This method takes a RoomId because presence updates are always bound to rooms to + /// make sure users outside these rooms can't see them. + fn update_presence( + &self, + user_id: &UserId, + room_id: &RoomId, + presence: PresenceEvent, + ) -> Result<()>; + + /// Resets the presence timeout, so the user will stay in their current presence state. + fn ping_presence(&self, user_id: &UserId) -> Result<()>; + + /// Returns the timestamp of the last presence update of this user in millis since the unix epoch. + fn last_presence_update(&self, user_id: &UserId) -> Result>; + + /// Returns the presence event with correct last_active_ago. + fn get_presence_event( + &self, + room_id: &RoomId, + user_id: &UserId, + count: u64, + ) -> Result>; + + /// Returns the most recent presence updates that happened after the event with id `since`. 
+ fn presence_since( + &self, + room_id: &RoomId, + since: u64, + ) -> Result>; +} diff --git a/src/service/rooms/edus/presence/mod.rs b/src/service/rooms/edus/presence/mod.rs new file mode 100644 index 00000000..860aea18 --- /dev/null +++ b/src/service/rooms/edus/presence/mod.rs @@ -0,0 +1,122 @@ +mod data; +use std::collections::HashMap; + +pub use data::Data; +use ruma::{events::presence::PresenceEvent, OwnedUserId, RoomId, UserId}; + +use crate::Result; + +pub struct Service { + pub db: &'static dyn Data, +} + +impl Service { + /// Adds a presence event which will be saved until a new event replaces it. + /// + /// Note: This method takes a RoomId because presence updates are always bound to rooms to + /// make sure users outside these rooms can't see them. + pub fn update_presence( + &self, + user_id: &UserId, + room_id: &RoomId, + presence: PresenceEvent, + ) -> Result<()> { + self.db.update_presence(user_id, room_id, presence) + } + + /// Resets the presence timeout, so the user will stay in their current presence state. + pub fn ping_presence(&self, user_id: &UserId) -> Result<()> { + self.db.ping_presence(user_id) + } + + pub fn get_last_presence_event( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result> { + let last_update = match self.db.last_presence_update(user_id)? { + Some(last) => last, + None => return Ok(None), + }; + + self.db.get_presence_event(room_id, user_id, last_update) + } + + /* TODO + /// Sets all users to offline who have been quiet for too long. + fn _presence_maintain( + &self, + rooms: &super::Rooms, + globals: &super::super::globals::Globals, + ) -> Result<()> { + let current_timestamp = utils::millis_since_unix_epoch(); + + for (user_id_bytes, last_timestamp) in self + .userid_lastpresenceupdate + .iter() + .filter_map(|(k, bytes)| { + Some(( + k, + utils::u64_from_bytes(&bytes) + .map_err(|_| { + Error::bad_database("Invalid timestamp in userid_lastpresenceupdate.") + }) + .ok()?, + )) + }) + .take_while(|(_, timestamp)| current_timestamp.saturating_sub(*timestamp) > 5 * 60_000) + // 5 Minutes + { + // Send new presence events to set the user offline + let count = globals.next_count()?.to_be_bytes(); + let user_id: Box<_> = utils::string_from_bytes(&user_id_bytes) + .map_err(|_| { + Error::bad_database("Invalid UserId bytes in userid_lastpresenceupdate.") + })? + .try_into() + .map_err(|_| Error::bad_database("Invalid UserId in userid_lastpresenceupdate."))?; + for room_id in rooms.rooms_joined(&user_id).filter_map(|r| r.ok()) { + let mut presence_id = room_id.as_bytes().to_vec(); + presence_id.push(0xff); + presence_id.extend_from_slice(&count); + presence_id.push(0xff); + presence_id.extend_from_slice(&user_id_bytes); + + self.presenceid_presence.insert( + &presence_id, + &serde_json::to_vec(&PresenceEvent { + content: PresenceEventContent { + avatar_url: None, + currently_active: None, + displayname: None, + last_active_ago: Some( + last_timestamp.try_into().expect("time is valid"), + ), + presence: PresenceState::Offline, + status_msg: None, + }, + sender: user_id.to_owned(), + }) + .expect("PresenceEvent can be serialized"), + )?; + } + + self.userid_lastpresenceupdate.insert( + user_id.as_bytes(), + &utils::millis_since_unix_epoch().to_be_bytes(), + )?; + } + + Ok(()) + }*/ + + /// Returns the most recent presence updates that happened after the event with id `since`. 
+ #[tracing::instrument(skip(self, since, room_id))] + pub fn presence_since( + &self, + room_id: &RoomId, + since: u64, + ) -> Result> { + self.db.presence_since(room_id, since) + } +} diff --git a/src/service/rooms/edus/read_receipt/data.rs b/src/service/rooms/edus/read_receipt/data.rs new file mode 100644 index 00000000..a183d196 --- /dev/null +++ b/src/service/rooms/edus/read_receipt/data.rs @@ -0,0 +1,36 @@ +use crate::Result; +use ruma::{events::receipt::ReceiptEvent, serde::Raw, OwnedUserId, RoomId, UserId}; + +pub trait Data: Send + Sync { + /// Replaces the previous read receipt. + fn readreceipt_update( + &self, + user_id: &UserId, + room_id: &RoomId, + event: ReceiptEvent, + ) -> Result<()>; + + /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. + fn readreceipts_since<'a>( + &'a self, + room_id: &RoomId, + since: u64, + ) -> Box< + dyn Iterator< + Item = Result<( + OwnedUserId, + u64, + Raw, + )>, + > + 'a, + >; + + /// Sets a private read marker at `count`. + fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()>; + + /// Returns the private read marker. + fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result>; + + /// Returns the count of the last typing update in this room. + fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result; +} diff --git a/src/service/rooms/edus/read_receipt/mod.rs b/src/service/rooms/edus/read_receipt/mod.rs new file mode 100644 index 00000000..c6035280 --- /dev/null +++ b/src/service/rooms/edus/read_receipt/mod.rs @@ -0,0 +1,55 @@ +mod data; + +pub use data::Data; + +use crate::Result; +use ruma::{events::receipt::ReceiptEvent, serde::Raw, OwnedUserId, RoomId, UserId}; + +pub struct Service { + pub db: &'static dyn Data, +} + +impl Service { + /// Replaces the previous read receipt. + pub fn readreceipt_update( + &self, + user_id: &UserId, + room_id: &RoomId, + event: ReceiptEvent, + ) -> Result<()> { + self.db.readreceipt_update(user_id, room_id, event) + } + + /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. + #[tracing::instrument(skip(self))] + pub fn readreceipts_since<'a>( + &'a self, + room_id: &RoomId, + since: u64, + ) -> impl Iterator< + Item = Result<( + OwnedUserId, + u64, + Raw, + )>, + > + 'a { + self.db.readreceipts_since(room_id, since) + } + + /// Sets a private read marker at `count`. + #[tracing::instrument(skip(self))] + pub fn private_read_set(&self, room_id: &RoomId, user_id: &UserId, count: u64) -> Result<()> { + self.db.private_read_set(room_id, user_id, count) + } + + /// Returns the private read marker. + #[tracing::instrument(skip(self))] + pub fn private_read_get(&self, room_id: &RoomId, user_id: &UserId) -> Result> { + self.db.private_read_get(room_id, user_id) + } + + /// Returns the count of the last typing update in this room. + pub fn last_privateread_update(&self, user_id: &UserId, room_id: &RoomId) -> Result { + self.db.last_privateread_update(user_id, room_id) + } +} diff --git a/src/service/rooms/edus/typing/data.rs b/src/service/rooms/edus/typing/data.rs new file mode 100644 index 00000000..c4ad8673 --- /dev/null +++ b/src/service/rooms/edus/typing/data.rs @@ -0,0 +1,18 @@ +use crate::Result; +use ruma::{OwnedUserId, RoomId, UserId}; +use std::collections::HashSet; + +pub trait Data: Send + Sync { + /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is + /// called. 
+ fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()>; + + /// Removes a user from typing before the timeout is reached. + fn typing_remove(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; + + /// Returns the count of the last typing update in this room. + fn last_typing_update(&self, room_id: &RoomId) -> Result; + + /// Returns all user ids currently typing. + fn typings_all(&self, room_id: &RoomId) -> Result>; +} diff --git a/src/service/rooms/edus/typing/mod.rs b/src/service/rooms/edus/typing/mod.rs new file mode 100644 index 00000000..d05ec900 --- /dev/null +++ b/src/service/rooms/edus/typing/mod.rs @@ -0,0 +1,88 @@ +mod data; + +pub use data::Data; +use ruma::{events::SyncEphemeralRoomEvent, RoomId, UserId}; + +use crate::Result; + +pub struct Service { + pub db: &'static dyn Data, +} + +impl Service { + /// Sets a user as typing until the timeout timestamp is reached or roomtyping_remove is + /// called. + pub fn typing_add(&self, user_id: &UserId, room_id: &RoomId, timeout: u64) -> Result<()> { + self.db.typing_add(user_id, room_id, timeout) + } + + /// Removes a user from typing before the timeout is reached. + pub fn typing_remove(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { + self.db.typing_remove(user_id, room_id) + } + + /* TODO: Do this in background thread? + /// Makes sure that typing events with old timestamps get removed. + fn typings_maintain( + &self, + room_id: &RoomId, + globals: &super::super::globals::Globals, + ) -> Result<()> { + let mut prefix = room_id.as_bytes().to_vec(); + prefix.push(0xff); + + let current_timestamp = utils::millis_since_unix_epoch(); + + let mut found_outdated = false; + + // Find all outdated edus before inserting a new one + for outdated_edu in self + .typingid_userid + .scan_prefix(prefix) + .map(|(key, _)| { + Ok::<_, Error>(( + key.clone(), + utils::u64_from_bytes( + &key.splitn(2, |&b| b == 0xff).nth(1).ok_or_else(|| { + Error::bad_database("RoomTyping has invalid timestamp or delimiters.") + })?[0..mem::size_of::()], + ) + .map_err(|_| Error::bad_database("RoomTyping has invalid timestamp bytes."))?, + )) + }) + .filter_map(|r| r.ok()) + .take_while(|&(_, timestamp)| timestamp < current_timestamp) + { + // This is an outdated edu (time > timestamp) + self.typingid_userid.remove(&outdated_edu.0)?; + found_outdated = true; + } + + if found_outdated { + self.roomid_lasttypingupdate + .insert(room_id.as_bytes(), &globals.next_count()?.to_be_bytes())?; + } + + Ok(()) + } + */ + + /// Returns the count of the last typing update in this room. + pub fn last_typing_update(&self, room_id: &RoomId) -> Result { + self.db.last_typing_update(room_id) + } + + /// Returns a new typing EDU. + pub fn typings_all( + &self, + room_id: &RoomId, + ) -> Result> { + let user_ids = self.db.typings_all(room_id)?; + + Ok(SyncEphemeralRoomEvent { + content: ruma::events::typing::TypingEventContent { + user_ids: user_ids.into_iter().collect(), + }, + }) + } +} diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs new file mode 100644 index 00000000..477a9719 --- /dev/null +++ b/src/service/rooms/event_handler/mod.rs @@ -0,0 +1,1715 @@ +/// An async function that can recursively call itself. 
+type AsyncRecursiveType<'a, T> = Pin + 'a + Send>>; + +use ruma::{ + api::federation::discovery::{get_remote_server_keys, get_server_keys}, + CanonicalJsonObject, CanonicalJsonValue, OwnedServerName, OwnedServerSigningKeyId, + RoomVersionId, +}; +use std::{ + collections::{btree_map, hash_map, BTreeMap, HashMap, HashSet}, + pin::Pin, + sync::{Arc, RwLock, RwLockWriteGuard}, + time::{Duration, Instant, SystemTime}, +}; +use tokio::sync::Semaphore; + +use futures_util::{stream::FuturesUnordered, Future, StreamExt}; +use ruma::{ + api::{ + client::error::ErrorKind, + federation::{ + discovery::get_remote_server_keys_batch::{self, v2::QueryCriteria}, + event::{get_event, get_room_state_ids}, + membership::create_join_event, + }, + }, + events::{ + room::{create::RoomCreateEventContent, server_acl::RoomServerAclEventContent}, + StateEventType, + }, + int, + serde::Base64, + state_res::{self, RoomVersion, StateMap}, + uint, EventId, MilliSecondsSinceUnixEpoch, RoomId, ServerName, +}; +use serde_json::value::RawValue as RawJsonValue; +use tracing::{debug, error, info, trace, warn}; + +use crate::{service::*, services, Error, PduEvent, Result}; + +pub struct Service; + +impl Service { + /// When receiving an event one needs to: + /// 0. Check the server is in the room + /// 1. Skip the PDU if we already know about it + /// 1.1. Remove unsigned field + /// 2. Check signatures, otherwise drop + /// 3. Check content hash, redact if doesn't match + /// 4. Fetch any missing auth events doing all checks listed here starting at 1. These are not + /// timeline events + /// 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are + /// also rejected "due to auth events" + /// 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events + /// 7. Persist this event as an outlier + /// 8. If not timeline event: stop + /// 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline + /// events + /// 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities + /// doing all the checks in this list starting at 1. These are not timeline events + /// 11. Check the auth of the event passes based on the state of the event + /// 12. Ensure that the state is derived from the previous current state (i.e. we calculated by + /// doing state res where one of the inputs was a previously trusted set of state, don't just + /// trust a set of state we got from a remote) + /// 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" + /// it + /// 14. Use state resolution to find new room state + // We use some AsyncRecursiveType hacks here so we can call this async funtion recursively + #[tracing::instrument(skip(self, value, is_timeline_event, pub_key_map))] + pub(crate) async fn handle_incoming_pdu<'a>( + &self, + origin: &'a ServerName, + event_id: &'a EventId, + room_id: &'a RoomId, + value: BTreeMap, + is_timeline_event: bool, + pub_key_map: &'a RwLock>>, + ) -> Result>> { + if !services().rooms.metadata.exists(room_id)? { + return Err(Error::BadRequest( + ErrorKind::NotFound, + "Room is unknown to this server", + )); + } + + if services().rooms.metadata.is_disabled(room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Federation of this room is currently disabled on this server.", + )); + } + + // 1. Skip the PDU if we already have it as a timeline event + if let Some(pdu_id) = services().rooms.timeline.get_pdu_id(event_id)? 
{ + return Ok(Some(pdu_id.to_vec())); + } + + let create_event = services() + .rooms + .state_accessor + .room_state_get(room_id, &StateEventType::RoomCreate, "")? + .ok_or_else(|| Error::bad_database("Failed to find create event in db."))?; + + let first_pdu_in_room = services() + .rooms + .timeline + .first_pdu_in_room(room_id)? + .ok_or_else(|| Error::bad_database("Failed to find first pdu in db."))?; + + let (incoming_pdu, val) = self + .handle_outlier_pdu(origin, &create_event, event_id, room_id, value, pub_key_map) + .await?; + + // 8. if not timeline event: stop + if !is_timeline_event { + return Ok(None); + } + + // Skip old events + if incoming_pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { + return Ok(None); + } + + // 9. Fetch any missing prev events doing all checks listed here starting at 1. These are timeline events + let (sorted_prev_events, mut eventid_info) = self + .fetch_unknown_prev_events( + origin, + &create_event, + room_id, + pub_key_map, + incoming_pdu.prev_events.clone(), + ) + .await?; + + let mut errors = 0; + for prev_id in dbg!(sorted_prev_events) { + // Check for disabled again because it might have changed + if services().rooms.metadata.is_disabled(room_id)? { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Federation of this room is currently disabled on this server.", + )); + } + + if let Some((time, tries)) = services() + .globals + .bad_event_ratelimiter + .read() + .unwrap() + .get(&*prev_id) + { + // Exponential backoff + let mut min_elapsed_duration = Duration::from_secs(5 * 60) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + info!("Backing off from {}", prev_id); + continue; + } + } + + if errors >= 5 { + break; + } + + if let Some((pdu, json)) = eventid_info.remove(&*prev_id) { + // Skip old events + if pdu.origin_server_ts < first_pdu_in_room.origin_server_ts { + continue; + } + + let start_time = Instant::now(); + services() + .globals + .roomid_federationhandletime + .write() + .unwrap() + .insert(room_id.to_owned(), ((*prev_id).to_owned(), start_time)); + + if let Err(e) = self + .upgrade_outlier_to_timeline_pdu( + pdu, + json, + &create_event, + origin, + room_id, + pub_key_map, + ) + .await + { + errors += 1; + warn!("Prev event {} failed: {}", prev_id, e); + match services() + .globals + .bad_event_ratelimiter + .write() + .unwrap() + .entry((*prev_id).to_owned()) + { + hash_map::Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + } + hash_map::Entry::Occupied(mut e) => { + *e.get_mut() = (Instant::now(), e.get().1 + 1) + } + } + } + let elapsed = start_time.elapsed(); + services() + .globals + .roomid_federationhandletime + .write() + .unwrap() + .remove(&room_id.to_owned()); + warn!( + "Handling prev event {} took {}m{}s", + prev_id, + elapsed.as_secs() / 60, + elapsed.as_secs() % 60 + ); + } + } + + // Done with prev events, now handling the incoming event + + let start_time = Instant::now(); + services() + .globals + .roomid_federationhandletime + .write() + .unwrap() + .insert(room_id.to_owned(), (event_id.to_owned(), start_time)); + let r = services() + .rooms + .event_handler + .upgrade_outlier_to_timeline_pdu( + incoming_pdu, + val, + &create_event, + origin, + room_id, + pub_key_map, + ) + .await; + services() + .globals + .roomid_federationhandletime + .write() + .unwrap() + .remove(&room_id.to_owned()); + + r + } + + #[tracing::instrument(skip(self, 
create_event, value, pub_key_map))] + fn handle_outlier_pdu<'a>( + &'a self, + origin: &'a ServerName, + create_event: &'a PduEvent, + event_id: &'a EventId, + room_id: &'a RoomId, + mut value: BTreeMap, + pub_key_map: &'a RwLock>>, + ) -> AsyncRecursiveType<'a, Result<(Arc, BTreeMap)>> { + Box::pin(async move { + // 1.1. Remove unsigned field + value.remove("unsigned"); + + // TODO: For RoomVersion6 we must check that Raw<..> is canonical do we anywhere?: https://matrix.org/docs/spec/rooms/v6#canonical-json + + // We go through all the signatures we see on the value and fetch the corresponding signing + // keys + self.fetch_required_signing_keys(&value, pub_key_map) + .await?; + + // 2. Check signatures, otherwise drop + // 3. check content hash, redact if doesn't match + let create_event_content: RoomCreateEventContent = + serde_json::from_str(create_event.content.get()).map_err(|e| { + error!("Invalid create event: {}", e); + Error::BadDatabase("Invalid create event in db") + })?; + + let room_version_id = &create_event_content.room_version; + let room_version = + RoomVersion::new(room_version_id).expect("room version is supported"); + + let mut val = match ruma::signatures::verify_event( + &pub_key_map.read().expect("RwLock is poisoned."), + &value, + room_version_id, + ) { + Err(e) => { + // Drop + warn!("Dropping bad event {}: {}", event_id, e); + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Signature verification failed", + )); + } + Ok(ruma::signatures::Verified::Signatures) => { + // Redact + warn!("Calculated hash does not match: {}", event_id); + match ruma::canonical_json::redact(&value, room_version_id) { + Ok(obj) => obj, + Err(_) => { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Redaction failed", + )) + } + } + } + Ok(ruma::signatures::Verified::All) => value, + }; + + // Now that we have checked the signature and hashes we can add the eventID and convert + // to our PduEvent type + val.insert( + "event_id".to_owned(), + CanonicalJsonValue::String(event_id.as_str().to_owned()), + ); + let incoming_pdu = serde_json::from_value::( + serde_json::to_value(&val).expect("CanonicalJsonObj is a valid JsonValue"), + ) + .map_err(|_| Error::bad_database("Event is not a valid PDU."))?; + + // 4. fetch any missing auth events doing all checks listed here starting at 1. These are not timeline events + // 5. Reject "due to auth events" if can't get all the auth events or some of the auth events are also rejected "due to auth events" + // NOTE: Step 5 is not applied anymore because it failed too often + warn!("Fetching auth events for {}", incoming_pdu.event_id); + self.fetch_and_handle_outliers( + origin, + &incoming_pdu + .auth_events + .iter() + .map(|x| Arc::from(&**x)) + .collect::>(), + create_event, + room_id, + pub_key_map, + ) + .await; + + // 6. Reject "due to auth events" if the event doesn't pass auth based on the auth events + info!( + "Auth check for {} based on auth events", + incoming_pdu.event_id + ); + + // Build map of auth events + let mut auth_events = HashMap::new(); + for id in &incoming_pdu.auth_events { + let auth_event = match services().rooms.timeline.get_pdu(id)? 
{ + Some(e) => e, + None => { + warn!("Could not find auth event {}", id); + continue; + } + }; + + match auth_events.entry(( + auth_event.kind.to_string().into(), + auth_event + .state_key + .clone() + .expect("all auth events have state keys"), + )) { + hash_map::Entry::Vacant(v) => { + v.insert(auth_event); + } + hash_map::Entry::Occupied(_) => { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Auth event's type and state_key combination exists multiple times.", + )); + } + } + } + + // The original create event must be in the auth events + if auth_events + .get(&(StateEventType::RoomCreate, "".to_owned())) + .map(|a| a.as_ref()) + != Some(create_event) + { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Incoming event refers to wrong create event.", + )); + } + + if !state_res::event_auth::auth_check( + &room_version, + &incoming_pdu, + None::, // TODO: third party invite + |k, s| auth_events.get(&(k.to_string().into(), s.to_owned())), + ) + .map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed"))? + { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Auth check failed", + )); + } + + info!("Validation successful."); + + // 7. Persist the event as an outlier. + services() + .rooms + .outlier + .add_pdu_outlier(&incoming_pdu.event_id, &val)?; + + info!("Added pdu as outlier."); + + Ok((Arc::new(incoming_pdu), val)) + }) + } + + #[tracing::instrument(skip(self, incoming_pdu, val, create_event, pub_key_map))] + pub async fn upgrade_outlier_to_timeline_pdu( + &self, + incoming_pdu: Arc, + val: BTreeMap, + create_event: &PduEvent, + origin: &ServerName, + room_id: &RoomId, + pub_key_map: &RwLock>>, + ) -> Result>> { + // Skip the PDU if we already have it as a timeline event + if let Ok(Some(pduid)) = services().rooms.timeline.get_pdu_id(&incoming_pdu.event_id) { + return Ok(Some(pduid)); + } + + if services() + .rooms + .pdu_metadata + .is_event_soft_failed(&incoming_pdu.event_id)? + { + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Event has been soft failed", + )); + } + + info!("Upgrading {} to timeline pdu", incoming_pdu.event_id); + + let create_event_content: RoomCreateEventContent = + serde_json::from_str(create_event.content.get()).map_err(|e| { + warn!("Invalid create event: {}", e); + Error::BadDatabase("Invalid create event in db") + })?; + + let room_version_id = &create_event_content.room_version; + let room_version = RoomVersion::new(room_version_id).expect("room version is supported"); + + // 10. Fetch missing state and auth chain events by calling /state_ids at backwards extremities + // doing all the checks in this list starting at 1. These are not timeline events. 
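As a rough illustration (not part of this changeset): the code that follows chooses one of three sources for the room state at the incoming event. The sketch below uses an invented `StateSource` enum and `pick_state_source` helper, neither of which exists in the codebase; the real logic additionally falls back to `/state_ids` whenever a lookup or state resolution fails.

// Illustrative sketch only; not part of this diff.
enum StateSource {
    CachedAfterSinglePrev, // exactly one prev event and we stored its state snapshot
    ResolvedOverPrevs,     // several prev events we know: state-res over their snapshots
    FetchedViaStateIds,    // otherwise ask the origin server via /state_ids
}

fn pick_state_source(total_prevs: usize, known_prevs: usize) -> StateSource {
    match (total_prevs, known_prevs == total_prevs) {
        (1, true) => StateSource::CachedAfterSinglePrev,
        (n, true) if n > 1 => StateSource::ResolvedOverPrevs,
        _ => StateSource::FetchedViaStateIds,
    }
}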
+ + // TODO: if we know the prev_events of the incoming event we can avoid the request and build + // the state from a known point and resolve if > 1 prev_event + + info!("Requesting state at event"); + let mut state_at_incoming_event = None; + + if incoming_pdu.prev_events.len() == 1 { + let prev_event = &*incoming_pdu.prev_events[0]; + let prev_event_sstatehash = services() + .rooms + .state_accessor + .pdu_shortstatehash(prev_event)?; + + let state = if let Some(shortstatehash) = prev_event_sstatehash { + Some( + services() + .rooms + .state_accessor + .state_full_ids(shortstatehash) + .await, + ) + } else { + None + }; + + if let Some(Ok(mut state)) = state { + info!("Using cached state"); + let prev_pdu = services() + .rooms + .timeline + .get_pdu(prev_event) + .ok() + .flatten() + .ok_or_else(|| { + Error::bad_database("Could not find prev event, but we know the state.") + })?; + + if let Some(state_key) = &prev_pdu.state_key { + let shortstatekey = services().rooms.short.get_or_create_shortstatekey( + &prev_pdu.kind.to_string().into(), + state_key, + )?; + + state.insert(shortstatekey, Arc::from(prev_event)); + // Now it's the state after the pdu + } + + state_at_incoming_event = Some(state); + } + } else { + info!("Calculating state at event using state res"); + let mut extremity_sstatehashes = HashMap::new(); + + let mut okay = true; + for prev_eventid in &incoming_pdu.prev_events { + let prev_event = + if let Ok(Some(pdu)) = services().rooms.timeline.get_pdu(prev_eventid) { + pdu + } else { + okay = false; + break; + }; + + let sstatehash = if let Ok(Some(s)) = services() + .rooms + .state_accessor + .pdu_shortstatehash(prev_eventid) + { + s + } else { + okay = false; + break; + }; + + extremity_sstatehashes.insert(sstatehash, prev_event); + } + + if okay { + let mut fork_states = Vec::with_capacity(extremity_sstatehashes.len()); + let mut auth_chain_sets = Vec::with_capacity(extremity_sstatehashes.len()); + + for (sstatehash, prev_event) in extremity_sstatehashes { + let mut leaf_state: BTreeMap<_, _> = services() + .rooms + .state_accessor + .state_full_ids(sstatehash) + .await?; + + if let Some(state_key) = &prev_event.state_key { + let shortstatekey = services().rooms.short.get_or_create_shortstatekey( + &prev_event.kind.to_string().into(), + state_key, + )?; + leaf_state.insert(shortstatekey, Arc::from(&*prev_event.event_id)); + // Now it's the state after the pdu + } + + let mut state = StateMap::with_capacity(leaf_state.len()); + let mut starting_events = Vec::with_capacity(leaf_state.len()); + + for (k, id) in leaf_state { + if let Ok((ty, st_key)) = services().rooms.short.get_statekey_from_short(k) + { + // FIXME: Undo .to_string().into() when StateMap + // is updated to use StateEventType + state.insert((ty.to_string().into(), st_key), id.clone()); + } else { + warn!("Failed to get_statekey_from_short."); + } + starting_events.push(id); + } + + auth_chain_sets.push( + services() + .rooms + .auth_chain + .get_auth_chain(room_id, starting_events) + .await? 
+ .collect(), + ); + + fork_states.push(state); + } + + let lock = services().globals.stateres_mutex.lock(); + + let result = + state_res::resolve(room_version_id, &fork_states, auth_chain_sets, |id| { + let res = services().rooms.timeline.get_pdu(id); + if let Err(e) = &res { + error!("LOOK AT ME Failed to fetch event: {}", e); + } + res.ok().flatten() + }); + drop(lock); + + state_at_incoming_event = match result { + Ok(new_state) => Some( + new_state + .into_iter() + .map(|((event_type, state_key), event_id)| { + let shortstatekey = + services().rooms.short.get_or_create_shortstatekey( + &event_type.to_string().into(), + &state_key, + )?; + Ok((shortstatekey, event_id)) + }) + .collect::>()?, + ), + Err(e) => { + warn!("State resolution on prev events failed, either an event could not be found or deserialization: {}", e); + None + } + } + } + } + + if state_at_incoming_event.is_none() { + info!("Calling /state_ids"); + // Call /state_ids to find out what the state at this pdu is. We trust the server's + // response to some extend, but we still do a lot of checks on the events + match services() + .sending + .send_federation_request( + origin, + get_room_state_ids::v1::Request { + room_id, + event_id: &incoming_pdu.event_id, + }, + ) + .await + { + Ok(res) => { + info!("Fetching state events at event."); + let state_vec = self + .fetch_and_handle_outliers( + origin, + &res.pdu_ids + .iter() + .map(|x| Arc::from(&**x)) + .collect::>(), + create_event, + room_id, + pub_key_map, + ) + .await; + + let mut state: BTreeMap<_, Arc> = BTreeMap::new(); + for (pdu, _) in state_vec { + let state_key = pdu.state_key.clone().ok_or_else(|| { + Error::bad_database("Found non-state pdu in state events.") + })?; + + let shortstatekey = services().rooms.short.get_or_create_shortstatekey( + &pdu.kind.to_string().into(), + &state_key, + )?; + + match state.entry(shortstatekey) { + btree_map::Entry::Vacant(v) => { + v.insert(Arc::from(&*pdu.event_id)); + } + btree_map::Entry::Occupied(_) => return Err( + Error::bad_database("State event's type and state_key combination exists multiple times."), + ), + } + } + + // The original create event must still be in the state + let create_shortstatekey = services() + .rooms + .short + .get_shortstatekey(&StateEventType::RoomCreate, "")? + .expect("Room exists"); + + if state.get(&create_shortstatekey).map(|id| id.as_ref()) + != Some(&create_event.event_id) + { + return Err(Error::bad_database( + "Incoming event refers to wrong create event.", + )); + } + + state_at_incoming_event = Some(state); + } + Err(e) => { + warn!("Fetching state for event failed: {}", e); + return Err(e); + } + }; + } + + let state_at_incoming_event = + state_at_incoming_event.expect("we always set this to some above"); + + info!("Starting auth check"); + // 11. 
Check the auth of the event passes based on the state of the event + let check_result = state_res::event_auth::auth_check( + &room_version, + &incoming_pdu, + None::, // TODO: third party invite + |k, s| { + services() + .rooms + .short + .get_shortstatekey(&k.to_string().into(), s) + .ok() + .flatten() + .and_then(|shortstatekey| state_at_incoming_event.get(&shortstatekey)) + .and_then(|event_id| services().rooms.timeline.get_pdu(event_id).ok().flatten()) + }, + ) + .map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed."))?; + + if !check_result { + return Err(Error::bad_database( + "Event has failed auth check with state at the event.", + )); + } + info!("Auth check succeeded"); + + // We start looking at current room state now, so lets lock the room + + let mutex_state = Arc::clone( + services() + .globals + .roomid_mutex_state + .write() + .unwrap() + .entry(room_id.to_owned()) + .or_default(), + ); + let state_lock = mutex_state.lock().await; + + // Now we calculate the set of extremities this room has after the incoming event has been + // applied. We start with the previous extremities (aka leaves) + info!("Calculating extremities"); + let mut extremities = services().rooms.state.get_forward_extremities(room_id)?; + + // Remove any forward extremities that are referenced by this incoming event's prev_events + for prev_event in &incoming_pdu.prev_events { + if extremities.contains(prev_event) { + extremities.remove(prev_event); + } + } + + // Only keep those extremities were not referenced yet + extremities.retain(|id| { + !matches!( + services() + .rooms + .pdu_metadata + .is_event_referenced(room_id, id), + Ok(true) + ) + }); + + info!("Compressing state at event"); + let state_ids_compressed = state_at_incoming_event + .iter() + .map(|(shortstatekey, id)| { + services() + .rooms + .state_compressor + .compress_state_event(*shortstatekey, id) + }) + .collect::>()?; + + // 13. Check if the event passes auth based on the "current state" of the room, if not "soft fail" it + info!("Starting soft fail auth check"); + + let auth_events = services().rooms.state.get_auth_events( + room_id, + &incoming_pdu.kind, + &incoming_pdu.sender, + incoming_pdu.state_key.as_deref(), + &incoming_pdu.content, + )?; + + let soft_fail = !state_res::event_auth::auth_check( + &room_version, + &incoming_pdu, + None::, + |k, s| auth_events.get(&(k.clone(), s.to_owned())), + ) + .map_err(|_e| Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed."))?; + + if soft_fail { + services().rooms.timeline.append_incoming_pdu( + &incoming_pdu, + val, + extremities.iter().map(|e| (**e).to_owned()).collect(), + state_ids_compressed, + soft_fail, + &state_lock, + )?; + + // Soft fail, we keep the event as an outlier but don't add it to the timeline + warn!("Event was soft failed: {:?}", incoming_pdu); + services() + .rooms + .pdu_metadata + .mark_event_soft_failed(&incoming_pdu.event_id)?; + return Err(Error::BadRequest( + ErrorKind::InvalidParam, + "Event has been soft failed", + )); + } + + if incoming_pdu.state_key.is_some() { + info!("Loading current room state ids"); + let current_sstatehash = services() + .rooms + .state + .get_room_shortstatehash(room_id)? 
+ .expect("every room has state"); + + let current_state_ids = services() + .rooms + .state_accessor + .state_full_ids(current_sstatehash) + .await?; + + info!("Preparing for stateres to derive new room state"); + let mut extremity_sstatehashes = HashMap::new(); + + info!("Loading extremities"); + for id in dbg!(&extremities) { + match services().rooms.timeline.get_pdu(id)? { + Some(leaf_pdu) => { + extremity_sstatehashes.insert( + services() + .rooms + .state_accessor + .pdu_shortstatehash(&leaf_pdu.event_id)? + .ok_or_else(|| { + error!( + "Found extremity pdu with no statehash in db: {:?}", + leaf_pdu + ); + Error::bad_database("Found pdu with no statehash in db.") + })?, + leaf_pdu, + ); + } + _ => { + error!("Missing state snapshot for {:?}", id); + return Err(Error::BadDatabase("Missing state snapshot.")); + } + } + } + + let mut fork_states = Vec::new(); + + // 12. Ensure that the state is derived from the previous current state (i.e. we calculated + // by doing state res where one of the inputs was a previously trusted set of state, + // don't just trust a set of state we got from a remote). + + // We do this by adding the current state to the list of fork states + extremity_sstatehashes.remove(¤t_sstatehash); + fork_states.push(current_state_ids); + + // We also add state after incoming event to the fork states + let mut state_after = state_at_incoming_event.clone(); + if let Some(state_key) = &incoming_pdu.state_key { + let shortstatekey = services().rooms.short.get_or_create_shortstatekey( + &incoming_pdu.kind.to_string().into(), + state_key, + )?; + + state_after.insert(shortstatekey, Arc::from(&*incoming_pdu.event_id)); + } + fork_states.push(state_after); + + let mut update_state = false; + // 14. Use state resolution to find new room state + let new_room_state = if fork_states.is_empty() { + panic!("State is empty"); + } else if fork_states.iter().skip(1).all(|f| &fork_states[0] == f) { + info!("State resolution trivial"); + // There was only one state, so it has to be the room's current state (because that is + // always included) + fork_states[0] + .iter() + .map(|(k, id)| { + services() + .rooms + .state_compressor + .compress_state_event(*k, id) + }) + .collect::>()? + } else { + info!("Loading auth chains"); + // We do need to force an update to this room's state + update_state = true; + + let mut auth_chain_sets = Vec::new(); + for state in &fork_states { + auth_chain_sets.push( + services() + .rooms + .auth_chain + .get_auth_chain( + room_id, + state.iter().map(|(_, id)| id.clone()).collect(), + ) + .await? + .collect(), + ); + } + + info!("Loading fork states"); + + let fork_states: Vec<_> = fork_states + .into_iter() + .map(|map| { + map.into_iter() + .filter_map(|(k, id)| { + services() + .rooms + .short + .get_statekey_from_short(k) + .map(|(ty, st_key)| ((ty.to_string().into(), st_key), id)) + .ok() + }) + .collect::>() + }) + .collect(); + + info!("Resolving state"); + + let lock = services().globals.stateres_mutex.lock(); + let state = match state_res::resolve( + room_version_id, + &fork_states, + auth_chain_sets, + |id| { + let res = services().rooms.timeline.get_pdu(id); + if let Err(e) = &res { + error!("LOOK AT ME Failed to fetch event: {}", e); + } + res.ok().flatten() + }, + ) { + Ok(new_state) => new_state, + Err(_) => { + return Err(Error::bad_database("State resolution failed, either an event could not be found or deserialization")); + } + }; + + drop(lock); + + info!("State resolution done. 
Compressing state"); + + state + .into_iter() + .map(|((event_type, state_key), event_id)| { + let shortstatekey = services().rooms.short.get_or_create_shortstatekey( + &event_type.to_string().into(), + &state_key, + )?; + services() + .rooms + .state_compressor + .compress_state_event(shortstatekey, &event_id) + }) + .collect::>()? + }; + + // Set the new room state to the resolved state + if update_state { + info!("Forcing new room state"); + let (sstatehash, new, removed) = services() + .rooms + .state_compressor + .save_state(room_id, new_room_state)?; + services() + .rooms + .state + .force_state(room_id, sstatehash, new, removed, &state_lock) + .await?; + } + } + + info!("Appending pdu to timeline"); + extremities.insert(incoming_pdu.event_id.clone()); + + // Now that the event has passed all auth it is added into the timeline. + // We use the `state_at_event` instead of `state_after` so we accurately + // represent the state for this event. + + let pdu_id = services().rooms.timeline.append_incoming_pdu( + &incoming_pdu, + val, + extremities.iter().map(|e| (**e).to_owned()).collect(), + state_ids_compressed, + soft_fail, + &state_lock, + )?; + + info!("Appended incoming pdu"); + + // Event has passed all auth/stateres checks + drop(state_lock); + Ok(pdu_id) + } + + /// Find the event and auth it. Once the event is validated (steps 1 - 8) + /// it is appended to the outliers Tree. + /// + /// Returns pdu and if we fetched it over federation the raw json. + /// + /// a. Look in the main timeline (pduid_pdu tree) + /// b. Look at outlier pdu tree + /// c. Ask origin server over federation + /// d. TODO: Ask other servers over federation? + #[tracing::instrument(skip_all)] + pub(crate) fn fetch_and_handle_outliers<'a>( + &'a self, + origin: &'a ServerName, + events: &'a [Arc], + create_event: &'a PduEvent, + room_id: &'a RoomId, + pub_key_map: &'a RwLock>>, + ) -> AsyncRecursiveType<'a, Vec<(Arc, Option>)>> + { + Box::pin(async move { + let back_off = |id| match services() + .globals + .bad_event_ratelimiter + .write() + .unwrap() + .entry(id) + { + hash_map::Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + } + hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), + }; + + let mut pdus = vec![]; + for id in events { + if let Some((time, tries)) = services() + .globals + .bad_event_ratelimiter + .read() + .unwrap() + .get(&**id) + { + // Exponential backoff + let mut min_elapsed_duration = + Duration::from_secs(5 * 60) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + info!("Backing off from {}", id); + continue; + } + } + + // a. Look in the main timeline (pduid_pdu tree) + // b. Look at outlier pdu tree + // (get_pdu_json checks both) + if let Ok(Some(local_pdu)) = services().rooms.timeline.get_pdu(id) { + trace!("Found {} in db", id); + pdus.push((local_pdu, None)); + continue; + } + + // c. Ask origin server over federation + // We also handle its auth chain here so we don't get a stack overflow in + // handle_outlier_pdu. 
+ let mut todo_auth_events = vec![Arc::clone(id)]; + let mut events_in_reverse_order = Vec::new(); + let mut events_all = HashSet::new(); + let mut i = 0; + while let Some(next_id) = todo_auth_events.pop() { + if events_all.contains(&next_id) { + continue; + } + + i += 1; + if i % 100 == 0 { + tokio::task::yield_now().await; + } + + if let Ok(Some(_)) = services().rooms.timeline.get_pdu(&next_id) { + trace!("Found {} in db", id); + continue; + } + + info!("Fetching {} over federation.", next_id); + match services() + .sending + .send_federation_request( + origin, + get_event::v1::Request { event_id: &next_id }, + ) + .await + { + Ok(res) => { + info!("Got {} over federation", next_id); + let (calculated_event_id, value) = + match pdu::gen_event_id_canonical_json(&res.pdu) { + Ok(t) => t, + Err(_) => { + back_off((*next_id).to_owned()); + continue; + } + }; + + if calculated_event_id != *next_id { + warn!("Server didn't return event id we requested: requested: {}, we got {}. Event: {:?}", + next_id, calculated_event_id, &res.pdu); + } + + if let Some(auth_events) = + value.get("auth_events").and_then(|c| c.as_array()) + { + for auth_event in auth_events { + if let Ok(auth_event) = + serde_json::from_value(auth_event.clone().into()) + { + let a: Arc = auth_event; + todo_auth_events.push(a); + } else { + warn!("Auth event id is not valid"); + } + } + } else { + warn!("Auth event list invalid"); + } + + events_in_reverse_order.push((next_id.clone(), value)); + events_all.insert(next_id); + } + Err(_) => { + warn!("Failed to fetch event: {}", next_id); + back_off((*next_id).to_owned()); + } + } + } + + for (next_id, value) in events_in_reverse_order.iter().rev() { + match self + .handle_outlier_pdu( + origin, + create_event, + next_id, + room_id, + value.clone(), + pub_key_map, + ) + .await + { + Ok((pdu, json)) => { + if next_id == id { + pdus.push((pdu, Some(json))); + } + } + Err(e) => { + warn!("Authentication of event {} failed: {:?}", next_id, e); + back_off((**next_id).to_owned()); + } + } + } + } + pdus + }) + } + + async fn fetch_unknown_prev_events( + &self, + origin: &ServerName, + create_event: &PduEvent, + room_id: &RoomId, + pub_key_map: &RwLock>>, + initial_set: Vec>, + ) -> Result<( + Vec>, + HashMap, (Arc, BTreeMap)>, + )> { + let mut graph: HashMap, _> = HashMap::new(); + let mut eventid_info = HashMap::new(); + let mut todo_outlier_stack: Vec> = initial_set; + + let first_pdu_in_room = services() + .rooms + .timeline + .first_pdu_in_room(room_id)? 
+ .ok_or_else(|| Error::bad_database("Failed to find first pdu in db."))?; + + let mut amount = 0; + + while let Some(prev_event_id) = todo_outlier_stack.pop() { + if let Some((pdu, json_opt)) = self + .fetch_and_handle_outliers( + origin, + &[prev_event_id.clone()], + create_event, + room_id, + pub_key_map, + ) + .await + .pop() + { + if amount > 100 { + // Max limit reached + warn!("Max prev event limit reached!"); + graph.insert(prev_event_id.clone(), HashSet::new()); + continue; + } + + if let Some(json) = json_opt.or_else(|| { + services() + .rooms + .outlier + .get_outlier_pdu_json(&prev_event_id) + .ok() + .flatten() + }) { + if pdu.origin_server_ts > first_pdu_in_room.origin_server_ts { + amount += 1; + for prev_prev in &pdu.prev_events { + if !graph.contains_key(prev_prev) { + todo_outlier_stack.push(prev_prev.clone()); + } + } + + graph.insert( + prev_event_id.clone(), + pdu.prev_events.iter().cloned().collect(), + ); + } else { + // Time based check failed + graph.insert(prev_event_id.clone(), HashSet::new()); + } + + eventid_info.insert(prev_event_id.clone(), (pdu, json)); + } else { + // Get json failed, so this was not fetched over federation + graph.insert(prev_event_id.clone(), HashSet::new()); + } + } else { + // Fetch and handle failed + graph.insert(prev_event_id.clone(), HashSet::new()); + } + } + + let sorted = state_res::lexicographical_topological_sort(&graph, |event_id| { + // This return value is the key used for sorting events, + // events are then sorted by power level, time, + // and lexically by event_id. + println!("{}", event_id); + Ok(( + int!(0), + MilliSecondsSinceUnixEpoch( + eventid_info + .get(event_id) + .map_or_else(|| uint!(0), |info| info.0.origin_server_ts), + ), + )) + }) + .map_err(|_| Error::bad_database("Error sorting prev events"))?; + + Ok((sorted, eventid_info)) + } + + #[tracing::instrument(skip_all)] + pub(crate) async fn fetch_required_signing_keys( + &self, + event: &BTreeMap, + pub_key_map: &RwLock>>, + ) -> Result<()> { + let signatures = event + .get("signatures") + .ok_or(Error::BadServerResponse( + "No signatures in server response pdu.", + ))? + .as_object() + .ok_or(Error::BadServerResponse( + "Invalid signatures object in server response pdu.", + ))?; + + // We go through all the signatures we see on the value and fetch the corresponding signing + // keys + for (signature_server, signature) in signatures { + let signature_object = signature.as_object().ok_or(Error::BadServerResponse( + "Invalid signatures content object in server response pdu.", + ))?; + + let signature_ids = signature_object.keys().cloned().collect::>(); + + let fetch_res = self + .fetch_signing_keys( + signature_server.as_str().try_into().map_err(|_| { + Error::BadServerResponse( + "Invalid servername in signatures of server response pdu.", + ) + })?, + signature_ids, + ) + .await; + + let keys = match fetch_res { + Ok(keys) => keys, + Err(_) => { + warn!("Signature verification failed: Could not fetch signing key.",); + continue; + } + }; + + pub_key_map + .write() + .map_err(|_| Error::bad_database("RwLock is poisoned."))? + .insert(signature_server.clone(), keys); + } + + Ok(()) + } + + // Gets a list of servers for which we don't have the signing key yet. We go over + // the PDUs and either cache the key or add it to the list that needs to be retrieved. 
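As a rough illustration (not part of this changeset): the `pub_key_map` and `servers` maps threaded through these functions are nested by server name and then by signing key id (for example "ed25519:abc123"), with the verify key as the value. A PDU can be checked against the cache by asking whether every key id in its `signatures` section is already present, roughly as below; the helper name is invented and plain `String` values stand in for the real base64 key type.

use std::collections::BTreeMap;

// Illustrative sketch only; not part of this diff.
fn have_all_keys(
    pub_key_map: &BTreeMap<String, BTreeMap<String, String>>, // server -> key id -> public key
    server: &str,
    signature_key_ids: &[String],
) -> bool {
    pub_key_map
        .get(server)
        .map(|keys| signature_key_ids.iter().all(|id| keys.contains_key(id)))
        .unwrap_or(false)
}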
+ fn get_server_keys_from_cache( + &self, + pdu: &RawJsonValue, + servers: &mut BTreeMap>, + room_version: &RoomVersionId, + pub_key_map: &mut RwLockWriteGuard<'_, BTreeMap>>, + ) -> Result<()> { + let value: CanonicalJsonObject = serde_json::from_str(pdu.get()).map_err(|e| { + error!("Invalid PDU in server response: {:?}: {:?}", pdu, e); + Error::BadServerResponse("Invalid PDU in server response") + })?; + + let event_id = format!( + "${}", + ruma::signatures::reference_hash(&value, room_version) + .expect("ruma can calculate reference hashes") + ); + let event_id = <&EventId>::try_from(event_id.as_str()) + .expect("ruma's reference hashes are valid event ids"); + + if let Some((time, tries)) = services() + .globals + .bad_event_ratelimiter + .read() + .unwrap() + .get(event_id) + { + // Exponential backoff + let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + debug!("Backing off from {}", event_id); + return Err(Error::BadServerResponse("bad event, still backing off")); + } + } + + let signatures = value + .get("signatures") + .ok_or(Error::BadServerResponse( + "No signatures in server response pdu.", + ))? + .as_object() + .ok_or(Error::BadServerResponse( + "Invalid signatures object in server response pdu.", + ))?; + + for (signature_server, signature) in signatures { + let signature_object = signature.as_object().ok_or(Error::BadServerResponse( + "Invalid signatures content object in server response pdu.", + ))?; + + let signature_ids = signature_object.keys().cloned().collect::>(); + + let contains_all_ids = |keys: &BTreeMap| { + signature_ids.iter().all(|id| keys.contains_key(id)) + }; + + let origin = <&ServerName>::try_from(signature_server.as_str()).map_err(|_| { + Error::BadServerResponse("Invalid servername in signatures of server response pdu.") + })?; + + if servers.contains_key(origin) || pub_key_map.contains_key(origin.as_str()) { + continue; + } + + trace!("Loading signing keys for {}", origin); + + let result: BTreeMap<_, _> = services() + .globals + .signing_keys_for(origin)? 
+ .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect(); + + if !contains_all_ids(&result) { + trace!("Signing key not loaded for {}", origin); + servers.insert(origin.to_owned(), BTreeMap::new()); + } + + pub_key_map.insert(origin.to_string(), result); + } + + Ok(()) + } + + pub(crate) async fn fetch_join_signing_keys( + &self, + event: &create_join_event::v2::Response, + room_version: &RoomVersionId, + pub_key_map: &RwLock>>, + ) -> Result<()> { + let mut servers: BTreeMap< + OwnedServerName, + BTreeMap, + > = BTreeMap::new(); + + { + let mut pkm = pub_key_map + .write() + .map_err(|_| Error::bad_database("RwLock is poisoned."))?; + + // Try to fetch keys, failure is okay + // Servers we couldn't find in the cache will be added to `servers` + for pdu in &event.room_state.state { + let _ = self.get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm); + } + for pdu in &event.room_state.auth_chain { + let _ = self.get_server_keys_from_cache(pdu, &mut servers, room_version, &mut pkm); + } + + drop(pkm); + } + + if servers.is_empty() { + // We had all keys locally + return Ok(()); + } + + for server in services().globals.trusted_servers() { + trace!("Asking batch signing keys from trusted server {}", server); + if let Ok(keys) = services() + .sending + .send_federation_request( + server, + get_remote_server_keys_batch::v2::Request { + server_keys: servers.clone(), + }, + ) + .await + { + trace!("Got signing keys: {:?}", keys); + let mut pkm = pub_key_map + .write() + .map_err(|_| Error::bad_database("RwLock is poisoned."))?; + for k in keys.server_keys { + let k = k.deserialize().unwrap(); + + // TODO: Check signature from trusted server? + servers.remove(&k.server_name); + + let result = services() + .globals + .add_signing_key(&k.server_name, k.clone())? + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect::>(); + + pkm.insert(k.server_name.to_string(), result); + } + } + + if servers.is_empty() { + return Ok(()); + } + } + + let mut futures: FuturesUnordered<_> = servers + .into_keys() + .map(|server| async move { + ( + services() + .sending + .send_federation_request(&server, get_server_keys::v2::Request::new()) + .await, + server, + ) + }) + .collect(); + + while let Some(result) = futures.next().await { + if let (Ok(get_keys_response), origin) = result { + let result: BTreeMap<_, _> = services() + .globals + .add_signing_key(&origin, get_keys_response.server_key.deserialize().unwrap())? + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect(); + + pub_key_map + .write() + .map_err(|_| Error::bad_database("RwLock is poisoned."))? + .insert(origin.to_string(), result); + } + } + + Ok(()) + } + + /// Returns Ok if the acl allows the server + pub fn acl_check(&self, server_name: &ServerName, room_id: &RoomId) -> Result<()> { + let acl_event = match services().rooms.state_accessor.room_state_get( + room_id, + &StateEventType::RoomServerAcl, + "", + )? { + Some(acl) => acl, + None => return Ok(()), + }; + + let acl_event_content: RoomServerAclEventContent = + match serde_json::from_str(acl_event.content.get()) { + Ok(content) => content, + Err(_) => { + warn!("Invalid ACL event"); + return Ok(()); + } + }; + + if acl_event_content.is_allowed(server_name) { + Ok(()) + } else { + Err(Error::BadRequest( + ErrorKind::Forbidden, + "Server was denied by ACL", + )) + } + } + + /// Search the DB for the signing keys of the given server, if we don't have them + /// fetch them from the server and save to our DB. 
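As a rough illustration (not part of this changeset): `fetch_signing_keys` below tries the locally cached keys first, then the origin's own `/_matrix/key/v2/server` endpoint, and finally each configured trusted server's `/_matrix/key/v2/query` endpoint, recording a failure for exponential backoff if none of them return every requested key id. The backoff window grows quadratically with the failure count and is capped at one day; a minimal sketch of that arithmetic, with an invented function name:

use std::time::Duration;

// Illustrative sketch only; not part of this diff.
fn key_fetch_backoff(tries: u32) -> Duration {
    let delay = Duration::from_secs(30) * tries * tries; // 30s, 2m, 4m30s, 8m, ...
    delay.min(Duration::from_secs(60 * 60 * 24))         // but never more than 24h
}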
+ #[tracing::instrument(skip_all)] + pub async fn fetch_signing_keys( + &self, + origin: &ServerName, + signature_ids: Vec, + ) -> Result> { + let contains_all_ids = + |keys: &BTreeMap| signature_ids.iter().all(|id| keys.contains_key(id)); + + let permit = services() + .globals + .servername_ratelimiter + .read() + .unwrap() + .get(origin) + .map(|s| Arc::clone(s).acquire_owned()); + + let permit = match permit { + Some(p) => p, + None => { + let mut write = services().globals.servername_ratelimiter.write().unwrap(); + let s = Arc::clone( + write + .entry(origin.to_owned()) + .or_insert_with(|| Arc::new(Semaphore::new(1))), + ); + + s.acquire_owned() + } + } + .await; + + let back_off = |id| match services() + .globals + .bad_signature_ratelimiter + .write() + .unwrap() + .entry(id) + { + hash_map::Entry::Vacant(e) => { + e.insert((Instant::now(), 1)); + } + hash_map::Entry::Occupied(mut e) => *e.get_mut() = (Instant::now(), e.get().1 + 1), + }; + + if let Some((time, tries)) = services() + .globals + .bad_signature_ratelimiter + .read() + .unwrap() + .get(&signature_ids) + { + // Exponential backoff + let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + debug!("Backing off from {:?}", signature_ids); + return Err(Error::BadServerResponse("bad signature, still backing off")); + } + } + + trace!("Loading signing keys for {}", origin); + + let mut result: BTreeMap<_, _> = services() + .globals + .signing_keys_for(origin)? + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)) + .collect(); + + if contains_all_ids(&result) { + return Ok(result); + } + + debug!("Fetching signing keys for {} over federation", origin); + + if let Some(server_key) = services() + .sending + .send_federation_request(origin, get_server_keys::v2::Request::new()) + .await + .ok() + .and_then(|resp| resp.server_key.deserialize().ok()) + { + services() + .globals + .add_signing_key(origin, server_key.clone())?; + + result.extend( + server_key + .verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + result.extend( + server_key + .old_verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + + if contains_all_ids(&result) { + return Ok(result); + } + } + + for server in services().globals.trusted_servers() { + debug!("Asking {} for {}'s signing key", server, origin); + if let Some(server_keys) = services() + .sending + .send_federation_request( + server, + get_remote_server_keys::v2::Request::new( + origin, + MilliSecondsSinceUnixEpoch::from_system_time( + SystemTime::now() + .checked_add(Duration::from_secs(3600)) + .expect("SystemTime to large"), + ) + .expect("time is valid"), + ), + ) + .await + .ok() + .map(|resp| { + resp.server_keys + .into_iter() + .filter_map(|e| e.deserialize().ok()) + .collect::>() + }) + { + trace!("Got signing keys: {:?}", server_keys); + for k in server_keys { + services().globals.add_signing_key(origin, k.clone())?; + result.extend( + k.verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + result.extend( + k.old_verify_keys + .into_iter() + .map(|(k, v)| (k.to_string(), v.key)), + ); + } + + if contains_all_ids(&result) { + return Ok(result); + } + } + } + + drop(permit); + + back_off(signature_ids); + + warn!("Failed to find public key for server: {}", origin); + Err(Error::BadServerResponse( + "Failed to find public key for server", 
+ )) + } +} diff --git a/src/service/rooms/lazy_loading/data.rs b/src/service/rooms/lazy_loading/data.rs new file mode 100644 index 00000000..9af8e21b --- /dev/null +++ b/src/service/rooms/lazy_loading/data.rs @@ -0,0 +1,27 @@ +use crate::Result; +use ruma::{DeviceId, RoomId, UserId}; + +pub trait Data: Send + Sync { + fn lazy_load_was_sent_before( + &self, + user_id: &UserId, + device_id: &DeviceId, + room_id: &RoomId, + ll_user: &UserId, + ) -> Result; + + fn lazy_load_confirm_delivery( + &self, + user_id: &UserId, + device_id: &DeviceId, + room_id: &RoomId, + confirmed_user_ids: &mut dyn Iterator, + ) -> Result<()>; + + fn lazy_load_reset( + &self, + user_id: &UserId, + device_id: &DeviceId, + room_id: &RoomId, + ) -> Result<()>; +} diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs new file mode 100644 index 00000000..701a7340 --- /dev/null +++ b/src/service/rooms/lazy_loading/mod.rs @@ -0,0 +1,88 @@ +mod data; +use std::{ + collections::{HashMap, HashSet}, + sync::Mutex, +}; + +pub use data::Data; +use ruma::{DeviceId, OwnedDeviceId, OwnedRoomId, OwnedUserId, RoomId, UserId}; + +use crate::Result; + +pub struct Service { + pub db: &'static dyn Data, + + pub lazy_load_waiting: + Mutex>>, +} + +impl Service { + #[tracing::instrument(skip(self))] + pub fn lazy_load_was_sent_before( + &self, + user_id: &UserId, + device_id: &DeviceId, + room_id: &RoomId, + ll_user: &UserId, + ) -> Result { + self.db + .lazy_load_was_sent_before(user_id, device_id, room_id, ll_user) + } + + #[tracing::instrument(skip(self))] + pub fn lazy_load_mark_sent( + &self, + user_id: &UserId, + device_id: &DeviceId, + room_id: &RoomId, + lazy_load: HashSet, + count: u64, + ) { + self.lazy_load_waiting.lock().unwrap().insert( + ( + user_id.to_owned(), + device_id.to_owned(), + room_id.to_owned(), + count, + ), + lazy_load, + ); + } + + #[tracing::instrument(skip(self))] + pub fn lazy_load_confirm_delivery( + &self, + user_id: &UserId, + device_id: &DeviceId, + room_id: &RoomId, + since: u64, + ) -> Result<()> { + if let Some(user_ids) = self.lazy_load_waiting.lock().unwrap().remove(&( + user_id.to_owned(), + device_id.to_owned(), + room_id.to_owned(), + since, + )) { + self.db.lazy_load_confirm_delivery( + user_id, + device_id, + room_id, + &mut user_ids.iter().map(|u| &**u), + )?; + } else { + // Ignore + } + + Ok(()) + } + + #[tracing::instrument(skip(self))] + pub fn lazy_load_reset( + &self, + user_id: &UserId, + device_id: &DeviceId, + room_id: &RoomId, + ) -> Result<()> { + self.db.lazy_load_reset(user_id, device_id, room_id) + } +} diff --git a/src/service/rooms/metadata/data.rs b/src/service/rooms/metadata/data.rs new file mode 100644 index 00000000..339db573 --- /dev/null +++ b/src/service/rooms/metadata/data.rs @@ -0,0 +1,9 @@ +use crate::Result; +use ruma::{OwnedRoomId, RoomId}; + +pub trait Data: Send + Sync { + fn exists(&self, room_id: &RoomId) -> Result; + fn iter_ids<'a>(&'a self) -> Box> + 'a>; + fn is_disabled(&self, room_id: &RoomId) -> Result; + fn disable_room(&self, room_id: &RoomId, disabled: bool) -> Result<()>; +} diff --git a/src/service/rooms/metadata/mod.rs b/src/service/rooms/metadata/mod.rs new file mode 100644 index 00000000..d1884691 --- /dev/null +++ b/src/service/rooms/metadata/mod.rs @@ -0,0 +1,30 @@ +mod data; + +pub use data::Data; +use ruma::{OwnedRoomId, RoomId}; + +use crate::Result; + +pub struct Service { + pub db: &'static dyn Data, +} + +impl Service { + /// Checks if a room exists. 
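As a rough illustration (not part of this changeset): each of the new services in this refactor is a thin wrapper around a `Data` trait (`pub db: &'static dyn Data`) that the database layer implements, which keeps the service logic backend-agnostic. Purely as a hypothetical example, and assuming the `metadata::Data` methods above return `Result<bool>`, `Result<()>` and an iterator of `Result<OwnedRoomId>` as the real backends do, an in-memory implementation written inside `rooms::metadata` could look roughly like this:

use std::{collections::HashSet, sync::RwLock};

use ruma::{OwnedRoomId, RoomId};

use crate::Result;

// Illustrative sketch only; not part of this diff. The real implementations
// live in the database layer, not in memory.
pub struct MemoryMetadata {
    rooms: RwLock<HashSet<OwnedRoomId>>,
    disabled: RwLock<HashSet<OwnedRoomId>>,
}

impl Data for MemoryMetadata {
    fn exists(&self, room_id: &RoomId) -> Result<bool> {
        Ok(self.rooms.read().unwrap().contains(room_id))
    }

    fn iter_ids<'a>(&'a self) -> Box<dyn Iterator<Item = Result<OwnedRoomId>> + 'a> {
        // Clone so the iterator does not borrow the lock guard.
        Box::new(self.rooms.read().unwrap().clone().into_iter().map(Ok))
    }

    fn is_disabled(&self, room_id: &RoomId) -> Result<bool> {
        Ok(self.disabled.read().unwrap().contains(room_id))
    }

    fn disable_room(&self, room_id: &RoomId, disabled: bool) -> Result<()> {
        let mut set = self.disabled.write().unwrap();
        if disabled {
            set.insert(room_id.to_owned());
        } else {
            set.remove(room_id);
        }
        Ok(())
    }
}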
+ #[tracing::instrument(skip(self))] + pub fn exists(&self, room_id: &RoomId) -> Result { + self.db.exists(room_id) + } + + pub fn iter_ids<'a>(&'a self) -> Box> + 'a> { + self.db.iter_ids() + } + + pub fn is_disabled(&self, room_id: &RoomId) -> Result { + self.db.is_disabled(room_id) + } + + pub fn disable_room(&self, room_id: &RoomId, disabled: bool) -> Result<()> { + self.db.disable_room(room_id, disabled) + } +} diff --git a/src/service/rooms/mod.rs b/src/service/rooms/mod.rs new file mode 100644 index 00000000..8956e4d8 --- /dev/null +++ b/src/service/rooms/mod.rs @@ -0,0 +1,57 @@ +pub mod alias; +pub mod auth_chain; +pub mod directory; +pub mod edus; +pub mod event_handler; +pub mod lazy_loading; +pub mod metadata; +pub mod outlier; +pub mod pdu_metadata; +pub mod search; +pub mod short; +pub mod state; +pub mod state_accessor; +pub mod state_cache; +pub mod state_compressor; +pub mod timeline; +pub mod user; + +pub trait Data: + alias::Data + + auth_chain::Data + + directory::Data + + edus::Data + + lazy_loading::Data + + metadata::Data + + outlier::Data + + pdu_metadata::Data + + search::Data + + short::Data + + state::Data + + state_accessor::Data + + state_cache::Data + + state_compressor::Data + + timeline::Data + + user::Data +{ +} + +pub struct Service { + pub alias: alias::Service, + pub auth_chain: auth_chain::Service, + pub directory: directory::Service, + pub edus: edus::Service, + pub event_handler: event_handler::Service, + pub lazy_loading: lazy_loading::Service, + pub metadata: metadata::Service, + pub outlier: outlier::Service, + pub pdu_metadata: pdu_metadata::Service, + pub search: search::Service, + pub short: short::Service, + pub state: state::Service, + pub state_accessor: state_accessor::Service, + pub state_cache: state_cache::Service, + pub state_compressor: state_compressor::Service, + pub timeline: timeline::Service, + pub user: user::Service, +} diff --git a/src/service/rooms/outlier/data.rs b/src/service/rooms/outlier/data.rs new file mode 100644 index 00000000..0ed521dd --- /dev/null +++ b/src/service/rooms/outlier/data.rs @@ -0,0 +1,9 @@ +use ruma::{CanonicalJsonObject, EventId}; + +use crate::{PduEvent, Result}; + +pub trait Data: Send + Sync { + fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result>; + fn get_outlier_pdu(&self, event_id: &EventId) -> Result>; + fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()>; +} diff --git a/src/service/rooms/outlier/mod.rs b/src/service/rooms/outlier/mod.rs new file mode 100644 index 00000000..dae41e4b --- /dev/null +++ b/src/service/rooms/outlier/mod.rs @@ -0,0 +1,28 @@ +mod data; + +pub use data::Data; +use ruma::{CanonicalJsonObject, EventId}; + +use crate::{PduEvent, Result}; + +pub struct Service { + pub db: &'static dyn Data, +} + +impl Service { + /// Returns the pdu from the outlier tree. + pub fn get_outlier_pdu_json(&self, event_id: &EventId) -> Result> { + self.db.get_outlier_pdu_json(event_id) + } + + /// Returns the pdu from the outlier tree. + pub fn get_pdu_outlier(&self, event_id: &EventId) -> Result> { + self.db.get_outlier_pdu(event_id) + } + + /// Append the PDU as an outlier. 
+ #[tracing::instrument(skip(self, pdu))] + pub fn add_pdu_outlier(&self, event_id: &EventId, pdu: &CanonicalJsonObject) -> Result<()> { + self.db.add_pdu_outlier(event_id, pdu) + } +} diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs new file mode 100644 index 00000000..b157938f --- /dev/null +++ b/src/service/rooms/pdu_metadata/data.rs @@ -0,0 +1,11 @@ +use std::sync::Arc; + +use crate::Result; +use ruma::{EventId, RoomId}; + +pub trait Data: Send + Sync { + fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()>; + fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result; + fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()>; + fn is_event_soft_failed(&self, event_id: &EventId) -> Result; +} diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs new file mode 100644 index 00000000..b816678c --- /dev/null +++ b/src/service/rooms/pdu_metadata/mod.rs @@ -0,0 +1,33 @@ +mod data; +use std::sync::Arc; + +pub use data::Data; +use ruma::{EventId, RoomId}; + +use crate::Result; + +pub struct Service { + pub db: &'static dyn Data, +} + +impl Service { + #[tracing::instrument(skip(self, room_id, event_ids))] + pub fn mark_as_referenced(&self, room_id: &RoomId, event_ids: &[Arc]) -> Result<()> { + self.db.mark_as_referenced(room_id, event_ids) + } + + #[tracing::instrument(skip(self))] + pub fn is_event_referenced(&self, room_id: &RoomId, event_id: &EventId) -> Result { + self.db.is_event_referenced(room_id, event_id) + } + + #[tracing::instrument(skip(self))] + pub fn mark_event_soft_failed(&self, event_id: &EventId) -> Result<()> { + self.db.mark_event_soft_failed(event_id) + } + + #[tracing::instrument(skip(self))] + pub fn is_event_soft_failed(&self, event_id: &EventId) -> Result { + self.db.is_event_soft_failed(event_id) + } +} diff --git a/src/service/rooms/search/data.rs b/src/service/rooms/search/data.rs new file mode 100644 index 00000000..82c08004 --- /dev/null +++ b/src/service/rooms/search/data.rs @@ -0,0 +1,12 @@ +use crate::Result; +use ruma::RoomId; + +pub trait Data: Send + Sync { + fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()>; + + fn search_pdus<'a>( + &'a self, + room_id: &RoomId, + search_string: &str, + ) -> Result> + 'a>, Vec)>>; +} diff --git a/src/service/rooms/search/mod.rs b/src/service/rooms/search/mod.rs new file mode 100644 index 00000000..b6f35e79 --- /dev/null +++ b/src/service/rooms/search/mod.rs @@ -0,0 +1,26 @@ +mod data; + +pub use data::Data; + +use crate::Result; +use ruma::RoomId; + +pub struct Service { + pub db: &'static dyn Data, +} + +impl Service { + #[tracing::instrument(skip(self))] + pub fn index_pdu<'a>(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()> { + self.db.index_pdu(shortroomid, pdu_id, message_body) + } + + #[tracing::instrument(skip(self))] + pub fn search_pdus<'a>( + &'a self, + room_id: &RoomId, + search_string: &str, + ) -> Result> + 'a, Vec)>> { + self.db.search_pdus(room_id, search_string) + } +} diff --git a/src/service/rooms/short/data.rs b/src/service/rooms/short/data.rs new file mode 100644 index 00000000..652c525b --- /dev/null +++ b/src/service/rooms/short/data.rs @@ -0,0 +1,31 @@ +use std::sync::Arc; + +use crate::Result; +use ruma::{events::StateEventType, EventId, RoomId}; + +pub trait Data: Send + Sync { + fn get_or_create_shorteventid(&self, event_id: &EventId) -> Result; + + fn get_shortstatekey( + &self, + event_type: 
&StateEventType, + state_key: &str, + ) -> Result>; + + fn get_or_create_shortstatekey( + &self, + event_type: &StateEventType, + state_key: &str, + ) -> Result; + + fn get_eventid_from_short(&self, shorteventid: u64) -> Result>; + + fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)>; + + /// Returns (shortstatehash, already_existed) + fn get_or_create_shortstatehash(&self, state_hash: &[u8]) -> Result<(u64, bool)>; + + fn get_shortroomid(&self, room_id: &RoomId) -> Result>; + + fn get_or_create_shortroomid(&self, room_id: &RoomId) -> Result; +} diff --git a/src/service/rooms/short/mod.rs b/src/service/rooms/short/mod.rs new file mode 100644 index 00000000..45fadd74 --- /dev/null +++ b/src/service/rooms/short/mod.rs @@ -0,0 +1,54 @@ +mod data; +use std::sync::Arc; + +pub use data::Data; +use ruma::{events::StateEventType, EventId, RoomId}; + +use crate::Result; + +pub struct Service { + pub db: &'static dyn Data, +} + +impl Service { + pub fn get_or_create_shorteventid(&self, event_id: &EventId) -> Result { + self.db.get_or_create_shorteventid(event_id) + } + + pub fn get_shortstatekey( + &self, + event_type: &StateEventType, + state_key: &str, + ) -> Result> { + self.db.get_shortstatekey(event_type, state_key) + } + + pub fn get_or_create_shortstatekey( + &self, + event_type: &StateEventType, + state_key: &str, + ) -> Result { + self.db.get_or_create_shortstatekey(event_type, state_key) + } + + pub fn get_eventid_from_short(&self, shorteventid: u64) -> Result> { + self.db.get_eventid_from_short(shorteventid) + } + + pub fn get_statekey_from_short(&self, shortstatekey: u64) -> Result<(StateEventType, String)> { + self.db.get_statekey_from_short(shortstatekey) + } + + /// Returns (shortstatehash, already_existed) + pub fn get_or_create_shortstatehash(&self, state_hash: &[u8]) -> Result<(u64, bool)> { + self.db.get_or_create_shortstatehash(state_hash) + } + + pub fn get_shortroomid(&self, room_id: &RoomId) -> Result> { + self.db.get_shortroomid(room_id) + } + + pub fn get_or_create_shortroomid(&self, room_id: &RoomId) -> Result { + self.db.get_or_create_shortroomid(room_id) + } +} diff --git a/src/service/rooms/state/data.rs b/src/service/rooms/state/data.rs new file mode 100644 index 00000000..f52ea72b --- /dev/null +++ b/src/service/rooms/state/data.rs @@ -0,0 +1,31 @@ +use crate::Result; +use ruma::{EventId, OwnedEventId, RoomId}; +use std::{collections::HashSet, sync::Arc}; +use tokio::sync::MutexGuard; + +pub trait Data: Send + Sync { + /// Returns the last state hash key added to the db for the given room. + fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result>; + + /// Set the state hash to a new version, but does not update state_cache. + fn set_room_state( + &self, + room_id: &RoomId, + new_shortstatehash: u64, + _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + ) -> Result<()>; + + /// Associates a state with an event. + fn set_event_state(&self, shorteventid: u64, shortstatehash: u64) -> Result<()>; + + /// Returns all events we would send as the prev_events of the next event. + fn get_forward_extremities(&self, room_id: &RoomId) -> Result>>; + + /// Replace the forward extremities of the room. 
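As a rough illustration (not part of this changeset): the forward extremities managed here are the leaves of the room's event DAG, that is, accepted events that no other accepted event references as a prev_event yet; they become the prev_events of the next event this server creates. When an event is appended, every extremity it references stops being a leaf and the new event becomes one, which is what the event handler above does before persisting. A toy version of that update, with an invented function and plain strings in place of event ids:

use std::collections::HashSet;

// Illustrative sketch only; not part of this diff.
fn update_extremities(
    mut extremities: HashSet<String>,
    new_event_id: &str,
    prev_events: &[String],
) -> HashSet<String> {
    for prev in prev_events {
        extremities.remove(prev); // referenced by the new event, so no longer a leaf
    }
    extremities.insert(new_event_id.to_owned()); // the new event is now a leaf
    extremities
}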
+ fn set_forward_extremities<'a>( + &self, + room_id: &RoomId, + event_ids: Vec, + _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + ) -> Result<()>; +} diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs new file mode 100644 index 00000000..0e450322 --- /dev/null +++ b/src/service/rooms/state/mod.rs @@ -0,0 +1,421 @@ +mod data; +use std::{ + collections::{HashMap, HashSet}, + sync::Arc, +}; + +pub use data::Data; +use ruma::{ + events::{ + room::{create::RoomCreateEventContent, member::MembershipState}, + AnyStrippedStateEvent, RoomEventType, StateEventType, + }, + serde::Raw, + state_res::{self, StateMap}, + EventId, OwnedEventId, RoomId, RoomVersionId, UserId, +}; +use serde::Deserialize; +use tokio::sync::MutexGuard; +use tracing::warn; + +use crate::{services, utils::calculate_hash, Error, PduEvent, Result}; + +use super::state_compressor::CompressedStateEvent; + +pub struct Service { + pub db: &'static dyn Data, +} + +impl Service { + /// Set the room to the given statehash and update caches. + pub async fn force_state( + &self, + room_id: &RoomId, + shortstatehash: u64, + statediffnew: HashSet, + _statediffremoved: HashSet, + state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + ) -> Result<()> { + for event_id in statediffnew.into_iter().filter_map(|new| { + services() + .rooms + .state_compressor + .parse_compressed_state_event(&new) + .ok() + .map(|(_, id)| id) + }) { + let pdu = match services().rooms.timeline.get_pdu_json(&event_id)? { + Some(pdu) => pdu, + None => continue, + }; + + if pdu.get("type").and_then(|val| val.as_str()) != Some("m.room.member") { + continue; + } + + let pdu: PduEvent = match serde_json::from_str( + &serde_json::to_string(&pdu).expect("CanonicalJsonObj can be serialized to JSON"), + ) { + Ok(pdu) => pdu, + Err(_) => continue, + }; + + #[derive(Deserialize)] + struct ExtractMembership { + membership: MembershipState, + } + + let membership = match serde_json::from_str::(pdu.content.get()) { + Ok(e) => e.membership, + Err(_) => continue, + }; + + let state_key = match pdu.state_key { + Some(k) => k, + None => continue, + }; + + let user_id = match UserId::parse(state_key) { + Ok(id) => id, + Err(_) => continue, + }; + + services().rooms.state_cache.update_membership( + room_id, + &user_id, + membership, + &pdu.sender, + None, + false, + )?; + } + + services().rooms.state_cache.update_joined_count(room_id)?; + + self.db + .set_room_state(room_id, shortstatehash, state_lock)?; + + Ok(()) + } + + /// Generates a new StateHash and associates it with the incoming event. + /// + /// This adds all current state events (not including the incoming event) + /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. 
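As a rough illustration (not part of this changeset): the state_compressor stores a room's state snapshot as a chain of diffs of compressed (shortstatekey, shorteventid) entries against a parent snapshot, and `set_event_state` below derives exactly such a diff with two set differences. A toy version of that diff over plain integers; the real entries are fixed-size byte strings.

use std::collections::HashSet;

// Illustrative sketch only; not part of this diff.
fn state_diff(parent: &HashSet<u64>, new_state: &HashSet<u64>) -> (HashSet<u64>, HashSet<u64>) {
    let added: HashSet<u64> = new_state.difference(parent).copied().collect();
    let removed: HashSet<u64> = parent.difference(new_state).copied().collect();
    (added, removed) // (statediffnew, statediffremoved)
}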
+ #[tracing::instrument(skip(self, state_ids_compressed))] + pub fn set_event_state( + &self, + event_id: &EventId, + room_id: &RoomId, + state_ids_compressed: HashSet, + ) -> Result { + let shorteventid = services() + .rooms + .short + .get_or_create_shorteventid(event_id)?; + + let previous_shortstatehash = self.db.get_room_shortstatehash(room_id)?; + + let state_hash = calculate_hash( + &state_ids_compressed + .iter() + .map(|s| &s[..]) + .collect::>(), + ); + + let (shortstatehash, already_existed) = services() + .rooms + .short + .get_or_create_shortstatehash(&state_hash)?; + + if !already_existed { + let states_parents = previous_shortstatehash.map_or_else( + || Ok(Vec::new()), + |p| { + services() + .rooms + .state_compressor + .load_shortstatehash_info(p) + }, + )?; + + let (statediffnew, statediffremoved) = + if let Some(parent_stateinfo) = states_parents.last() { + let statediffnew: HashSet<_> = state_ids_compressed + .difference(&parent_stateinfo.1) + .copied() + .collect(); + + let statediffremoved: HashSet<_> = parent_stateinfo + .1 + .difference(&state_ids_compressed) + .copied() + .collect(); + + (statediffnew, statediffremoved) + } else { + (state_ids_compressed, HashSet::new()) + }; + services().rooms.state_compressor.save_state_from_diff( + shortstatehash, + statediffnew, + statediffremoved, + 1_000_000, // high number because no state will be based on this one + states_parents, + )?; + } + + self.db.set_event_state(shorteventid, shortstatehash)?; + + Ok(shortstatehash) + } + + /// Generates a new StateHash and associates it with the incoming event. + /// + /// This adds all current state events (not including the incoming event) + /// to `stateid_pduid` and adds the incoming event to `eventid_statehash`. + #[tracing::instrument(skip(self, new_pdu))] + pub fn append_to_state(&self, new_pdu: &PduEvent) -> Result { + let shorteventid = services() + .rooms + .short + .get_or_create_shorteventid(&new_pdu.event_id)?; + + let previous_shortstatehash = self.get_room_shortstatehash(&new_pdu.room_id)?; + + if let Some(p) = previous_shortstatehash { + self.db.set_event_state(shorteventid, p)?; + } + + if let Some(state_key) = &new_pdu.state_key { + let states_parents = previous_shortstatehash.map_or_else( + || Ok(Vec::new()), + |p| { + services() + .rooms + .state_compressor + .load_shortstatehash_info(p) + }, + )?; + + let shortstatekey = services() + .rooms + .short + .get_or_create_shortstatekey(&new_pdu.kind.to_string().into(), state_key)?; + + let new = services() + .rooms + .state_compressor + .compress_state_event(shortstatekey, &new_pdu.event_id)?; + + let replaces = states_parents + .last() + .map(|info| { + info.1 + .iter() + .find(|bytes| bytes.starts_with(&shortstatekey.to_be_bytes())) + }) + .unwrap_or_default(); + + if Some(&new) == replaces { + return Ok(previous_shortstatehash.expect("must exist")); + } + + // TODO: statehash with deterministic inputs + let shortstatehash = services().globals.next_count()?; + + let mut statediffnew = HashSet::new(); + statediffnew.insert(new); + + let mut statediffremoved = HashSet::new(); + if let Some(replaces) = replaces { + statediffremoved.insert(*replaces); + } + + services().rooms.state_compressor.save_state_from_diff( + shortstatehash, + statediffnew, + statediffremoved, + 2, + states_parents, + )?; + + Ok(shortstatehash) + } else { + Ok(previous_shortstatehash.expect("first event in room must be a state event")) + } + } + + #[tracing::instrument(skip(self, invite_event))] + pub fn calculate_invite_state( + &self, + 
invite_event: &PduEvent, + ) -> Result>> { + let mut state = Vec::new(); + // Add recommended events + if let Some(e) = services().rooms.state_accessor.room_state_get( + &invite_event.room_id, + &StateEventType::RoomCreate, + "", + )? { + state.push(e.to_stripped_state_event()); + } + if let Some(e) = services().rooms.state_accessor.room_state_get( + &invite_event.room_id, + &StateEventType::RoomJoinRules, + "", + )? { + state.push(e.to_stripped_state_event()); + } + if let Some(e) = services().rooms.state_accessor.room_state_get( + &invite_event.room_id, + &StateEventType::RoomCanonicalAlias, + "", + )? { + state.push(e.to_stripped_state_event()); + } + if let Some(e) = services().rooms.state_accessor.room_state_get( + &invite_event.room_id, + &StateEventType::RoomAvatar, + "", + )? { + state.push(e.to_stripped_state_event()); + } + if let Some(e) = services().rooms.state_accessor.room_state_get( + &invite_event.room_id, + &StateEventType::RoomName, + "", + )? { + state.push(e.to_stripped_state_event()); + } + if let Some(e) = services().rooms.state_accessor.room_state_get( + &invite_event.room_id, + &StateEventType::RoomMember, + invite_event.sender.as_str(), + )? { + state.push(e.to_stripped_state_event()); + } + + state.push(invite_event.to_stripped_state_event()); + Ok(state) + } + + /// Set the state hash to a new version, but does not update state_cache. + #[tracing::instrument(skip(self))] + pub fn set_room_state( + &self, + room_id: &RoomId, + shortstatehash: u64, + mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + ) -> Result<()> { + self.db.set_room_state(room_id, shortstatehash, mutex_lock) + } + + /// Returns the room's version. + #[tracing::instrument(skip(self))] + pub fn get_room_version(&self, room_id: &RoomId) -> Result { + let create_event = services().rooms.state_accessor.room_state_get( + room_id, + &StateEventType::RoomCreate, + "", + )?; + + let create_event_content: Option = create_event + .as_ref() + .map(|create_event| { + serde_json::from_str(create_event.content.get()).map_err(|e| { + warn!("Invalid create event: {}", e); + Error::bad_database("Invalid create event in db.") + }) + }) + .transpose()?; + let room_version = create_event_content + .map(|create_event| create_event.room_version) + .ok_or(Error::BadDatabase("Invalid room version"))?; + Ok(room_version) + } + + pub fn get_room_shortstatehash(&self, room_id: &RoomId) -> Result> { + self.db.get_room_shortstatehash(room_id) + } + + pub fn get_forward_extremities(&self, room_id: &RoomId) -> Result>> { + self.db.get_forward_extremities(room_id) + } + + pub fn set_forward_extremities<'a>( + &self, + room_id: &RoomId, + event_ids: Vec, + state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + ) -> Result<()> { + self.db + .set_forward_extremities(room_id, event_ids, state_lock) + } + + /// This fetches auth events from the current state. + #[tracing::instrument(skip(self))] + pub fn get_auth_events( + &self, + room_id: &RoomId, + kind: &RoomEventType, + sender: &UserId, + state_key: Option<&str>, + content: &serde_json::value::RawValue, + ) -> Result>> { + let shortstatehash = if let Some(current_shortstatehash) = + services().rooms.state.get_room_shortstatehash(room_id)? 
+ { + current_shortstatehash + } else { + return Ok(HashMap::new()); + }; + + let auth_events = state_res::auth_types_for_event(kind, sender, state_key, content) + .expect("content is a valid JSON object"); + + let mut sauthevents = auth_events + .into_iter() + .filter_map(|(event_type, state_key)| { + services() + .rooms + .short + .get_shortstatekey(&event_type.to_string().into(), &state_key) + .ok() + .flatten() + .map(|s| (s, (event_type, state_key))) + }) + .collect::>(); + + let full_state = services() + .rooms + .state_compressor + .load_shortstatehash_info(shortstatehash)? + .pop() + .expect("there is always one layer") + .1; + + Ok(full_state + .into_iter() + .filter_map(|compressed| { + services() + .rooms + .state_compressor + .parse_compressed_state_event(&compressed) + .ok() + }) + .filter_map(|(shortstatekey, event_id)| { + sauthevents.remove(&shortstatekey).map(|k| (k, event_id)) + }) + .filter_map(|(k, event_id)| { + services() + .rooms + .timeline + .get_pdu(&event_id) + .ok() + .flatten() + .map(|pdu| (k, pdu)) + }) + .collect()) + } +} diff --git a/src/service/rooms/state_accessor/data.rs b/src/service/rooms/state_accessor/data.rs new file mode 100644 index 00000000..340b19c3 --- /dev/null +++ b/src/service/rooms/state_accessor/data.rs @@ -0,0 +1,62 @@ +use std::{ + collections::{BTreeMap, HashMap}, + sync::Arc, +}; + +use async_trait::async_trait; +use ruma::{events::StateEventType, EventId, RoomId}; + +use crate::{PduEvent, Result}; + +#[async_trait] +pub trait Data: Send + Sync { + /// Builds a StateMap by iterating over all keys that start + /// with state_hash, this gives the full state for the given state_hash. + async fn state_full_ids(&self, shortstatehash: u64) -> Result>>; + + async fn state_full( + &self, + shortstatehash: u64, + ) -> Result>>; + + /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). + fn state_get_id( + &self, + shortstatehash: u64, + event_type: &StateEventType, + state_key: &str, + ) -> Result>>; + + /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). + fn state_get( + &self, + shortstatehash: u64, + event_type: &StateEventType, + state_key: &str, + ) -> Result>>; + + /// Returns the state hash for this pdu. + fn pdu_shortstatehash(&self, event_id: &EventId) -> Result>; + + /// Returns the full room state. + async fn room_state_full( + &self, + room_id: &RoomId, + ) -> Result>>; + + /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). + fn room_state_get_id( + &self, + room_id: &RoomId, + event_type: &StateEventType, + state_key: &str, + ) -> Result>>; + + /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). + fn room_state_get( + &self, + room_id: &RoomId, + event_type: &StateEventType, + state_key: &str, + ) -> Result>>; +} diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs new file mode 100644 index 00000000..1a9c4a9e --- /dev/null +++ b/src/service/rooms/state_accessor/mod.rs @@ -0,0 +1,87 @@ +mod data; +use std::{ + collections::{BTreeMap, HashMap}, + sync::Arc, +}; + +pub use data::Data; +use ruma::{events::StateEventType, EventId, RoomId}; + +use crate::{PduEvent, Result}; + +pub struct Service { + pub db: &'static dyn Data, +} + +impl Service { + /// Builds a StateMap by iterating over all keys that start + /// with state_hash, this gives the full state for the given state_hash. 
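+    ///
+    /// A sketch of typical use (the exact key/value types come from the `Data` trait impl):
+    /// `let ids = services().rooms.state_accessor.state_full_ids(shortstatehash).await?;`
+    /// yields, for each shortstatekey in that snapshot, the id of the event currently holding
+    /// that piece of state.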
+ pub async fn state_full_ids(&self, shortstatehash: u64) -> Result>> { + self.db.state_full_ids(shortstatehash).await + } + + pub async fn state_full( + &self, + shortstatehash: u64, + ) -> Result>> { + self.db.state_full(shortstatehash).await + } + + /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). + #[tracing::instrument(skip(self))] + pub fn state_get_id( + &self, + shortstatehash: u64, + event_type: &StateEventType, + state_key: &str, + ) -> Result>> { + self.db.state_get_id(shortstatehash, event_type, state_key) + } + + /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). + #[tracing::instrument(skip(self))] + pub fn state_get( + &self, + shortstatehash: u64, + event_type: &StateEventType, + state_key: &str, + ) -> Result>> { + self.db.state_get(shortstatehash, event_type, state_key) + } + + /// Returns the state hash for this pdu. + pub fn pdu_shortstatehash(&self, event_id: &EventId) -> Result> { + self.db.pdu_shortstatehash(event_id) + } + + /// Returns the full room state. + #[tracing::instrument(skip(self))] + pub async fn room_state_full( + &self, + room_id: &RoomId, + ) -> Result>> { + self.db.room_state_full(room_id).await + } + + /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). + #[tracing::instrument(skip(self))] + pub fn room_state_get_id( + &self, + room_id: &RoomId, + event_type: &StateEventType, + state_key: &str, + ) -> Result>> { + self.db.room_state_get_id(room_id, event_type, state_key) + } + + /// Returns a single PDU from `room_id` with key (`event_type`, `state_key`). + #[tracing::instrument(skip(self))] + pub fn room_state_get( + &self, + room_id: &RoomId, + event_type: &StateEventType, + state_key: &str, + ) -> Result>> { + self.db.room_state_get(room_id, event_type, state_key) + } +} diff --git a/src/service/rooms/state_cache/data.rs b/src/service/rooms/state_cache/data.rs new file mode 100644 index 00000000..42de56d2 --- /dev/null +++ b/src/service/rooms/state_cache/data.rs @@ -0,0 +1,111 @@ +use std::{collections::HashSet, sync::Arc}; + +use crate::Result; +use ruma::{ + events::{AnyStrippedStateEvent, AnySyncStateEvent}, + serde::Raw, + OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, +}; + +pub trait Data: Send + Sync { + fn mark_as_once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; + fn mark_as_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; + fn mark_as_invited( + &self, + user_id: &UserId, + room_id: &RoomId, + last_state: Option>>, + ) -> Result<()>; + fn mark_as_left(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; + + fn update_joined_count(&self, room_id: &RoomId) -> Result<()>; + + fn get_our_real_users(&self, room_id: &RoomId) -> Result>>; + + fn appservice_in_room( + &self, + room_id: &RoomId, + appservice: &(String, serde_yaml::Value), + ) -> Result; + + /// Makes a user forget a room. + fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()>; + + /// Returns an iterator of all servers participating in this room. + fn room_servers<'a>( + &'a self, + room_id: &RoomId, + ) -> Box> + 'a>; + + fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result; + + /// Returns an iterator of all rooms a server participates in (as far as we know). + fn server_rooms<'a>( + &'a self, + server: &ServerName, + ) -> Box> + 'a>; + + /// Returns an iterator over all joined members of a room. 
+ fn room_members<'a>( + &'a self, + room_id: &RoomId, + ) -> Box> + 'a>; + + fn room_joined_count(&self, room_id: &RoomId) -> Result>; + + fn room_invited_count(&self, room_id: &RoomId) -> Result>; + + /// Returns an iterator over all User IDs who ever joined a room. + fn room_useroncejoined<'a>( + &'a self, + room_id: &RoomId, + ) -> Box> + 'a>; + + /// Returns an iterator over all invited members of a room. + fn room_members_invited<'a>( + &'a self, + room_id: &RoomId, + ) -> Box> + 'a>; + + fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result>; + + fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result>; + + /// Returns an iterator over all rooms this user joined. + fn rooms_joined<'a>( + &'a self, + user_id: &UserId, + ) -> Box> + 'a>; + + /// Returns an iterator over all rooms a user was invited to. + fn rooms_invited<'a>( + &'a self, + user_id: &UserId, + ) -> Box>)>> + 'a>; + + fn invite_state( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result>>>; + + fn left_state( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result>>>; + + /// Returns an iterator over all rooms a user left. + fn rooms_left<'a>( + &'a self, + user_id: &UserId, + ) -> Box>)>> + 'a>; + + fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result; + + fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result; + + fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result; + + fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> Result; +} diff --git a/src/service/rooms/state_cache/mod.rs b/src/service/rooms/state_cache/mod.rs new file mode 100644 index 00000000..6c9bed35 --- /dev/null +++ b/src/service/rooms/state_cache/mod.rs @@ -0,0 +1,348 @@ +mod data; +use std::{collections::HashSet, sync::Arc}; + +pub use data::Data; + +use ruma::{ + events::{ + direct::DirectEvent, + ignored_user_list::IgnoredUserListEvent, + room::{create::RoomCreateEventContent, member::MembershipState}, + AnyStrippedStateEvent, AnySyncStateEvent, GlobalAccountDataEventType, + RoomAccountDataEventType, StateEventType, + }, + serde::Raw, + OwnedRoomId, OwnedServerName, OwnedUserId, RoomId, ServerName, UserId, +}; + +use crate::{services, Error, Result}; + +pub struct Service { + pub db: &'static dyn Data, +} + +impl Service { + /// Update current membership data. + #[tracing::instrument(skip(self, last_state))] + pub fn update_membership( + &self, + room_id: &RoomId, + user_id: &UserId, + membership: MembershipState, + sender: &UserId, + last_state: Option>>, + update_joined_count: bool, + ) -> Result<()> { + // Keep track what remote users exist by adding them as "deactivated" users + if user_id.server_name() != services().globals.server_name() { + services().users.create(user_id, None)?; + // TODO: displayname, avatar url + } + + match &membership { + MembershipState::Join => { + // Check if the user never joined this room + if !self.once_joined(user_id, room_id)? { + // Add the user ID to the join list then + self.db.mark_as_once_joined(user_id, room_id)?; + + // Check if the room has a predecessor + if let Some(predecessor) = services() + .rooms + .state_accessor + .room_state_get(room_id, &StateEventType::RoomCreate, "")? + .and_then(|create| serde_json::from_str(create.content.get()).ok()) + .and_then(|content: RoomCreateEventContent| content.predecessor) + { + // Copy user settings from predecessor to the current room: + // - Push rules + // + // TODO: finish this once push rules are implemented. 
+ // + // let mut push_rules_event_content: PushRulesEvent = account_data + // .get( + // None, + // user_id, + // EventType::PushRules, + // )?; + // + // NOTE: find where `predecessor.room_id` match + // and update to `room_id`. + // + // account_data + // .update( + // None, + // user_id, + // EventType::PushRules, + // &push_rules_event_content, + // globals, + // ) + // .ok(); + + // Copy old tags to new room + if let Some(tag_event) = services() + .account_data + .get( + Some(&predecessor.room_id), + user_id, + RoomAccountDataEventType::Tag, + )? + .map(|event| { + serde_json::from_str(event.get()).map_err(|_| { + Error::bad_database("Invalid account data event in db.") + }) + }) + { + services() + .account_data + .update( + Some(room_id), + user_id, + RoomAccountDataEventType::Tag, + &tag_event?, + ) + .ok(); + }; + + // Copy direct chat flag + if let Some(direct_event) = services() + .account_data + .get( + None, + user_id, + GlobalAccountDataEventType::Direct.to_string().into(), + )? + .map(|event| { + serde_json::from_str::(event.get()).map_err(|_| { + Error::bad_database("Invalid account data event in db.") + }) + }) + { + let mut direct_event = direct_event?; + let mut room_ids_updated = false; + + for room_ids in direct_event.content.0.values_mut() { + if room_ids.iter().any(|r| r == &predecessor.room_id) { + room_ids.push(room_id.to_owned()); + room_ids_updated = true; + } + } + + if room_ids_updated { + services().account_data.update( + None, + user_id, + GlobalAccountDataEventType::Direct.to_string().into(), + &serde_json::to_value(&direct_event) + .expect("to json always works"), + )?; + } + }; + } + } + + self.db.mark_as_joined(user_id, room_id)?; + } + MembershipState::Invite => { + // We want to know if the sender is ignored by the receiver + let is_ignored = services() + .account_data + .get( + None, // Ignored users are in global account data + user_id, // Receiver + GlobalAccountDataEventType::IgnoredUserList + .to_string() + .into(), + )? + .map(|event| { + serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid account data event in db.")) + }) + .transpose()? + .map_or(false, |ignored| { + ignored + .content + .ignored_users + .iter() + .any(|user| user == sender) + }); + + if is_ignored { + return Ok(()); + } + + self.db.mark_as_invited(user_id, room_id, last_state)?; + } + MembershipState::Leave | MembershipState::Ban => { + self.db.mark_as_left(user_id, room_id)?; + } + _ => {} + } + + if update_joined_count { + self.update_joined_count(room_id)?; + } + + Ok(()) + } + + #[tracing::instrument(skip(self, room_id))] + pub fn update_joined_count(&self, room_id: &RoomId) -> Result<()> { + self.db.update_joined_count(room_id) + } + + #[tracing::instrument(skip(self, room_id))] + pub fn get_our_real_users(&self, room_id: &RoomId) -> Result>> { + self.db.get_our_real_users(room_id) + } + + #[tracing::instrument(skip(self, room_id, appservice))] + pub fn appservice_in_room( + &self, + room_id: &RoomId, + appservice: &(String, serde_yaml::Value), + ) -> Result { + self.db.appservice_in_room(room_id, appservice) + } + + /// Makes a user forget a room. + #[tracing::instrument(skip(self))] + pub fn forget(&self, room_id: &RoomId, user_id: &UserId) -> Result<()> { + self.db.forget(room_id, user_id) + } + + /// Returns an iterator of all servers participating in this room. 
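+    ///
+    /// e.g. the timeline service collects the federation targets for a new event with
+    /// `services().rooms.state_cache.room_servers(room_id).filter_map(|r| r.ok()).collect::<HashSet<_>>()`
+    /// (see `build_and_append_pdu` in the timeline service).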
+ #[tracing::instrument(skip(self))] + pub fn room_servers<'a>( + &'a self, + room_id: &RoomId, + ) -> impl Iterator> + 'a { + self.db.room_servers(room_id) + } + + #[tracing::instrument(skip(self))] + pub fn server_in_room<'a>(&'a self, server: &ServerName, room_id: &RoomId) -> Result { + self.db.server_in_room(server, room_id) + } + + /// Returns an iterator of all rooms a server participates in (as far as we know). + #[tracing::instrument(skip(self))] + pub fn server_rooms<'a>( + &'a self, + server: &ServerName, + ) -> impl Iterator> + 'a { + self.db.server_rooms(server) + } + + /// Returns an iterator over all joined members of a room. + #[tracing::instrument(skip(self))] + pub fn room_members<'a>( + &'a self, + room_id: &RoomId, + ) -> impl Iterator> + 'a { + self.db.room_members(room_id) + } + + #[tracing::instrument(skip(self))] + pub fn room_joined_count(&self, room_id: &RoomId) -> Result> { + self.db.room_joined_count(room_id) + } + + #[tracing::instrument(skip(self))] + pub fn room_invited_count(&self, room_id: &RoomId) -> Result> { + self.db.room_invited_count(room_id) + } + + /// Returns an iterator over all User IDs who ever joined a room. + #[tracing::instrument(skip(self))] + pub fn room_useroncejoined<'a>( + &'a self, + room_id: &RoomId, + ) -> impl Iterator> + 'a { + self.db.room_useroncejoined(room_id) + } + + /// Returns an iterator over all invited members of a room. + #[tracing::instrument(skip(self))] + pub fn room_members_invited<'a>( + &'a self, + room_id: &RoomId, + ) -> impl Iterator> + 'a { + self.db.room_members_invited(room_id) + } + + #[tracing::instrument(skip(self))] + pub fn get_invite_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { + self.db.get_invite_count(room_id, user_id) + } + + #[tracing::instrument(skip(self))] + pub fn get_left_count(&self, room_id: &RoomId, user_id: &UserId) -> Result> { + self.db.get_left_count(room_id, user_id) + } + + /// Returns an iterator over all rooms this user joined. + #[tracing::instrument(skip(self))] + pub fn rooms_joined<'a>( + &'a self, + user_id: &UserId, + ) -> impl Iterator> + 'a { + self.db.rooms_joined(user_id) + } + + /// Returns an iterator over all rooms a user was invited to. + #[tracing::instrument(skip(self))] + pub fn rooms_invited<'a>( + &'a self, + user_id: &UserId, + ) -> impl Iterator>)>> + 'a { + self.db.rooms_invited(user_id) + } + + #[tracing::instrument(skip(self))] + pub fn invite_state( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result>>> { + self.db.invite_state(user_id, room_id) + } + + #[tracing::instrument(skip(self))] + pub fn left_state( + &self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result>>> { + self.db.left_state(user_id, room_id) + } + + /// Returns an iterator over all rooms a user left. 
+ #[tracing::instrument(skip(self))] + pub fn rooms_left<'a>( + &'a self, + user_id: &UserId, + ) -> impl Iterator>)>> + 'a { + self.db.rooms_left(user_id) + } + + #[tracing::instrument(skip(self))] + pub fn once_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { + self.db.once_joined(user_id, room_id) + } + + #[tracing::instrument(skip(self))] + pub fn is_joined(&self, user_id: &UserId, room_id: &RoomId) -> Result { + self.db.is_joined(user_id, room_id) + } + + #[tracing::instrument(skip(self))] + pub fn is_invited(&self, user_id: &UserId, room_id: &RoomId) -> Result { + self.db.is_invited(user_id, room_id) + } + + #[tracing::instrument(skip(self))] + pub fn is_left(&self, user_id: &UserId, room_id: &RoomId) -> Result { + self.db.is_left(user_id, room_id) + } +} diff --git a/src/service/rooms/state_compressor/data.rs b/src/service/rooms/state_compressor/data.rs new file mode 100644 index 00000000..ce164c6d --- /dev/null +++ b/src/service/rooms/state_compressor/data.rs @@ -0,0 +1,15 @@ +use std::collections::HashSet; + +use super::CompressedStateEvent; +use crate::Result; + +pub struct StateDiff { + pub parent: Option, + pub added: HashSet, + pub removed: HashSet, +} + +pub trait Data: Send + Sync { + fn get_statediff(&self, shortstatehash: u64) -> Result; + fn save_statediff(&self, shortstatehash: u64, diff: StateDiff) -> Result<()>; +} diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs new file mode 100644 index 00000000..356f32c8 --- /dev/null +++ b/src/service/rooms/state_compressor/mod.rs @@ -0,0 +1,310 @@ +pub mod data; +use std::{ + collections::HashSet, + mem::size_of, + sync::{Arc, Mutex}, +}; + +pub use data::Data; +use lru_cache::LruCache; +use ruma::{EventId, RoomId}; + +use crate::{services, utils, Result}; + +use self::data::StateDiff; + +pub struct Service { + pub db: &'static dyn Data, + + pub stateinfo_cache: Mutex< + LruCache< + u64, + Vec<( + u64, // sstatehash + HashSet, // full state + HashSet, // added + HashSet, // removed + )>, + >, + >, +} + +pub type CompressedStateEvent = [u8; 2 * size_of::()]; + +impl Service { + /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer. + #[tracing::instrument(skip(self))] + pub fn load_shortstatehash_info( + &self, + shortstatehash: u64, + ) -> Result< + Vec<( + u64, // sstatehash + HashSet, // full state + HashSet, // added + HashSet, // removed + )>, + > { + if let Some(r) = self + .stateinfo_cache + .lock() + .unwrap() + .get_mut(&shortstatehash) + { + return Ok(r.clone()); + } + + let StateDiff { + parent, + added, + removed, + } = self.db.get_statediff(shortstatehash)?; + + if let Some(parent) = parent { + let mut response = self.load_shortstatehash_info(parent)?; + let mut state = response.last().unwrap().1.clone(); + state.extend(added.iter().copied()); + for r in &removed { + state.remove(r); + } + + response.push((shortstatehash, state, added, removed)); + + Ok(response) + } else { + let response = vec![(shortstatehash, added.clone(), added, removed)]; + self.stateinfo_cache + .lock() + .unwrap() + .insert(shortstatehash, response.clone()); + Ok(response) + } + } + + pub fn compress_state_event( + &self, + shortstatekey: u64, + event_id: &EventId, + ) -> Result { + let mut v = shortstatekey.to_be_bytes().to_vec(); + v.extend_from_slice( + &services() + .rooms + .short + .get_or_create_shorteventid(event_id)? 
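+                // the shorteventid bytes follow the shortstatekey bytes, so a
+                // CompressedStateEvent is two big-endian u64s: [shortstatekey | shorteventid]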
+ .to_be_bytes(), + ); + Ok(v.try_into().expect("we checked the size above")) + } + + /// Returns shortstatekey, event id + pub fn parse_compressed_state_event( + &self, + compressed_event: &CompressedStateEvent, + ) -> Result<(u64, Arc)> { + Ok(( + utils::u64_from_bytes(&compressed_event[0..size_of::()]) + .expect("bytes have right length"), + services().rooms.short.get_eventid_from_short( + utils::u64_from_bytes(&compressed_event[size_of::()..]) + .expect("bytes have right length"), + )?, + )) + } + + /// Creates a new shortstatehash that often is just a diff to an already existing + /// shortstatehash and therefore very efficient. + /// + /// There are multiple layers of diffs. The bottom layer 0 always contains the full state. Layer + /// 1 contains diffs to states of layer 0, layer 2 diffs to layer 1 and so on. If layer n > 0 + /// grows too big, it will be combined with layer n-1 to create a new diff on layer n-1 that's + /// based on layer n-2. If that layer is also too big, it will recursively fix above layers too. + /// + /// * `shortstatehash` - Shortstatehash of this state + /// * `statediffnew` - Added to base. Each vec is shortstatekey+shorteventid + /// * `statediffremoved` - Removed from base. Each vec is shortstatekey+shorteventid + /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer + /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer + #[tracing::instrument(skip( + self, + statediffnew, + statediffremoved, + diff_to_sibling, + parent_states + ))] + pub fn save_state_from_diff( + &self, + shortstatehash: u64, + statediffnew: HashSet, + statediffremoved: HashSet, + diff_to_sibling: usize, + mut parent_states: Vec<( + u64, // sstatehash + HashSet, // full state + HashSet, // added + HashSet, // removed + )>, + ) -> Result<()> { + let diffsum = statediffnew.len() + statediffremoved.len(); + + if parent_states.len() > 3 { + // Number of layers + // To many layers, we have to go deeper + let parent = parent_states.pop().unwrap(); + + let mut parent_new = parent.2; + let mut parent_removed = parent.3; + + for removed in statediffremoved { + if !parent_new.remove(&removed) { + // It was not added in the parent and we removed it + parent_removed.insert(removed); + } + // Else it was added in the parent and we removed it again. We can forget this change + } + + for new in statediffnew { + if !parent_removed.remove(&new) { + // It was not touched in the parent and we added it + parent_new.insert(new); + } + // Else it was removed in the parent and we added it again. We can forget this change + } + + self.save_state_from_diff( + shortstatehash, + parent_new, + parent_removed, + diffsum, + parent_states, + )?; + + return Ok(()); + } + + if parent_states.is_empty() { + // There is no parent layer, create a new state + self.db.save_statediff( + shortstatehash, + StateDiff { + parent: None, + added: statediffnew, + removed: statediffremoved, + }, + )?; + + return Ok(()); + }; + + // Else we have two options. + // 1. We add the current diff on top of the parent layer. + // 2. 
We replace a layer above + + let parent = parent_states.pop().unwrap(); + let parent_diff = parent.2.len() + parent.3.len(); + + if diffsum * diffsum >= 2 * diff_to_sibling * parent_diff { + // Diff too big, we replace above layer(s) + let mut parent_new = parent.2; + let mut parent_removed = parent.3; + + for removed in statediffremoved { + if !parent_new.remove(&removed) { + // It was not added in the parent and we removed it + parent_removed.insert(removed); + } + // Else it was added in the parent and we removed it again. We can forget this change + } + + for new in statediffnew { + if !parent_removed.remove(&new) { + // It was not touched in the parent and we added it + parent_new.insert(new); + } + // Else it was removed in the parent and we added it again. We can forget this change + } + + self.save_state_from_diff( + shortstatehash, + parent_new, + parent_removed, + diffsum, + parent_states, + )?; + } else { + // Diff small enough, we add diff as layer on top of parent + self.db.save_statediff( + shortstatehash, + StateDiff { + parent: Some(parent.0), + added: statediffnew, + removed: statediffremoved, + }, + )?; + } + + Ok(()) + } + + /// Returns the new shortstatehash, and the state diff from the previous room state + pub fn save_state( + &self, + room_id: &RoomId, + new_state_ids_compressed: HashSet, + ) -> Result<( + u64, + HashSet, + HashSet, + )> { + let previous_shortstatehash = services().rooms.state.get_room_shortstatehash(room_id)?; + + let state_hash = utils::calculate_hash( + &new_state_ids_compressed + .iter() + .map(|bytes| &bytes[..]) + .collect::>(), + ); + + let (new_shortstatehash, already_existed) = services() + .rooms + .short + .get_or_create_shortstatehash(&state_hash)?; + + if Some(new_shortstatehash) == previous_shortstatehash { + return Ok((new_shortstatehash, HashSet::new(), HashSet::new())); + } + + let states_parents = previous_shortstatehash + .map_or_else(|| Ok(Vec::new()), |p| self.load_shortstatehash_info(p))?; + + let (statediffnew, statediffremoved) = if let Some(parent_stateinfo) = states_parents.last() + { + let statediffnew: HashSet<_> = new_state_ids_compressed + .difference(&parent_stateinfo.1) + .copied() + .collect(); + + let statediffremoved: HashSet<_> = parent_stateinfo + .1 + .difference(&new_state_ids_compressed) + .copied() + .collect(); + + (statediffnew, statediffremoved) + } else { + (new_state_ids_compressed, HashSet::new()) + }; + + if !already_existed { + self.save_state_from_diff( + new_shortstatehash, + statediffnew.clone(), + statediffremoved.clone(), + 2, // every state change is 2 event changes on average + states_parents, + )?; + }; + + Ok((new_shortstatehash, statediffnew, statediffremoved)) + } +} diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs new file mode 100644 index 00000000..9377af07 --- /dev/null +++ b/src/service/rooms/timeline/data.rs @@ -0,0 +1,87 @@ +use std::sync::Arc; + +use ruma::{CanonicalJsonObject, EventId, OwnedUserId, RoomId, UserId}; + +use crate::{PduEvent, Result}; + +pub trait Data: Send + Sync { + fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>>; + fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result; + + /// Returns the `count` of this pdu's id. + fn get_pdu_count(&self, event_id: &EventId) -> Result>; + + /// Returns the json of a pdu. + fn get_pdu_json(&self, event_id: &EventId) -> Result>; + + /// Returns the json of a pdu. + fn get_non_outlier_pdu_json(&self, event_id: &EventId) -> Result>; + + /// Returns the pdu's id. 
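+    /// (As constructed in the service-level `append_pdu`, a pdu id is the room's shortroomid
+    /// followed by the event's count, both encoded as big-endian `u64` bytes.)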
+ fn get_pdu_id(&self, event_id: &EventId) -> Result>>; + + /// Returns the pdu. + /// + /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. + fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result>; + + /// Returns the pdu. + /// + /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. + fn get_pdu(&self, event_id: &EventId) -> Result>>; + + /// Returns the pdu. + /// + /// This does __NOT__ check the outliers `Tree`. + fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result>; + + /// Returns the pdu as a `BTreeMap`. + fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result>; + + /// Returns the `count` of this pdu's id. + fn pdu_count(&self, pdu_id: &[u8]) -> Result; + + /// Adds a new pdu to the timeline + fn append_pdu( + &self, + pdu_id: &[u8], + pdu: &PduEvent, + json: &CanonicalJsonObject, + count: u64, + ) -> Result<()>; + + /// Removes a pdu and creates a new one with the same id. + fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()>; + + /// Returns an iterator over all events in a room that happened after the event with id `since` + /// in chronological order. + fn pdus_since<'a>( + &'a self, + user_id: &UserId, + room_id: &RoomId, + since: u64, + ) -> Result, PduEvent)>> + 'a>>; + + /// Returns an iterator over all events and their tokens in a room that happened before the + /// event with id `until` in reverse-chronological order. + fn pdus_until<'a>( + &'a self, + user_id: &UserId, + room_id: &RoomId, + until: u64, + ) -> Result, PduEvent)>> + 'a>>; + + fn pdus_after<'a>( + &'a self, + user_id: &UserId, + room_id: &RoomId, + from: u64, + ) -> Result, PduEvent)>> + 'a>>; + + fn increment_notification_counts( + &self, + room_id: &RoomId, + notifies: Vec, + highlights: Vec, + ) -> Result<()>; +} diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs new file mode 100644 index 00000000..619dca28 --- /dev/null +++ b/src/service/rooms/timeline/mod.rs @@ -0,0 +1,831 @@ +mod data; + +use std::collections::HashMap; + +use std::{ + collections::HashSet, + sync::{Arc, Mutex}, +}; + +pub use data::Data; +use regex::Regex; +use ruma::{ + api::client::error::ErrorKind, + canonical_json::to_canonical_value, + events::{ + push_rules::PushRulesEvent, + room::{ + create::RoomCreateEventContent, member::MembershipState, + power_levels::RoomPowerLevelsEventContent, + }, + GlobalAccountDataEventType, RoomEventType, StateEventType, + }, + push::{Action, Ruleset, Tweak}, + state_res, + state_res::RoomVersion, + uint, CanonicalJsonObject, CanonicalJsonValue, EventId, OwnedEventId, OwnedRoomId, + OwnedServerName, RoomAliasId, RoomId, UserId, +}; +use serde::Deserialize; +use serde_json::value::to_raw_value; +use tokio::sync::MutexGuard; +use tracing::{error, warn}; + +use crate::{ + service::pdu::{EventHash, PduBuilder}, + services, utils, Error, PduEvent, Result, +}; + +use super::state_compressor::CompressedStateEvent; + +pub struct Service { + pub db: &'static dyn Data, + + pub lasttimelinecount_cache: Mutex>, +} + +impl Service { + #[tracing::instrument(skip(self))] + pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> { + self.db.first_pdu_in_room(room_id) + } + + #[tracing::instrument(skip(self))] + pub fn last_timeline_count(&self, sender_user: &UserId, room_id: &RoomId) -> Result { + self.db.last_timeline_count(sender_user, room_id) + } + + // TODO Is this the same as the function above? 
+ /* + #[tracing::instrument(skip(self))] + pub fn latest_pdu_count(&self, room_id: &RoomId) -> Result { + let prefix = self + .get_shortroomid(room_id)? + .expect("room exists") + .to_be_bytes() + .to_vec(); + + let mut last_possible_key = prefix.clone(); + last_possible_key.extend_from_slice(&u64::MAX.to_be_bytes()); + + self.pduid_pdu + .iter_from(&last_possible_key, true) + .take_while(move |(k, _)| k.starts_with(&prefix)) + .next() + .map(|b| self.pdu_count(&b.0)) + .transpose() + .map(|op| op.unwrap_or_default()) + } + */ + + /// Returns the `count` of this pdu's id. + pub fn get_pdu_count(&self, event_id: &EventId) -> Result> { + self.db.get_pdu_count(event_id) + } + + /// Returns the json of a pdu. + pub fn get_pdu_json(&self, event_id: &EventId) -> Result> { + self.db.get_pdu_json(event_id) + } + + /// Returns the json of a pdu. + pub fn get_non_outlier_pdu_json( + &self, + event_id: &EventId, + ) -> Result> { + self.db.get_non_outlier_pdu_json(event_id) + } + + /// Returns the pdu's id. + pub fn get_pdu_id(&self, event_id: &EventId) -> Result>> { + self.db.get_pdu_id(event_id) + } + + /// Returns the pdu. + /// + /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. + pub fn get_non_outlier_pdu(&self, event_id: &EventId) -> Result> { + self.db.get_non_outlier_pdu(event_id) + } + + /// Returns the pdu. + /// + /// Checks the `eventid_outlierpdu` Tree if not found in the timeline. + pub fn get_pdu(&self, event_id: &EventId) -> Result>> { + self.db.get_pdu(event_id) + } + + /// Returns the pdu. + /// + /// This does __NOT__ check the outliers `Tree`. + pub fn get_pdu_from_id(&self, pdu_id: &[u8]) -> Result> { + self.db.get_pdu_from_id(pdu_id) + } + + /// Returns the pdu as a `BTreeMap`. + pub fn get_pdu_json_from_id(&self, pdu_id: &[u8]) -> Result> { + self.db.get_pdu_json_from_id(pdu_id) + } + + /// Returns the `count` of this pdu's id. + pub fn pdu_count(&self, pdu_id: &[u8]) -> Result { + self.db.pdu_count(pdu_id) + } + + /// Removes a pdu and creates a new one with the same id. + #[tracing::instrument(skip(self))] + fn replace_pdu(&self, pdu_id: &[u8], pdu: &PduEvent) -> Result<()> { + self.db.replace_pdu(pdu_id, pdu) + } + + /// Creates a new persisted data unit and adds it to a room. + /// + /// By this point the incoming event should be fully authenticated, no auth happens + /// in `append_pdu`. + /// + /// Returns pdu id + #[tracing::instrument(skip(self, pdu, pdu_json, leaves))] + pub fn append_pdu<'a>( + &self, + pdu: &PduEvent, + mut pdu_json: CanonicalJsonObject, + leaves: Vec, + state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + ) -> Result> { + let shortroomid = services() + .rooms + .short + .get_shortroomid(&pdu.room_id)? + .expect("room exists"); + + // Make unsigned fields correct. 
This is not properly documented in the spec, but state + // events need to have previous content in the unsigned field, so clients can easily + // interpret things like membership changes + if let Some(state_key) = &pdu.state_key { + if let CanonicalJsonValue::Object(unsigned) = pdu_json + .entry("unsigned".to_owned()) + .or_insert_with(|| CanonicalJsonValue::Object(Default::default())) + { + if let Some(shortstatehash) = services() + .rooms + .state_accessor + .pdu_shortstatehash(&pdu.event_id) + .unwrap() + { + if let Some(prev_state) = services() + .rooms + .state_accessor + .state_get(shortstatehash, &pdu.kind.to_string().into(), state_key) + .unwrap() + { + unsigned.insert( + "prev_content".to_owned(), + CanonicalJsonValue::Object( + utils::to_canonical_object(prev_state.content.clone()) + .expect("event is valid, we just created it"), + ), + ); + } + } + } else { + error!("Invalid unsigned type in pdu."); + } + } + + // We must keep track of all events that have been referenced. + services() + .rooms + .pdu_metadata + .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; + services() + .rooms + .state + .set_forward_extremities(&pdu.room_id, leaves, state_lock)?; + + let mutex_insert = Arc::clone( + services() + .globals + .roomid_mutex_insert + .write() + .unwrap() + .entry(pdu.room_id.clone()) + .or_default(), + ); + let insert_lock = mutex_insert.lock().unwrap(); + + let count1 = services().globals.next_count()?; + // Mark as read first so the sending client doesn't get a notification even if appending + // fails + services() + .rooms + .edus + .read_receipt + .private_read_set(&pdu.room_id, &pdu.sender, count1)?; + services() + .rooms + .user + .reset_notification_counts(&pdu.sender, &pdu.room_id)?; + + let count2 = services().globals.next_count()?; + let mut pdu_id = shortroomid.to_be_bytes().to_vec(); + pdu_id.extend_from_slice(&count2.to_be_bytes()); + + // Insert pdu + self.db.append_pdu(&pdu_id, pdu, &pdu_json, count2)?; + + drop(insert_lock); + + // See if the event matches any known pushers + let power_levels: RoomPowerLevelsEventContent = services() + .rooms + .state_accessor + .room_state_get(&pdu.room_id, &StateEventType::RoomPowerLevels, "")? + .map(|ev| { + serde_json::from_str(ev.content.get()) + .map_err(|_| Error::bad_database("invalid m.room.power_levels event")) + }) + .transpose()? + .unwrap_or_default(); + + let sync_pdu = pdu.to_sync_room_event(); + + let mut notifies = Vec::new(); + let mut highlights = Vec::new(); + + for user in services() + .rooms + .state_cache + .get_our_real_users(&pdu.room_id)? + .iter() + { + // Don't notify the user of their own events + if user == &pdu.sender { + continue; + } + + let rules_for_user = services() + .account_data + .get( + None, + user, + GlobalAccountDataEventType::PushRules.to_string().into(), + )? + .map(|event| { + serde_json::from_str::(event.get()) + .map_err(|_| Error::bad_database("Invalid push rules event in db.")) + }) + .transpose()? + .map(|ev: PushRulesEvent| ev.content.global) + .unwrap_or_else(|| Ruleset::server_default(user)); + + let mut highlight = false; + let mut notify = false; + + for action in services().pusher.get_actions( + user, + &rules_for_user, + &power_levels, + &sync_pdu, + &pdu.room_id, + )? 
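+            // each returned action is the result of evaluating this user's push ruleset
+            // against the new event; the actions toggle the notify/highlight flags below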
{ + match action { + Action::DontNotify => notify = false, + // TODO: Implement proper support for coalesce + Action::Notify | Action::Coalesce => notify = true, + Action::SetTweak(Tweak::Highlight(true)) => { + highlight = true; + } + _ => {} + }; + } + + if notify { + notifies.push(user.clone()); + } + + if highlight { + highlights.push(user.clone()); + } + + for push_key in services().pusher.get_pushkeys(user) { + services().sending.send_push_pdu(&pdu_id, user, push_key?)?; + } + } + + self.db + .increment_notification_counts(&pdu.room_id, notifies, highlights)?; + + match pdu.kind { + RoomEventType::RoomRedaction => { + if let Some(redact_id) = &pdu.redacts { + self.redact_pdu(redact_id, pdu)?; + } + } + RoomEventType::RoomMember => { + if let Some(state_key) = &pdu.state_key { + #[derive(Deserialize)] + struct ExtractMembership { + membership: MembershipState, + } + + // if the state_key fails + let target_user_id = UserId::parse(state_key.clone()) + .expect("This state_key was previously validated"); + + let content = serde_json::from_str::(pdu.content.get()) + .map_err(|_| Error::bad_database("Invalid content in pdu."))?; + + let invite_state = match content.membership { + MembershipState::Invite => { + let state = services().rooms.state.calculate_invite_state(pdu)?; + Some(state) + } + _ => None, + }; + + // Update our membership info, we do this here incase a user is invited + // and immediately leaves we need the DB to record the invite event for auth + services().rooms.state_cache.update_membership( + &pdu.room_id, + &target_user_id, + content.membership, + &pdu.sender, + invite_state, + true, + )?; + } + } + RoomEventType::RoomMessage => { + #[derive(Deserialize)] + struct ExtractBody { + body: Option, + } + + let content = serde_json::from_str::(pdu.content.get()) + .map_err(|_| Error::bad_database("Invalid content in pdu."))?; + + if let Some(body) = content.body { + services() + .rooms + .search + .index_pdu(shortroomid, &pdu_id, &body)?; + + let admin_room = services().rooms.alias.resolve_local_alias( + <&RoomAliasId>::try_from( + format!("#admins:{}", services().globals.server_name()).as_str(), + ) + .expect("#admins:server_name is a valid room alias"), + )?; + let server_user = format!("@conduit:{}", services().globals.server_name()); + + let to_conduit = body.starts_with(&format!("{}: ", server_user)); + + // This will evaluate to false if the emergency password is set up so that + // the administrator can execute commands as conduit + let from_conduit = pdu.sender == server_user + && services().globals.emergency_password().is_none(); + + if to_conduit && !from_conduit && admin_room.as_ref() == Some(&pdu.room_id) { + services().admin.process_message(body); + } + } + } + _ => {} + } + + for appservice in services().appservice.all()? { + if services() + .rooms + .state_cache + .appservice_in_room(&pdu.room_id, &appservice)? + { + services() + .sending + .send_pdu_appservice(appservice.0, pdu_id.clone())?; + continue; + } + + // If the RoomMember event has a non-empty state_key, it is targeted at someone. + // If it is our appservice user, we send this PDU to it. 
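+            // e.g. a registration with `sender_localpart: "_bot"` (illustrative value only)
+            // maps to `@_bot:<our server_name>`; a membership event whose state_key equals
+            // that user id is forwarded to this appservice.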
+ if pdu.kind == RoomEventType::RoomMember { + if let Some(state_key_uid) = &pdu + .state_key + .as_ref() + .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) + { + if let Some(appservice_uid) = appservice + .1 + .get("sender_localpart") + .and_then(|string| string.as_str()) + .and_then(|string| { + UserId::parse_with_server_name(string, services().globals.server_name()) + .ok() + }) + { + if state_key_uid == &appservice_uid { + services() + .sending + .send_pdu_appservice(appservice.0, pdu_id.clone())?; + continue; + } + } + } + } + + if let Some(namespaces) = appservice.1.get("namespaces") { + let users = namespaces + .get("users") + .and_then(|users| users.as_sequence()) + .map_or_else(Vec::new, |users| { + users + .iter() + .filter_map(|users| Regex::new(users.get("regex")?.as_str()?).ok()) + .collect::>() + }); + let aliases = namespaces + .get("aliases") + .and_then(|aliases| aliases.as_sequence()) + .map_or_else(Vec::new, |aliases| { + aliases + .iter() + .filter_map(|aliases| Regex::new(aliases.get("regex")?.as_str()?).ok()) + .collect::>() + }); + let rooms = namespaces + .get("rooms") + .and_then(|rooms| rooms.as_sequence()); + + let matching_users = |users: &Regex| { + users.is_match(pdu.sender.as_str()) + || pdu.kind == RoomEventType::RoomMember + && pdu + .state_key + .as_ref() + .map_or(false, |state_key| users.is_match(state_key)) + }; + let matching_aliases = |aliases: &Regex| { + services() + .rooms + .alias + .local_aliases_for_room(&pdu.room_id) + .filter_map(|r| r.ok()) + .any(|room_alias| aliases.is_match(room_alias.as_str())) + }; + + if aliases.iter().any(matching_aliases) + || rooms.map_or(false, |rooms| rooms.contains(&pdu.room_id.as_str().into())) + || users.iter().any(matching_users) + { + services() + .sending + .send_pdu_appservice(appservice.0, pdu_id.clone())?; + } + } + } + + Ok(pdu_id) + } + + pub fn create_hash_and_sign_event( + &self, + pdu_builder: PduBuilder, + sender: &UserId, + room_id: &RoomId, + _mutex_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + ) -> Result<(PduEvent, CanonicalJsonObject)> { + let PduBuilder { + event_type, + content, + unsigned, + state_key, + redacts, + } = pdu_builder; + + let prev_events: Vec<_> = services() + .rooms + .state + .get_forward_extremities(room_id)? 
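+            // the room's current forward extremities become this event's prev_events,
+            // capped at 20 entries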
+ .into_iter() + .take(20) + .collect(); + + let create_event = services().rooms.state_accessor.room_state_get( + room_id, + &StateEventType::RoomCreate, + "", + )?; + + let create_event_content: Option = create_event + .as_ref() + .map(|create_event| { + serde_json::from_str(create_event.content.get()).map_err(|e| { + warn!("Invalid create event: {}", e); + Error::bad_database("Invalid create event in db.") + }) + }) + .transpose()?; + + // If there was no create event yet, assume we are creating a room with the default + // version right now + let room_version_id = create_event_content + .map_or(services().globals.default_room_version(), |create_event| { + create_event.room_version + }); + let room_version = RoomVersion::new(&room_version_id).expect("room version is supported"); + + let auth_events = services().rooms.state.get_auth_events( + room_id, + &event_type, + sender, + state_key.as_deref(), + &content, + )?; + + // Our depth is the maximum depth of prev_events + 1 + let depth = prev_events + .iter() + .filter_map(|event_id| Some(services().rooms.timeline.get_pdu(event_id).ok()??.depth)) + .max() + .unwrap_or_else(|| uint!(0)) + + uint!(1); + + let mut unsigned = unsigned.unwrap_or_default(); + + if let Some(state_key) = &state_key { + if let Some(prev_pdu) = services().rooms.state_accessor.room_state_get( + room_id, + &event_type.to_string().into(), + state_key, + )? { + unsigned.insert( + "prev_content".to_owned(), + serde_json::from_str(prev_pdu.content.get()).expect("string is valid json"), + ); + unsigned.insert( + "prev_sender".to_owned(), + serde_json::to_value(&prev_pdu.sender).expect("UserId::to_value always works"), + ); + } + } + + let mut pdu = PduEvent { + event_id: ruma::event_id!("$thiswillbefilledinlater").into(), + room_id: room_id.to_owned(), + sender: sender.to_owned(), + origin_server_ts: utils::millis_since_unix_epoch() + .try_into() + .expect("time is valid"), + kind: event_type, + content, + state_key, + prev_events, + depth, + auth_events: auth_events + .values() + .map(|pdu| pdu.event_id.clone()) + .collect(), + redacts, + unsigned: if unsigned.is_empty() { + None + } else { + Some(to_raw_value(&unsigned).expect("to_raw_value always works")) + }, + hashes: EventHash { + sha256: "aaa".to_owned(), + }, + signatures: None, + }; + + let auth_check = state_res::auth_check( + &room_version, + &pdu, + None::, // TODO: third_party_invite + |k, s| auth_events.get(&(k.clone(), s.to_owned())), + ) + .map_err(|e| { + error!("{:?}", e); + Error::bad_database("Auth check failed.") + })?; + + if !auth_check { + return Err(Error::BadRequest( + ErrorKind::Forbidden, + "Event is not authorized.", + )); + } + + // Hash and sign + let mut pdu_json = + utils::to_canonical_object(&pdu).expect("event is valid, we just created it"); + + pdu_json.remove("event_id"); + + // Add origin because synapse likes that (and it's required in the spec) + pdu_json.insert( + "origin".to_owned(), + to_canonical_value(services().globals.server_name()) + .expect("server name is a valid CanonicalJsonValue"), + ); + + match ruma::signatures::hash_and_sign_event( + services().globals.server_name().as_str(), + services().globals.keypair(), + &mut pdu_json, + &room_version_id, + ) { + Ok(_) => {} + Err(e) => { + return match e { + ruma::signatures::Error::PduSize => Err(Error::BadRequest( + ErrorKind::TooLarge, + "Message is too long", + )), + _ => Err(Error::BadRequest( + ErrorKind::Unknown, + "Signing event failed", + )), + } + } + } + + // Generate event id + pdu.event_id = 
EventId::parse_arc(format!( + "${}", + ruma::signatures::reference_hash(&pdu_json, &room_version_id) + .expect("ruma can calculate reference hashes") + )) + .expect("ruma's reference hashes are valid event ids"); + + pdu_json.insert( + "event_id".to_owned(), + CanonicalJsonValue::String(pdu.event_id.as_str().to_owned()), + ); + + // Generate short event id + let _shorteventid = services() + .rooms + .short + .get_or_create_shorteventid(&pdu.event_id)?; + + Ok((pdu, pdu_json)) + } + + /// Creates a new persisted data unit and adds it to a room. This function takes a + /// roomid_mutex_state, meaning that only this function is able to mutate the room state. + #[tracing::instrument(skip(self, state_lock))] + pub fn build_and_append_pdu( + &self, + pdu_builder: PduBuilder, + sender: &UserId, + room_id: &RoomId, + state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + ) -> Result> { + let (pdu, pdu_json) = + self.create_hash_and_sign_event(pdu_builder, sender, room_id, state_lock)?; + + // We append to state before appending the pdu, so we don't have a moment in time with the + // pdu without it's state. This is okay because append_pdu can't fail. + let statehashid = services().rooms.state.append_to_state(&pdu)?; + + let pdu_id = self.append_pdu( + &pdu, + pdu_json, + // Since this PDU references all pdu_leaves we can update the leaves + // of the room + vec![(*pdu.event_id).to_owned()], + state_lock, + )?; + + // We set the room state after inserting the pdu, so that we never have a moment in time + // where events in the current room state do not exist + services() + .rooms + .state + .set_room_state(room_id, statehashid, state_lock)?; + + let mut servers: HashSet = services() + .rooms + .state_cache + .room_servers(room_id) + .filter_map(|r| r.ok()) + .collect(); + + // In case we are kicking or banning a user, we need to inform their server of the change + if pdu.kind == RoomEventType::RoomMember { + if let Some(state_key_uid) = &pdu + .state_key + .as_ref() + .and_then(|state_key| UserId::parse(state_key.as_str()).ok()) + { + servers.insert(state_key_uid.server_name().to_owned()); + } + } + + // Remove our server from the server list since it will be added to it by room_servers() and/or the if statement above + servers.remove(services().globals.server_name()); + + services().sending.send_pdu(servers.into_iter(), &pdu_id)?; + + Ok(pdu.event_id) + } + + /// Append the incoming event setting the state snapshot to the state from the + /// server that sent the event. + #[tracing::instrument(skip_all)] + pub fn append_incoming_pdu<'a>( + &self, + pdu: &PduEvent, + pdu_json: CanonicalJsonObject, + new_room_leaves: Vec, + state_ids_compressed: HashSet, + soft_fail: bool, + state_lock: &MutexGuard<'_, ()>, // Take mutex guard to make sure users get the room state mutex + ) -> Result>> { + // We append to state before appending the pdu, so we don't have a moment in time with the + // pdu without it's state. This is okay because append_pdu can't fail. 
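+        // Unlike the local path (build_and_append_pdu -> append_to_state), the snapshot here
+        // comes from state_ids_compressed, i.e. the state as seen by the server that sent
+        // the event.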
+ services().rooms.state.set_event_state( + &pdu.event_id, + &pdu.room_id, + state_ids_compressed, + )?; + + if soft_fail { + services() + .rooms + .pdu_metadata + .mark_as_referenced(&pdu.room_id, &pdu.prev_events)?; + services().rooms.state.set_forward_extremities( + &pdu.room_id, + new_room_leaves, + state_lock, + )?; + return Ok(None); + } + + let pdu_id = + services() + .rooms + .timeline + .append_pdu(pdu, pdu_json, new_room_leaves, state_lock)?; + + Ok(Some(pdu_id)) + } + + /// Returns an iterator over all PDUs in a room. + pub fn all_pdus<'a>( + &'a self, + user_id: &UserId, + room_id: &RoomId, + ) -> Result, PduEvent)>> + 'a> { + self.pdus_since(user_id, room_id, 0) + } + + /// Returns an iterator over all events in a room that happened after the event with id `since` + /// in chronological order. + pub fn pdus_since<'a>( + &'a self, + user_id: &UserId, + room_id: &RoomId, + since: u64, + ) -> Result, PduEvent)>> + 'a> { + self.db.pdus_since(user_id, room_id, since) + } + + /// Returns an iterator over all events and their tokens in a room that happened before the + /// event with id `until` in reverse-chronological order. + #[tracing::instrument(skip(self))] + pub fn pdus_until<'a>( + &'a self, + user_id: &UserId, + room_id: &RoomId, + until: u64, + ) -> Result, PduEvent)>> + 'a> { + self.db.pdus_until(user_id, room_id, until) + } + + /// Returns an iterator over all events and their token in a room that happened after the event + /// with id `from` in chronological order. + #[tracing::instrument(skip(self))] + pub fn pdus_after<'a>( + &'a self, + user_id: &UserId, + room_id: &RoomId, + from: u64, + ) -> Result, PduEvent)>> + 'a> { + self.db.pdus_after(user_id, room_id, from) + } + + /// Replace a PDU with the redacted form. + #[tracing::instrument(skip(self, reason))] + pub fn redact_pdu(&self, event_id: &EventId, reason: &PduEvent) -> Result<()> { + if let Some(pdu_id) = self.get_pdu_id(event_id)? { + let mut pdu = self + .get_pdu_from_id(&pdu_id)? 
+ .ok_or_else(|| Error::bad_database("PDU ID points to invalid PDU."))?; + pdu.redact(reason)?; + self.replace_pdu(&pdu_id, &pdu)?; + } + // If event does not exist, just noop + Ok(()) + } +} diff --git a/src/service/rooms/user/data.rs b/src/service/rooms/user/data.rs new file mode 100644 index 00000000..43c4c92a --- /dev/null +++ b/src/service/rooms/user/data.rs @@ -0,0 +1,24 @@ +use crate::Result; +use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId}; + +pub trait Data: Send + Sync { + fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()>; + + fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result; + + fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result; + + fn associate_token_shortstatehash( + &self, + room_id: &RoomId, + token: u64, + shortstatehash: u64, + ) -> Result<()>; + + fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result>; + + fn get_shared_rooms<'a>( + &'a self, + users: Vec, + ) -> Result> + 'a>>; +} diff --git a/src/service/rooms/user/mod.rs b/src/service/rooms/user/mod.rs new file mode 100644 index 00000000..a765cfd1 --- /dev/null +++ b/src/service/rooms/user/mod.rs @@ -0,0 +1,45 @@ +mod data; + +pub use data::Data; +use ruma::{OwnedRoomId, OwnedUserId, RoomId, UserId}; + +use crate::Result; + +pub struct Service { + pub db: &'static dyn Data, +} + +impl Service { + pub fn reset_notification_counts(&self, user_id: &UserId, room_id: &RoomId) -> Result<()> { + self.db.reset_notification_counts(user_id, room_id) + } + + pub fn notification_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { + self.db.notification_count(user_id, room_id) + } + + pub fn highlight_count(&self, user_id: &UserId, room_id: &RoomId) -> Result { + self.db.highlight_count(user_id, room_id) + } + + pub fn associate_token_shortstatehash( + &self, + room_id: &RoomId, + token: u64, + shortstatehash: u64, + ) -> Result<()> { + self.db + .associate_token_shortstatehash(room_id, token, shortstatehash) + } + + pub fn get_token_shortstatehash(&self, room_id: &RoomId, token: u64) -> Result> { + self.db.get_token_shortstatehash(room_id, token) + } + + pub fn get_shared_rooms<'a>( + &'a self, + users: Vec, + ) -> Result> + 'a> { + self.db.get_shared_rooms(users) + } +} diff --git a/src/service/sending/data.rs b/src/service/sending/data.rs new file mode 100644 index 00000000..2e574e23 --- /dev/null +++ b/src/service/sending/data.rs @@ -0,0 +1,29 @@ +use ruma::ServerName; + +use crate::Result; + +use super::{OutgoingKind, SendingEventType}; + +pub trait Data: Send + Sync { + fn active_requests<'a>( + &'a self, + ) -> Box, OutgoingKind, SendingEventType)>> + 'a>; + fn active_requests_for<'a>( + &'a self, + outgoing_kind: &OutgoingKind, + ) -> Box, SendingEventType)>> + 'a>; + fn delete_active_request(&self, key: Vec) -> Result<()>; + fn delete_all_active_requests_for(&self, outgoing_kind: &OutgoingKind) -> Result<()>; + fn delete_all_requests_for(&self, outgoing_kind: &OutgoingKind) -> Result<()>; + fn queue_requests( + &self, + requests: &[(&OutgoingKind, SendingEventType)], + ) -> Result>>; + fn queued_requests<'a>( + &'a self, + outgoing_kind: &OutgoingKind, + ) -> Box)>> + 'a>; + fn mark_as_active(&self, events: &[(SendingEventType, Vec)]) -> Result<()>; + fn set_latest_educount(&self, server_name: &ServerName, educount: u64) -> Result<()>; + fn get_latest_educount(&self, server_name: &ServerName) -> Result; +} diff --git a/src/service/sending/mod.rs b/src/service/sending/mod.rs new file mode 100644 index 
00000000..afa12fc7
--- /dev/null
+++ b/src/service/sending/mod.rs
@@ -0,0 +1,708 @@
+mod data;
+
+pub use data::Data;
+
+use std::{
+    collections::{BTreeMap, HashMap, HashSet},
+    fmt::Debug,
+    sync::Arc,
+    time::{Duration, Instant},
+};
+
+use crate::{
+    api::{appservice_server, server_server},
+    services,
+    utils::calculate_hash,
+    Config, Error, PduEvent, Result,
+};
+use federation::transactions::send_transaction_message;
+use futures_util::{stream::FuturesUnordered, StreamExt};
+
+use ruma::{
+    api::{
+        appservice,
+        federation::{
+            self,
+            transactions::edu::{
+                DeviceListUpdateContent, Edu, ReceiptContent, ReceiptData, ReceiptMap,
+            },
+        },
+        OutgoingRequest,
+    },
+    device_id,
+    events::{
+        push_rules::PushRulesEvent, receipt::ReceiptType, AnySyncEphemeralRoomEvent,
+        GlobalAccountDataEventType,
+    },
+    push, uint, MilliSecondsSinceUnixEpoch, OwnedServerName, OwnedUserId, ServerName, UInt, UserId,
+};
+use tokio::{
+    select,
+    sync::{mpsc, Mutex, Semaphore},
+};
+use tracing::{error, warn};
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+pub enum OutgoingKind {
+    Appservice(String),
+    Push(OwnedUserId, String), // user and pushkey
+    Normal(OwnedServerName),
+}
+
+impl OutgoingKind {
+    #[tracing::instrument(skip(self))]
+    pub fn get_prefix(&self) -> Vec<u8> {
+        let mut prefix = match self {
+            OutgoingKind::Appservice(server) => {
+                let mut p = b"+".to_vec();
+                p.extend_from_slice(server.as_bytes());
+                p
+            }
+            OutgoingKind::Push(user, pushkey) => {
+                let mut p = b"$".to_vec();
+                p.extend_from_slice(user.as_bytes());
+                p.push(0xff);
+                p.extend_from_slice(pushkey.as_bytes());
+                p
+            }
+            OutgoingKind::Normal(server) => {
+                let mut p = Vec::new();
+                p.extend_from_slice(server.as_bytes());
+                p
+            }
+        };
+        prefix.push(0xff);
+
+        prefix
+    }
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash)]
+pub enum SendingEventType {
+    Pdu(Vec<u8>), // pduid
+    Edu(Vec<u8>), // serialized edu json
+}
+
+pub struct Service {
+    db: &'static dyn Data,
+
+    /// Limits the number of outgoing requests that run at the same time
+    /// (`max_concurrent_requests` in the config).
+ pub(super) maximum_requests: Arc, + pub sender: mpsc::UnboundedSender<(OutgoingKind, SendingEventType, Vec)>, + receiver: Mutex)>>, +} + +enum TransactionStatus { + Running, + Failed(u32, Instant), // number of times failed, time of last failure + Retrying(u32), // number of times failed +} + +impl Service { + pub fn build(db: &'static dyn Data, config: &Config) -> Arc { + let (sender, receiver) = mpsc::unbounded_channel(); + Arc::new(Self { + db, + sender, + receiver: Mutex::new(receiver), + maximum_requests: Arc::new(Semaphore::new(config.max_concurrent_requests as usize)), + }) + } + + pub fn start_handler(self: &Arc) { + let self2 = Arc::clone(self); + tokio::spawn(async move { + self2.handler().await.unwrap(); + }); + } + + async fn handler(&self) -> Result<()> { + let mut receiver = self.receiver.lock().await; + + let mut futures = FuturesUnordered::new(); + + let mut current_transaction_status = HashMap::::new(); + + // Retry requests we could not finish yet + let mut initial_transactions = HashMap::>::new(); + + for (key, outgoing_kind, event) in self.db.active_requests().filter_map(|r| r.ok()) { + let entry = initial_transactions + .entry(outgoing_kind.clone()) + .or_insert_with(Vec::new); + + if entry.len() > 30 { + warn!( + "Dropping some current events: {:?} {:?} {:?}", + key, outgoing_kind, event + ); + self.db.delete_active_request(key)?; + continue; + } + + entry.push(event); + } + + for (outgoing_kind, events) in initial_transactions { + current_transaction_status.insert(outgoing_kind.clone(), TransactionStatus::Running); + futures.push(Self::handle_events(outgoing_kind.clone(), events)); + } + + loop { + select! { + Some(response) = futures.next() => { + match response { + Ok(outgoing_kind) => { + self.db.delete_all_active_requests_for(&outgoing_kind)?; + + // Find events that have been added since starting the last request + let new_events = self.db.queued_requests(&outgoing_kind).filter_map(|r| r.ok()).take(30).collect::>(); + + if !new_events.is_empty() { + // Insert pdus we found + self.db.mark_as_active(&new_events)?; + + futures.push( + Self::handle_events( + outgoing_kind.clone(), + new_events.into_iter().map(|(event, _)| event).collect(), + ) + ); + } else { + current_transaction_status.remove(&outgoing_kind); + } + } + Err((outgoing_kind, _)) => { + current_transaction_status.entry(outgoing_kind).and_modify(|e| *e = match e { + TransactionStatus::Running => TransactionStatus::Failed(1, Instant::now()), + TransactionStatus::Retrying(n) => TransactionStatus::Failed(*n+1, Instant::now()), + TransactionStatus::Failed(_, _) => { + error!("Request that was not even running failed?!"); + return + }, + }); + } + }; + }, + Some((outgoing_kind, event, key)) = receiver.recv() => { + if let Ok(Some(events)) = self.select_events( + &outgoing_kind, + vec![(event, key)], + &mut current_transaction_status, + ) { + futures.push(Self::handle_events(outgoing_kind, events)); + } + } + } + } + } + + #[tracing::instrument(skip(self, outgoing_kind, new_events, current_transaction_status))] + fn select_events( + &self, + outgoing_kind: &OutgoingKind, + new_events: Vec<(SendingEventType, Vec)>, // Events we want to send: event and full key + current_transaction_status: &mut HashMap, + ) -> Result>> { + let mut retry = false; + let mut allow = true; + + let entry = current_transaction_status.entry(outgoing_kind.clone()); + + entry + .and_modify(|e| match e { + TransactionStatus::Running | TransactionStatus::Retrying(_) => { + allow = false; // already running + } + 
TransactionStatus::Failed(tries, time) => { + // Fail if a request has failed recently (exponential backoff) + let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); + if min_elapsed_duration > Duration::from_secs(60 * 60 * 24) { + min_elapsed_duration = Duration::from_secs(60 * 60 * 24); + } + + if time.elapsed() < min_elapsed_duration { + allow = false; + } else { + retry = true; + *e = TransactionStatus::Retrying(*tries); + } + } + }) + .or_insert(TransactionStatus::Running); + + if !allow { + return Ok(None); + } + + let mut events = Vec::new(); + + if retry { + // We retry the previous transaction + for (_, e) in self + .db + .active_requests_for(outgoing_kind) + .filter_map(|r| r.ok()) + { + events.push(e); + } + } else { + self.db.mark_as_active(&new_events)?; + for (e, _) in new_events { + events.push(e); + } + + if let OutgoingKind::Normal(server_name) = outgoing_kind { + if let Ok((select_edus, last_count)) = self.select_edus(server_name) { + events.extend(select_edus.into_iter().map(SendingEventType::Edu)); + + self.db.set_latest_educount(server_name, last_count)?; + } + } + } + + Ok(Some(events)) + } + + #[tracing::instrument(skip(self, server_name))] + pub fn select_edus(&self, server_name: &ServerName) -> Result<(Vec>, u64)> { + // u64: count of last edu + let since = self.db.get_latest_educount(server_name)?; + let mut events = Vec::new(); + let mut max_edu_count = since; + let mut device_list_changes = HashSet::new(); + + 'outer: for room_id in services().rooms.state_cache.server_rooms(server_name) { + let room_id = room_id?; + // Look for device list updates in this room + device_list_changes.extend( + services() + .users + .keys_changed(room_id.as_ref(), since, None) + .filter_map(|r| r.ok()) + .filter(|user_id| user_id.server_name() == services().globals.server_name()), + ); + + // Look for read receipts in this room + for r in services() + .rooms + .edus + .read_receipt + .readreceipts_since(&room_id, since) + { + let (user_id, count, read_receipt) = r?; + + if count > max_edu_count { + max_edu_count = count; + } + + if user_id.server_name() != services().globals.server_name() { + continue; + } + + let event: AnySyncEphemeralRoomEvent = + serde_json::from_str(read_receipt.json().get()) + .map_err(|_| Error::bad_database("Invalid edu event in read_receipts."))?; + let federation_event = match event { + AnySyncEphemeralRoomEvent::Receipt(r) => { + let mut read = BTreeMap::new(); + + let (event_id, mut receipt) = r + .content + .0 + .into_iter() + .next() + .expect("we only use one event per read receipt"); + let receipt = receipt + .remove(&ReceiptType::Read) + .expect("our read receipts always set this") + .remove(&user_id) + .expect("our read receipts always have the user here"); + + read.insert( + user_id, + ReceiptData { + data: receipt.clone(), + event_ids: vec![event_id.clone()], + }, + ); + + let receipt_map = ReceiptMap { read }; + + let mut receipts = BTreeMap::new(); + receipts.insert(room_id.clone(), receipt_map); + + Edu::Receipt(ReceiptContent { receipts }) + } + _ => { + Error::bad_database("Invalid event type in read_receipts"); + continue; + } + }; + + events.push(serde_json::to_vec(&federation_event).expect("json can be serialized")); + + if events.len() >= 20 { + break 'outer; + } + } + } + + for user_id in device_list_changes { + // Empty prev id forces synapse to resync: https://github.com/matrix-org/synapse/blob/98aec1cc9da2bd6b8e34ffb282c85abf9b8b42ca/synapse/handlers/device.py#L767 + // Because synapse resyncs, we can just 
insert dummy data + let edu = Edu::DeviceListUpdate(DeviceListUpdateContent { + user_id, + device_id: device_id!("dummy").to_owned(), + device_display_name: Some("Dummy".to_owned()), + stream_id: uint!(1), + prev_id: Vec::new(), + deleted: None, + keys: None, + }); + + events.push(serde_json::to_vec(&edu).expect("json can be serialized")); + } + + Ok((events, max_edu_count)) + } + + #[tracing::instrument(skip(self, pdu_id, user, pushkey))] + pub fn send_push_pdu(&self, pdu_id: &[u8], user: &UserId, pushkey: String) -> Result<()> { + let outgoing_kind = OutgoingKind::Push(user.to_owned(), pushkey); + let event = SendingEventType::Pdu(pdu_id.to_owned()); + let keys = self.db.queue_requests(&[(&outgoing_kind, event.clone())])?; + self.sender + .send((outgoing_kind, event, keys.into_iter().next().unwrap())) + .unwrap(); + + Ok(()) + } + + #[tracing::instrument(skip(self, servers, pdu_id))] + pub fn send_pdu>( + &self, + servers: I, + pdu_id: &[u8], + ) -> Result<()> { + let requests = servers + .into_iter() + .map(|server| { + ( + OutgoingKind::Normal(server), + SendingEventType::Pdu(pdu_id.to_owned()), + ) + }) + .collect::>(); + let keys = self.db.queue_requests( + &requests + .iter() + .map(|(o, e)| (o, e.clone())) + .collect::>(), + )?; + for ((outgoing_kind, event), key) in requests.into_iter().zip(keys) { + self.sender + .send((outgoing_kind.to_owned(), event, key)) + .unwrap(); + } + + Ok(()) + } + + #[tracing::instrument(skip(self, server, serialized))] + pub fn send_reliable_edu( + &self, + server: &ServerName, + serialized: Vec, + id: u64, + ) -> Result<()> { + let outgoing_kind = OutgoingKind::Normal(server.to_owned()); + let event = SendingEventType::Edu(serialized); + let keys = self.db.queue_requests(&[(&outgoing_kind, event.clone())])?; + self.sender + .send((outgoing_kind, event, keys.into_iter().next().unwrap())) + .unwrap(); + + Ok(()) + } + + #[tracing::instrument(skip(self))] + pub fn send_pdu_appservice(&self, appservice_id: String, pdu_id: Vec) -> Result<()> { + let outgoing_kind = OutgoingKind::Appservice(appservice_id); + let event = SendingEventType::Pdu(pdu_id); + let keys = self.db.queue_requests(&[(&outgoing_kind, event.clone())])?; + self.sender + .send((outgoing_kind, event, keys.into_iter().next().unwrap())) + .unwrap(); + + Ok(()) + } + + /// Cleanup event data + /// Used for instance after we remove an appservice registration + /// + #[tracing::instrument(skip(self))] + pub fn cleanup_events(&self, appservice_id: String) -> Result<()> { + self.db + .delete_all_requests_for(&OutgoingKind::Appservice(appservice_id))?; + + Ok(()) + } + + #[tracing::instrument(skip(events, kind))] + async fn handle_events( + kind: OutgoingKind, + events: Vec, + ) -> Result { + match &kind { + OutgoingKind::Appservice(id) => { + let mut pdu_jsons = Vec::new(); + + for event in &events { + match event { + SendingEventType::Pdu(pdu_id) => { + pdu_jsons.push(services().rooms.timeline + .get_pdu_from_id(pdu_id) + .map_err(|e| (kind.clone(), e))? + .ok_or_else(|| { + ( + kind.clone(), + Error::bad_database( + "[Appservice] Event in servernameevent_data not found in db.", + ), + ) + })? + .to_room_event()) + } + SendingEventType::Edu(_) => { + // Appservices don't need EDUs (?) + } + } + } + + let permit = services().sending.maximum_requests.acquire().await; + + let response = appservice_server::send_request( + services() + .appservice + .get_registration(id) + .map_err(|e| (kind.clone(), e))? 
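// Sketch (std only) of the retry rule applied in select_events above: a destination that
// has failed `tries` times is not retried until 30s * tries² have elapsed, capped at one day.
use std::time::Duration;

fn min_retry_backoff(tries: u32) -> Duration {
    let backoff = Duration::from_secs(30) * tries * tries;
    backoff.min(Duration::from_secs(60 * 60 * 24))
}

// min_retry_backoff(1) == 30s, min_retry_backoff(10) == 50min; from roughly the 54th
// consecutive failure onwards the delay saturates at 24h.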
+ .ok_or_else(|| { + ( + kind.clone(), + Error::bad_database( + "[Appservice] Could not load registration from db.", + ), + ) + })?, + appservice::event::push_events::v1::Request { + events: &pdu_jsons, + txn_id: (&*base64::encode_config( + calculate_hash( + &events + .iter() + .map(|e| match e { + SendingEventType::Edu(b) | SendingEventType::Pdu(b) => &**b, + }) + .collect::>(), + ), + base64::URL_SAFE_NO_PAD, + )) + .into(), + }, + ) + .await + .map(|_response| kind.clone()) + .map_err(|e| (kind, e)); + + drop(permit); + + response + } + OutgoingKind::Push(userid, pushkey) => { + let mut pdus = Vec::new(); + + for event in &events { + match event { + SendingEventType::Pdu(pdu_id) => { + pdus.push( + services().rooms + .timeline + .get_pdu_from_id(pdu_id) + .map_err(|e| (kind.clone(), e))? + .ok_or_else(|| { + ( + kind.clone(), + Error::bad_database( + "[Push] Event in servernamevent_datas not found in db.", + ), + ) + })?, + ); + } + SendingEventType::Edu(_) => { + // Push gateways don't need EDUs (?) + } + } + } + + for pdu in pdus { + // Redacted events are not notification targets (we don't send push for them) + if let Some(unsigned) = &pdu.unsigned { + if let Ok(unsigned) = + serde_json::from_str::(unsigned.get()) + { + if unsigned.get("redacted_because").is_some() { + continue; + } + } + } + + let pusher = match services() + .pusher + .get_pusher(userid, pushkey) + .map_err(|e| (OutgoingKind::Push(userid.clone(), pushkey.clone()), e))? + { + Some(pusher) => pusher, + None => continue, + }; + + let rules_for_user = services() + .account_data + .get( + None, + userid, + GlobalAccountDataEventType::PushRules.to_string().into(), + ) + .unwrap_or_default() + .and_then(|event| serde_json::from_str::(event.get()).ok()) + .map(|ev: PushRulesEvent| ev.content.global) + .unwrap_or_else(|| push::Ruleset::server_default(userid)); + + let unread: UInt = services() + .rooms + .user + .notification_count(userid, &pdu.room_id) + .map_err(|e| (kind.clone(), e))? + .try_into() + .expect("notification count can't go that high"); + + let permit = services().sending.maximum_requests.acquire().await; + + let _response = services() + .pusher + .send_push_notice(userid, unread, &pusher, rules_for_user, &pdu) + .await + .map(|_response| kind.clone()) + .map_err(|e| (kind.clone(), e)); + + drop(permit); + } + Ok(OutgoingKind::Push(userid.clone(), pushkey.clone())) + } + OutgoingKind::Normal(server) => { + let mut edu_jsons = Vec::new(); + let mut pdu_jsons = Vec::new(); + + for event in &events { + match event { + SendingEventType::Pdu(pdu_id) => { + // TODO: check room version and remove event_id if needed + let raw = PduEvent::convert_to_outgoing_federation_event( + services().rooms + .timeline + .get_pdu_json_from_id(pdu_id) + .map_err(|e| (OutgoingKind::Normal(server.clone()), e))? 
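// Sketch of how the transaction id above is derived (assuming the `ring` and `base64` 0.13
// crates this diff already uses): SHA-256 over the queued event keys joined by 0xff, encoded
// as unpadded URL-safe base64. Because the id is a hash of the batch, a retried transaction
// keeps the same id and the receiving side can deduplicate it.
fn txn_id_for(keys: &[&[u8]]) -> String {
    let joined = keys.join(&0xff_u8);
    let digest = ring::digest::digest(&ring::digest::SHA256, &joined);
    base64::encode_config(digest.as_ref(), base64::URL_SAFE_NO_PAD)
}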
+ .ok_or_else(|| { + error!("event not found: {server} {pdu_id:?}"); + ( + OutgoingKind::Normal(server.clone()), + Error::bad_database( + "[Normal] Event in servernamevent_datas not found in db.", + ), + ) + })?, + ); + pdu_jsons.push(raw); + } + SendingEventType::Edu(edu) => { + if let Ok(raw) = serde_json::from_slice(edu) { + edu_jsons.push(raw); + } + } + } + } + + let permit = services().sending.maximum_requests.acquire().await; + + let response = server_server::send_request( + server, + send_transaction_message::v1::Request { + origin: services().globals.server_name(), + pdus: &pdu_jsons, + edus: &edu_jsons, + origin_server_ts: MilliSecondsSinceUnixEpoch::now(), + transaction_id: (&*base64::encode_config( + calculate_hash( + &events + .iter() + .map(|e| match e { + SendingEventType::Edu(b) | SendingEventType::Pdu(b) => &**b, + }) + .collect::>(), + ), + base64::URL_SAFE_NO_PAD, + )) + .into(), + }, + ) + .await + .map(|response| { + for pdu in response.pdus { + if pdu.1.is_err() { + warn!("Failed to send to {}: {:?}", server, pdu); + } + } + kind.clone() + }) + .map_err(|e| (kind, e)); + + drop(permit); + + response + } + } + } + + #[tracing::instrument(skip(self, destination, request))] + pub async fn send_federation_request( + &self, + destination: &ServerName, + request: T, + ) -> Result + where + T: Debug, + { + let permit = self.maximum_requests.acquire().await; + let response = server_server::send_request(destination, request).await; + drop(permit); + + response + } + + #[tracing::instrument(skip(self, registration, request))] + pub async fn send_appservice_request( + &self, + registration: serde_yaml::Value, + request: T, + ) -> Result + where + T: Debug, + { + let permit = self.maximum_requests.acquire().await; + let response = appservice_server::send_request(registration, request).await; + drop(permit); + + response + } +} diff --git a/src/service/transaction_ids/data.rs b/src/service/transaction_ids/data.rs new file mode 100644 index 00000000..74855318 --- /dev/null +++ b/src/service/transaction_ids/data.rs @@ -0,0 +1,19 @@ +use crate::Result; +use ruma::{DeviceId, TransactionId, UserId}; + +pub trait Data: Send + Sync { + fn add_txnid( + &self, + user_id: &UserId, + device_id: Option<&DeviceId>, + txn_id: &TransactionId, + data: &[u8], + ) -> Result<()>; + + fn existing_txnid( + &self, + user_id: &UserId, + device_id: Option<&DeviceId>, + txn_id: &TransactionId, + ) -> Result>>; +} diff --git a/src/service/transaction_ids/mod.rs b/src/service/transaction_ids/mod.rs new file mode 100644 index 00000000..2fa3b02e --- /dev/null +++ b/src/service/transaction_ids/mod.rs @@ -0,0 +1,31 @@ +mod data; + +pub use data::Data; + +use crate::Result; +use ruma::{DeviceId, TransactionId, UserId}; + +pub struct Service { + pub db: &'static dyn Data, +} + +impl Service { + pub fn add_txnid( + &self, + user_id: &UserId, + device_id: Option<&DeviceId>, + txn_id: &TransactionId, + data: &[u8], + ) -> Result<()> { + self.db.add_txnid(user_id, device_id, txn_id, data) + } + + pub fn existing_txnid( + &self, + user_id: &UserId, + device_id: Option<&DeviceId>, + txn_id: &TransactionId, + ) -> Result>> { + self.db.existing_txnid(user_id, device_id, txn_id) + } +} diff --git a/src/service/uiaa/data.rs b/src/service/uiaa/data.rs new file mode 100644 index 00000000..c64deb90 --- /dev/null +++ b/src/service/uiaa/data.rs @@ -0,0 +1,34 @@ +use crate::Result; +use ruma::{api::client::uiaa::UiaaInfo, CanonicalJsonValue, DeviceId, UserId}; + +pub trait Data: Send + Sync { + fn set_uiaa_request( + &self, + 
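// Sketch (std only, illustrative types) of what the transaction_ids service below is for:
// client requests that carry a transaction id must be idempotent, so the result of the first
// attempt is stored under (user, device, txn_id) and looked up again if the same id reappears.
use std::collections::HashMap;

struct TxnCache {
    store: HashMap<(String, String, String), Vec<u8>>,
}

impl TxnCache {
    fn existing(&self, user: &str, device: &str, txn_id: &str) -> Option<&[u8]> {
        self.store
            .get(&(user.to_owned(), device.to_owned(), txn_id.to_owned()))
            .map(Vec::as_slice)
    }

    fn add(&mut self, user: &str, device: &str, txn_id: &str, data: &[u8]) {
        self.store.insert(
            (user.to_owned(), device.to_owned(), txn_id.to_owned()),
            data.to_vec(),
        );
    }
}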
user_id: &UserId, + device_id: &DeviceId, + session: &str, + request: &CanonicalJsonValue, + ) -> Result<()>; + + fn get_uiaa_request( + &self, + user_id: &UserId, + device_id: &DeviceId, + session: &str, + ) -> Option; + + fn update_uiaa_session( + &self, + user_id: &UserId, + device_id: &DeviceId, + session: &str, + uiaainfo: Option<&UiaaInfo>, + ) -> Result<()>; + + fn get_uiaa_session( + &self, + user_id: &UserId, + device_id: &DeviceId, + session: &str, + ) -> Result; +} diff --git a/src/service/uiaa/mod.rs b/src/service/uiaa/mod.rs new file mode 100644 index 00000000..672290c3 --- /dev/null +++ b/src/service/uiaa/mod.rs @@ -0,0 +1,145 @@ +mod data; + +pub use data::Data; + +use ruma::{ + api::client::{ + error::ErrorKind, + uiaa::{AuthType, IncomingAuthData, IncomingPassword, IncomingUserIdentifier, UiaaInfo}, + }, + CanonicalJsonValue, DeviceId, UserId, +}; +use tracing::error; + +use crate::{api::client_server::SESSION_ID_LENGTH, services, utils, Error, Result}; + +pub struct Service { + pub db: &'static dyn Data, +} + +impl Service { + /// Creates a new Uiaa session. Make sure the session token is unique. + pub fn create( + &self, + user_id: &UserId, + device_id: &DeviceId, + uiaainfo: &UiaaInfo, + json_body: &CanonicalJsonValue, + ) -> Result<()> { + self.db.set_uiaa_request( + user_id, + device_id, + uiaainfo.session.as_ref().expect("session should be set"), // TODO: better session error handling (why is it optional in ruma?) + json_body, + )?; + self.db.update_uiaa_session( + user_id, + device_id, + uiaainfo.session.as_ref().expect("session should be set"), + Some(uiaainfo), + ) + } + + pub fn try_auth( + &self, + user_id: &UserId, + device_id: &DeviceId, + auth: &IncomingAuthData, + uiaainfo: &UiaaInfo, + ) -> Result<(bool, UiaaInfo)> { + let mut uiaainfo = auth + .session() + .map(|session| self.db.get_uiaa_session(user_id, device_id, session)) + .unwrap_or_else(|| Ok(uiaainfo.clone()))?; + + if uiaainfo.session.is_none() { + uiaainfo.session = Some(utils::random_string(SESSION_ID_LENGTH)); + } + + match auth { + // Find out what the user completed + IncomingAuthData::Password(IncomingPassword { + identifier, + password, + .. + }) => { + let username = match identifier { + IncomingUserIdentifier::UserIdOrLocalpart(username) => username, + _ => { + return Err(Error::BadRequest( + ErrorKind::Unrecognized, + "Identifier type not recognized.", + )) + } + }; + + let user_id = UserId::parse_with_server_name( + username.clone(), + services().globals.server_name(), + ) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "User ID is invalid."))?; + + // Check if password is correct + if let Some(hash) = services().users.password_hash(&user_id)? { + let hash_matches = + argon2::verify_encoded(&hash, password.as_bytes()).unwrap_or(false); + + if !hash_matches { + uiaainfo.auth_error = Some(ruma::api::client::error::ErrorBody { + kind: ErrorKind::Forbidden, + message: "Invalid username or password.".to_owned(), + }); + return Ok((false, uiaainfo)); + } + } + + // Password was correct! Let's add it to `completed` + uiaainfo.completed.push(AuthType::Password); + } + IncomingAuthData::Dummy(_) => { + uiaainfo.completed.push(AuthType::Dummy); + } + k => error!("type not supported: {:?}", k), + } + + // Check if a flow now succeeds + let mut completed = false; + 'flows: for flow in &mut uiaainfo.flows { + for stage in &flow.stages { + if !uiaainfo.completed.contains(stage) { + continue 'flows; + } + } + // We didn't break, so this flow succeeded! 
+ completed = true; + } + + if !completed { + self.db.update_uiaa_session( + user_id, + device_id, + uiaainfo.session.as_ref().expect("session is always set"), + Some(&uiaainfo), + )?; + return Ok((false, uiaainfo)); + } + + // UIAA was successful! Remove this session and return true + self.db.update_uiaa_session( + user_id, + device_id, + uiaainfo.session.as_ref().expect("session is always set"), + None, + )?; + Ok((true, uiaainfo)) + } + + pub fn get_uiaa_request( + &self, + user_id: &UserId, + device_id: &DeviceId, + session: &str, + ) -> Option { + self.db.get_uiaa_request(user_id, device_id, session) + } +} diff --git a/src/service/users/data.rs b/src/service/users/data.rs new file mode 100644 index 00000000..bc1db33f --- /dev/null +++ b/src/service/users/data.rs @@ -0,0 +1,201 @@ +use crate::Result; +use ruma::{ + api::client::{device::Device, filter::IncomingFilterDefinition}, + encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, + events::AnyToDeviceEvent, + serde::Raw, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, OwnedDeviceId, OwnedDeviceKeyId, OwnedMxcUri, + OwnedUserId, UInt, UserId, +}; +use std::collections::BTreeMap; + +pub trait Data: Send + Sync { + /// Check if a user has an account on this homeserver. + fn exists(&self, user_id: &UserId) -> Result; + + /// Check if account is deactivated + fn is_deactivated(&self, user_id: &UserId) -> Result; + + /// Returns the number of users registered on this server. + fn count(&self) -> Result; + + /// Find out which user an access token belongs to. + fn find_from_token(&self, token: &str) -> Result>; + + /// Returns an iterator over all users on this homeserver. + fn iter<'a>(&'a self) -> Box> + 'a>; + + /// Returns a list of local users as list of usernames. + /// + /// A user account is considered `local` if the length of it's password is greater then zero. + fn list_local_users(&self) -> Result>; + + /// Returns the password hash for the given user. + fn password_hash(&self, user_id: &UserId) -> Result>; + + /// Hash and set the user's password to the Argon2 hash + fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()>; + + /// Returns the displayname of a user on this homeserver. + fn displayname(&self, user_id: &UserId) -> Result>; + + /// Sets a new displayname or removes it if displayname is None. You still need to nofify all rooms of this change. + fn set_displayname(&self, user_id: &UserId, displayname: Option) -> Result<()>; + + /// Get the avatar_url of a user. + fn avatar_url(&self, user_id: &UserId) -> Result>; + + /// Sets a new avatar_url or removes it if avatar_url is None. + fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option) -> Result<()>; + + /// Get the blurhash of a user. + fn blurhash(&self, user_id: &UserId) -> Result>; + + /// Sets a new avatar_url or removes it if avatar_url is None. + fn set_blurhash(&self, user_id: &UserId, blurhash: Option) -> Result<()>; + + /// Adds a new device to a user. + fn create_device( + &self, + user_id: &UserId, + device_id: &DeviceId, + token: &str, + initial_device_display_name: Option, + ) -> Result<()>; + + /// Removes a device from a user. + fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()>; + + /// Returns an iterator over all device ids of this user. + fn all_device_ids<'a>( + &'a self, + user_id: &UserId, + ) -> Box> + 'a>; + + /// Replaces the access token of one device. 
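// Sketch (std only) of the flow check in try_auth above: a UIAA session is complete as soon
// as every stage of at least one advertised flow appears in the `completed` list.
fn any_flow_completed(flows: &[Vec<&str>], completed: &[&str]) -> bool {
    flows
        .iter()
        .any(|flow| flow.iter().all(|stage| completed.contains(stage)))
}

// any_flow_completed(&[vec!["m.login.password"]], &["m.login.password"]) == true
// any_flow_completed(&[vec!["m.login.password", "m.login.dummy"]], &["m.login.dummy"]) == false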
+ fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()>; + + fn add_one_time_key( + &self, + user_id: &UserId, + device_id: &DeviceId, + one_time_key_key: &DeviceKeyId, + one_time_key_value: &Raw, + ) -> Result<()>; + + fn last_one_time_keys_update(&self, user_id: &UserId) -> Result; + + fn take_one_time_key( + &self, + user_id: &UserId, + device_id: &DeviceId, + key_algorithm: &DeviceKeyAlgorithm, + ) -> Result)>>; + + fn count_one_time_keys( + &self, + user_id: &UserId, + device_id: &DeviceId, + ) -> Result>; + + fn add_device_keys( + &self, + user_id: &UserId, + device_id: &DeviceId, + device_keys: &Raw, + ) -> Result<()>; + + fn add_cross_signing_keys( + &self, + user_id: &UserId, + master_key: &Raw, + self_signing_key: &Option>, + user_signing_key: &Option>, + ) -> Result<()>; + + fn sign_key( + &self, + target_id: &UserId, + key_id: &str, + signature: (String, String), + sender_id: &UserId, + ) -> Result<()>; + + fn keys_changed<'a>( + &'a self, + user_or_room_id: &str, + from: u64, + to: Option, + ) -> Box> + 'a>; + + fn mark_device_key_update(&self, user_id: &UserId) -> Result<()>; + + fn get_device_keys( + &self, + user_id: &UserId, + device_id: &DeviceId, + ) -> Result>>; + + fn get_master_key( + &self, + user_id: &UserId, + allowed_signatures: &dyn Fn(&UserId) -> bool, + ) -> Result>>; + + fn get_self_signing_key( + &self, + user_id: &UserId, + allowed_signatures: &dyn Fn(&UserId) -> bool, + ) -> Result>>; + + fn get_user_signing_key(&self, user_id: &UserId) -> Result>>; + + fn add_to_device_event( + &self, + sender: &UserId, + target_user_id: &UserId, + target_device_id: &DeviceId, + event_type: &str, + content: serde_json::Value, + ) -> Result<()>; + + fn get_to_device_events( + &self, + user_id: &UserId, + device_id: &DeviceId, + ) -> Result>>; + + fn remove_to_device_events( + &self, + user_id: &UserId, + device_id: &DeviceId, + until: u64, + ) -> Result<()>; + + fn update_device_metadata( + &self, + user_id: &UserId, + device_id: &DeviceId, + device: &Device, + ) -> Result<()>; + + /// Get device metadata. + fn get_device_metadata(&self, user_id: &UserId, device_id: &DeviceId) + -> Result>; + + fn get_devicelist_version(&self, user_id: &UserId) -> Result>; + + fn all_devices_metadata<'a>( + &'a self, + user_id: &UserId, + ) -> Box> + 'a>; + + /// Creates a new sync filter. Returns the filter id. + fn create_filter(&self, user_id: &UserId, filter: &IncomingFilterDefinition) -> Result; + + fn get_filter( + &self, + user_id: &UserId, + filter_id: &str, + ) -> Result>; +} diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs new file mode 100644 index 00000000..9dcfa8be --- /dev/null +++ b/src/service/users/mod.rs @@ -0,0 +1,371 @@ +mod data; +use std::{collections::BTreeMap, mem}; + +pub use data::Data; +use ruma::{ + api::client::{device::Device, error::ErrorKind, filter::IncomingFilterDefinition}, + encryption::{CrossSigningKey, DeviceKeys, OneTimeKey}, + events::AnyToDeviceEvent, + serde::Raw, + DeviceId, DeviceKeyAlgorithm, DeviceKeyId, OwnedDeviceId, OwnedDeviceKeyId, OwnedMxcUri, + OwnedUserId, RoomAliasId, UInt, UserId, +}; + +use crate::{services, Error, Result}; + +pub struct Service { + pub db: &'static dyn Data, +} + +impl Service { + /// Check if a user has an account on this homeserver. 
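// Sketch of the storage abstraction this refactor introduces: every service owns a
// `&'static dyn Data` trait object, so the concrete key-value backend is chosen once at
// startup and the service code stays backend-agnostic. The names are illustrative, and
// `Box::leak` is shown only as one way to obtain the required 'static reference.
trait KvData: Send + Sync {
    fn count(&self) -> u64;
}

struct MemoryBackend;

impl KvData for MemoryBackend {
    fn count(&self) -> u64 {
        0
    }
}

struct UserService {
    db: &'static dyn KvData,
}

fn build_service() -> UserService {
    let db: &'static dyn KvData = Box::leak(Box::new(MemoryBackend));
    UserService { db }
}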
+ pub fn exists(&self, user_id: &UserId) -> Result { + self.db.exists(user_id) + } + + /// Check if account is deactivated + pub fn is_deactivated(&self, user_id: &UserId) -> Result { + self.db.is_deactivated(user_id) + } + + /// Check if a user is an admin + pub fn is_admin(&self, user_id: &UserId) -> Result { + let admin_room_alias_id = + RoomAliasId::parse(format!("#admins:{}", services().globals.server_name())) + .map_err(|_| Error::BadRequest(ErrorKind::InvalidParam, "Invalid alias."))?; + let admin_room_id = services() + .rooms + .alias + .resolve_local_alias(&admin_room_alias_id)? + .unwrap(); + + services() + .rooms + .state_cache + .is_joined(user_id, &admin_room_id) + } + + /// Create a new user account on this homeserver. + pub fn create(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { + self.db.set_password(user_id, password)?; + Ok(()) + } + + /// Returns the number of users registered on this server. + pub fn count(&self) -> Result { + self.db.count() + } + + /// Find out which user an access token belongs to. + pub fn find_from_token(&self, token: &str) -> Result> { + self.db.find_from_token(token) + } + + /// Returns an iterator over all users on this homeserver. + pub fn iter(&self) -> impl Iterator> + '_ { + self.db.iter() + } + + /// Returns a list of local users as list of usernames. + /// + /// A user account is considered `local` if the length of it's password is greater then zero. + pub fn list_local_users(&self) -> Result> { + self.db.list_local_users() + } + + /// Returns the password hash for the given user. + pub fn password_hash(&self, user_id: &UserId) -> Result> { + self.db.password_hash(user_id) + } + + /// Hash and set the user's password to the Argon2 hash + pub fn set_password(&self, user_id: &UserId, password: Option<&str>) -> Result<()> { + self.db.set_password(user_id, password) + } + + /// Returns the displayname of a user on this homeserver. + pub fn displayname(&self, user_id: &UserId) -> Result> { + self.db.displayname(user_id) + } + + /// Sets a new displayname or removes it if displayname is None. You still need to nofify all rooms of this change. + pub fn set_displayname(&self, user_id: &UserId, displayname: Option) -> Result<()> { + self.db.set_displayname(user_id, displayname) + } + + /// Get the avatar_url of a user. + pub fn avatar_url(&self, user_id: &UserId) -> Result> { + self.db.avatar_url(user_id) + } + + /// Sets a new avatar_url or removes it if avatar_url is None. + pub fn set_avatar_url(&self, user_id: &UserId, avatar_url: Option) -> Result<()> { + self.db.set_avatar_url(user_id, avatar_url) + } + + /// Get the blurhash of a user. + pub fn blurhash(&self, user_id: &UserId) -> Result> { + self.db.blurhash(user_id) + } + + /// Sets a new avatar_url or removes it if avatar_url is None. + pub fn set_blurhash(&self, user_id: &UserId, blurhash: Option) -> Result<()> { + self.db.set_blurhash(user_id, blurhash) + } + + /// Adds a new device to a user. + pub fn create_device( + &self, + user_id: &UserId, + device_id: &DeviceId, + token: &str, + initial_device_display_name: Option, + ) -> Result<()> { + self.db + .create_device(user_id, device_id, token, initial_device_display_name) + } + + /// Removes a device from a user. + pub fn remove_device(&self, user_id: &UserId, device_id: &DeviceId) -> Result<()> { + self.db.remove_device(user_id, device_id) + } + + /// Returns an iterator over all device ids of this user. 
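// Sketch of the password handling behind set_password / password_hash, using the rust-argon2
// calls (hash_encoded / verify_encoded) that already appear elsewhere in this diff: only the
// encoded hash is stored, and login verifies the supplied password against it.
fn hash_and_verify_sketch() -> Result<(), argon2::Error> {
    let config = argon2::Config {
        variant: argon2::Variant::Argon2id,
        ..Default::default()
    };
    // The real code generates a random salt; a fixed one is used here only for brevity.
    let hash = argon2::hash_encoded(b"correct horse", b"exemplary-salt", &config)?;
    assert!(argon2::verify_encoded(&hash, b"correct horse")?);
    assert!(!argon2::verify_encoded(&hash, b"wrong password")?);
    Ok(())
}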
+ pub fn all_device_ids<'a>( + &'a self, + user_id: &UserId, + ) -> impl Iterator> + 'a { + self.db.all_device_ids(user_id) + } + + /// Replaces the access token of one device. + pub fn set_token(&self, user_id: &UserId, device_id: &DeviceId, token: &str) -> Result<()> { + self.db.set_token(user_id, device_id, token) + } + + pub fn add_one_time_key( + &self, + user_id: &UserId, + device_id: &DeviceId, + one_time_key_key: &DeviceKeyId, + one_time_key_value: &Raw, + ) -> Result<()> { + self.db + .add_one_time_key(user_id, device_id, one_time_key_key, one_time_key_value) + } + + pub fn last_one_time_keys_update(&self, user_id: &UserId) -> Result { + self.db.last_one_time_keys_update(user_id) + } + + pub fn take_one_time_key( + &self, + user_id: &UserId, + device_id: &DeviceId, + key_algorithm: &DeviceKeyAlgorithm, + ) -> Result)>> { + self.db.take_one_time_key(user_id, device_id, key_algorithm) + } + + pub fn count_one_time_keys( + &self, + user_id: &UserId, + device_id: &DeviceId, + ) -> Result> { + self.db.count_one_time_keys(user_id, device_id) + } + + pub fn add_device_keys( + &self, + user_id: &UserId, + device_id: &DeviceId, + device_keys: &Raw, + ) -> Result<()> { + self.db.add_device_keys(user_id, device_id, device_keys) + } + + pub fn add_cross_signing_keys( + &self, + user_id: &UserId, + master_key: &Raw, + self_signing_key: &Option>, + user_signing_key: &Option>, + ) -> Result<()> { + self.db + .add_cross_signing_keys(user_id, master_key, self_signing_key, user_signing_key) + } + + pub fn sign_key( + &self, + target_id: &UserId, + key_id: &str, + signature: (String, String), + sender_id: &UserId, + ) -> Result<()> { + self.db.sign_key(target_id, key_id, signature, sender_id) + } + + pub fn keys_changed<'a>( + &'a self, + user_or_room_id: &str, + from: u64, + to: Option, + ) -> impl Iterator> + 'a { + self.db.keys_changed(user_or_room_id, from, to) + } + + pub fn mark_device_key_update(&self, user_id: &UserId) -> Result<()> { + self.db.mark_device_key_update(user_id) + } + + pub fn get_device_keys( + &self, + user_id: &UserId, + device_id: &DeviceId, + ) -> Result>> { + self.db.get_device_keys(user_id, device_id) + } + + pub fn get_master_key( + &self, + user_id: &UserId, + allowed_signatures: &dyn Fn(&UserId) -> bool, + ) -> Result>> { + self.db.get_master_key(user_id, allowed_signatures) + } + + pub fn get_self_signing_key( + &self, + user_id: &UserId, + allowed_signatures: &dyn Fn(&UserId) -> bool, + ) -> Result>> { + self.db.get_self_signing_key(user_id, allowed_signatures) + } + + pub fn get_user_signing_key(&self, user_id: &UserId) -> Result>> { + self.db.get_user_signing_key(user_id) + } + + pub fn add_to_device_event( + &self, + sender: &UserId, + target_user_id: &UserId, + target_device_id: &DeviceId, + event_type: &str, + content: serde_json::Value, + ) -> Result<()> { + self.db.add_to_device_event( + sender, + target_user_id, + target_device_id, + event_type, + content, + ) + } + + pub fn get_to_device_events( + &self, + user_id: &UserId, + device_id: &DeviceId, + ) -> Result>> { + self.db.get_to_device_events(user_id, device_id) + } + + pub fn remove_to_device_events( + &self, + user_id: &UserId, + device_id: &DeviceId, + until: u64, + ) -> Result<()> { + self.db.remove_to_device_events(user_id, device_id, until) + } + + pub fn update_device_metadata( + &self, + user_id: &UserId, + device_id: &DeviceId, + device: &Device, + ) -> Result<()> { + self.db.update_device_metadata(user_id, device_id, device) + } + + /// Get device metadata. 
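// Sketch (std only) of the iterator shape used throughout these traits: the object-safe
// `Data` traits have to return boxed iterators, while the wrapping service re-exports them
// as `impl Iterator` for callers.
trait IdSource: Send + Sync {
    fn all_ids<'a>(&'a self) -> Box<dyn Iterator<Item = u64> + 'a>;
}

struct IdService {
    db: &'static dyn IdSource,
}

impl IdService {
    fn all_ids(&self) -> impl Iterator<Item = u64> + '_ {
        self.db.all_ids()
    }
}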
+ pub fn get_device_metadata( + &self, + user_id: &UserId, + device_id: &DeviceId, + ) -> Result> { + self.db.get_device_metadata(user_id, device_id) + } + + pub fn get_devicelist_version(&self, user_id: &UserId) -> Result> { + self.db.get_devicelist_version(user_id) + } + + pub fn all_devices_metadata<'a>( + &'a self, + user_id: &UserId, + ) -> impl Iterator> + 'a { + self.db.all_devices_metadata(user_id) + } + + /// Deactivate account + pub fn deactivate_account(&self, user_id: &UserId) -> Result<()> { + // Remove all associated devices + for device_id in self.all_device_ids(user_id) { + self.remove_device(user_id, &device_id?)?; + } + + // Set the password to "" to indicate a deactivated account. Hashes will never result in an + // empty string, so the user will not be able to log in again. Systems like changing the + // password without logging in should check if the account is deactivated. + self.db.set_password(user_id, None)?; + + // TODO: Unhook 3PID + Ok(()) + } + + /// Creates a new sync filter. Returns the filter id. + pub fn create_filter( + &self, + user_id: &UserId, + filter: &IncomingFilterDefinition, + ) -> Result { + self.db.create_filter(user_id, filter) + } + + pub fn get_filter( + &self, + user_id: &UserId, + filter_id: &str, + ) -> Result> { + self.db.get_filter(user_id, filter_id) + } +} + +/// Ensure that a user only sees signatures from themselves and the target user +pub fn clean_signatures bool>( + cross_signing_key: &mut serde_json::Value, + user_id: &UserId, + allowed_signatures: F, +) -> Result<(), Error> { + if let Some(signatures) = cross_signing_key + .get_mut("signatures") + .and_then(|v| v.as_object_mut()) + { + // Don't allocate for the full size of the current signatures, but require + // at most one resize if nothing is dropped + let new_capacity = signatures.len() / 2; + for (user, signature) in + mem::replace(signatures, serde_json::Map::with_capacity(new_capacity)) + { + let id = <&UserId>::try_from(user.as_str()) + .map_err(|_| Error::bad_database("Invalid user ID in database."))?; + if id == user_id || allowed_signatures(id) { + signatures.insert(user, signature); + } + } + } + + Ok(()) +} diff --git a/src/error.rs b/src/utils/error.rs similarity index 71% rename from src/error.rs rename to src/utils/error.rs index 7faddc91..9c8617f9 100644 --- a/src/error.rs +++ b/src/utils/error.rs @@ -1,24 +1,20 @@ +use std::convert::Infallible; + +use http::StatusCode; use ruma::{ api::client::{ error::{Error as RumaError, ErrorKind}, - r0::uiaa::UiaaInfo, + uiaa::{UiaaInfo, UiaaResponse}, }, - ServerName, + OwnedServerName, }; use thiserror::Error; -use tracing::warn; +use tracing::{error, warn}; -#[cfg(feature = "conduit_bin")] -use { - crate::RumaResponse, - http::StatusCode, - rocket::{ - response::{self, Responder}, - Request, - }, - ruma::api::client::r0::uiaa::UiaaResponse, - tracing::error, -}; +#[cfg(feature = "persy")] +use persy::PersyError; + +use crate::RumaResponse; pub type Result = std::result::Result; @@ -36,9 +32,18 @@ pub enum Error { #[from] source: rusqlite::Error, }, + #[cfg(feature = "persy")] + #[error("There was a problem with the connection to the persy database.")] + PersyError { source: PersyError }, #[cfg(feature = "heed")] #[error("There was a problem with the connection to the heed database: {error}")] HeedError { error: String }, + #[cfg(feature = "rocksdb")] + #[error("There was a problem with the connection to the rocksdb database: {source}")] + RocksDbError { + #[from] + source: rocksdb::Error, + }, #[error("Could not generate 
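// Sketch (serde_json assumed) of the idea behind clean_signatures above: drop every
// signature whose signing user is neither the requesting user nor explicitly allowed,
// keeping the rest of the cross-signing key untouched.
fn filter_signatures_sketch(
    cross_signing_key: &mut serde_json::Value,
    allowed: impl Fn(&str) -> bool,
) {
    if let Some(signatures) = cross_signing_key
        .get_mut("signatures")
        .and_then(|v| v.as_object_mut())
    {
        let old = std::mem::take(signatures);
        for (user, signature) in old {
            if allowed(&user) {
                signatures.insert(user, signature);
            }
        }
    }
}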
an image.")] ImageError { #[from] @@ -50,7 +55,7 @@ pub enum Error { source: reqwest::Error, }, #[error("{0}")] - FederationError(Box, RumaError), + FederationError(OwnedServerName, RumaError), #[error("Could not do this io: {source}")] IoError { #[from] @@ -69,6 +74,12 @@ pub enum Error { BadRequest(ErrorKind, &'static str), #[error("{0}")] Conflict(&'static str), // This is only needed for when a room alias already exists + #[cfg(feature = "conduit_bin")] + #[error("{0}")] + ExtensionError(#[from] axum::extract::rejection::ExtensionRejection), + #[cfg(feature = "conduit_bin")] + #[error("{0}")] + PathError(#[from] axum::extract::rejection::PathRejection), } impl Error { @@ -106,7 +117,7 @@ impl Error { StatusCode::FORBIDDEN } Unauthorized | UnknownToken { .. } | MissingToken => StatusCode::UNAUTHORIZED, - NotFound => StatusCode::NOT_FOUND, + NotFound | Unrecognized => StatusCode::NOT_FOUND, LimitExceeded { .. } => StatusCode::TOO_MANY_REQUESTS, UserDeactivated => StatusCode::FORBIDDEN, TooLarge => StatusCode::PAYLOAD_TOO_LARGE, @@ -127,12 +138,24 @@ impl Error { } } +#[cfg(feature = "persy")] +impl> From> for Error { + fn from(err: persy::PE) -> Self { + Error::PersyError { + source: err.error().into(), + } + } +} + +impl From for Error { + fn from(i: Infallible) -> Self { + match i {} + } +} + #[cfg(feature = "conduit_bin")] -impl<'r, 'o> Responder<'r, 'o> for Error -where - 'o: 'r, -{ - fn respond_to(self, r: &'r Request<'_>) -> response::Result<'o> { - self.to_response().respond_to(r) +impl axum::response::IntoResponse for Error { + fn into_response(self) -> axum::response::Response { + self.to_response().into_response() } } diff --git a/src/utils.rs b/src/utils/mod.rs similarity index 69% rename from src/utils.rs rename to src/utils/mod.rs index 26d71a8c..0b5b1ae4 100644 --- a/src/utils.rs +++ b/src/utils/mod.rs @@ -1,15 +1,16 @@ +pub mod error; + use argon2::{Config, Variant}; use cmp::Ordering; use rand::prelude::*; -use ruma::serde::{try_from_json_map, CanonicalJsonError, CanonicalJsonObject}; +use ring::digest; +use ruma::{canonical_json::try_from_json_map, CanonicalJsonError, CanonicalJsonObject}; use std::{ - cmp, - convert::TryInto, + cmp, fmt, str::FromStr, time::{SystemTime, UNIX_EPOCH}, }; -#[tracing::instrument] pub fn millis_since_unix_epoch() -> u64 { SystemTime::now() .duration_since(UNIX_EPOCH) @@ -40,19 +41,16 @@ pub fn generate_keypair() -> Vec { } /// Parses the bytes into an u64. -#[tracing::instrument(skip(bytes))] pub fn u64_from_bytes(bytes: &[u8]) -> Result { let array: [u8; 8] = bytes.try_into()?; Ok(u64::from_be_bytes(array)) } /// Parses the bytes into a string. 
-#[tracing::instrument(skip(bytes))] pub fn string_from_bytes(bytes: &[u8]) -> Result { String::from_utf8(bytes.to_vec()) } -#[tracing::instrument(skip(length))] pub fn random_string(length: usize) -> String { thread_rng() .sample_iter(&rand::distributions::Alphanumeric) @@ -62,8 +60,7 @@ pub fn random_string(length: usize) -> String { } /// Calculate a new hash for the given password -#[tracing::instrument(skip(password))] -pub fn calculate_hash(password: &str) -> Result { +pub fn calculate_password_hash(password: &str) -> Result { let hashing_config = Config { variant: Variant::Argon2id, ..Default::default() @@ -73,7 +70,14 @@ pub fn calculate_hash(password: &str) -> Result { argon2::hash_encoded(password.as_bytes(), salt.as_bytes(), &hashing_config) } -#[tracing::instrument(skip(iterators, check_order))] +#[tracing::instrument(skip(keys))] +pub fn calculate_hash(keys: &[&[u8]]) -> Vec { + // We only hash the pdu's event ids, not the whole pdu + let bytes = keys.join(&0xff); + let hash = digest::digest(&digest::SHA256, &bytes); + hash.as_ref().to_owned() +} + pub fn common_elements( mut iterators: impl Iterator>>, check_order: impl Fn(&[u8], &[u8]) -> Ordering, @@ -101,7 +105,6 @@ pub fn common_elements( /// Fallible conversion from any value that implements `Serialize` to a `CanonicalJsonObject`. /// /// `value` must serialize to an `serde_json::Value::Object`. -#[tracing::instrument(skip(value))] pub fn to_canonical_object( value: T, ) -> Result { @@ -115,7 +118,6 @@ pub fn to_canonical_object( } } -#[tracing::instrument(skip(deserializer))] pub fn deserialize_from_str< 'de, D: serde::de::Deserializer<'de>, @@ -141,3 +143,40 @@ pub fn deserialize_from_str< } deserializer.deserialize_str(Visitor(std::marker::PhantomData)) } + +// Copied from librustdoc: +// https://github.com/rust-lang/rust/blob/cbaeec14f90b59a91a6b0f17fc046c66fa811892/src/librustdoc/html/escape.rs + +/// Wrapper struct which will emit the HTML-escaped version of the contained +/// string when passed to a format string. 
+pub struct HtmlEscape<'a>(pub &'a str); + +impl<'a> fmt::Display for HtmlEscape<'a> { + fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { + // Because the internet is always right, turns out there's not that many + // characters to escape: http://stackoverflow.com/questions/7381974 + let HtmlEscape(s) = *self; + let pile_o_bits = s; + let mut last = 0; + for (i, ch) in s.char_indices() { + let s = match ch { + '>' => ">", + '<' => "<", + '&' => "&", + '\'' => "'", + '"' => """, + _ => continue, + }; + fmt.write_str(&pile_o_bits[last..i])?; + fmt.write_str(s)?; + // NOTE: we only expect single byte characters here - which is fine as long as we + // only match single byte characters + last = i + 1; + } + + if last < s.len() { + fmt.write_str(&pile_o_bits[last..])?; + } + Ok(()) + } +} diff --git a/tests/Complement.Dockerfile b/tests/Complement.Dockerfile index f6c62fe8..b9d0f8c9 100644 --- a/tests/Complement.Dockerfile +++ b/tests/Complement.Dockerfile @@ -27,19 +27,18 @@ RUN chmod +x /workdir/caddy COPY conduit-example.toml conduit.toml ENV SERVER_NAME=localhost -ENV ROCKET_LOG=normal ENV CONDUIT_CONFIG=/workdir/conduit.toml RUN sed -i "s/port = 6167/port = 8008/g" conduit.toml RUN echo "allow_federation = true" >> conduit.toml RUN echo "allow_encryption = true" >> conduit.toml RUN echo "allow_registration = true" >> conduit.toml -RUN echo "log = \"info,rocket=info,_=off,sled=off\"" >> conduit.toml +RUN echo "log = \"warn,_=off,sled=off\"" >> conduit.toml RUN sed -i "s/address = \"127.0.0.1\"/address = \"0.0.0.0\"/g" conduit.toml # Enabled Caddy auto cert generation for complement provided CA. -RUN echo '{"logging":{"logs":{"default":{"level":"WARN"}}}, "apps":{"http":{"https_port":8448,"servers":{"srv0":{"listen":[":8448"],"routes":[{"match":[{"host":["your.server.name"]}],"handle":[{"handler":"subroute","routes":[{"handle":[{"handler":"reverse_proxy","upstreams":[{"dial":"127.0.0.1:8008"}]}]}]}],"terminal":true}],"tls_connection_policies": [{"match": {"sni": ["your.server.name"]}}]}}},"pki": {"certificate_authorities": {"local": {"name": "Complement CA","root": {"certificate": "/ca/ca.crt","private_key": "/ca/ca.key"},"intermediate": {"certificate": "/ca/ca.crt","private_key": "/ca/ca.key"}}}},"tls":{"automation":{"policies":[{"subjects":["your.server.name"],"issuer":{"module":"internal"},"on_demand":true},{"issuer":{"module":"internal", "ca": "local"}}]}}}}' > caddy.json - +RUN echo '{"logging":{"logs":{"default":{"level":"WARN"}}}, "apps":{"http":{"https_port":8448,"servers":{"srv0":{"listen":[":8448"],"routes":[{"match":[{"host":["your.server.name"]}],"handle":[{"handler":"subroute","routes":[{"handle":[{"handler":"reverse_proxy","upstreams":[{"dial":"127.0.0.1:8008"}]}]}]}],"terminal":true}],"tls_connection_policies": [{"match": {"sni": ["your.server.name"]}}]}}},"pki": {"certificate_authorities": {"local": {"name": "Complement CA","root": {"certificate": "/ca/ca.crt","private_key": "/ca/ca.key"},"intermediate": {"certificate": "/ca/ca.crt","private_key": "/ca/ca.key"}}}},"tls":{"automation":{"policies":[{"subjects":["your.server.name"],"issuer":{"module":"internal"},"on_demand":true},{"issuer":{"module":"internal", "ca": "local"}}]}}}}' > caddy.json + EXPOSE 8008 8448 CMD ([ -z "${COMPLEMENT_CA}" ] && echo "Error: Need Complement PKI support" && true) || \ diff --git a/tests/sytest/sytest-whitelist b/tests/sytest/sytest-whitelist index eda851ad..1c969dba 100644 --- a/tests/sytest/sytest-whitelist +++ b/tests/sytest/sytest-whitelist @@ -445,6 +445,9 @@ Typing notifications 
don't leak Uninvited users cannot join the room Unprivileged users can set m.room.topic if it only needs level 0 User appears in user directory +User in private room doesn't appear in user directory +User joining then leaving public room appears and dissappears from directory +User in shared private room does appear in user directory until leave User can create and send/receive messages in a room with version 1 User can create and send/receive messages in a room with version 2 User can create and send/receive messages in a room with version 3 @@ -510,3 +513,4 @@ remote user can join room with version 5 remote user can join room with version 6 setting 'm.room.name' respects room powerlevel setting 'm.room.power_levels' respects room powerlevel +Federation publicRoom Name/topic keys are correct