diff --git a/.gitignore b/.gitignore index 1b5d37b8..a34d70ad 100644 --- a/.gitignore +++ b/.gitignore @@ -68,3 +68,6 @@ cached_target # Direnv cache /.direnv + +# Gitlab CI cache +/.gitlab-ci.d diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index f5ab4246..48fe6244 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -1,244 +1,66 @@ stages: - - build - - build docker image - - test - - upload artifacts + - ci + - artifacts variables: - # Make GitLab CI go fast: - GIT_SUBMODULE_STRATEGY: recursive - FF_USE_FASTZIP: 1 - CACHE_COMPRESSION_LEVEL: fastest + # Makes some things print in color + TERM: ansi -# --------------------------------------------------------------------- # -# Create and publish docker image # -# --------------------------------------------------------------------- # +before_script: + # Enable nix-command and flakes + - if command -v nix > /dev/null; then echo "experimental-features = nix-command flakes" >> /etc/nix/nix.conf; fi -.docker-shared-settings: - stage: "build docker image" - needs: [] - tags: [ "docker" ] - variables: - # Docker in Docker: - DOCKER_BUILDKIT: 1 - image: - name: docker.io/docker - services: - - name: docker.io/docker:dind - alias: docker - script: - - apk add openssh-client - - eval $(ssh-agent -s) - - mkdir -p ~/.ssh && chmod 700 ~/.ssh - - printf "Host *\n\tStrictHostKeyChecking no\n\n" >> ~/.ssh/config - - sh .gitlab/setup-buildx-remote-builders.sh - # Authorize against this project's own image registry: - - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY - # Build multiplatform image and push to temporary tag: - - > - docker buildx build - --platform "linux/arm/v7,linux/arm64,linux/amd64" - --pull - --tag "$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID" - --push - --provenance=false - --file "Dockerfile" . - # Build multiplatform image to deb stage and extract their .deb files: - - > - docker buildx build - --platform "linux/arm/v7,linux/arm64,linux/amd64" - --target "packager-result" - --output="type=local,dest=/tmp/build-output" - --provenance=false - --file "Dockerfile" . - # Build multiplatform image to binary stage and extract their binaries: - - > - docker buildx build - --platform "linux/arm/v7,linux/arm64,linux/amd64" - --target "builder-result" - --output="type=local,dest=/tmp/build-output" - --provenance=false - --file "Dockerfile" . 
- # Copy to GitLab container registry: - - > - docker buildx imagetools create - --tag "$CI_REGISTRY_IMAGE/$TAG" - --tag "$CI_REGISTRY_IMAGE/$TAG-bullseye" - --tag "$CI_REGISTRY_IMAGE/$TAG-commit-$CI_COMMIT_SHORT_SHA" - "$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID" - # if DockerHub credentials exist, also copy to dockerhub: - - if [ -n "${DOCKER_HUB}" ]; then docker login -u "$DOCKER_HUB_USER" -p "$DOCKER_HUB_PASSWORD" "$DOCKER_HUB"; fi - - > - if [ -n "${DOCKER_HUB}" ]; then - docker buildx imagetools create - --tag "$DOCKER_HUB_IMAGE/$TAG" - --tag "$DOCKER_HUB_IMAGE/$TAG-bullseye" - --tag "$DOCKER_HUB_IMAGE/$TAG-commit-$CI_COMMIT_SHORT_SHA" - "$CI_REGISTRY_IMAGE/temporary-ci-images:$CI_JOB_ID" - ; fi - - mv /tmp/build-output ./ - artifacts: - paths: - - "./build-output/" - -docker:next: - extends: .docker-shared-settings - rules: - - if: '$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_BRANCH == "next"' - variables: - TAG: "matrix-conduit:next" - -docker:master: - extends: .docker-shared-settings - rules: - - if: '$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_BRANCH == "master"' - variables: - TAG: "matrix-conduit:latest" + # Add nix-community binary cache + - if command -v nix > /dev/null; then echo "extra-substituters = https://nix-community.cachix.org" >> /etc/nix/nix.conf; fi + - if command -v nix > /dev/null; then echo "extra-trusted-public-keys = nix-community.cachix.org-1:mB9FSh9qf2dCimDSUo8Zy7bkq5CX+/rkCWyvRCYg3Fs=" >> /etc/nix/nix.conf; fi -docker:tags: - extends: .docker-shared-settings - rules: - - if: "$BUILD_SERVER_SSH_PRIVATE_KEY && $CI_COMMIT_TAG" - variables: - TAG: "matrix-conduit:$CI_COMMIT_TAG" + # Install direnv and nix-direnv + - if command -v nix > /dev/null; then nix-env -iA nixpkgs.direnv nixpkgs.nix-direnv; fi + # Allow .envrc + - if command -v nix > /dev/null; then direnv allow; fi -docker build debugging: - extends: .docker-shared-settings - rules: - - if: "$CI_MERGE_REQUEST_TITLE =~ /.*[Dd]ocker.*/" - variables: - TAG: "matrix-conduit-docker-tests:latest" + # Set CARGO_HOME to a cacheable path + - export CARGO_HOME="$(git rev-parse --show-toplevel)/.gitlab-ci.d/cargo" -# --------------------------------------------------------------------- # -# Run tests # -# --------------------------------------------------------------------- # - -cargo check: - stage: test - image: docker.io/rust:1.70.0-bullseye - needs: [] - interruptible: true - before_script: - - "rustup show && rustc --version && cargo --version" # Print version info for debugging - - apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb +ci: + stage: ci + image: nixos/nix:2.19.2 script: - - cargo check - - -.test-shared-settings: - stage: "test" - needs: [] - image: "registry.gitlab.com/jfowl/conduit-containers/rust-with-tools:latest" - tags: ["docker"] - variables: - CARGO_INCREMENTAL: "false" # https://matklad.github.io/2021/09/04/fast-rust-builds.html#ci-workflow - interruptible: true + - direnv exec . 
engage + cache: + key: nix + paths: + - target + - .gitlab-ci.d -test:cargo: - extends: .test-shared-settings - before_script: - - apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb +docker: + stage: artifacts + image: nixos/nix:2.19.2 script: - - rustc --version && cargo --version # Print version info for debugging - - "cargo test --color always --workspace --verbose --locked --no-fail-fast" + - nix build .#oci-image -test:clippy: - extends: .test-shared-settings - allow_failure: true - before_script: - - rustup component add clippy - - apt-get update && apt-get -y --no-install-recommends install libclang-dev # dependency for rocksdb - script: - - rustc --version && cargo --version # Print version info for debugging - - "cargo clippy --color always --verbose --message-format=json | gitlab-report -p clippy > $CI_PROJECT_DIR/gl-code-quality-report.json" + # Make the output less difficult to find + - cp result docker-image.tar.gz artifacts: - when: always - reports: - codequality: gl-code-quality-report.json + paths: + - docker-image.tar.gz -test:format: - extends: .test-shared-settings - before_script: - - rustup component add rustfmt +debian: + stage: artifacts + image: rust:1.70.0 script: - - cargo fmt --all -- --check + - apt-get update && apt-get install -y --no-install-recommends libclang-dev + - cargo install cargo-deb + - cargo deb -test:audit: - extends: .test-shared-settings - allow_failure: true - script: - - cargo audit --color always || true - - cargo audit --stale --json | gitlab-report -p audit > gl-sast-report.json - artifacts: - when: always - reports: - sast: gl-sast-report.json - -test:dockerlint: - stage: "test" - needs: [] - image: "ghcr.io/hadolint/hadolint@sha256:6c4b7c23f96339489dd35f21a711996d7ce63047467a9a562287748a03ad5242" # 2.8.0-alpine - interruptible: true - script: - - hadolint --version - # First pass: Print for CI log: - - > - hadolint - --no-fail --verbose - ./Dockerfile - # Then output the results into a json for GitLab to pretty-print this in the MR: - - > - hadolint - --format gitlab_codeclimate - --failure-threshold error - ./Dockerfile > dockerlint.json + # Make the output less difficult to find + - mv target/debian/*.deb . 
artifacts: - when: always - reports: - codequality: dockerlint.json paths: - - dockerlint.json - rules: - - if: '$CI_COMMIT_REF_NAME != "master"' - changes: - - docker/*Dockerfile - - Dockerfile - - .gitlab-ci.yml - - if: '$CI_COMMIT_REF_NAME == "master"' - - if: '$CI_COMMIT_REF_NAME == "next"' - -# --------------------------------------------------------------------- # -# Store binaries as package so they have download urls # -# --------------------------------------------------------------------- # - -# DISABLED FOR NOW, NEEDS TO BE FIXED AT A LATER TIME: - -#publish:package: -# stage: "upload artifacts" -# needs: -# - "docker:tags" -# rules: -# - if: "$CI_COMMIT_TAG" -# image: curlimages/curl:latest -# tags: ["docker"] -# variables: -# GIT_STRATEGY: "none" # Don't need a clean copy of the code, we just operate on artifacts -# script: -# - 'BASE_URL="${CI_API_V4_URL}/projects/${CI_PROJECT_ID}/packages/generic/conduit-${CI_COMMIT_REF_SLUG}/build-${CI_PIPELINE_ID}"' -# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_amd64/conduit "${BASE_URL}/conduit-x86_64-unknown-linux-gnu"' -# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm_v7/conduit "${BASE_URL}/conduit-armv7-unknown-linux-gnu"' -# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm64/conduit "${BASE_URL}/conduit-aarch64-unknown-linux-gnu"' -# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_amd64/conduit.deb "${BASE_URL}/conduit-x86_64-unknown-linux-gnu.deb"' -# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm_v7/conduit.deb "${BASE_URL}/conduit-armv7-unknown-linux-gnu.deb"' -# - 'curl --header "JOB-TOKEN: $CI_JOB_TOKEN" --upload-file build-output/linux_arm64/conduit.deb "${BASE_URL}/conduit-aarch64-unknown-linux-gnu.deb"' - -# Avoid duplicate pipelines -# See: https://docs.gitlab.com/ee/ci/yaml/workflow.html#switch-between-branch-pipelines-and-merge-request-pipelines -workflow: - rules: - - if: '$CI_PIPELINE_SOURCE == "merge_request_event"' - - if: "$CI_COMMIT_BRANCH && $CI_OPEN_MERGE_REQUESTS" - when: never - - if: "$CI_COMMIT_BRANCH" - - if: "$CI_COMMIT_TAG" + - "*.deb" + cache: + key: debian + paths: + - target + - .gitlab-ci.d diff --git a/Cargo.lock b/Cargo.lock index 9c8596aa..b4bcdc0a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -180,15 +180,6 @@ version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8c3c1a368f70d6cf7302d78f8f7093da241fb8e8807c05cc9e51a125895a6d5b" -[[package]] -name = "bincode" -version = "1.3.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1f45e9417d87227c7a56d22e471c6206462cba514c7590c09aff4cf6d1ddcad" -dependencies = [ - "serde", -] - [[package]] name = "bindgen" version = "0.65.1" @@ -368,11 +359,9 @@ dependencies = [ "base64 0.21.2", "bytes", "clap", - "crossbeam", "directories", "figment", "futures-util", - "heed", "hmac", "http", "image", @@ -487,20 +476,6 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "crossbeam" -version = "0.8.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2801af0d36612ae591caa9568261fddce32ce6e08a7275ea334a06a4ad021a2c" -dependencies = [ - "cfg-if", - "crossbeam-channel", - "crossbeam-deque", - "crossbeam-epoch", - "crossbeam-queue", - "crossbeam-utils", -] - [[package]] name = "crossbeam-channel" version = "0.5.8" @@ -511,40 +486,6 @@ dependencies = [ "crossbeam-utils", ] -[[package]] -name = "crossbeam-deque" -version 
= "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ce6fd6f855243022dcecf8702fef0c297d4338e226845fe067f6341ad9fa0cef" -dependencies = [ - "cfg-if", - "crossbeam-epoch", - "crossbeam-utils", -] - -[[package]] -name = "crossbeam-epoch" -version = "0.9.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae211234986c545741a7dc064309f67ee1e5ad243d0e48335adc0484d960bcc7" -dependencies = [ - "autocfg", - "cfg-if", - "crossbeam-utils", - "memoffset 0.9.0", - "scopeguard", -] - -[[package]] -name = "crossbeam-queue" -version = "0.3.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1cfb3ea8a53f37c40dea2c7bedcbd88bdfae54f5e2175d6ecaff1c988353add" -dependencies = [ - "cfg-if", - "crossbeam-utils", -] - [[package]] name = "crossbeam-utils" version = "0.8.16" @@ -986,42 +927,6 @@ version = "0.4.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" -[[package]] -name = "heed" -version = "0.10.6" -source = "git+https://github.com/timokoesters/heed.git?rev=f6f825da7fb2c758867e05ad973ef800a6fe1d5d#f6f825da7fb2c758867e05ad973ef800a6fe1d5d" -dependencies = [ - "bytemuck", - "byteorder", - "heed-traits", - "heed-types", - "libc", - "lmdb-rkv-sys", - "once_cell", - "page_size", - "serde", - "synchronoise", - "url", -] - -[[package]] -name = "heed-traits" -version = "0.7.0" -source = "git+https://github.com/timokoesters/heed.git?rev=f6f825da7fb2c758867e05ad973ef800a6fe1d5d#f6f825da7fb2c758867e05ad973ef800a6fe1d5d" - -[[package]] -name = "heed-types" -version = "0.7.2" -source = "git+https://github.com/timokoesters/heed.git?rev=f6f825da7fb2c758867e05ad973ef800a6fe1d5d#f6f825da7fb2c758867e05ad973ef800a6fe1d5d" -dependencies = [ - "bincode", - "bytemuck", - "byteorder", - "heed-traits", - "serde", - "serde_json", -] - [[package]] name = "hermit-abi" version = "0.2.6" @@ -1379,17 +1284,6 @@ version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" -[[package]] -name = "lmdb-rkv-sys" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61b9ce6b3be08acefa3003c57b7565377432a89ec24476bbe72e11d101f852fe" -dependencies = [ - "cc", - "libc", - "pkg-config", -] - [[package]] name = "lock_api" version = "0.4.10" @@ -1473,15 +1367,6 @@ dependencies = [ "autocfg", ] -[[package]] -name = "memoffset" -version = "0.9.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a634b1c61a95585bd15607c6ab0c4e5b226e695ff2800ba0cdccddf208c406c" -dependencies = [ - "autocfg", -] - [[package]] name = "mime" version = "0.3.17" @@ -1524,7 +1409,7 @@ dependencies = [ "bitflags 1.3.2", "cfg-if", "libc", - "memoffset 0.7.1", + "memoffset", "pin-utils", "static_assertions", ] @@ -1701,16 +1586,6 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" -[[package]] -name = "page_size" -version = "0.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eebde548fbbf1ea81a99b128872779c437752fb99f217c45245e1a61dcd9edcd" -dependencies = [ - "libc", - "winapi", -] - [[package]] name = "parking_lot" version = "0.12.1" @@ -2727,15 +2602,6 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" -[[package]] -name = "synchronoise" -version = "1.0.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3dbc01390fc626ce8d1cffe3376ded2b72a11bb70e1c75f404a210e4daa4def2" -dependencies = [ - "crossbeam-queue", -] - [[package]] name = "thiserror" version = "1.0.40" diff --git a/Cargo.toml b/Cargo.toml index ff1785e4..8c139156 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -9,10 +9,12 @@ readme = "README.md" version = "0.7.0-alpha" edition = "2021" -# When changing this, make sure to update the `flake.lock` file by running -# `nix flake update`. If you don't have Nix installed or otherwise don't know -# how to do this, ping `@charles:computer.surgery` or `@dusk:gaze.systems` in -# the matrix room. +# When changing this, make sure to update the hash near the text "THE +# rust-version HASH" in `flake.nix`. If you don't have Nix installed or +# otherwise don't know how to do this, ping `@charles:computer.surgery` or +# `@dusk:gaze.systems` in the matrix room. +# +# Also make sure to update the docker image tags in `.gitlab-ci.yml`. rust-version = "1.70.0" # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html @@ -78,10 +80,10 @@ tracing-opentelemetry = "0.18.0" lru-cache = "0.1.2" rusqlite = { version = "0.29.0", optional = true, features = ["bundled"] } parking_lot = { version = "0.12.1", optional = true } -crossbeam = { version = "0.8.2", optional = true } +# crossbeam = { version = "0.8.2", optional = true } num_cpus = "1.15.0" threadpool = "1.8.1" -heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true } +# heed = { git = "https://github.com/timokoesters/heed.git", rev = "f6f825da7fb2c758867e05ad973ef800a6fe1d5d", optional = true } # Used for ruma wrapper serde_html_form = "0.2.0" @@ -112,7 +114,7 @@ default = ["conduit_bin", "backend_sqlite", "backend_rocksdb", "systemd"] #backend_sled = ["sled"] backend_persy = ["persy", "parking_lot"] backend_sqlite = ["sqlite"] -backend_heed = ["heed", "crossbeam"] +#backend_heed = ["heed", "crossbeam"] backend_rocksdb = ["rocksdb"] jemalloc = ["tikv-jemalloc-ctl", "tikv-jemallocator"] sqlite = ["rusqlite", "parking_lot", "tokio/signal"] diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index 943f6864..00000000 --- a/Dockerfile +++ /dev/null @@ -1,132 +0,0 @@ -# syntax=docker/dockerfile:1 -FROM docker.io/rust:1.70-bullseye AS base - -FROM base AS builder -WORKDIR /usr/src/conduit - -# Install required packages to build Conduit and it's dependencies -RUN apt-get update && \ - apt-get -y --no-install-recommends install libclang-dev=1:11.0-51+nmu5 - -# == Build dependencies without our own code separately for caching == -# -# Need a fake main.rs since Cargo refuses to build anything otherwise. -# -# See https://github.com/rust-lang/cargo/issues/2644 for a Cargo feature -# request that would allow just dependencies to be compiled, presumably -# regardless of whether source files are available. -RUN mkdir src && touch src/lib.rs && echo 'fn main() {}' > src/main.rs -COPY Cargo.toml Cargo.lock ./ -RUN cargo build --release && rm -r src - -# Copy over actual Conduit sources -COPY src src - -# main.rs and lib.rs need their timestamp updated for this to work correctly since -# otherwise the build with the fake main.rs from above is newer than the -# source files (COPY preserves timestamps). 
-# -# Builds conduit and places the binary at /usr/src/conduit/target/release/conduit -RUN touch src/main.rs && touch src/lib.rs && cargo build --release - - -# ONLY USEFUL FOR CI: target stage to extract build artifacts -FROM scratch AS builder-result -COPY --from=builder /usr/src/conduit/target/release/conduit /conduit - - - -# --------------------------------------------------------------------------------------------------------------- -# Build cargo-deb, a tool to package up rust binaries into .deb packages for Debian/Ubuntu based systems: -# --------------------------------------------------------------------------------------------------------------- -FROM base AS build-cargo-deb - -RUN apt-get update && \ - apt-get install -y --no-install-recommends \ - dpkg \ - dpkg-dev \ - liblzma-dev - -RUN cargo install cargo-deb -# => binary is in /usr/local/cargo/bin/cargo-deb - - -# --------------------------------------------------------------------------------------------------------------- -# Package conduit build-result into a .deb package: -# --------------------------------------------------------------------------------------------------------------- -FROM builder AS packager -WORKDIR /usr/src/conduit - -COPY ./LICENSE ./LICENSE -COPY ./README.md ./README.md -COPY debian ./debian -COPY --from=build-cargo-deb /usr/local/cargo/bin/cargo-deb /usr/local/cargo/bin/cargo-deb - -# --no-build makes cargo-deb reuse already compiled project -RUN cargo deb --no-build -# => Package is in /usr/src/conduit/target/debian/__.deb - - -# ONLY USEFUL FOR CI: target stage to extract build artifacts -FROM scratch AS packager-result -COPY --from=packager /usr/src/conduit/target/debian/*.deb /conduit.deb - - -# --------------------------------------------------------------------------------------------------------------- -# Stuff below this line actually ends up in the resulting docker image -# --------------------------------------------------------------------------------------------------------------- -FROM docker.io/debian:bullseye-slim AS runner - -# Standard port on which Conduit launches. -# You still need to map the port when using the docker command or docker-compose. -EXPOSE 6167 - -ARG DEFAULT_DB_PATH=/var/lib/matrix-conduit - -ENV CONDUIT_PORT=6167 \ - CONDUIT_ADDRESS="0.0.0.0" \ - CONDUIT_DATABASE_PATH=${DEFAULT_DB_PATH} \ - CONDUIT_CONFIG='' -# └─> Set no config file to do all configuration with env vars - -# Conduit needs: -# dpkg: to install conduit.deb -# ca-certificates: for https -# iproute2 & wget: for the healthcheck script -RUN apt-get update && apt-get -y --no-install-recommends install \ - dpkg \ - ca-certificates \ - iproute2 \ - wget \ - && rm -rf /var/lib/apt/lists/* - -# Test if Conduit is still alive, uses the same endpoint as Element -COPY ./docker/healthcheck.sh /srv/conduit/healthcheck.sh -HEALTHCHECK --start-period=5s --interval=5s CMD ./healthcheck.sh - -# Install conduit.deb: -COPY --from=packager /usr/src/conduit/target/debian/*.deb /srv/conduit/ -RUN dpkg -i /srv/conduit/*.deb - -# Improve security: Don't run stuff as root, that does not need to run as root -# Most distros also use 1000:1000 for the first real user, so this should resolve volume mounting problems. 
-ARG USER_ID=1000 -ARG GROUP_ID=1000 -RUN set -x ; \ - groupadd -r -g ${GROUP_ID} conduit ; \ - useradd -l -r -M -d /srv/conduit -o -u ${USER_ID} -g conduit conduit && exit 0 ; exit 1 - -# Create database directory, change ownership of Conduit files to conduit user and group and make the healthcheck executable: -RUN chown -cR conduit:conduit /srv/conduit && \ - chmod +x /srv/conduit/healthcheck.sh && \ - mkdir -p ${DEFAULT_DB_PATH} && \ - chown -cR conduit:conduit ${DEFAULT_DB_PATH} - -# Change user to conduit, no root permissions afterwards: -USER conduit -# Set container home directory -WORKDIR /srv/conduit - -# Run Conduit and print backtraces on panics -ENV RUST_BACKTRACE=1 -ENTRYPOINT [ "/usr/sbin/matrix-conduit" ] diff --git a/engage.toml b/engage.toml new file mode 100644 index 00000000..3e8884eb --- /dev/null +++ b/engage.toml @@ -0,0 +1,64 @@ +interpreter = ["bash", "-euo", "pipefail", "-c"] + +[[task]] +name = "engage" +group = "versions" +script = "engage --version" + +[[task]] +name = "rustc" +group = "versions" +script = "rustc --version" + +[[task]] +name = "cargo" +group = "versions" +script = "cargo --version" + +[[task]] +name = "cargo-fmt" +group = "versions" +script = "cargo fmt --version" + +[[task]] +name = "rustdoc" +group = "versions" +script = "rustdoc --version" + +[[task]] +name = "cargo-clippy" +group = "versions" +script = "cargo clippy -- --version" + +[[task]] +name = "cargo-fmt" +group = "lints" +script = "cargo fmt --check -- --color=always" + +[[task]] +name = "cargo-doc" +group = "lints" +script = """ +RUSTDOCFLAGS="-D warnings" cargo doc \ + --workspace \ + --no-deps \ + --document-private-items \ + --color always +""" + +[[task]] +name = "cargo-clippy" +group = "lints" +script = "cargo clippy --workspace --all-targets --color=always -- -D warnings" + +[[task]] +name = "cargo" +group = "tests" +script = """ +cargo test \ + --workspace \ + --all-targets \ + --color=always \ + -- \ + --color=always +""" diff --git a/flake.lock b/flake.lock index 00655252..128539bb 100644 --- a/flake.lock +++ b/flake.lock @@ -2,21 +2,16 @@ "nodes": { "crane": { "inputs": { - "flake-compat": "flake-compat", - "flake-utils": [ - "flake-utils" - ], "nixpkgs": [ "nixpkgs" - ], - "rust-overlay": "rust-overlay" + ] }, "locked": { - "lastModified": 1688772518, - "narHash": "sha256-ol7gZxwvgLnxNSZwFTDJJ49xVY5teaSvF7lzlo3YQfM=", + "lastModified": 1705597458, + "narHash": "sha256-vJ8Ib9ruxbaBxGEcA0d7dHqxpc6Z+SGR2XIxVeSMuLM=", "owner": "ipetkov", "repo": "crane", - "rev": "8b08e96c9af8c6e3a2b69af5a7fa168750fcf88e", + "rev": "742170d82cd65c925dcddc5c3d6185699fbbad08", "type": "github" }, "original": { @@ -33,11 +28,11 @@ "rust-analyzer-src": "rust-analyzer-src" }, "locked": { - "lastModified": 1689488573, - "narHash": "sha256-diVASflKCCryTYv0djvMnP2444mFsIG0ge5pa7ahauQ=", + "lastModified": 1705559032, + "narHash": "sha256-Cb+Jd1+Gz4Wi+8elPnUIHnqQmE1qjDRZ+PsJaPaAffY=", "owner": "nix-community", "repo": "fenix", - "rev": "39096fe3f379036ff4a5fa198950b8e79defe939", + "rev": "e132ea0eb0c799a2109a91688e499d7bf4962801", "type": "github" }, "original": { @@ -46,47 +41,46 @@ "type": "github" } }, - "flake-compat": { - "flake": false, + "flake-utils": { + "inputs": { + "systems": "systems" + }, "locked": { - "lastModified": 1673956053, - "narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=", - "owner": "edolstra", - "repo": "flake-compat", - "rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9", + "lastModified": 1705309234, + "narHash": 
"sha256-uNRRNRKmJyCRC/8y1RqBkqWBLM034y4qN7EprSdmgyA=", + "owner": "numtide", + "repo": "flake-utils", + "rev": "1ef2e671c3b0c19053962c07dbda38332dcebf26", "type": "github" }, "original": { - "owner": "edolstra", - "repo": "flake-compat", + "owner": "numtide", + "repo": "flake-utils", "type": "github" } }, - "flake-utils": { - "inputs": { - "systems": "systems" - }, + "nix-filter": { "locked": { - "lastModified": 1689068808, - "narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=", + "lastModified": 1705332318, + "narHash": "sha256-kcw1yFeJe9N4PjQji9ZeX47jg0p9A0DuU4djKvg1a7I=", "owner": "numtide", - "repo": "flake-utils", - "rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4", + "repo": "nix-filter", + "rev": "3449dc925982ad46246cfc36469baf66e1b64f17", "type": "github" }, "original": { "owner": "numtide", - "repo": "flake-utils", + "repo": "nix-filter", "type": "github" } }, "nixpkgs": { "locked": { - "lastModified": 1689444953, - "narHash": "sha256-0o56bfb2LC38wrinPdCGLDScd77LVcr7CrH1zK7qvDg=", + "lastModified": 1705496572, + "narHash": "sha256-rPIe9G5EBLXdBdn9ilGc0nq082lzQd0xGGe092R/5QE=", "owner": "NixOS", "repo": "nixpkgs", - "rev": "8acef304efe70152463a6399f73e636bcc363813", + "rev": "842d9d80cfd4560648c785f8a4e6f3b096790e19", "type": "github" }, "original": { @@ -101,17 +95,18 @@ "crane": "crane", "fenix": "fenix", "flake-utils": "flake-utils", + "nix-filter": "nix-filter", "nixpkgs": "nixpkgs" } }, "rust-analyzer-src": { "flake": false, "locked": { - "lastModified": 1689441253, - "narHash": "sha256-4MSDZaFI4DOfsLIZYPMBl0snzWhX1/OqR/QHir382CY=", + "lastModified": 1705523001, + "narHash": "sha256-TWq5vJ6m+9HGSDMsQAmz1TMegMi79R3TTyKjnPWsQp8=", "owner": "rust-lang", "repo": "rust-analyzer", - "rev": "996e054f1eb1dbfc8455ecabff0f6ff22ba7f7c8", + "rev": "9d9b34354d2f13e33568c9c55b226dd014a146a0", "type": "github" }, "original": { @@ -121,31 +116,6 @@ "type": "github" } }, - "rust-overlay": { - "inputs": { - "flake-utils": [ - "crane", - "flake-utils" - ], - "nixpkgs": [ - "crane", - "nixpkgs" - ] - }, - "locked": { - "lastModified": 1688351637, - "narHash": "sha256-CLTufJ29VxNOIZ8UTg0lepsn3X03AmopmaLTTeHDCL4=", - "owner": "oxalica", - "repo": "rust-overlay", - "rev": "f9b92316727af9e6c7fee4a761242f7f46880329", - "type": "github" - }, - "original": { - "owner": "oxalica", - "repo": "rust-overlay", - "type": "github" - } - }, "systems": { "locked": { "lastModified": 1681028828, diff --git a/flake.nix b/flake.nix index eb3a31cb..b4a32d0a 100644 --- a/flake.nix +++ b/flake.nix @@ -2,6 +2,7 @@ inputs = { nixpkgs.url = "github:NixOS/nixpkgs?ref=nixos-unstable"; flake-utils.url = "github:numtide/flake-utils"; + nix-filter.url = "github:numtide/nix-filter"; fenix = { url = "github:nix-community/fenix"; @@ -10,7 +11,6 @@ crane = { url = "github:ipetkov/crane"; inputs.nixpkgs.follows = "nixpkgs"; - inputs.flake-utils.follows = "flake-utils"; }; }; @@ -18,6 +18,7 @@ { self , nixpkgs , flake-utils + , nix-filter , fenix , crane @@ -43,51 +44,91 @@ sha256 = "sha256-gdYqng0y9iHYzYPAdkC/ka3DRny3La/S5G8ASj0Ayyc="; }; - # The system's RocksDB - ROCKSDB_INCLUDE_DIR = "${pkgs.rocksdb}/include"; - ROCKSDB_LIB_DIR = "${pkgs.rocksdb}/lib"; + mkToolchain = fenix.packages.${system}.combine; + + buildToolchain = mkToolchain (with toolchain; [ + cargo + rustc + ]); + + devToolchain = mkToolchain (with toolchain; [ + cargo + clippy + rust-src + rustc + + # Always use nightly rustfmt because most of its options are unstable + fenix.packages.${system}.latest.rustfmt + ]); + + builder = + ((crane.mkLib 
pkgs).overrideToolchain buildToolchain).buildPackage; - # Shared between the package and the devShell nativeBuildInputs = (with pkgs.rustPlatform; [ bindgenHook ]); - builder = - ((crane.mkLib pkgs).overrideToolchain toolchain.toolchain).buildPackage; + env = { + ROCKSDB_INCLUDE_DIR = "${pkgs.rocksdb}/include"; + ROCKSDB_LIB_DIR = "${pkgs.rocksdb}/lib"; + }; in { packages.default = builder { - src = ./.; + src = nix-filter { + root = ./.; + include = [ + "src" + "Cargo.toml" + "Cargo.lock" + ]; + }; + + # This is redundant with CI + doCheck = false; inherit - stdenv + env nativeBuildInputs - ROCKSDB_INCLUDE_DIR - ROCKSDB_LIB_DIR; + stdenv; + + meta.mainProgram = cargoToml.package.name; }; - devShells.default = (pkgs.mkShell.override { inherit stdenv; }) { - # Rust Analyzer needs to be able to find the path to default crate - # sources, and it can read this environment variable to do so - RUST_SRC_PATH = "${toolchain.rust-src}/lib/rustlib/src/rust/library"; + packages.oci-image = + let + package = self.packages.${system}.default; + in + pkgs.dockerTools.buildImage { + name = package.pname; + tag = "latest"; + config = { + # Use the `tini` init system so that signals (e.g. ctrl+c/SIGINT) are + # handled as expected + Entrypoint = [ + "${pkgs.lib.getExe' pkgs.tini "tini"}" + "--" + ]; + Cmd = [ + "${pkgs.lib.getExe package}" + ]; + }; + }; - inherit - ROCKSDB_INCLUDE_DIR - ROCKSDB_LIB_DIR; + devShells.default = (pkgs.mkShell.override { inherit stdenv; }) { + env = env // { + # Rust Analyzer needs to be able to find the path to default crate + # sources, and it can read this environment variable to do so. The + # `rust-src` component is required in order for this to work. + RUST_SRC_PATH = "${devToolchain}/lib/rustlib/src/rust/library"; + }; # Development tools - nativeBuildInputs = nativeBuildInputs ++ (with toolchain; [ - cargo - clippy - rust-src - rustc - rustfmt + nativeBuildInputs = nativeBuildInputs ++ [ + devToolchain + ] ++ (with pkgs; [ + engage ]); }; - - checks = { - packagesDefault = self.packages.${system}.default; - devShellsDefault = self.devShells.${system}.default; - }; }); } diff --git a/src/api/client_server/keys.rs b/src/api/client_server/keys.rs index 7dbe040d..9fd00897 100644 --- a/src/api/client_server/keys.rs +++ b/src/api/client_server/keys.rs @@ -360,7 +360,7 @@ pub(crate) async fn get_keys_helper bool>( .bad_query_ratelimiter .read() .unwrap() - .get(&*server) + .get(server) { // Exponential backoff let mut min_elapsed_duration = Duration::from_secs(30) * (*tries) * (*tries); @@ -393,7 +393,7 @@ pub(crate) async fn get_keys_helper bool>( ), ) .await - .map_err(|e| Error::BadServerResponse("Query took too long")), + .map_err(|_e| Error::BadServerResponse("Query took too long")), ) }) .collect(); diff --git a/src/api/client_server/membership.rs b/src/api/client_server/membership.rs index 346f2575..ed59691d 100644 --- a/src/api/client_server/membership.rs +++ b/src/api/client_server/membership.rs @@ -400,7 +400,7 @@ pub async fn get_member_events_route( if !services() .rooms .state_accessor - .user_can_see_state_events(&sender_user, &body.room_id)? + .user_can_see_state_events(sender_user, &body.room_id)? { return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -435,7 +435,7 @@ pub async fn joined_members_route( if !services() .rooms .state_accessor - .user_can_see_state_events(&sender_user, &body.room_id)? + .user_can_see_state_events(sender_user, &body.room_id)? 
{ return Err(Error::BadRequest( ErrorKind::Forbidden, @@ -712,7 +712,7 @@ async fn join_room_by_id_helper( } info!("Running send_join auth check"); - if !state_res::event_auth::auth_check( + let authenticated = state_res::event_auth::auth_check( &state_res::RoomVersion::new(&room_version_id).expect("room version is supported"), &parsed_join_pdu, None::, // TODO: third party invite @@ -735,7 +735,9 @@ async fn join_room_by_id_helper( .map_err(|e| { warn!("Auth check failed: {e}"); Error::BadRequest(ErrorKind::InvalidParam, "Auth check failed") - })? { + })?; + + if !authenticated { return Err(Error::BadRequest( ErrorKind::InvalidParam, "Auth check failed", diff --git a/src/api/client_server/message.rs b/src/api/client_server/message.rs index 750e0303..0952092b 100644 --- a/src/api/client_server/message.rs +++ b/src/api/client_server/message.rs @@ -124,7 +124,7 @@ pub async fn get_message_events_route( let to = body .to .as_ref() - .and_then(|t| PduCount::try_from_string(&t).ok()); + .and_then(|t| PduCount::try_from_string(t).ok()); services().rooms.lazy_loading.lazy_load_confirm_delivery( sender_user, diff --git a/src/api/client_server/relations.rs b/src/api/client_server/relations.rs index a7cea786..124f1310 100644 --- a/src/api/client_server/relations.rs +++ b/src/api/client_server/relations.rs @@ -23,7 +23,7 @@ pub async fn get_relating_events_with_rel_type_and_event_type_route( let to = body .to .as_ref() - .and_then(|t| PduCount::try_from_string(&t).ok()); + .and_then(|t| PduCount::try_from_string(t).ok()); // Use limit or else 10, with maximum 100 let limit = body @@ -73,7 +73,7 @@ pub async fn get_relating_events_with_rel_type_route( let to = body .to .as_ref() - .and_then(|t| PduCount::try_from_string(&t).ok()); + .and_then(|t| PduCount::try_from_string(t).ok()); // Use limit or else 10, with maximum 100 let limit = body @@ -121,7 +121,7 @@ pub async fn get_relating_events_route( let to = body .to .as_ref() - .and_then(|t| PduCount::try_from_string(&t).ok()); + .and_then(|t| PduCount::try_from_string(t).ok()); // Use limit or else 10, with maximum 100 let limit = body diff --git a/src/api/client_server/session.rs b/src/api/client_server/session.rs index 5ce62af9..c17bd99b 100644 --- a/src/api/client_server/session.rs +++ b/src/api/client_server/session.rs @@ -117,12 +117,10 @@ pub async fn login_route(body: Ruma) -> Result { warn!("Unsupported or unknown login type: {:?}", &body.login_info); @@ -163,6 +161,8 @@ pub async fn login_route(body: Ruma) -> Result since; let mut timeline_users = HashSet::new(); @@ -599,16 +600,16 @@ async fn load_joined_room( } services().rooms.lazy_loading.lazy_load_confirm_delivery( - &sender_user, - &sender_device, - &room_id, + sender_user, + sender_device, + room_id, sincecount, )?; // Database queries: let current_shortstatehash = - if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? { + if let Some(s) = services().rooms.state.get_room_shortstatehash(room_id)? { s } else { error!("Room {} has no state", room_id); @@ -618,7 +619,7 @@ async fn load_joined_room( let since_shortstatehash = services() .rooms .user - .get_token_shortstatehash(&room_id, since)?; + .get_token_shortstatehash(room_id, since)?; let (heroes, joined_member_count, invited_member_count, joined_since_last_sync, state_events) = if timeline_pdus.is_empty() && since_shortstatehash == Some(current_shortstatehash) { @@ -630,12 +631,12 @@ async fn load_joined_room( let joined_member_count = services() .rooms .state_cache - .room_joined_count(&room_id)? 
+ .room_joined_count(room_id)? .unwrap_or(0); let invited_member_count = services() .rooms .state_cache - .room_invited_count(&room_id)? + .room_invited_count(room_id)? .unwrap_or(0); // Recalculate heroes (first 5 members) @@ -648,7 +649,7 @@ async fn load_joined_room( for hero in services() .rooms .timeline - .all_pdus(&sender_user, &room_id)? + .all_pdus(sender_user, room_id)? .filter_map(|pdu| pdu.ok()) // Ignore all broken pdus .filter(|(_, pdu)| pdu.kind == TimelineEventType::RoomMember) .map(|(_, pdu)| { @@ -669,11 +670,11 @@ async fn load_joined_room( ) && (services() .rooms .state_cache - .is_joined(&user_id, &room_id)? + .is_joined(&user_id, room_id)? || services() .rooms .state_cache - .is_invited(&user_id, &room_id)?) + .is_invited(&user_id, room_id)?) { Ok::<_, Error>(Some(state_key.clone())) } else { @@ -789,17 +790,17 @@ async fn load_joined_room( // Reset lazy loading because this is an initial sync services().rooms.lazy_loading.lazy_load_reset( - &sender_user, - &sender_device, - &room_id, + sender_user, + sender_device, + room_id, )?; // The state_events above should contain all timeline_users, let's mark them as lazy // loaded. services().rooms.lazy_loading.lazy_load_mark_sent( - &sender_user, - &sender_device, - &room_id, + sender_user, + sender_device, + room_id, lazy_loaded, next_batchcount, ); @@ -866,14 +867,14 @@ async fn load_joined_room( } if !services().rooms.lazy_loading.lazy_load_was_sent_before( - &sender_user, - &sender_device, - &room_id, + sender_user, + sender_device, + room_id, &event.sender, )? || lazy_load_send_redundant { if let Some(member_event) = services().rooms.state_accessor.room_state_get( - &room_id, + room_id, &StateEventType::RoomMember, event.sender.as_str(), )? { @@ -884,9 +885,9 @@ async fn load_joined_room( } services().rooms.lazy_loading.lazy_load_mark_sent( - &sender_user, - &sender_device, - &room_id, + sender_user, + sender_device, + room_id, lazy_loaded, next_batchcount, ); @@ -934,7 +935,7 @@ async fn load_joined_room( match new_membership { MembershipState::Join => { // A new user joined an encrypted room - if !share_encrypted_room(&sender_user, &user_id, &room_id)? { + if !share_encrypted_room(sender_user, &user_id, room_id)? { device_list_updates.insert(user_id); } } @@ -954,15 +955,15 @@ async fn load_joined_room( services() .rooms .state_cache - .room_members(&room_id) + .room_members(room_id) .flatten() .filter(|user_id| { // Don't send key updates from the sender to the sender - &sender_user != user_id + sender_user != user_id }) .filter(|user_id| { // Only send keys if the sender doesn't share an encrypted room with the target already - !share_encrypted_room(&sender_user, user_id, &room_id) + !share_encrypted_room(sender_user, user_id, room_id) .unwrap_or(false) }), ); @@ -997,7 +998,7 @@ async fn load_joined_room( services() .rooms .user - .notification_count(&sender_user, &room_id)? + .notification_count(sender_user, room_id)? .try_into() .expect("notification count can't go that high"), ) @@ -1010,7 +1011,7 @@ async fn load_joined_room( services() .rooms .user - .highlight_count(&sender_user, &room_id)? + .highlight_count(sender_user, room_id)? .try_into() .expect("highlight count can't go that high"), ) @@ -1039,15 +1040,15 @@ async fn load_joined_room( .rooms .edus .read_receipt - .readreceipts_since(&room_id, since) + .readreceipts_since(room_id, since) .filter_map(|r| r.ok()) // Filter out buggy events .map(|(_, _, v)| v) .collect(); - if services().rooms.edus.typing.last_typing_update(&room_id)? 
> since { + if services().rooms.edus.typing.last_typing_update(room_id)? > since { edus.push( serde_json::from_str( - &serde_json::to_string(&services().rooms.edus.typing.typings_all(&room_id)?) + &serde_json::to_string(&services().rooms.edus.typing.typings_all(room_id)?) .expect("event is valid, we just created it"), ) .expect("event is valid, we just created it"), @@ -1056,7 +1057,7 @@ async fn load_joined_room( // Save the state after this sync so we can send the correct state diff next sync services().rooms.user.associate_token_shortstatehash( - &room_id, + room_id, next_batch, current_shortstatehash, )?; @@ -1065,7 +1066,7 @@ async fn load_joined_room( account_data: RoomAccountData { events: services() .account_data - .changes_since(Some(&room_id), &sender_user, since)? + .changes_since(Some(room_id), sender_user, since)? .into_iter() .filter_map(|(_, v)| { serde_json::from_str(v.json().get()) @@ -1110,13 +1111,13 @@ fn load_timeline( if services() .rooms .timeline - .last_timeline_count(&sender_user, &room_id)? + .last_timeline_count(sender_user, room_id)? > roomsincecount { let mut non_timeline_pdus = services() .rooms .timeline - .pdus_until(&sender_user, &room_id, PduCount::max())? + .pdus_until(sender_user, room_id, PduCount::max())? .filter_map(|r| { // Filter out buggy events if r.is_err() { @@ -1172,7 +1173,6 @@ fn share_encrypted_room( pub async fn sync_events_v4_route( body: Ruma, ) -> Result> { - dbg!(&body.body); let sender_user = body.sender_user.expect("user is authenticated"); let sender_device = body.sender_device.expect("user is authenticated"); let mut body = body.body; @@ -1232,7 +1232,7 @@ pub async fn sync_events_v4_route( for room_id in &all_joined_rooms { let current_shortstatehash = - if let Some(s) = services().rooms.state.get_room_shortstatehash(&room_id)? { + if let Some(s) = services().rooms.state.get_room_shortstatehash(room_id)? { s } else { error!("Room {} has no state", room_id); @@ -1242,7 +1242,7 @@ pub async fn sync_events_v4_route( let since_shortstatehash = services() .rooms .user - .get_token_shortstatehash(&room_id, globalsince)?; + .get_token_shortstatehash(room_id, globalsince)?; let since_sender_member: Option = since_shortstatehash .and_then(|shortstatehash| { @@ -1331,7 +1331,7 @@ pub async fn sync_events_v4_route( if !share_encrypted_room( &sender_user, &user_id, - &room_id, + room_id, )? 
{ device_list_changes.insert(user_id); } @@ -1352,7 +1352,7 @@ pub async fn sync_events_v4_route( services() .rooms .state_cache - .room_members(&room_id) + .room_members(room_id) .flatten() .filter(|user_id| { // Don't send key updates from the sender to the sender @@ -1360,7 +1360,7 @@ pub async fn sync_events_v4_route( }) .filter(|user_id| { // Only send keys if the sender doesn't share an encrypted room with the target already - !share_encrypted_room(&sender_user, user_id, &room_id) + !share_encrypted_room(&sender_user, user_id, room_id) .unwrap_or(false) }), ); @@ -1451,7 +1451,7 @@ pub async fn sync_events_v4_route( } sync_events::v4::SyncOp { op: SlidingOp::Sync, - range: Some(r.clone()), + range: Some(r), index: None, room_ids, room_id: None, @@ -1523,7 +1523,7 @@ pub async fn sync_events_v4_route( let roomsincecount = PduCount::Normal(*roomsince); let (timeline_pdus, limited) = - load_timeline(&sender_user, &room_id, roomsincecount, *timeline_limit)?; + load_timeline(&sender_user, room_id, roomsincecount, *timeline_limit)?; if roomsince != &0 && timeline_pdus.is_empty() { continue; @@ -1555,63 +1555,58 @@ pub async fn sync_events_v4_route( let required_state = required_state_request .iter() - .map(|state| { + .flat_map(|state| { services() .rooms .state_accessor - .room_state_get(&room_id, &state.0, &state.1) + .room_state_get(room_id, &state.0, &state.1) + .ok() + .flatten() + .map(|state| state.to_sync_state_event()) }) - .filter_map(|r| r.ok()) - .filter_map(|o| o) - .map(|state| state.to_sync_state_event()) .collect(); // Heroes let heroes = services() .rooms .state_cache - .room_members(&room_id) + .room_members(room_id) .filter_map(|r| r.ok()) .filter(|member| member != &sender_user) - .map(|member| { - Ok::<_, Error>( - services() - .rooms - .state_accessor - .get_member(&room_id, &member)? - .map(|memberevent| { - ( - memberevent - .displayname - .unwrap_or_else(|| member.to_string()), - memberevent.avatar_url, - ) - }), - ) + .flat_map(|member| { + services() + .rooms + .state_accessor + .get_member(room_id, &member) + .ok() + .flatten() + .map(|memberevent| { + ( + memberevent + .displayname + .unwrap_or_else(|| member.to_string()), + memberevent.avatar_url, + ) + }) }) - .filter_map(|r| r.ok()) - .filter_map(|o| o) .take(5) .collect::>(); - let name = if heroes.len() > 1 { - let last = heroes[0].0.clone(); - Some( - heroes[1..] + let name = match &heroes[..] { + [] => None, + [only] => Some(only.0.clone()), + [firsts @ .., last] => Some( + firsts .iter() .map(|h| h.0.clone()) .collect::>() .join(", ") + " and " - + &last, - ) - } else if heroes.len() == 1 { - Some(heroes[0].0.clone()) - } else { - None + + &last.0, + ), }; - let avatar = if heroes.len() == 1 { - heroes[0].1.clone() + let avatar = if let [only] = &heroes[..] { + only.1.clone() } else { None }; @@ -1619,15 +1614,11 @@ pub async fn sync_events_v4_route( rooms.insert( room_id.clone(), sync_events::v4::SlidingSyncRoom { - name: services() - .rooms - .state_accessor - .get_name(&room_id)? - .or_else(|| name), + name: services().rooms.state_accessor.get_name(room_id)?.or(name), avatar: services() .rooms .state_accessor - .get_avatar(&room_id)? + .get_avatar(room_id)? .map_or(avatar, |a| a.url), initial: Some(roomsince == &0), is_dm: None, @@ -1637,7 +1628,7 @@ pub async fn sync_events_v4_route( services() .rooms .user - .highlight_count(&sender_user, &room_id)? + .highlight_count(&sender_user, room_id)? 
.try_into() .expect("notification count can't go that high"), ), @@ -1645,7 +1636,7 @@ pub async fn sync_events_v4_route( services() .rooms .user - .notification_count(&sender_user, &room_id)? + .notification_count(&sender_user, room_id)? .try_into() .expect("notification count can't go that high"), ), @@ -1658,7 +1649,7 @@ pub async fn sync_events_v4_route( (services() .rooms .state_cache - .room_joined_count(&room_id)? + .room_joined_count(room_id)? .unwrap_or(0) as u32) .into(), ), @@ -1666,7 +1657,7 @@ pub async fn sync_events_v4_route( (services() .rooms .state_cache - .room_invited_count(&room_id)? + .room_invited_count(room_id)? .unwrap_or(0) as u32) .into(), ), @@ -1689,7 +1680,7 @@ pub async fn sync_events_v4_route( let _ = tokio::time::timeout(duration, watcher).await; } - Ok(dbg!(sync_events::v4::Response { + Ok(sync_events::v4::Response { initial: globalsince == 0, txn_id: body.txn_id.clone(), pos: next_batch.to_string(), @@ -1744,5 +1735,5 @@ pub async fn sync_events_v4_route( }, }, delta_token: None, - })) + }) } diff --git a/src/api/server_server.rs b/src/api/server_server.rs index f29de32b..db17d586 100644 --- a/src/api/server_server.rs +++ b/src/api/server_server.rs @@ -55,7 +55,7 @@ use std::{ time::{Duration, Instant, SystemTime}, }; -use tracing::{debug, error, trace, warn}; +use tracing::{debug, error, warn}; /// Wraps either an literal IP address plus port, or a hostname plus complement /// (colon-plus-port if it was specified). @@ -341,7 +341,7 @@ fn add_port_to_hostname(destination_str: &str) -> FedDest { } /// Returns: actual_destination, host header -/// Implemented according to the specification at https://matrix.org/docs/spec/server_server/r0.1.4#resolving-server-names +/// Implemented according to the specification at /// Numbers in comments below refer to bullet points in linked section of specification async fn find_actual_destination(destination: &'_ ServerName) -> (FedDest, FedDest) { debug!("Finding actual destination for {destination}"); @@ -666,7 +666,7 @@ pub fn parse_incoming_pdu( let room_version_id = services().rooms.state.get_room_version(&room_id)?; - let (event_id, value) = match gen_event_id_canonical_json(&pdu, &room_version_id) { + let (event_id, value) = match gen_event_id_canonical_json(pdu, &room_version_id) { Ok(t) => t, Err(_) => { // Event could not be converted to canonical json @@ -724,7 +724,7 @@ pub async fn send_transaction_message_route( continue; } - let r = parse_incoming_pdu(&pdu); + let r = parse_incoming_pdu(pdu); let (event_id, value, room_id) = match r { Ok(t) => t, Err(e) => { @@ -992,7 +992,7 @@ pub async fn get_event_route( if !services().rooms.state_accessor.server_can_see_event( sender_servername, - &room_id, + room_id, &body.event_id, )? { return Err(Error::BadRequest( @@ -1058,7 +1058,7 @@ pub async fn get_backfill_route( let all_events = services() .rooms .timeline - .pdus_until(&user_id!("@doesntmatter:conduit.rs"), &body.room_id, until)? + .pdus_until(user_id!("@doesntmatter:conduit.rs"), &body.room_id, until)? 
.take(limit.try_into().unwrap()); let events = all_events @@ -1075,7 +1075,7 @@ pub async fn get_backfill_route( }) .map(|(_, pdu)| services().rooms.timeline.get_pdu_json(&pdu.event_id)) .filter_map(|r| r.ok().flatten()) - .map(|pdu| PduEvent::convert_to_outgoing_federation_event(pdu)) + .map(PduEvent::convert_to_outgoing_federation_event) .collect(); Ok(get_backfill::v1::Response { diff --git a/src/config/proxy.rs b/src/config/proxy.rs index dcf304e9..c03463e7 100644 --- a/src/config/proxy.rs +++ b/src/config/proxy.rs @@ -29,7 +29,9 @@ use crate::Result; /// would be used for `ordinary.onion`, `matrix.myspecial.onion`, but not `hello.myspecial.onion`. #[derive(Clone, Debug, Deserialize)] #[serde(rename_all = "snake_case")] +#[derive(Default)] pub enum ProxyConfig { + #[default] None, Global { #[serde(deserialize_with = "crate::utils::deserialize_from_str")] @@ -48,11 +50,6 @@ impl ProxyConfig { }) } } -impl Default for ProxyConfig { - fn default() -> Self { - ProxyConfig::None - } -} #[derive(Clone, Debug, Deserialize)] pub struct PartialProxyConfig { diff --git a/src/database/abstraction/watchers.rs b/src/database/abstraction/watchers.rs index 55cb60b3..01156abd 100644 --- a/src/database/abstraction/watchers.rs +++ b/src/database/abstraction/watchers.rs @@ -8,6 +8,7 @@ use tokio::sync::watch; #[derive(Default)] pub(super) struct Watchers { + #[allow(clippy::type_complexity)] watchers: RwLock, (watch::Sender<()>, watch::Receiver<()>)>>, } diff --git a/src/database/key_value/rooms/state_accessor.rs b/src/database/key_value/rooms/state_accessor.rs index ad08f46e..fe40b937 100644 --- a/src/database/key_value/rooms/state_accessor.rs +++ b/src/database/key_value/rooms/state_accessor.rs @@ -20,7 +20,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { let parsed = services() .rooms .state_compressor - .parse_compressed_state_event(&compressed)?; + .parse_compressed_state_event(compressed)?; result.insert(parsed.0, parsed.1); i += 1; @@ -49,7 +49,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { let (_, eventid) = services() .rooms .state_compressor - .parse_compressed_state_event(&compressed)?; + .parse_compressed_state_event(compressed)?; if let Some(pdu) = services().rooms.timeline.get_pdu(&eventid)? { result.insert( ( @@ -101,7 +101,7 @@ impl service::rooms::state_accessor::Data for KeyValueDatabase { services() .rooms .state_compressor - .parse_compressed_state_event(&compressed) + .parse_compressed_state_event(compressed) .ok() .map(|(_, id)| id) })) diff --git a/src/database/key_value/rooms/state_cache.rs b/src/database/key_value/rooms/state_cache.rs index d0ea0c2c..3dcaf4ae 100644 --- a/src/database/key_value/rooms/state_cache.rs +++ b/src/database/key_value/rooms/state_cache.rs @@ -471,6 +471,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { } /// Returns an iterator over all rooms a user was invited to. + #[allow(clippy::type_complexity)] #[tracing::instrument(skip(self))] fn rooms_invited<'a>( &'a self, @@ -549,6 +550,7 @@ impl service::rooms::state_cache::Data for KeyValueDatabase { } /// Returns an iterator over all rooms a user left. 
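The `src/config/proxy.rs` hunk above swaps a hand-written `impl Default` for `#[derive(Default)]` with a `#[default]` variant attribute (stable since Rust 1.62). A minimal, self-contained sketch of that pattern, using a hypothetical `Proxy` enum rather than the real `ProxyConfig`:

```rust
/// Hypothetical stand-in for ProxyConfig; only the derive pattern matters.
#[derive(Debug, Default, PartialEq)]
enum Proxy {
    /// The variant returned by `Proxy::default()`.
    #[default]
    None,
    Global,
}

fn main() {
    assert_eq!(Proxy::default(), Proxy::None);
}
```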
+ #[allow(clippy::type_complexity)] #[tracing::instrument(skip(self))] fn rooms_left<'a>( &'a self, diff --git a/src/database/key_value/rooms/threads.rs b/src/database/key_value/rooms/threads.rs index 4be289b0..5e3dc970 100644 --- a/src/database/key_value/rooms/threads.rs +++ b/src/database/key_value/rooms/threads.rs @@ -10,7 +10,7 @@ impl service::rooms::threads::Data for KeyValueDatabase { user_id: &'a UserId, room_id: &'a RoomId, until: u64, - include: &'a IncludeThreads, + _include: &'a IncludeThreads, ) -> Result> + 'a>> { let prefix = services() .rooms @@ -27,7 +27,7 @@ impl service::rooms::threads::Data for KeyValueDatabase { self.threadid_userids .iter_from(¤t, true) .take_while(move |(k, _)| k.starts_with(&prefix)) - .map(move |(pduid, users)| { + .map(move |(pduid, _users)| { let count = utils::u64_from_bytes(&pduid[(mem::size_of::())..]) .map_err(|_| Error::bad_database("Invalid pduid in threadid_userids."))?; let mut pdu = services() @@ -52,13 +52,13 @@ impl service::rooms::threads::Data for KeyValueDatabase { .collect::>() .join(&[0xff][..]); - self.threadid_userids.insert(&root_id, &users)?; + self.threadid_userids.insert(root_id, &users)?; Ok(()) } fn get_participants(&self, root_id: &[u8]) -> Result>> { - if let Some(users) = self.threadid_userids.get(&root_id)? { + if let Some(users) = self.threadid_userids.get(root_id)? { Ok(Some( users .split(|b| *b == 0xff) diff --git a/src/database/key_value/rooms/timeline.rs b/src/database/key_value/rooms/timeline.rs index ba1e85ef..f322d430 100644 --- a/src/database/key_value/rooms/timeline.rs +++ b/src/database/key_value/rooms/timeline.rs @@ -39,11 +39,10 @@ impl service::rooms::timeline::Data for KeyValueDatabase { /// Returns the `count` of this pdu's id. fn get_pdu_count(&self, event_id: &EventId) -> Result> { - Ok(self - .eventid_pduid + self.eventid_pduid .get(event_id.as_bytes())? .map(|pdu_id| pdu_count(&pdu_id)) - .transpose()?) + .transpose() } /// Returns the json of a pdu. @@ -80,7 +79,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { /// Returns the pdu's id. fn get_pdu_id(&self, event_id: &EventId) -> Result>> { - Ok(self.eventid_pduid.get(event_id.as_bytes())?) + self.eventid_pduid.get(event_id.as_bytes()) } /// Returns the pdu. @@ -230,7 +229,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { room_id: &RoomId, until: PduCount, ) -> Result> + 'a>> { - let (prefix, current) = count_to_id(&room_id, until, 1, true)?; + let (prefix, current) = count_to_id(room_id, until, 1, true)?; let user_id = user_id.to_owned(); @@ -257,7 +256,7 @@ impl service::rooms::timeline::Data for KeyValueDatabase { room_id: &RoomId, from: PduCount, ) -> Result> + 'a>> { - let (prefix, current) = count_to_id(&room_id, from, 1, false)?; + let (prefix, current) = count_to_id(room_id, from, 1, false)?; let user_id = user_id.to_owned(); diff --git a/src/main.rs b/src/main.rs index c74d6ddb..683e0914 100644 --- a/src/main.rs +++ b/src/main.rs @@ -238,7 +238,7 @@ async fn spawn_task( .map_err(|_| StatusCode::INTERNAL_SERVER_ERROR) } -async fn unrecognized_method( +async fn unrecognized_method( req: axum::http::Request, next: axum::middleware::Next, ) -> std::result::Result { diff --git a/src/service/admin/mod.rs b/src/service/admin/mod.rs index b22f8ed4..64958fc7 100644 --- a/src/service/admin/mod.rs +++ b/src/service/admin/mod.rs @@ -50,7 +50,7 @@ enum AdminCommand { /// Registering a new bridge using the ID of an existing bridge will replace /// the old one. 
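The `timeline.rs` cleanups above (`get_pdu_count`, `get_pdu_id`) drop the redundant `Ok(expr?)` wrapping and lean on `Option::transpose` to convert `Option<Result<T, E>>` into `Result<Option<T>, E>`. A small std-only sketch of that idiom, with a hypothetical lookup in place of the database call:

```rust
use std::collections::HashMap;
use std::num::ParseIntError;

/// Look up a key and parse its value; a missing key is Ok(None),
/// a present-but-unparsable value is Err.
fn parsed_value(map: &HashMap<u32, String>, key: u32) -> Result<Option<u64>, ParseIntError> {
    // Option<&String> -> Option<Result<u64, _>> -> Result<Option<u64>, _>
    map.get(&key).map(|s| s.parse::<u64>()).transpose()
}

fn main() -> Result<(), ParseIntError> {
    let mut map = HashMap::new();
    map.insert(1, "42".to_owned());
    assert_eq!(parsed_value(&map, 1)?, Some(42));
    assert_eq!(parsed_value(&map, 2)?, None);
    Ok(())
}
```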
/// - /// [commandbody] + /// [commandbody]() /// # ``` /// # yaml content here /// # ``` @@ -96,7 +96,7 @@ enum AdminCommand { /// Removing a mass amount of users from a room may cause a significant amount of leave events. /// The time to leave rooms may depend significantly on joined rooms and servers. /// - /// [commandbody] + /// [commandbody]() /// # ``` /// # User list here /// # ``` @@ -121,7 +121,7 @@ enum AdminCommand { /// The PDU event is only checked for validity and is not added to the /// database. /// - /// [commandbody] + /// [commandbody]() /// # ``` /// # PDU json content here /// # ``` @@ -165,14 +165,14 @@ enum AdminCommand { EnableRoom { room_id: Box }, /// Verify json signatures - /// [commandbody] + /// [commandbody]() /// # ``` /// # json here /// # ``` SignJson, /// Verify json signatures - /// [commandbody] + /// [commandbody]() /// # ``` /// # json here /// # ``` @@ -858,12 +858,15 @@ impl Service { .expect("Regex compilation should not fail"); let text = re.replace_all(&text, "$1: $4"); - // Look for a `[commandbody]` tag. If it exists, use all lines below it that + // Look for a `[commandbody]()` tag. If it exists, use all lines below it that // start with a `#` in the USAGE section. let mut text_lines: Vec<&str> = text.lines().collect(); let mut command_body = String::new(); - if let Some(line_index) = text_lines.iter().position(|line| *line == "[commandbody]") { + if let Some(line_index) = text_lines + .iter() + .position(|line| *line == "[commandbody]()") + { text_lines.remove(line_index); while text_lines diff --git a/src/service/rooms/edus/read_receipt/data.rs b/src/service/rooms/edus/read_receipt/data.rs index a183d196..044dad82 100644 --- a/src/service/rooms/edus/read_receipt/data.rs +++ b/src/service/rooms/edus/read_receipt/data.rs @@ -11,6 +11,7 @@ pub trait Data: Send + Sync { ) -> Result<()>; /// Returns an iterator over the most recent read_receipts in a room that happened after the event with id `since`. + #[allow(clippy::type_complexity)] fn readreceipts_since<'a>( &'a self, room_id: &RoomId, diff --git a/src/service/rooms/event_handler/mod.rs b/src/service/rooms/event_handler/mod.rs index 29199781..e7db6f78 100644 --- a/src/service/rooms/event_handler/mod.rs +++ b/src/service/rooms/event_handler/mod.rs @@ -92,7 +92,7 @@ impl Service { )); } - services().rooms.event_handler.acl_check(origin, &room_id)?; + services().rooms.event_handler.acl_check(origin, room_id)?; // 1. Skip the PDU if we already have it as a timeline event if let Some(pdu_id) = services().rooms.timeline.get_pdu_id(event_id)? { @@ -276,6 +276,7 @@ impl Service { r } + #[allow(clippy::type_complexity, clippy::too_many_arguments)] #[tracing::instrument(skip(self, create_event, value, pub_key_map))] fn handle_outlier_pdu<'a>( &'a self, @@ -1009,6 +1010,7 @@ impl Service { /// b. Look at outlier pdu tree /// c. Ask origin server over federation /// d. TODO: Ask other servers over federation? 
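The room-name computation rewritten in `sync_events_v4_route` (further above) replaces `heroes.len()`-based branching with slice patterns. A minimal sketch of the same `match &slice[..]` shape over hypothetical display names:

```rust
/// Join hero display names into a room name: "A", "A and B", "A, B and C".
fn room_name(heroes: &[String]) -> Option<String> {
    match heroes {
        [] => None,
        [only] => Some(only.clone()),
        [firsts @ .., last] => Some(format!("{} and {}", firsts.join(", "), last)),
    }
}

fn main() {
    let heroes: Vec<String> = vec!["Alice".into(), "Bob".into(), "Carol".into()];
    assert_eq!(room_name(&heroes), Some("Alice, Bob and Carol".into()));
    assert_eq!(room_name(&heroes[..1]), Some("Alice".into()));
    assert_eq!(room_name(&[]), None);
}
```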
+    #[allow(clippy::type_complexity)]
     #[tracing::instrument(skip_all)]
     pub(crate) fn fetch_and_handle_outliers<'a>(
         &'a self,
diff --git a/src/service/rooms/lazy_loading/mod.rs b/src/service/rooms/lazy_loading/mod.rs
index e6e4f896..c51a57e9 100644
--- a/src/service/rooms/lazy_loading/mod.rs
+++ b/src/service/rooms/lazy_loading/mod.rs
@@ -14,6 +14,7 @@ use super::timeline::PduCount;

 pub struct Service {
     pub db: &'static dyn Data,

+    #[allow(clippy::type_complexity)]
     pub lazy_load_waiting: Mutex>>,
 }
diff --git a/src/service/rooms/pdu_metadata/data.rs b/src/service/rooms/pdu_metadata/data.rs
index 6c4cb3ce..a4df34cc 100644
--- a/src/service/rooms/pdu_metadata/data.rs
+++ b/src/service/rooms/pdu_metadata/data.rs
@@ -5,6 +5,7 @@ use ruma::{EventId, RoomId, UserId};

 pub trait Data: Send + Sync {
     fn add_relation(&self, from: u64, to: u64) -> Result<()>;
+    #[allow(clippy::type_complexity)]
     fn relations_until<'a>(
         &'a self,
         user_id: &'a UserId,
diff --git a/src/service/rooms/pdu_metadata/mod.rs b/src/service/rooms/pdu_metadata/mod.rs
index 9ce74f4d..411f4f54 100644
--- a/src/service/rooms/pdu_metadata/mod.rs
+++ b/src/service/rooms/pdu_metadata/mod.rs
@@ -40,6 +40,7 @@ impl Service {
         }
     }

+    #[allow(clippy::too_many_arguments)]
     pub fn paginate_relations_with_filter(
         &self,
         sender_user: &UserId,
@@ -82,7 +83,7 @@ impl Service {
                     services()
                         .rooms
                         .state_accessor
-                        .user_can_see_event(sender_user, &room_id, &pdu.event_id)
+                        .user_can_see_event(sender_user, room_id, &pdu.event_id)
                         .unwrap_or(false)
                 })
                 .take_while(|&(k, _)| Some(k) != to) // Stop at `to`
@@ -106,7 +107,7 @@ impl Service {
                 let events_before: Vec<_> = services()
                     .rooms
                     .pdu_metadata
-                    .relations_until(sender_user, &room_id, target, from)?
+                    .relations_until(sender_user, room_id, target, from)?
                     .filter(|r| {
                         r.as_ref().map_or(true, |(_, pdu)| {
                             filter_event_type.as_ref().map_or(true, |t| &pdu.kind == t)
@@ -129,7 +130,7 @@ impl Service {
                     services()
                         .rooms
                         .state_accessor
-                        .user_can_see_event(sender_user, &room_id, &pdu.event_id)
+                        .user_can_see_event(sender_user, room_id, &pdu.event_id)
                         .unwrap_or(false)
                 })
                 .take_while(|&(k, _)| Some(k) != to) // Stop at `to`
diff --git a/src/service/rooms/search/data.rs b/src/service/rooms/search/data.rs
index 6eef38fb..7ea7e3d1 100644
--- a/src/service/rooms/search/data.rs
+++ b/src/service/rooms/search/data.rs
@@ -4,6 +4,7 @@ use ruma::RoomId;
 pub trait Data: Send + Sync {
     fn index_pdu(&self, shortroomid: u64, pdu_id: &[u8], message_body: &str) -> Result<()>;

+    #[allow(clippy::type_complexity)]
     fn search_pdus<'a>(
         &'a self,
         room_id: &RoomId,
diff --git a/src/service/rooms/spaces/mod.rs b/src/service/rooms/spaces/mod.rs
index 53232f46..615e9ca0 100644
--- a/src/service/rooms/spaces/mod.rs
+++ b/src/service/rooms/spaces/mod.rs
@@ -197,7 +197,7 @@ impl Service {
             if let Ok(response) = services()
                 .sending
                 .send_federation_request(
-                    &server,
+                    server,
                     federation::space::get_hierarchy::v1::Request {
                         room_id: current_room.to_owned(),
                         suggested_only,
@@ -235,7 +235,7 @@ impl Service {
                             .room
                             .allowed_room_ids
                             .into_iter()
-                            .map(|room| AllowRule::room_membership(room))
+                            .map(AllowRule::room_membership)
                             .collect(),
                     })
                 }
@@ -245,7 +245,7 @@ impl Service {
                             .room
                             .allowed_room_ids
                             .into_iter()
-                            .map(|room| AllowRule::room_membership(room))
+                            .map(AllowRule::room_membership)
                             .collect(),
                     })
                 }
@@ -313,7 +313,7 @@ impl Service {
             canonical_alias: services()
                 .rooms
                 .state_accessor
-                .room_state_get(&room_id, &StateEventType::RoomCanonicalAlias, "")?
+                .room_state_get(room_id, &StateEventType::RoomCanonicalAlias, "")?
                 .map_or(Ok(None), |s| {
                     serde_json::from_str(s.content.get())
                         .map(|c: RoomCanonicalAliasEventContent| c.alias)
@@ -321,11 +321,11 @@ impl Service {
                             Error::bad_database("Invalid canonical alias event in database.")
                         })
                 })?,
-            name: services().rooms.state_accessor.get_name(&room_id)?,
+            name: services().rooms.state_accessor.get_name(room_id)?,
             num_joined_members: services()
                 .rooms
                 .state_cache
-                .room_joined_count(&room_id)?
+                .room_joined_count(room_id)?
                 .unwrap_or_else(|| {
                     warn!("Room {} has no member count", room_id);
                     0
@@ -336,7 +336,7 @@ impl Service {
             topic: services()
                 .rooms
                 .state_accessor
-                .room_state_get(&room_id, &StateEventType::RoomTopic, "")?
+                .room_state_get(room_id, &StateEventType::RoomTopic, "")?
                 .map_or(Ok(None), |s| {
                     serde_json::from_str(s.content.get())
                         .map(|c: RoomTopicEventContent| Some(c.topic))
@@ -348,7 +348,7 @@ impl Service {
             world_readable: services()
                 .rooms
                 .state_accessor
-                .room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")?
+                .room_state_get(room_id, &StateEventType::RoomHistoryVisibility, "")?
                 .map_or(Ok(false), |s| {
                     serde_json::from_str(s.content.get())
                         .map(|c: RoomHistoryVisibilityEventContent| {
@@ -363,7 +363,7 @@ impl Service {
             guest_can_join: services()
                 .rooms
                 .state_accessor
-                .room_state_get(&room_id, &StateEventType::RoomGuestAccess, "")?
+                .room_state_get(room_id, &StateEventType::RoomGuestAccess, "")?
                 .map_or(Ok(false), |s| {
                     serde_json::from_str(s.content.get())
                         .map(|c: RoomGuestAccessEventContent| {
@@ -376,7 +376,7 @@ impl Service {
             avatar_url: services()
                 .rooms
                 .state_accessor
-                .room_state_get(&room_id, &StateEventType::RoomAvatar, "")?
+                .room_state_get(room_id, &StateEventType::RoomAvatar, "")?
                 .map(|s| {
                     serde_json::from_str(s.content.get())
                         .map(|c: RoomAvatarEventContent| c.url)
@@ -389,7 +389,7 @@ impl Service {
         let join_rule = services()
             .rooms
             .state_accessor
-            .room_state_get(&room_id, &StateEventType::RoomJoinRules, "")?
+            .room_state_get(room_id, &StateEventType::RoomJoinRules, "")?
             .map(|s| {
                 serde_json::from_str(s.content.get())
                     .map(|c: RoomJoinRulesEventContent| c.join_rule)
@@ -415,7 +415,7 @@ impl Service {
             room_type: services()
                 .rooms
                 .state_accessor
-                .room_state_get(&room_id, &StateEventType::RoomCreate, "")?
+                .room_state_get(room_id, &StateEventType::RoomCreate, "")?
                 .map(|s| {
                     serde_json::from_str::(s.content.get()).map_err(|e| {
                         error!("Invalid room create event in database: {}", e);
@@ -455,7 +455,7 @@ impl Service {
             SpaceRoomJoinRule::Invite => services()
                 .rooms
                 .state_cache
-                .is_joined(sender_user, &room_id)?,
+                .is_joined(sender_user, room_id)?,
             _ => false,
         };

@@ -479,17 +479,14 @@ impl Service {
         match join_rule {
             JoinRule::Restricted(r) => {
                 for rule in &r.allow {
-                    match rule {
-                        join_rules::AllowRule::RoomMembership(rm) => {
-                            if let Ok(true) = services()
-                                .rooms
-                                .state_cache
-                                .is_joined(sender_user, &rm.room_id)
-                            {
-                                return Ok(true);
-                            }
+                    if let join_rules::AllowRule::RoomMembership(rm) = rule {
+                        if let Ok(true) = services()
+                            .rooms
+                            .state_cache
+                            .is_joined(sender_user, &rm.room_id)
+                        {
+                            return Ok(true);
                         }
-                        _ => {}
                     }
                 }
diff --git a/src/service/rooms/state/mod.rs b/src/service/rooms/state/mod.rs
index 48c60203..c209eb5a 100644
--- a/src/service/rooms/state/mod.rs
+++ b/src/service/rooms/state/mod.rs
@@ -41,7 +41,7 @@ impl Service {
                     services()
                         .rooms
                         .state_compressor
-                        .parse_compressed_state_event(&new)
+                        .parse_compressed_state_event(new)
                         .ok()
                         .map(|(_, id)| id)
                 }) {
@@ -412,7 +412,7 @@ impl Service {
                 services()
                     .rooms
                     .state_compressor
-                    .parse_compressed_state_event(&compressed)
+                    .parse_compressed_state_event(compressed)
                     .ok()
             })
             .filter_map(|(shortstatekey, event_id)| {
diff --git a/src/service/rooms/state_accessor/mod.rs b/src/service/rooms/state_accessor/mod.rs
index a4a62fe4..b00dc58c 100644
--- a/src/service/rooms/state_accessor/mod.rs
+++ b/src/service/rooms/state_accessor/mod.rs
@@ -180,7 +180,7 @@ impl Service {
             return Ok(*visibility);
         }

-        let currently_member = services().rooms.state_cache.is_joined(&user_id, &room_id)?;
+        let currently_member = services().rooms.state_cache.is_joined(user_id, room_id)?;

         let history_visibility = self
             .state_get(shortstatehash, &StateEventType::RoomHistoryVisibility, "")?
@@ -197,11 +197,11 @@ impl Service {
             HistoryVisibility::Shared => currently_member,
             HistoryVisibility::Invited => {
                 // Allow if any member on requesting server was AT LEAST invited, else deny
-                self.user_was_invited(shortstatehash, &user_id)
+                self.user_was_invited(shortstatehash, user_id)
             }
             HistoryVisibility::Joined => {
                 // Allow if any member on requested server was joined, else deny
-                self.user_was_joined(shortstatehash, &user_id)
+                self.user_was_joined(shortstatehash, user_id)
             }
             _ => {
                 error!("Unknown history visibility {history_visibility}");
@@ -221,10 +221,10 @@ impl Service {
     /// the room's history_visibility at that event's state.
     #[tracing::instrument(skip(self, user_id, room_id))]
     pub fn user_can_see_state_events(&self, user_id: &UserId, room_id: &RoomId) -> Result {
-        let currently_member = services().rooms.state_cache.is_joined(&user_id, &room_id)?;
+        let currently_member = services().rooms.state_cache.is_joined(user_id, room_id)?;

         let history_visibility = self
-            .room_state_get(&room_id, &StateEventType::RoomHistoryVisibility, "")?
+            .room_state_get(room_id, &StateEventType::RoomHistoryVisibility, "")?
             .map_or(Ok(HistoryVisibility::Shared), |s| {
                 serde_json::from_str(s.content.get())
                     .map(|c: RoomHistoryVisibilityEventContent| c.history_visibility)
@@ -276,7 +276,7 @@ impl Service {
         services()
             .rooms
             .state_accessor
-            .room_state_get(&room_id, &StateEventType::RoomName, "")?
+            .room_state_get(room_id, &StateEventType::RoomName, "")?
             .map_or(Ok(None), |s| {
                 serde_json::from_str(s.content.get())
                     .map(|c: RoomNameEventContent| c.name)
@@ -288,7 +288,7 @@ impl Service {
         services()
             .rooms
             .state_accessor
-            .room_state_get(&room_id, &StateEventType::RoomAvatar, "")?
+            .room_state_get(room_id, &StateEventType::RoomAvatar, "")?
             .map_or(Ok(None), |s| {
                 serde_json::from_str(s.content.get())
                     .map_err(|_| Error::bad_database("Invalid room avatar event in database."))
@@ -303,7 +303,7 @@ impl Service {
         services()
             .rooms
             .state_accessor
-            .room_state_get(&room_id, &StateEventType::RoomMember, user_id.as_str())?
+            .room_state_get(room_id, &StateEventType::RoomMember, user_id.as_str())?
             .map_or(Ok(None), |s| {
                 serde_json::from_str(s.content.get())
                     .map_err(|_| Error::bad_database("Invalid room member event in database."))
diff --git a/src/service/rooms/state_cache/data.rs b/src/service/rooms/state_cache/data.rs
index d8bb4a44..8921909f 100644
--- a/src/service/rooms/state_cache/data.rs
+++ b/src/service/rooms/state_cache/data.rs
@@ -78,6 +78,7 @@ pub trait Data: Send + Sync {
     ) -> Box> + 'a>;

     /// Returns an iterator over all rooms a user was invited to.
+    #[allow(clippy::type_complexity)]
     fn rooms_invited<'a>(
         &'a self,
         user_id: &UserId,
@@ -96,6 +97,7 @@ pub trait Data: Send + Sync {
     ) -> Result>>>;

     /// Returns an iterator over all rooms a user left.
+    #[allow(clippy::type_complexity)]
     fn rooms_left<'a>(
         &'a self,
         user_id: &UserId,
diff --git a/src/service/rooms/state_compressor/mod.rs b/src/service/rooms/state_compressor/mod.rs
index d29b020b..6118e06b 100644
--- a/src/service/rooms/state_compressor/mod.rs
+++ b/src/service/rooms/state_compressor/mod.rs
@@ -16,6 +16,7 @@ use self::data::StateDiff;
 pub struct Service {
     pub db: &'static dyn Data,

+    #[allow(clippy::type_complexity)]
     pub stateinfo_cache: Mutex<
         LruCache<
             u64,
@@ -33,6 +34,7 @@ pub type CompressedStateEvent = [u8; 2 * size_of::()];

 impl Service {
     /// Returns a stack with info on shortstatehash, full state, added diff and removed diff for the selected shortstatehash and each parent layer.
+    #[allow(clippy::type_complexity)]
     #[tracing::instrument(skip(self))]
     pub fn load_shortstatehash_info(
         &self,
@@ -131,6 +133,7 @@ impl Service {
     /// * `statediffremoved` - Removed from base. Each vec is shortstatekey+shorteventid
     /// * `diff_to_sibling` - Approximately how much the diff grows each time for this layer
     /// * `parent_states` - A stack with info on shortstatehash, full state, added diff and removed diff for each parent layer
+    #[allow(clippy::type_complexity)]
     #[tracing::instrument(skip(
         self,
         statediffnew,
@@ -164,7 +167,7 @@ impl Service {
            for removed in statediffremoved.iter() {
                if !parent_new.remove(removed) {
                    // It was not added in the parent and we removed it
-                    parent_removed.insert(removed.clone());
+                    parent_removed.insert(*removed);
                }
                // Else it was added in the parent and we removed it again. We can forget this change
            }
@@ -172,7 +175,7 @@ impl Service {
            for new in statediffnew.iter() {
                if !parent_removed.remove(new) {
                    // It was not touched in the parent and we added it
-                    parent_new.insert(new.clone());
+                    parent_new.insert(*new);
                }
                // Else it was removed in the parent and we added it again. We can forget this change
            }
@@ -217,7 +220,7 @@ impl Service {
            for removed in statediffremoved.iter() {
                if !parent_new.remove(removed) {
                    // It was not added in the parent and we removed it
-                    parent_removed.insert(removed.clone());
+                    parent_removed.insert(*removed);
                }
                // Else it was added in the parent and we removed it again. We can forget this change
            }
@@ -225,7 +228,7 @@ impl Service {
            for new in statediffnew.iter() {
                if !parent_removed.remove(new) {
                    // It was not touched in the parent and we added it
-                    parent_new.insert(new.clone());
+                    parent_new.insert(*new);
                }
                // Else it was removed in the parent and we added it again. We can forget this change
            }
@@ -253,6 +256,7 @@ impl Service {
     }

     /// Returns the new shortstatehash, and the state diff from the previous room state
+    #[allow(clippy::type_complexity)]
     pub fn save_state(
         &self,
         room_id: &RoomId,
diff --git a/src/service/rooms/threads/data.rs b/src/service/rooms/threads/data.rs
index 9221e8e8..e7159de0 100644
--- a/src/service/rooms/threads/data.rs
+++ b/src/service/rooms/threads/data.rs
@@ -2,6 +2,7 @@ use crate::{PduEvent, Result};
 use ruma::{api::client::threads::get_threads::v1::IncludeThreads, OwnedUserId, RoomId, UserId};

 pub trait Data: Send + Sync {
+    #[allow(clippy::type_complexity)]
     fn threads_until<'a>(
         &'a self,
         user_id: &'a UserId,
diff --git a/src/service/rooms/threads/mod.rs b/src/service/rooms/threads/mod.rs
index fb703839..c6193bc8 100644
--- a/src/service/rooms/threads/mod.rs
+++ b/src/service/rooms/threads/mod.rs
@@ -26,7 +26,7 @@ impl Service {
         self.db.threads_until(user_id, room_id, until, include)
     }

-    pub fn add_to_thread<'a>(&'a self, root_event_id: &EventId, pdu: &PduEvent) -> Result<()> {
+    pub fn add_to_thread(&self, root_event_id: &EventId, pdu: &PduEvent) -> Result<()> {
         let root_id = &services()
             .rooms
             .timeline
@@ -103,7 +103,7 @@ impl Service {
         }

         let mut users = Vec::new();
-        if let Some(userids) = self.db.get_participants(&root_id)? {
+        if let Some(userids) = self.db.get_participants(root_id)? {
             users.extend_from_slice(&userids);
             users.push(pdu.sender.clone());
         } else {
diff --git a/src/service/rooms/timeline/data.rs b/src/service/rooms/timeline/data.rs
index afa2cfbf..6290b8cc 100644
--- a/src/service/rooms/timeline/data.rs
+++ b/src/service/rooms/timeline/data.rs
@@ -66,6 +66,7 @@ pub trait Data: Send + Sync {

     /// Returns an iterator over all events and their tokens in a room that happened before the
     /// event with id `until` in reverse-chronological order.
+    #[allow(clippy::type_complexity)]
     fn pdus_until<'a>(
         &'a self,
         user_id: &UserId,
@@ -75,6 +76,7 @@ pub trait Data: Send + Sync {

     /// Returns an iterator over all events in a room that happened after the event with id `from`
     /// in chronological order.
+    #[allow(clippy::type_complexity)]
     fn pdus_after<'a>(
         &'a self,
         user_id: &UserId,
diff --git a/src/service/rooms/timeline/mod.rs b/src/service/rooms/timeline/mod.rs
index 25e1c54d..61b73378 100644
--- a/src/service/rooms/timeline/mod.rs
+++ b/src/service/rooms/timeline/mod.rs
@@ -58,8 +58,8 @@ impl PduCount {
     }

     pub fn try_from_string(token: &str) -> Result {
-        if token.starts_with('-') {
-            token[1..].parse().map(PduCount::Backfilled)
+        if let Some(stripped) = token.strip_prefix('-') {
+            stripped.parse().map(PduCount::Backfilled)
         } else {
             token.parse().map(PduCount::Normal)
         }
@@ -112,7 +112,7 @@ pub struct Service {
 impl Service {
     #[tracing::instrument(skip(self))]
     pub fn first_pdu_in_room(&self, room_id: &RoomId) -> Result>> {
-        self.all_pdus(&user_id!("@doesntmatter:conduit.rs"), &room_id)?
+        self.all_pdus(user_id!("@doesntmatter:conduit.rs"), room_id)?
             .next()
             .map(|o| o.map(|(_, p)| Arc::new(p)))
             .transpose()
@@ -458,7 +458,7 @@ impl Service {
         let to_conduit = body.starts_with(&format!("{server_user}: "))
             || body.starts_with(&format!("{server_user} "))
             || body == format!("{server_user}:")
-            || body == format!("{server_user}");
+            || body == server_user;

         // This will evaluate to false if the emergency password is set up so that
         // the administrator can execute commands as conduit
@@ -842,7 +842,7 @@ impl Service {

         let target = pdu
             .state_key()
-            .filter(|v| v.starts_with("@"))
+            .filter(|v| v.starts_with('@'))
             .unwrap_or(sender.as_str());
         let server_name = services().globals.server_name();
         let server_user = format!("@conduit:{}", server_name);
@@ -850,7 +850,7 @@ impl Service {
             .map_err(|_| Error::bad_database("Invalid content in pdu."))?;

         if content.membership == MembershipState::Leave {
-            if target == &server_user {
+            if target == server_user {
                 warn!("Conduit user cannot leave from admins room");
                 return Err(Error::BadRequest(
                     ErrorKind::Forbidden,
@@ -876,7 +876,7 @@ impl Service {
         }

         if content.membership == MembershipState::Ban && pdu.state_key().is_some() {
-            if target == &server_user {
+            if target == server_user {
                 warn!("Conduit user cannot be banned in admins room");
                 return Err(Error::BadRequest(
                     ErrorKind::Forbidden,
@@ -1048,7 +1048,7 @@ impl Service {
     #[tracing::instrument(skip(self, room_id))]
     pub async fn backfill_if_required(&self, room_id: &RoomId, from: PduCount) -> Result<()> {
         let first_pdu = self
-            .all_pdus(&user_id!("@doesntmatter:conduit.rs"), &room_id)?
+            .all_pdus(user_id!("@doesntmatter:conduit.rs"), room_id)?
             .next()
             .expect("Room is not empty")?;

@@ -1060,7 +1060,7 @@ impl Service {
         let power_levels: RoomPowerLevelsEventContent = services()
             .rooms
             .state_accessor
-            .room_state_get(&room_id, &StateEventType::RoomPowerLevels, "")?
+            .room_state_get(room_id, &StateEventType::RoomPowerLevels, "")?
             .map(|ev| {
                 serde_json::from_str(ev.content.get())
                     .map_err(|_| Error::bad_database("invalid m.room.power_levels event"))
@@ -1091,11 +1091,9 @@ impl Service {
                     .await;
                 match response {
                     Ok(response) => {
-                        let mut pub_key_map = RwLock::new(BTreeMap::new());
+                        let pub_key_map = RwLock::new(BTreeMap::new());
                         for pdu in response.pdus {
-                            if let Err(e) = self
-                                .backfill_pdu(backfill_server, pdu, &mut pub_key_map)
-                                .await
+                            if let Err(e) = self.backfill_pdu(backfill_server, pdu, &pub_key_map).await
                             {
                                 warn!("Failed to add backfilled pdu: {e}");
                             }
@@ -1142,7 +1140,7 @@ impl Service {
         services()
             .rooms
             .event_handler
-            .handle_incoming_pdu(origin, &event_id, &room_id, value, false, &pub_key_map)
+            .handle_incoming_pdu(origin, &event_id, &room_id, value, false, pub_key_map)
             .await?;

         let value = self.get_pdu_json(&event_id)?.expect("We just created it");
@@ -1175,24 +1173,21 @@ impl Service {

         drop(insert_lock);

-        match pdu.kind {
-            TimelineEventType::RoomMessage => {
-                #[derive(Deserialize)]
-                struct ExtractBody {
-                    body: Option,
-                }
+        if pdu.kind == TimelineEventType::RoomMessage {
+            #[derive(Deserialize)]
+            struct ExtractBody {
+                body: Option,
+            }

-                let content = serde_json::from_str::(pdu.content.get())
-                    .map_err(|_| Error::bad_database("Invalid content in pdu."))?;
+            let content = serde_json::from_str::(pdu.content.get())
+                .map_err(|_| Error::bad_database("Invalid content in pdu."))?;

-                if let Some(body) = content.body {
-                    services()
-                        .rooms
-                        .search
-                        .index_pdu(shortroomid, &pdu_id, &body)?;
-                }
+            if let Some(body) = content.body {
+                services()
+                    .rooms
+                    .search
+                    .index_pdu(shortroomid, &pdu_id, &body)?;
             }
-            _ => {}
         }

         drop(mutex_lock);
diff --git a/src/service/sending/data.rs b/src/service/sending/data.rs
index 2e574e23..8b4d236f 100644
--- a/src/service/sending/data.rs
+++ b/src/service/sending/data.rs
@@ -5,6 +5,7 @@ use crate::Result;
 use super::{OutgoingKind, SendingEventType};

 pub trait Data: Send + Sync {
+    #[allow(clippy::type_complexity)]
     fn active_requests<'a>(
         &'a self,
     ) -> Box, OutgoingKind, SendingEventType)>> + 'a>;
diff --git a/src/service/users/mod.rs b/src/service/users/mod.rs
index 6faa6d8e..dc34d533 100644
--- a/src/service/users/mod.rs
+++ b/src/service/users/mod.rs
@@ -34,6 +34,7 @@ pub struct SlidingSyncCache {

 pub struct Service {
     pub db: &'static dyn Data,
+    #[allow(clippy::type_complexity)]
     pub connections: Mutex>>>,
 }