From 7a9660367a2d8c5694d4c81b5eabbf9e24f93fca Mon Sep 17 00:00:00 2001
From: Devon Hudson
Date: Tue, 18 Nov 2025 18:04:26 -0700
Subject: [PATCH 01/47] Capitalize Synapse in CHANGES.md

---
 CHANGES.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CHANGES.md b/CHANGES.md
index b492695196..770c468112 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -1,4 +1,4 @@
-# synapse 1.143.0rc2 (2025-11-18)
+# Synapse 1.143.0rc2 (2025-11-18)
 
 ## Internal Changes
 

From e39fba61a796d38f82ba1053041088e743fbefc2 Mon Sep 17 00:00:00 2001
From: Eric Eastwood
Date: Fri, 21 Nov 2025 10:51:19 -0600
Subject: [PATCH 02/47] Refactor `scripts-dev/complement.sh` logic to avoid
 `exit` (#19209)

This is useful so that the script can be sourced by other scripts without
exiting the calling subshell (composable).

This is split out from https://github.com/element-hq/synapse/pull/19208 to
make the PRs easy to understand and build up to where we want to go.

---
 changelog.d/19209.misc    |   1 +
 scripts-dev/complement.sh | 346 ++++++++++++++++++++------------
 2 files changed, 179 insertions(+), 168 deletions(-)
 create mode 100644 changelog.d/19209.misc

diff --git a/changelog.d/19209.misc b/changelog.d/19209.misc
new file mode 100644
index 0000000000..e64ca85d1d
--- /dev/null
+++ b/changelog.d/19209.misc
@@ -0,0 +1 @@
+Refactor `scripts-dev/complement.sh` logic to avoid `exit` to facilitate being able to source it from other scripts (composable).

diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh
index c4d678b142..adb5807325 100755
--- a/scripts-dev/complement.sh
+++ b/scripts-dev/complement.sh
@@ -72,153 +72,154 @@ For help on arguments to 'go test', run 'go help testflag'.
 EOF
 }
 
-# parse our arguments
-skip_docker_build=""
-skip_complement_run=""
-while [ $# -ge 1 ]; do
+main() {
+  # parse our arguments
+  skip_docker_build=""
+  skip_complement_run=""
+  while [ $# -ge 1 ]; do
     arg=$1
     case "$arg" in
-        "-h")
-            usage
-            exit 1
-            ;;
-        "-f"|"--fast")
-            skip_docker_build=1
-            ;;
-        "--build-only")
-            skip_complement_run=1
-            ;;
-        "-e"|"--editable")
-            use_editable_synapse=1
-            ;;
-        "--rebuild-editable")
-            rebuild_editable_synapse=1
-            ;;
-        *)
-            # unknown arg: presumably an argument to gotest. break the loop.
-            break
+      "-h")
+        usage
+        return 1
+        ;;
+      "-f"|"--fast")
+        skip_docker_build=1
+        ;;
+      "--build-only")
+        skip_complement_run=1
+        ;;
+      "-e"|"--editable")
+        use_editable_synapse=1
+        ;;
+      "--rebuild-editable")
+        rebuild_editable_synapse=1
+        ;;
+      *)
+        # unknown arg: presumably an argument to gotest. break the loop.
+        break
     esac
     shift
-done
-
-# enable buildkit for the docker builds
-export DOCKER_BUILDKIT=1
-
-# Determine whether to use the docker or podman container runtime.
-if [ -n "$PODMAN" ]; then
-    export CONTAINER_RUNTIME=podman
-    export DOCKER_HOST=unix://$XDG_RUNTIME_DIR/podman/podman.sock
-    export BUILDAH_FORMAT=docker
-    export COMPLEMENT_HOSTNAME_RUNNING_COMPLEMENT=host.containers.internal
-else
-    export CONTAINER_RUNTIME=docker
-fi
+  done
 
-# Change to the repository root
-cd "$(dirname $0)/.."
-
-# Check for a user-specified Complement checkout
-if [[ -z "$COMPLEMENT_DIR" ]]; then
-  COMPLEMENT_REF=${COMPLEMENT_REF:-main}
-  echo "COMPLEMENT_DIR not set. Fetching Complement checkout from ${COMPLEMENT_REF}..."
- wget -Nq https://github.com/matrix-org/complement/archive/${COMPLEMENT_REF}.tar.gz - tar -xzf ${COMPLEMENT_REF}.tar.gz - COMPLEMENT_DIR=complement-${COMPLEMENT_REF} - echo "Checkout available at 'complement-${COMPLEMENT_REF}'" -fi + # enable buildkit for the docker builds + export DOCKER_BUILDKIT=1 + + # Determine whether to use the docker or podman container runtime. + if [ -n "$PODMAN" ]; then + export CONTAINER_RUNTIME=podman + export DOCKER_HOST=unix://$XDG_RUNTIME_DIR/podman/podman.sock + export BUILDAH_FORMAT=docker + export COMPLEMENT_HOSTNAME_RUNNING_COMPLEMENT=host.containers.internal + else + export CONTAINER_RUNTIME=docker + fi + + # Change to the repository root + cd "$(dirname $0)/.." + + # Check for a user-specified Complement checkout + if [[ -z "$COMPLEMENT_DIR" ]]; then + COMPLEMENT_REF=${COMPLEMENT_REF:-main} + echo "COMPLEMENT_DIR not set. Fetching Complement checkout from ${COMPLEMENT_REF}..." + wget -Nq https://github.com/matrix-org/complement/archive/${COMPLEMENT_REF}.tar.gz + tar -xzf ${COMPLEMENT_REF}.tar.gz + COMPLEMENT_DIR=complement-${COMPLEMENT_REF} + echo "Checkout available at 'complement-${COMPLEMENT_REF}'" + fi -if [ -n "$use_editable_synapse" ]; then + if [ -n "$use_editable_synapse" ]; then if [[ -e synapse/synapse_rust.abi3.so ]]; then - # In an editable install, back up the host's compiled Rust module to prevent - # inconvenience; the container will overwrite the module with its own copy. - mv -n synapse/synapse_rust.abi3.so synapse/synapse_rust.abi3.so~host - # And restore it on exit: - synapse_pkg=`realpath synapse` - trap "mv -f '$synapse_pkg/synapse_rust.abi3.so~host' '$synapse_pkg/synapse_rust.abi3.so'" EXIT + # In an editable install, back up the host's compiled Rust module to prevent + # inconvenience; the container will overwrite the module with its own copy. + mv -n synapse/synapse_rust.abi3.so synapse/synapse_rust.abi3.so~host + # And restore it on exit: + synapse_pkg=`realpath synapse` + trap "mv -f '$synapse_pkg/synapse_rust.abi3.so~host' '$synapse_pkg/synapse_rust.abi3.so'" EXIT fi editable_mount="$(realpath .):/editable-src:z" if [ -n "$rebuild_editable_synapse" ]; then - unset skip_docker_build + unset skip_docker_build elif $CONTAINER_RUNTIME inspect complement-synapse-editable &>/dev/null; then - # complement-synapse-editable already exists: see if we can still use it: - # - The Rust module must still be importable; it will fail to import if the Rust source has changed. - # - The Poetry lock file must be the same (otherwise we assume dependencies have changed) - - # First set up the module in the right place for an editable installation. - $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so - - if ($CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'python' complement-synapse-editable -c 'import synapse.synapse_rust' \ - && $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'diff' complement-synapse-editable --brief /editable-src/poetry.lock /poetry.lock.bak); then - skip_docker_build=1 - else - echo "Editable Synapse image is stale. Will rebuild." - unset skip_docker_build - fi + # complement-synapse-editable already exists: see if we can still use it: + # - The Rust module must still be importable; it will fail to import if the Rust source has changed. 
+ # - The Poetry lock file must be the same (otherwise we assume dependencies have changed) + + # First set up the module in the right place for an editable installation. + $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so + + if ($CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'python' complement-synapse-editable -c 'import synapse.synapse_rust' \ + && $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'diff' complement-synapse-editable --brief /editable-src/poetry.lock /poetry.lock.bak); then + skip_docker_build=1 + else + echo "Editable Synapse image is stale. Will rebuild." + unset skip_docker_build + fi fi -fi + fi -if [ -z "$skip_docker_build" ]; then + if [ -z "$skip_docker_build" ]; then if [ -n "$use_editable_synapse" ]; then - # Build a special image designed for use in development with editable - # installs. - $CONTAINER_RUNTIME build -t synapse-editable \ - -f "docker/editable.Dockerfile" . + # Build a special image designed for use in development with editable + # installs. + $CONTAINER_RUNTIME build -t synapse-editable \ + -f "docker/editable.Dockerfile" . - $CONTAINER_RUNTIME build -t synapse-workers-editable \ - --build-arg FROM=synapse-editable \ - -f "docker/Dockerfile-workers" . + $CONTAINER_RUNTIME build -t synapse-workers-editable \ + --build-arg FROM=synapse-editable \ + -f "docker/Dockerfile-workers" . - $CONTAINER_RUNTIME build -t complement-synapse-editable \ - --build-arg FROM=synapse-workers-editable \ - -f "docker/complement/Dockerfile" "docker/complement" + $CONTAINER_RUNTIME build -t complement-synapse-editable \ + --build-arg FROM=synapse-workers-editable \ + -f "docker/complement/Dockerfile" "docker/complement" - # Prepare the Rust module - $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so + # Prepare the Rust module + $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so else - # Build the base Synapse image from the local checkout - echo_if_github "::group::Build Docker image: matrixdotorg/synapse" - $CONTAINER_RUNTIME build -t matrixdotorg/synapse \ - --build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION \ - --build-arg TEST_ONLY_IGNORE_POETRY_LOCKFILE \ - -f "docker/Dockerfile" . - echo_if_github "::endgroup::" - - # Build the workers docker image (from the base Synapse image we just built). - echo_if_github "::group::Build Docker image: matrixdotorg/synapse-workers" - $CONTAINER_RUNTIME build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" . - echo_if_github "::endgroup::" - - # Build the unified Complement image (from the worker Synapse image we just built). 
- echo_if_github "::group::Build Docker image: complement/Dockerfile" - $CONTAINER_RUNTIME build -t complement-synapse \ - `# This is the tag we end up pushing to the registry (see` \ - `# .github/workflows/push_complement_image.yml) so let's just label it now` \ - `# so people can reference it by the same name locally.` \ - -t ghcr.io/element-hq/synapse/complement-synapse \ - -f "docker/complement/Dockerfile" "docker/complement" - echo_if_github "::endgroup::" + # Build the base Synapse image from the local checkout + echo_if_github "::group::Build Docker image: matrixdotorg/synapse" + $CONTAINER_RUNTIME build -t matrixdotorg/synapse \ + --build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION \ + --build-arg TEST_ONLY_IGNORE_POETRY_LOCKFILE \ + -f "docker/Dockerfile" . + echo_if_github "::endgroup::" + + # Build the workers docker image (from the base Synapse image we just built). + echo_if_github "::group::Build Docker image: matrixdotorg/synapse-workers" + $CONTAINER_RUNTIME build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" . + echo_if_github "::endgroup::" + + # Build the unified Complement image (from the worker Synapse image we just built). + echo_if_github "::group::Build Docker image: complement/Dockerfile" + $CONTAINER_RUNTIME build -t complement-synapse \ + `# This is the tag we end up pushing to the registry (see` \ + `# .github/workflows/push_complement_image.yml) so let's just label it now` \ + `# so people can reference it by the same name locally.` \ + -t ghcr.io/element-hq/synapse/complement-synapse \ + -f "docker/complement/Dockerfile" "docker/complement" + echo_if_github "::endgroup::" fi -fi + fi -if [ -n "$skip_complement_run" ]; then - echo "Skipping Complement run as requested." - exit -fi + if [ -n "$skip_complement_run" ]; then + echo "Skipping Complement run as requested." + return 0 + fi -export COMPLEMENT_BASE_IMAGE=complement-synapse -if [ -n "$use_editable_synapse" ]; then + export COMPLEMENT_BASE_IMAGE=complement-synapse + if [ -n "$use_editable_synapse" ]; then export COMPLEMENT_BASE_IMAGE=complement-synapse-editable export COMPLEMENT_HOST_MOUNTS="$editable_mount" -fi + fi -extra_test_args=() + extra_test_args=() -test_packages=( + test_packages=( ./tests/csapi ./tests ./tests/msc3874 @@ -231,71 +232,80 @@ test_packages=( ./tests/msc4140 ./tests/msc4155 ./tests/msc4306 -) + ) -# Enable dirty runs, so tests will reuse the same container where possible. -# This significantly speeds up tests, but increases the possibility of test pollution. -export COMPLEMENT_ENABLE_DIRTY_RUNS=1 + # Enable dirty runs, so tests will reuse the same container where possible. + # This significantly speeds up tests, but increases the possibility of test pollution. + export COMPLEMENT_ENABLE_DIRTY_RUNS=1 -# All environment variables starting with PASS_ will be shared. -# (The prefix is stripped off before reaching the container.) -export COMPLEMENT_SHARE_ENV_PREFIX=PASS_ + # All environment variables starting with PASS_ will be shared. + # (The prefix is stripped off before reaching the container.) + export COMPLEMENT_SHARE_ENV_PREFIX=PASS_ -# It takes longer than 10m to run the whole suite. -extra_test_args+=("-timeout=60m") + # It takes longer than 10m to run the whole suite. + extra_test_args+=("-timeout=60m") -if [[ -n "$WORKERS" ]]; then - # Use workers. - export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS=true + if [[ -n "$WORKERS" ]]; then + # Use workers. + export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS=true - # Pass through the workers defined. 
If none, it will be an empty string - export PASS_SYNAPSE_WORKER_TYPES="$WORKER_TYPES" + # Pass through the workers defined. If none, it will be an empty string + export PASS_SYNAPSE_WORKER_TYPES="$WORKER_TYPES" - # Workers can only use Postgres as a database. - export PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres + # Workers can only use Postgres as a database. + export PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres - # And provide some more configuration to complement. + # And provide some more configuration to complement. - # It can take quite a while to spin up a worker-mode Synapse for the first - # time (the main problem is that we start 14 python processes for each test, - # and complement likes to do two of them in parallel). - export COMPLEMENT_SPAWN_HS_TIMEOUT_SECS=120 -else - export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS= - if [[ -n "$POSTGRES" ]]; then - export PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres + # It can take quite a while to spin up a worker-mode Synapse for the first + # time (the main problem is that we start 14 python processes for each test, + # and complement likes to do two of them in parallel). + export COMPLEMENT_SPAWN_HS_TIMEOUT_SECS=120 else - export PASS_SYNAPSE_COMPLEMENT_DATABASE=sqlite + export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS= + if [[ -n "$POSTGRES" ]]; then + export PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres + else + export PASS_SYNAPSE_COMPLEMENT_DATABASE=sqlite + fi fi -fi -if [[ -n "$ASYNCIO_REACTOR" ]]; then - # Enable the Twisted asyncio reactor - export PASS_SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR=true -fi + if [[ -n "$ASYNCIO_REACTOR" ]]; then + # Enable the Twisted asyncio reactor + export PASS_SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR=true + fi -if [[ -n "$UNIX_SOCKETS" ]]; then - # Enable full on Unix socket mode for Synapse, Redis and Postgresql - export PASS_SYNAPSE_USE_UNIX_SOCKET=1 -fi + if [[ -n "$UNIX_SOCKETS" ]]; then + # Enable full on Unix socket mode for Synapse, Redis and Postgresql + export PASS_SYNAPSE_USE_UNIX_SOCKET=1 + fi -if [[ -n "$SYNAPSE_TEST_LOG_LEVEL" ]]; then - # Set the log level to what is desired - export PASS_SYNAPSE_LOG_LEVEL="$SYNAPSE_TEST_LOG_LEVEL" + if [[ -n "$SYNAPSE_TEST_LOG_LEVEL" ]]; then + # Set the log level to what is desired + export PASS_SYNAPSE_LOG_LEVEL="$SYNAPSE_TEST_LOG_LEVEL" - # Allow logging sensitive things (currently SQL queries & parameters). - # (This won't have any effect if we're not logging at DEBUG level overall.) - # Since this is just a test suite, this is fine and won't reveal anyone's - # personal information - export PASS_SYNAPSE_LOG_SENSITIVE=1 -fi + # Allow logging sensitive things (currently SQL queries & parameters). + # (This won't have any effect if we're not logging at DEBUG level overall.) + # Since this is just a test suite, this is fine and won't reveal anyone's + # personal information + export PASS_SYNAPSE_LOG_SENSITIVE=1 + fi -# Log a few more useful things for a developer attempting to debug something -# particularly tricky. -export PASS_SYNAPSE_LOG_TESTING=1 + # Log a few more useful things for a developer attempting to debug something + # particularly tricky. + export PASS_SYNAPSE_LOG_TESTING=1 -# Run the tests! -echo "Images built; running complement with ${extra_test_args[@]} $@ ${test_packages[@]}" -cd "$COMPLEMENT_DIR" + # Run the tests! 
+ echo "Images built; running complement with ${extra_test_args[@]} $@ ${test_packages[@]}" + cd "$COMPLEMENT_DIR" -go test -v -tags "synapse_blacklist" -count=1 "${extra_test_args[@]}" "$@" "${test_packages[@]}" + go test -v -tags "synapse_blacklist" -count=1 "${extra_test_args[@]}" "$@" "${test_packages[@]}" +} + +main "$@" +# For any non-zero exit code (indicating some sort of error happened), we want to exit +# with that code. +exit_code=$? +if [ $exit_code -ne 0 ]; then + exit $exit_code +fi From 54c93a1372ed273511908ddc3c3eeacfb9ecdde1 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Fri, 21 Nov 2025 19:01:43 -0600 Subject: [PATCH 03/47] Export `SYNAPSE_SUPPORTED_COMPLEMENT_TEST_PACKAGES` from `scripts-dev/complement.sh` (#19208) This is useful as someone downstream can source the `scripts-dev/complement.sh` script and run the same set of tests as Synapse: ```bash # Grab the test packages supported by Synapse. # # --fast: Skip rebuilding the docker images, # --build-only: Will only build Docker images but because we also used `--fast`, it won't do anything. # `>/dev/null` to redirect stdout to `/dev/null` to get rid of the `echo` logs from the script. test_packages=$(source ${SYNAPSE_DIR}/scripts-dev/complement.sh --fast --build-only >/dev/null && echo "$SYNAPSE_SUPPORTED_COMPLEMENT_TEST_PACKAGES") echo $test_packages ``` This is spawning from wanting to run the same set of Complement tests in the https://github.com/element-hq/synapse-rust-apps project. --- changelog.d/19208.misc | 1 + scripts-dev/complement.sh | 55 +++++++++++++++++++++++++++------------ 2 files changed, 39 insertions(+), 17 deletions(-) create mode 100644 changelog.d/19208.misc diff --git a/changelog.d/19208.misc b/changelog.d/19208.misc new file mode 100644 index 0000000000..1948be309b --- /dev/null +++ b/changelog.d/19208.misc @@ -0,0 +1 @@ +Export `SYNAPSE_SUPPORTED_COMPLEMENT_TEST_PACKAGES` environment variable from `scripts-dev/complement.sh`. diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index adb5807325..2447e0dc7b 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -72,6 +72,12 @@ For help on arguments to 'go test', run 'go help testflag'. EOF } +# We use a function to wrap the script logic so that we can use `return` to exit early +# if needed. This is particularly useful so that this script can be sourced by other +# scripts without exiting the calling subshell (composable). This allows us to share +# variables like `SYNAPSE_SUPPORTED_COMPLEMENT_TEST_PACKAGES` with other scripts. +# +# Returns an exit code of 0 on success, or 1 on failure. main() { # parse our arguments skip_docker_build="" @@ -204,21 +210,12 @@ main() { echo_if_github "::endgroup::" fi + + echo "Docker images built." + else + echo "Skipping Docker image build as requested." fi - if [ -n "$skip_complement_run" ]; then - echo "Skipping Complement run as requested." - return 0 - fi - - export COMPLEMENT_BASE_IMAGE=complement-synapse - if [ -n "$use_editable_synapse" ]; then - export COMPLEMENT_BASE_IMAGE=complement-synapse-editable - export COMPLEMENT_HOST_MOUNTS="$editable_mount" - fi - - extra_test_args=() - test_packages=( ./tests/csapi ./tests @@ -234,6 +231,16 @@ main() { ./tests/msc4306 ) + # Export the list of test packages as a space-separated environment variable, so other + # scripts can use it. 
+ export SYNAPSE_SUPPORTED_COMPLEMENT_TEST_PACKAGES="${test_packages[@]}" + + export COMPLEMENT_BASE_IMAGE=complement-synapse + if [ -n "$use_editable_synapse" ]; then + export COMPLEMENT_BASE_IMAGE=complement-synapse-editable + export COMPLEMENT_HOST_MOUNTS="$editable_mount" + fi + # Enable dirty runs, so tests will reuse the same container where possible. # This significantly speeds up tests, but increases the possibility of test pollution. export COMPLEMENT_ENABLE_DIRTY_RUNS=1 @@ -242,8 +249,18 @@ main() { # (The prefix is stripped off before reaching the container.) export COMPLEMENT_SHARE_ENV_PREFIX=PASS_ + # * -count=1: Only run tests once, and disable caching for tests. + # * -v: Output test logs, even if those tests pass. + # * -tags=synapse_blacklist: Enable the `synapse_blacklist` build tag, which is + # necessary for `runtime.Synapse` checks/skips to work in the tests + test_args=( + -v + -tags="synapse_blacklist" + -count=1 + ) + # It takes longer than 10m to run the whole suite. - extra_test_args+=("-timeout=60m") + test_args+=("-timeout=60m") if [[ -n "$WORKERS" ]]; then # Use workers. @@ -295,11 +312,15 @@ main() { # particularly tricky. export PASS_SYNAPSE_LOG_TESTING=1 + if [ -n "$skip_complement_run" ]; then + echo "Skipping Complement run as requested." + return 0 + fi + # Run the tests! - echo "Images built; running complement with ${extra_test_args[@]} $@ ${test_packages[@]}" + echo "Running Complement with ${test_args[@]} $@ ${test_packages[@]}" cd "$COMPLEMENT_DIR" - - go test -v -tags "synapse_blacklist" -count=1 "${extra_test_args[@]}" "$@" "${test_packages[@]}" + go test "${test_args[@]}" "$@" "${test_packages[@]}" } main "$@" From ea3e08c49cd31762aa9ebb267f8c8ac2097f506c Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Nov 2025 15:58:26 +0000 Subject: [PATCH 04/47] Bump attrs from 25.3.0 to 25.4.0 (#19215) Bumps [attrs](https://github.com/sponsors/hynek) from 25.3.0 to 25.4.0.
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=attrs&package-manager=pip&previous-version=25.3.0&new-version=25.4.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)

---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 poetry.lock | 16 ++++------------
 1 file changed, 4 insertions(+), 12 deletions(-)

diff --git a/poetry.lock b/poetry.lock
index dec659db32..019805c605 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -14,24 +14,16 @@ files = [
 
 [[package]]
 name = "attrs"
-version = "25.3.0"
+version = "25.4.0"
 description = "Classes Without Boilerplate"
 optional = false
-python-versions = ">=3.8"
+python-versions = ">=3.9"
 groups = ["main", "dev"]
 files = [
-    {file = "attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3"},
-    {file = "attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b"},
+    {file = "attrs-25.4.0-py3-none-any.whl", hash = "sha256:adcf7e2a1fb3b36ac48d97835bb6d8ade15b8dcce26aba8bf1d14847b57a3373"},
+    {file = "attrs-25.4.0.tar.gz", hash = "sha256:16d5969b87f0859ef33a48b35d55ac1be6e42ae49d5e853b597db70c35c57e11"},
 ]
 
-[package.extras]
-benchmark = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
-cov = ["cloudpickle ; platform_python_implementation == \"CPython\"", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
-dev = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
-docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier"]
-tests = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"]
-tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\""]
-
 [[package]]
 name = "authlib"
 version = "1.6.5"

From a5d946bfcbf2cb7f194c0724e47fad7ce489f6ef Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 24 Nov 2025 16:29:32 +0000
Subject: [PATCH 05/47] Bump types-bleach from 6.2.0.20250809 to
 6.3.0.20251115 (#19217)

Bumps [types-bleach](https://github.com/typeshed-internal/stub_uploader) from 6.2.0.20250809 to 6.3.0.20251115.
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=types-bleach&package-manager=pip&previous-version=6.2.0.20250809&new-version=6.3.0.20251115)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)

---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot]
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
---
 poetry.lock | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/poetry.lock b/poetry.lock
index 019805c605..be5f1847c4 100644
--- a/poetry.lock
+++ b/poetry.lock
@@ -2976,14 +2976,14 @@ twisted = "*"
 
 [[package]]
 name = "types-bleach"
-version = "6.2.0.20250809"
+version = "6.3.0.20251115"
 description = "Typing stubs for bleach"
 optional = false
 python-versions = ">=3.9"
 groups = ["dev"]
 files = [
-    {file = "types_bleach-6.2.0.20250809-py3-none-any.whl", hash = "sha256:0b372a75117947d9ac8a31ae733fd0f8d92ec75c4772e7b37093ba3fa5b48fb9"},
-    {file = "types_bleach-6.2.0.20250809.tar.gz", hash = "sha256:188d7a1119f6c953140b513ed57ba4213755695815472c19d0c22ac09c79b90b"},
+    {file = "types_bleach-6.3.0.20251115-py3-none-any.whl", hash = "sha256:f81e7cf4ebac3f3d60b66b3fd5236c324e65037d1b28d22c94d5b457f0b98f42"},
+    {file = "types_bleach-6.3.0.20251115.tar.gz", hash = "sha256:96911b20f169a18524d03b61fa7e98a08c411292f7cdb5dc191057f55dad9ae3"},
 ]
 
 [package.dependencies]

From 1b78f0318aa51138c4e5c7c2f87ae69b7431f33a Mon Sep 17 00:00:00 2001
From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com>
Date: Mon, 24 Nov 2025 16:30:09 +0000
Subject: [PATCH 06/47] Bump rpds-py from 0.28.0 to 0.29.0 (#19216)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Bumps [rpds-py](https://github.com/crate-py/rpds) from 0.28.0 to 0.29.0.
Release notes

Sourced from rpds-py's releases.

v0.29.0

What's Changed

Full Changelog: https://github.com/crate-py/rpds/compare/v0.28.0...v0.29.0

Commits
  • 5fb6f35 Prepare for 0.29.0
  • d17dbd1 Add rpds's Stack.
  • 74707af Follow the rpds API more closely for Queue.
  • 41455f3 -> native uv for dpeendency groups.
  • e93532d Use 3.14 by default in nox.
  • 020c41f Remove dead hooks.
  • 6e08b75 Accept zizmor's cooldown suggestions for dependabot.
  • a5d40a9 Merge pull request #197 from crate-py/dependabot/cargo/rpds-1.2.0
  • b830be1 Merge pull request #198 from crate-py/dependabot/github_actions/softprops/act...
  • e7ac330 Merge pull request #199 from crate-py/pre-commit-ci-update-config
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=rpds-py&package-manager=pip&previous-version=0.28.0&new-version=0.29.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`.

[//]: # (dependabot-automerge-start)
[//]: # (dependabot-automerge-end)

---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
- `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 232 ++++++++++++++++++++++++++-------------------------- 1 file changed, 116 insertions(+), 116 deletions(-) diff --git a/poetry.lock b/poetry.lock index be5f1847c4..5a79b932a5 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2356,127 +2356,127 @@ jupyter = ["ipywidgets (>=7.5.1,<9)"] [[package]] name = "rpds-py" -version = "0.28.0" +version = "0.29.0" description = "Python bindings to Rust's persistent data structures (rpds)" optional = false python-versions = ">=3.10" groups = ["main", "dev"] files = [ - {file = "rpds_py-0.28.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:7b6013db815417eeb56b2d9d7324e64fcd4fa289caeee6e7a78b2e11fc9b438a"}, - {file = "rpds_py-0.28.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1a4c6b05c685c0c03f80dabaeb73e74218c49deea965ca63f76a752807397207"}, - {file = "rpds_py-0.28.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4794c6c3fbe8f9ac87699b131a1f26e7b4abcf6d828da46a3a52648c7930eba"}, - {file = "rpds_py-0.28.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2e8456b6ee5527112ff2354dd9087b030e3429e43a74f480d4a5ca79d269fd85"}, - {file = "rpds_py-0.28.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:beb880a9ca0a117415f241f66d56025c02037f7c4efc6fe59b5b8454f1eaa50d"}, - {file = "rpds_py-0.28.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6897bebb118c44b38c9cb62a178e09f1593c949391b9a1a6fe777ccab5934ee7"}, - {file = "rpds_py-0.28.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1b553dd06e875249fd43efd727785efb57a53180e0fde321468222eabbeaafa"}, - {file = "rpds_py-0.28.0-cp310-cp310-manylinux_2_31_riscv64.whl", hash = "sha256:f0b2044fdddeea5b05df832e50d2a06fe61023acb44d76978e1b060206a8a476"}, - {file = "rpds_py-0.28.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:05cf1e74900e8da73fa08cc76c74a03345e5a3e37691d07cfe2092d7d8e27b04"}, - {file = "rpds_py-0.28.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:efd489fec7c311dae25e94fe7eeda4b3d06be71c68f2cf2e8ef990ffcd2cd7e8"}, - {file = "rpds_py-0.28.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:ada7754a10faacd4f26067e62de52d6af93b6d9542f0df73c57b9771eb3ba9c4"}, - {file = "rpds_py-0.28.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c2a34fd26588949e1e7977cfcbb17a9a42c948c100cab890c6d8d823f0586457"}, - {file = "rpds_py-0.28.0-cp310-cp310-win32.whl", hash = "sha256:f9174471d6920cbc5e82a7822de8dfd4dcea86eb828b04fc8c6519a77b0ee51e"}, - {file = "rpds_py-0.28.0-cp310-cp310-win_amd64.whl", hash = "sha256:6e32dd207e2c4f8475257a3540ab8a93eff997abfa0a3fdb287cae0d6cd874b8"}, - {file = "rpds_py-0.28.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:03065002fd2e287725d95fbc69688e0c6daf6c6314ba38bdbaa3895418e09296"}, - {file = "rpds_py-0.28.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:28ea02215f262b6d078daec0b45344c89e161eab9526b0d898221d96fdda5f27"}, - {file = "rpds_py-0.28.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25dbade8fbf30bcc551cb352376c0ad64b067e4fc56f90e22ba70c3ce205988c"}, - {file = "rpds_py-0.28.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:3c03002f54cc855860bfdc3442928ffdca9081e73b5b382ed0b9e8efe6e5e205"}, - {file = "rpds_py-0.28.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:b9699fa7990368b22032baf2b2dce1f634388e4ffc03dfefaaac79f4695edc95"}, - {file = "rpds_py-0.28.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b9b06fe1a75e05e0713f06ea0c89ecb6452210fd60e2f1b6ddc1067b990e08d9"}, - {file = "rpds_py-0.28.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac9f83e7b326a3f9ec3ef84cda98fb0a74c7159f33e692032233046e7fd15da2"}, - {file = "rpds_py-0.28.0-cp311-cp311-manylinux_2_31_riscv64.whl", hash = "sha256:0d3259ea9ad8743a75a43eb7819324cdab393263c91be86e2d1901ee65c314e0"}, - {file = "rpds_py-0.28.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a7548b345f66f6695943b4ef6afe33ccd3f1b638bd9afd0f730dd255c249c9e"}, - {file = "rpds_py-0.28.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c9a40040aa388b037eb39416710fbcce9443498d2eaab0b9b45ae988b53f5c67"}, - {file = "rpds_py-0.28.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8f60c7ea34e78c199acd0d3cda37a99be2c861dd2b8cf67399784f70c9f8e57d"}, - {file = "rpds_py-0.28.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1571ae4292649100d743b26d5f9c63503bb1fedf538a8f29a98dce2d5ba6b4e6"}, - {file = "rpds_py-0.28.0-cp311-cp311-win32.whl", hash = "sha256:5cfa9af45e7c1140af7321fa0bef25b386ee9faa8928c80dc3a5360971a29e8c"}, - {file = "rpds_py-0.28.0-cp311-cp311-win_amd64.whl", hash = "sha256:dd8d86b5d29d1b74100982424ba53e56033dc47720a6de9ba0259cf81d7cecaa"}, - {file = "rpds_py-0.28.0-cp311-cp311-win_arm64.whl", hash = "sha256:4e27d3a5709cc2b3e013bf93679a849213c79ae0573f9b894b284b55e729e120"}, - {file = "rpds_py-0.28.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:6b4f28583a4f247ff60cd7bdda83db8c3f5b05a7a82ff20dd4b078571747708f"}, - {file = "rpds_py-0.28.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:d678e91b610c29c4b3d52a2c148b641df2b4676ffe47c59f6388d58b99cdc424"}, - {file = "rpds_py-0.28.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e819e0e37a44a78e1383bf1970076e2ccc4dc8c2bbaa2f9bd1dc987e9afff628"}, - {file = "rpds_py-0.28.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5ee514e0f0523db5d3fb171f397c54875dbbd69760a414dccf9d4d7ad628b5bd"}, - {file = "rpds_py-0.28.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5f3fa06d27fdcee47f07a39e02862da0100cb4982508f5ead53ec533cd5fe55e"}, - {file = "rpds_py-0.28.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:46959ef2e64f9e4a41fc89aa20dbca2b85531f9a72c21099a3360f35d10b0d5a"}, - {file = "rpds_py-0.28.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8455933b4bcd6e83fde3fefc987a023389c4b13f9a58c8d23e4b3f6d13f78c84"}, - {file = "rpds_py-0.28.0-cp312-cp312-manylinux_2_31_riscv64.whl", hash = "sha256:ad50614a02c8c2962feebe6012b52f9802deec4263946cddea37aaf28dd25a66"}, - {file = "rpds_py-0.28.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e5deca01b271492553fdb6c7fd974659dce736a15bae5dad7ab8b93555bceb28"}, - {file = "rpds_py-0.28.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:735f8495a13159ce6a0d533f01e8674cec0c57038c920495f87dcb20b3ddb48a"}, - {file = "rpds_py-0.28.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:961ca621ff10d198bbe6ba4957decca61aa2a0c56695384c1d6b79bf61436df5"}, - {file = "rpds_py-0.28.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2374e16cc9131022e7d9a8f8d65d261d9ba55048c78f3b6e017971a4f5e6353c"}, - {file = "rpds_py-0.28.0-cp312-cp312-win32.whl", hash = 
"sha256:d15431e334fba488b081d47f30f091e5d03c18527c325386091f31718952fe08"}, - {file = "rpds_py-0.28.0-cp312-cp312-win_amd64.whl", hash = "sha256:a410542d61fc54710f750d3764380b53bf09e8c4edbf2f9141a82aa774a04f7c"}, - {file = "rpds_py-0.28.0-cp312-cp312-win_arm64.whl", hash = "sha256:1f0cfd1c69e2d14f8c892b893997fa9a60d890a0c8a603e88dca4955f26d1edd"}, - {file = "rpds_py-0.28.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:e9e184408a0297086f880556b6168fa927d677716f83d3472ea333b42171ee3b"}, - {file = "rpds_py-0.28.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:edd267266a9b0448f33dc465a97cfc5d467594b600fe28e7fa2f36450e03053a"}, - {file = "rpds_py-0.28.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85beb8b3f45e4e32f6802fb6cd6b17f615ef6c6a52f265371fb916fae02814aa"}, - {file = "rpds_py-0.28.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d2412be8d00a1b895f8ad827cc2116455196e20ed994bb704bf138fe91a42724"}, - {file = "rpds_py-0.28.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cf128350d384b777da0e68796afdcebc2e9f63f0e9f242217754e647f6d32491"}, - {file = "rpds_py-0.28.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a2036d09b363aa36695d1cc1a97b36865597f4478470b0697b5ee9403f4fe399"}, - {file = "rpds_py-0.28.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8e1e9be4fa6305a16be628959188e4fd5cd6f1b0e724d63c6d8b2a8adf74ea6"}, - {file = "rpds_py-0.28.0-cp313-cp313-manylinux_2_31_riscv64.whl", hash = "sha256:0a403460c9dd91a7f23fc3188de6d8977f1d9603a351d5db6cf20aaea95b538d"}, - {file = "rpds_py-0.28.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d7366b6553cdc805abcc512b849a519167db8f5e5c3472010cd1228b224265cb"}, - {file = "rpds_py-0.28.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5b43c6a3726efd50f18d8120ec0551241c38785b68952d240c45ea553912ac41"}, - {file = "rpds_py-0.28.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:0cb7203c7bc69d7c1585ebb33a2e6074492d2fc21ad28a7b9d40457ac2a51ab7"}, - {file = "rpds_py-0.28.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:7a52a5169c664dfb495882adc75c304ae1d50df552fbd68e100fdc719dee4ff9"}, - {file = "rpds_py-0.28.0-cp313-cp313-win32.whl", hash = "sha256:2e42456917b6687215b3e606ab46aa6bca040c77af7df9a08a6dcfe8a4d10ca5"}, - {file = "rpds_py-0.28.0-cp313-cp313-win_amd64.whl", hash = "sha256:e0a0311caedc8069d68fc2bf4c9019b58a2d5ce3cd7cb656c845f1615b577e1e"}, - {file = "rpds_py-0.28.0-cp313-cp313-win_arm64.whl", hash = "sha256:04c1b207ab8b581108801528d59ad80aa83bb170b35b0ddffb29c20e411acdc1"}, - {file = "rpds_py-0.28.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:f296ea3054e11fc58ad42e850e8b75c62d9a93a9f981ad04b2e5ae7d2186ff9c"}, - {file = "rpds_py-0.28.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5a7306c19b19005ad98468fcefeb7100b19c79fc23a5f24a12e06d91181193fa"}, - {file = "rpds_py-0.28.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5d9b86aa501fed9862a443c5c3116f6ead8bc9296185f369277c42542bd646b"}, - {file = "rpds_py-0.28.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e5bbc701eff140ba0e872691d573b3d5d30059ea26e5785acba9132d10c8c31d"}, - {file = "rpds_py-0.28.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a5690671cd672a45aa8616d7374fdf334a1b9c04a0cac3c854b1136e92374fe"}, - {file = "rpds_py-0.28.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", 
hash = "sha256:9f1d92ecea4fa12f978a367c32a5375a1982834649cdb96539dcdc12e609ab1a"}, - {file = "rpds_py-0.28.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d252db6b1a78d0a3928b6190156042d54c93660ce4d98290d7b16b5296fb7cc"}, - {file = "rpds_py-0.28.0-cp313-cp313t-manylinux_2_31_riscv64.whl", hash = "sha256:d61b355c3275acb825f8777d6c4505f42b5007e357af500939d4a35b19177259"}, - {file = "rpds_py-0.28.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:acbe5e8b1026c0c580d0321c8aae4b0a1e1676861d48d6e8c6586625055b606a"}, - {file = "rpds_py-0.28.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:8aa23b6f0fc59b85b4c7d89ba2965af274346f738e8d9fc2455763602e62fd5f"}, - {file = "rpds_py-0.28.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:7b14b0c680286958817c22d76fcbca4800ddacef6f678f3a7c79a1fe7067fe37"}, - {file = "rpds_py-0.28.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:bcf1d210dfee61a6c86551d67ee1031899c0fdbae88b2d44a569995d43797712"}, - {file = "rpds_py-0.28.0-cp313-cp313t-win32.whl", hash = "sha256:3aa4dc0fdab4a7029ac63959a3ccf4ed605fee048ba67ce89ca3168da34a1342"}, - {file = "rpds_py-0.28.0-cp313-cp313t-win_amd64.whl", hash = "sha256:7b7d9d83c942855e4fdcfa75d4f96f6b9e272d42fffcb72cd4bb2577db2e2907"}, - {file = "rpds_py-0.28.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:dcdcb890b3ada98a03f9f2bb108489cdc7580176cb73b4f2d789e9a1dac1d472"}, - {file = "rpds_py-0.28.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:f274f56a926ba2dc02976ca5b11c32855cbd5925534e57cfe1fda64e04d1add2"}, - {file = "rpds_py-0.28.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4fe0438ac4a29a520ea94c8c7f1754cdd8feb1bc490dfda1bfd990072363d527"}, - {file = "rpds_py-0.28.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8a358a32dd3ae50e933347889b6af9a1bdf207ba5d1a3f34e1a38cd3540e6733"}, - {file = "rpds_py-0.28.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e80848a71c78aa328fefaba9c244d588a342c8e03bda518447b624ea64d1ff56"}, - {file = "rpds_py-0.28.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f586db2e209d54fe177e58e0bc4946bea5fb0102f150b1b2f13de03e1f0976f8"}, - {file = "rpds_py-0.28.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ae8ee156d6b586e4292491e885d41483136ab994e719a13458055bec14cf370"}, - {file = "rpds_py-0.28.0-cp314-cp314-manylinux_2_31_riscv64.whl", hash = "sha256:a805e9b3973f7e27f7cab63a6b4f61d90f2e5557cff73b6e97cd5b8540276d3d"}, - {file = "rpds_py-0.28.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5d3fd16b6dc89c73a4da0b4ac8b12a7ecc75b2864b95c9e5afed8003cb50a728"}, - {file = "rpds_py-0.28.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:6796079e5d24fdaba6d49bda28e2c47347e89834678f2bc2c1b4fc1489c0fb01"}, - {file = "rpds_py-0.28.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:76500820c2af232435cbe215e3324c75b950a027134e044423f59f5b9a1ba515"}, - {file = "rpds_py-0.28.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:bbdc5640900a7dbf9dd707fe6388972f5bbd883633eb68b76591044cfe346f7e"}, - {file = "rpds_py-0.28.0-cp314-cp314-win32.whl", hash = "sha256:adc8aa88486857d2b35d75f0640b949759f79dc105f50aa2c27816b2e0dd749f"}, - {file = "rpds_py-0.28.0-cp314-cp314-win_amd64.whl", hash = "sha256:66e6fa8e075b58946e76a78e69e1a124a21d9a48a5b4766d15ba5b06869d1fa1"}, - {file = "rpds_py-0.28.0-cp314-cp314-win_arm64.whl", hash = 
"sha256:a6fe887c2c5c59413353b7c0caff25d0e566623501ccfff88957fa438a69377d"}, - {file = "rpds_py-0.28.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:7a69df082db13c7070f7b8b1f155fa9e687f1d6aefb7b0e3f7231653b79a067b"}, - {file = "rpds_py-0.28.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b1cde22f2c30ebb049a9e74c5374994157b9b70a16147d332f89c99c5960737a"}, - {file = "rpds_py-0.28.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5338742f6ba7a51012ea470bd4dc600a8c713c0c72adaa0977a1b1f4327d6592"}, - {file = "rpds_py-0.28.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e1460ebde1bcf6d496d80b191d854adedcc619f84ff17dc1c6d550f58c9efbba"}, - {file = "rpds_py-0.28.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e3eb248f2feba84c692579257a043a7699e28a77d86c77b032c1d9fbb3f0219c"}, - {file = "rpds_py-0.28.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bd3bbba5def70b16cd1c1d7255666aad3b290fbf8d0fe7f9f91abafb73611a91"}, - {file = "rpds_py-0.28.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3114f4db69ac5a1f32e7e4d1cbbe7c8f9cf8217f78e6e002cedf2d54c2a548ed"}, - {file = "rpds_py-0.28.0-cp314-cp314t-manylinux_2_31_riscv64.whl", hash = "sha256:4b0cb8a906b1a0196b863d460c0222fb8ad0f34041568da5620f9799b83ccf0b"}, - {file = "rpds_py-0.28.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cf681ac76a60b667106141e11a92a3330890257e6f559ca995fbb5265160b56e"}, - {file = "rpds_py-0.28.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:1e8ee6413cfc677ce8898d9cde18cc3a60fc2ba756b0dec5b71eb6eb21c49fa1"}, - {file = "rpds_py-0.28.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:b3072b16904d0b5572a15eb9d31c1954e0d3227a585fc1351aa9878729099d6c"}, - {file = "rpds_py-0.28.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:b670c30fd87a6aec281c3c9896d3bae4b205fd75d79d06dc87c2503717e46092"}, - {file = "rpds_py-0.28.0-cp314-cp314t-win32.whl", hash = "sha256:8014045a15b4d2b3476f0a287fcc93d4f823472d7d1308d47884ecac9e612be3"}, - {file = "rpds_py-0.28.0-cp314-cp314t-win_amd64.whl", hash = "sha256:7a4e59c90d9c27c561eb3160323634a9ff50b04e4f7820600a2beb0ac90db578"}, - {file = "rpds_py-0.28.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f5e7101145427087e493b9c9b959da68d357c28c562792300dd21a095118ed16"}, - {file = "rpds_py-0.28.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:31eb671150b9c62409a888850aaa8e6533635704fe2b78335f9aaf7ff81eec4d"}, - {file = "rpds_py-0.28.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48b55c1f64482f7d8bd39942f376bfdf2f6aec637ee8c805b5041e14eeb771db"}, - {file = "rpds_py-0.28.0-pp311-pypy311_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:24743a7b372e9a76171f6b69c01aedf927e8ac3e16c474d9fe20d552a8cb45c7"}, - {file = "rpds_py-0.28.0-pp311-pypy311_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:389c29045ee8bbb1627ea190b4976a310a295559eaf9f1464a1a6f2bf84dde78"}, - {file = "rpds_py-0.28.0-pp311-pypy311_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23690b5827e643150cf7b49569679ec13fe9a610a15949ed48b85eb7f98f34ec"}, - {file = "rpds_py-0.28.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f0c9266c26580e7243ad0d72fc3e01d6b33866cfab5084a6da7576bcf1c4f72"}, - {file = "rpds_py-0.28.0-pp311-pypy311_pp73-manylinux_2_31_riscv64.whl", hash = 
"sha256:4c6c4db5d73d179746951486df97fd25e92396be07fc29ee8ff9a8f5afbdfb27"}, - {file = "rpds_py-0.28.0-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a3b695a8fa799dd2cfdb4804b37096c5f6dba1ac7f48a7fbf6d0485bcd060316"}, - {file = "rpds_py-0.28.0-pp311-pypy311_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:6aa1bfce3f83baf00d9c5fcdbba93a3ab79958b4c7d7d1f55e7fe68c20e63912"}, - {file = "rpds_py-0.28.0-pp311-pypy311_pp73-musllinux_1_2_i686.whl", hash = "sha256:7b0f9dceb221792b3ee6acb5438eb1f02b0cb2c247796a72b016dcc92c6de829"}, - {file = "rpds_py-0.28.0-pp311-pypy311_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:5d0145edba8abd3db0ab22b5300c99dc152f5c9021fab861be0f0544dc3cbc5f"}, - {file = "rpds_py-0.28.0.tar.gz", hash = "sha256:abd4df20485a0983e2ca334a216249b6186d6e3c1627e106651943dbdb791aea"}, + {file = "rpds_py-0.29.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:4ae4b88c6617e1b9e5038ab3fccd7bac0842fdda2b703117b2aa99bc85379113"}, + {file = "rpds_py-0.29.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7d9128ec9d8cecda6f044001fde4fb71ea7c24325336612ef8179091eb9596b9"}, + {file = "rpds_py-0.29.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d37812c3da8e06f2bb35b3cf10e4a7b68e776a706c13058997238762b4e07f4f"}, + {file = "rpds_py-0.29.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:66786c3fb1d8de416a7fa8e1cb1ec6ba0a745b2b0eee42f9b7daa26f1a495545"}, + {file = "rpds_py-0.29.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b58f5c77f1af888b5fd1876c9a0d9858f6f88a39c9dd7c073a88e57e577da66d"}, + {file = "rpds_py-0.29.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:799156ef1f3529ed82c36eb012b5d7a4cf4b6ef556dd7cc192148991d07206ae"}, + {file = "rpds_py-0.29.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:453783477aa4f2d9104c4b59b08c871431647cb7af51b549bbf2d9eb9c827756"}, + {file = "rpds_py-0.29.0-cp310-cp310-manylinux_2_31_riscv64.whl", hash = "sha256:24a7231493e3c4a4b30138b50cca089a598e52c34cf60b2f35cebf62f274fdea"}, + {file = "rpds_py-0.29.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7033c1010b1f57bb44d8067e8c25aa6fa2e944dbf46ccc8c92b25043839c3fd2"}, + {file = "rpds_py-0.29.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0248b19405422573621172ab8e3a1f29141362d13d9f72bafa2e28ea0cdca5a2"}, + {file = "rpds_py-0.29.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:f9f436aee28d13b9ad2c764fc273e0457e37c2e61529a07b928346b219fcde3b"}, + {file = "rpds_py-0.29.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:24a16cb7163933906c62c272de20ea3c228e4542c8c45c1d7dc2b9913e17369a"}, + {file = "rpds_py-0.29.0-cp310-cp310-win32.whl", hash = "sha256:1a409b0310a566bfd1be82119891fefbdce615ccc8aa558aff7835c27988cbef"}, + {file = "rpds_py-0.29.0-cp310-cp310-win_amd64.whl", hash = "sha256:c5523b0009e7c3c1263471b69d8da1c7d41b3ecb4cb62ef72be206b92040a950"}, + {file = "rpds_py-0.29.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:9b9c764a11fd637e0322a488560533112837f5334ffeb48b1be20f6d98a7b437"}, + {file = "rpds_py-0.29.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3fd2164d73812026ce970d44c3ebd51e019d2a26a4425a5dcbdfa93a34abc383"}, + {file = "rpds_py-0.29.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a097b7f7f7274164566ae90a221fd725363c0e9d243e2e9ed43d195ccc5495c"}, + {file = "rpds_py-0.29.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:7cdc0490374e31cedefefaa1520d5fe38e82fde8748cbc926e7284574c714d6b"}, + {file = "rpds_py-0.29.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:89ca2e673ddd5bde9b386da9a0aac0cab0e76f40c8f0aaf0d6311b6bbf2aa311"}, + {file = "rpds_py-0.29.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a5d9da3ff5af1ca1249b1adb8ef0573b94c76e6ae880ba1852f033bf429d4588"}, + {file = "rpds_py-0.29.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8238d1d310283e87376c12f658b61e1ee23a14c0e54c7c0ce953efdbdc72deed"}, + {file = "rpds_py-0.29.0-cp311-cp311-manylinux_2_31_riscv64.whl", hash = "sha256:2d6fb2ad1c36f91c4646989811e84b1ea5e0c3cf9690b826b6e32b7965853a63"}, + {file = "rpds_py-0.29.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:534dc9df211387547267ccdb42253aa30527482acb38dd9b21c5c115d66a96d2"}, + {file = "rpds_py-0.29.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d456e64724a075441e4ed648d7f154dc62e9aabff29bcdf723d0c00e9e1d352f"}, + {file = "rpds_py-0.29.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:a738f2da2f565989401bd6fd0b15990a4d1523c6d7fe83f300b7e7d17212feca"}, + {file = "rpds_py-0.29.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a110e14508fd26fd2e472bb541f37c209409876ba601cf57e739e87d8a53cf95"}, + {file = "rpds_py-0.29.0-cp311-cp311-win32.whl", hash = "sha256:923248a56dd8d158389a28934f6f69ebf89f218ef96a6b216a9be6861804d3f4"}, + {file = "rpds_py-0.29.0-cp311-cp311-win_amd64.whl", hash = "sha256:539eb77eb043afcc45314d1be09ea6d6cafb3addc73e0547c171c6d636957f60"}, + {file = "rpds_py-0.29.0-cp311-cp311-win_arm64.whl", hash = "sha256:bdb67151ea81fcf02d8f494703fb728d4d34d24556cbff5f417d74f6f5792e7c"}, + {file = "rpds_py-0.29.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a0891cfd8db43e085c0ab93ab7e9b0c8fee84780d436d3b266b113e51e79f954"}, + {file = "rpds_py-0.29.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3897924d3f9a0361472d884051f9a2460358f9a45b1d85a39a158d2f8f1ad71c"}, + {file = "rpds_py-0.29.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a21deb8e0d1571508c6491ce5ea5e25669b1dd4adf1c9d64b6314842f708b5d"}, + {file = "rpds_py-0.29.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9efe71687d6427737a0a2de9ca1c0a216510e6cd08925c44162be23ed7bed2d5"}, + {file = "rpds_py-0.29.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:40f65470919dc189c833e86b2c4bd21bd355f98436a2cef9e0a9a92aebc8e57e"}, + {file = "rpds_py-0.29.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:def48ff59f181130f1a2cb7c517d16328efac3ec03951cca40c1dc2049747e83"}, + {file = "rpds_py-0.29.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad7bd570be92695d89285a4b373006930715b78d96449f686af422debb4d3949"}, + {file = "rpds_py-0.29.0-cp312-cp312-manylinux_2_31_riscv64.whl", hash = "sha256:5a572911cd053137bbff8e3a52d31c5d2dba51d3a67ad902629c70185f3f2181"}, + {file = "rpds_py-0.29.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d583d4403bcbf10cffc3ab5cee23d7643fcc960dff85973fd3c2d6c86e8dbb0c"}, + {file = "rpds_py-0.29.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:070befbb868f257d24c3bb350dbd6e2f645e83731f31264b19d7231dd5c396c7"}, + {file = "rpds_py-0.29.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:fc935f6b20b0c9f919a8ff024739174522abd331978f750a74bb68abd117bd19"}, + {file = 
"rpds_py-0.29.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8c5a8ecaa44ce2d8d9d20a68a2483a74c07f05d72e94a4dff88906c8807e77b0"}, + {file = "rpds_py-0.29.0-cp312-cp312-win32.whl", hash = "sha256:ba5e1aeaf8dd6d8f6caba1f5539cddda87d511331714b7b5fc908b6cfc3636b7"}, + {file = "rpds_py-0.29.0-cp312-cp312-win_amd64.whl", hash = "sha256:b5f6134faf54b3cb83375db0f113506f8b7770785be1f95a631e7e2892101977"}, + {file = "rpds_py-0.29.0-cp312-cp312-win_arm64.whl", hash = "sha256:b016eddf00dca7944721bf0cd85b6af7f6c4efaf83ee0b37c4133bd39757a8c7"}, + {file = "rpds_py-0.29.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1585648d0760b88292eecab5181f5651111a69d90eff35d6b78aa32998886a61"}, + {file = "rpds_py-0.29.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:521807963971a23996ddaf764c682b3e46459b3c58ccd79fefbe16718db43154"}, + {file = "rpds_py-0.29.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a8896986efaa243ab713c69e6491a4138410f0fe36f2f4c71e18bd5501e8014"}, + {file = "rpds_py-0.29.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1d24564a700ef41480a984c5ebed62b74e6ce5860429b98b1fede76049e953e6"}, + {file = "rpds_py-0.29.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e6596b93c010d386ae46c9fba9bfc9fc5965fa8228edeac51576299182c2e31c"}, + {file = "rpds_py-0.29.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5cc58aac218826d054c7da7f95821eba94125d88be673ff44267bb89d12a5866"}, + {file = "rpds_py-0.29.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de73e40ebc04dd5d9556f50180395322193a78ec247e637e741c1b954810f295"}, + {file = "rpds_py-0.29.0-cp313-cp313-manylinux_2_31_riscv64.whl", hash = "sha256:295ce5ac7f0cf69a651ea75c8f76d02a31f98e5698e82a50a5f4d4982fbbae3b"}, + {file = "rpds_py-0.29.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1ea59b23ea931d494459c8338056fe7d93458c0bf3ecc061cd03916505369d55"}, + {file = "rpds_py-0.29.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f49d41559cebd608042fdcf54ba597a4a7555b49ad5c1c0c03e0af82692661cd"}, + {file = "rpds_py-0.29.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:05a2bd42768ea988294ca328206efbcc66e220d2d9b7836ee5712c07ad6340ea"}, + {file = "rpds_py-0.29.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:33ca7bdfedd83339ca55da3a5e1527ee5870d4b8369456b5777b197756f3ca22"}, + {file = "rpds_py-0.29.0-cp313-cp313-win32.whl", hash = "sha256:20c51ae86a0bb9accc9ad4e6cdeec58d5ebb7f1b09dd4466331fc65e1766aae7"}, + {file = "rpds_py-0.29.0-cp313-cp313-win_amd64.whl", hash = "sha256:6410e66f02803600edb0b1889541f4b5cc298a5ccda0ad789cc50ef23b54813e"}, + {file = "rpds_py-0.29.0-cp313-cp313-win_arm64.whl", hash = "sha256:56838e1cd9174dc23c5691ee29f1d1be9eab357f27efef6bded1328b23e1ced2"}, + {file = "rpds_py-0.29.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:37d94eadf764d16b9a04307f2ab1d7af6dc28774bbe0535c9323101e14877b4c"}, + {file = "rpds_py-0.29.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:d472cf73efe5726a067dce63eebe8215b14beabea7c12606fd9994267b3cfe2b"}, + {file = "rpds_py-0.29.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:72fdfd5ff8992e4636621826371e3ac5f3e3b8323e9d0e48378e9c13c3dac9d0"}, + {file = "rpds_py-0.29.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2549d833abdf8275c901313b9e8ff8fba57e50f6a495035a2a4e30621a2f7cc4"}, + {file = 
"rpds_py-0.29.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4448dad428f28a6a767c3e3b80cde3446a22a0efbddaa2360f4bb4dc836d0688"}, + {file = "rpds_py-0.29.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:115f48170fd4296a33938d8c11f697f5f26e0472e43d28f35624764173a60e4d"}, + {file = "rpds_py-0.29.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e5bb73ffc029820f4348e9b66b3027493ae00bca6629129cd433fd7a76308ee"}, + {file = "rpds_py-0.29.0-cp313-cp313t-manylinux_2_31_riscv64.whl", hash = "sha256:b1581fcde18fcdf42ea2403a16a6b646f8eb1e58d7f90a0ce693da441f76942e"}, + {file = "rpds_py-0.29.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:16e9da2bda9eb17ea318b4c335ec9ac1818e88922cbe03a5743ea0da9ecf74fb"}, + {file = "rpds_py-0.29.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:28fd300326dd21198f311534bdb6d7e989dd09b3418b3a91d54a0f384c700967"}, + {file = "rpds_py-0.29.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:2aba991e041d031c7939e1358f583ae405a7bf04804ca806b97a5c0e0af1ea5e"}, + {file = "rpds_py-0.29.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:7f437026dbbc3f08c99cc41a5b2570c6e1a1ddbe48ab19a9b814254128d4ea7a"}, + {file = "rpds_py-0.29.0-cp313-cp313t-win32.whl", hash = "sha256:6e97846e9800a5d0fe7be4d008f0c93d0feeb2700da7b1f7528dabafb31dfadb"}, + {file = "rpds_py-0.29.0-cp313-cp313t-win_amd64.whl", hash = "sha256:f49196aec7c4b406495f60e6f947ad71f317a765f956d74bbd83996b9edc0352"}, + {file = "rpds_py-0.29.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:394d27e4453d3b4d82bb85665dc1fcf4b0badc30fc84282defed71643b50e1a1"}, + {file = "rpds_py-0.29.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:55d827b2ae95425d3be9bc9a5838b6c29d664924f98146557f7715e331d06df8"}, + {file = "rpds_py-0.29.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc31a07ed352e5462d3ee1b22e89285f4ce97d5266f6d1169da1142e78045626"}, + {file = "rpds_py-0.29.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c4695dd224212f6105db7ea62197144230b808d6b2bba52238906a2762f1d1e7"}, + {file = "rpds_py-0.29.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcae1770b401167f8b9e1e3f566562e6966ffa9ce63639916248a9e25fa8a244"}, + {file = "rpds_py-0.29.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:90f30d15f45048448b8da21c41703b31c61119c06c216a1bf8c245812a0f0c17"}, + {file = "rpds_py-0.29.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44a91e0ab77bdc0004b43261a4b8cd6d6b451e8d443754cfda830002b5745b32"}, + {file = "rpds_py-0.29.0-cp314-cp314-manylinux_2_31_riscv64.whl", hash = "sha256:4aa195e5804d32c682e453b34474f411ca108e4291c6a0f824ebdc30a91c973c"}, + {file = "rpds_py-0.29.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7971bdb7bf4ee0f7e6f67fa4c7fbc6019d9850cc977d126904392d363f6f8318"}, + {file = "rpds_py-0.29.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:8ae33ad9ce580c7a47452c3b3f7d8a9095ef6208e0a0c7e4e2384f9fc5bf8212"}, + {file = "rpds_py-0.29.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:c661132ab2fb4eeede2ef69670fd60da5235209874d001a98f1542f31f2a8a94"}, + {file = "rpds_py-0.29.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:bb78b3a0d31ac1bde132c67015a809948db751cb4e92cdb3f0b242e430b6ed0d"}, + {file = "rpds_py-0.29.0-cp314-cp314-win32.whl", hash = 
"sha256:f475f103488312e9bd4000bc890a95955a07b2d0b6e8884aef4be56132adbbf1"}, + {file = "rpds_py-0.29.0-cp314-cp314-win_amd64.whl", hash = "sha256:b9cf2359a4fca87cfb6801fae83a76aedf66ee1254a7a151f1341632acf67f1b"}, + {file = "rpds_py-0.29.0-cp314-cp314-win_arm64.whl", hash = "sha256:9ba8028597e824854f0f1733d8b964e914ae3003b22a10c2c664cb6927e0feb9"}, + {file = "rpds_py-0.29.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:e71136fd0612556b35c575dc2726ae04a1669e6a6c378f2240312cf5d1a2ab10"}, + {file = "rpds_py-0.29.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:76fe96632d53f3bf0ea31ede2f53bbe3540cc2736d4aec3b3801b0458499ef3a"}, + {file = "rpds_py-0.29.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9459a33f077130dbb2c7c3cea72ee9932271fb3126404ba2a2661e4fe9eb7b79"}, + {file = "rpds_py-0.29.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5c9546cfdd5d45e562cc0444b6dddc191e625c62e866bf567a2c69487c7ad28a"}, + {file = "rpds_py-0.29.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:12597d11d97b8f7e376c88929a6e17acb980e234547c92992f9f7c058f1a7310"}, + {file = "rpds_py-0.29.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28de03cf48b8a9e6ec10318f2197b83946ed91e2891f651a109611be4106ac4b"}, + {file = "rpds_py-0.29.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd7951c964069039acc9d67a8ff1f0a7f34845ae180ca542b17dc1456b1f1808"}, + {file = "rpds_py-0.29.0-cp314-cp314t-manylinux_2_31_riscv64.whl", hash = "sha256:c07d107b7316088f1ac0177a7661ca0c6670d443f6fe72e836069025e6266761"}, + {file = "rpds_py-0.29.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1de2345af363d25696969befc0c1688a6cb5e8b1d32b515ef84fc245c6cddba3"}, + {file = "rpds_py-0.29.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:00e56b12d2199ca96068057e1ae7f9998ab6e99cda82431afafd32f3ec98cca9"}, + {file = "rpds_py-0.29.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:3919a3bbecee589300ed25000b6944174e07cd20db70552159207b3f4bbb45b8"}, + {file = "rpds_py-0.29.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:e7fa2ccc312bbd91e43aa5e0869e46bc03278a3dddb8d58833150a18b0f0283a"}, + {file = "rpds_py-0.29.0-cp314-cp314t-win32.whl", hash = "sha256:97c817863ffc397f1e6a6e9d2d89fe5408c0a9922dac0329672fb0f35c867ea5"}, + {file = "rpds_py-0.29.0-cp314-cp314t-win_amd64.whl", hash = "sha256:2023473f444752f0f82a58dfcbee040d0a1b3d1b3c2ec40e884bd25db6d117d2"}, + {file = "rpds_py-0.29.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:acd82a9e39082dc5f4492d15a6b6c8599aa21db5c35aaf7d6889aea16502c07d"}, + {file = "rpds_py-0.29.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:715b67eac317bf1c7657508170a3e011a1ea6ccb1c9d5f296e20ba14196be6b3"}, + {file = "rpds_py-0.29.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3b1b87a237cb2dba4db18bcfaaa44ba4cd5936b91121b62292ff21df577fc43"}, + {file = "rpds_py-0.29.0-pp311-pypy311_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1c3c3e8101bb06e337c88eb0c0ede3187131f19d97d43ea0e1c5407ea74c0cbf"}, + {file = "rpds_py-0.29.0-pp311-pypy311_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b8e54d6e61f3ecd3abe032065ce83ea63417a24f437e4a3d73d2f85ce7b7cfe"}, + {file = "rpds_py-0.29.0-pp311-pypy311_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3fbd4e9aebf110473a420dea85a238b254cf8a15acb04b22a5a6b5ce8925b760"}, + 
{file = "rpds_py-0.29.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80fdf53d36e6c72819993e35d1ebeeb8e8fc688d0c6c2b391b55e335b3afba5a"}, + {file = "rpds_py-0.29.0-pp311-pypy311_pp73-manylinux_2_31_riscv64.whl", hash = "sha256:ea7173df5d86f625f8dde6d5929629ad811ed8decda3b60ae603903839ac9ac0"}, + {file = "rpds_py-0.29.0-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:76054d540061eda273274f3d13a21a4abdde90e13eaefdc205db37c05230efce"}, + {file = "rpds_py-0.29.0-pp311-pypy311_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:9f84c549746a5be3bc7415830747a3a0312573afc9f95785eb35228bb17742ec"}, + {file = "rpds_py-0.29.0-pp311-pypy311_pp73-musllinux_1_2_i686.whl", hash = "sha256:0ea962671af5cb9a260489e311fa22b2e97103e3f9f0caaea6f81390af96a9ed"}, + {file = "rpds_py-0.29.0-pp311-pypy311_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:f7728653900035fb7b8d06e1e5900545d8088efc9d5d4545782da7df03ec803f"}, + {file = "rpds_py-0.29.0.tar.gz", hash = "sha256:fe55fe686908f50154d1dc599232016e50c243b438c3b7432f24e2895b0e5359"}, ] [[package]] From f3975ce247e8a0a9df6104ec8458deb5af566a72 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Nov 2025 16:30:41 +0000 Subject: [PATCH 07/47] Bump actions/setup-go from 6.0.0 to 6.1.0 (#19214) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [actions/setup-go](https://github.com/actions/setup-go) from 6.0.0 to 6.1.0.
Release notes

Sourced from actions/setup-go's releases.

v6.1.0

What's Changed: enhancements, dependency updates, and new contributors (individual entries not shown).

Full Changelog: https://github.com/actions/setup-go/compare/v6...v6.1.0

Commits
  • 4dc6199 Bump semver and @​types/semver (#652)
  • f3787be Add comprehensive breaking changes documentation for v6 (#674)
  • 3a0c2c8 Bump actions/publish-action from 0.3.0 to 0.4.0 (#641)
  • faf5242 Add support for .tool-versions file in setup-go, update workflow (#673) (see the sketch after this list)
  • 7bc60db Fall back to downloading from go.dev/dl instead of storage.googleapis.com/gol...
  • c0137ca Bump eslint-config-prettier from 10.0.1 to 10.1.8 and document breaking chang...
  • See full diff in compare view
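
As a hedged sketch of the `.tool-versions` change flagged in the list above: this assumes the file lives at the repository root and that the feature is exposed through the existing `go-version-file` input (the same input the workflow diffs below point at `go.mod`); treat it as illustrative rather than a confirmed usage pattern.

      - uses: actions/setup-go@v6
        with:
          # v6.1.0 can also read the Go version from an asdf-style
          # .tool-versions file, alongside go.mod/go.work.
          go-version-file: .tool-versions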

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=actions/setup-go&package-manager=github_actions&previous-version=6.0.0&new-version=6.1.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:

  • `@dependabot rebase` will rebase this PR
  • `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
  • `@dependabot merge` will merge this PR after your CI passes on it
  • `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
  • `@dependabot cancel merge` will cancel a previously requested merge and block automerging
  • `@dependabot reopen` will reopen this PR if it is closed
  • `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
  • `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
  • `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
  • `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
  • `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/latest_deps.yml | 2 +- .github/workflows/tests.yml | 2 +- .github/workflows/twisted_trunk.yml | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml index 2076a1c1e1..276b6cd383 100644 --- a/.github/workflows/latest_deps.yml +++ b/.github/workflows/latest_deps.yml @@ -209,7 +209,7 @@ jobs: - name: Prepare Complement's Prerequisites run: synapse/.ci/scripts/setup_complement_prerequisites.sh - - uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0 + - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: cache-dependency-path: complement/go.sum go-version-file: complement/go.mod diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index f320e89069..ce01be153c 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -705,7 +705,7 @@ jobs: - name: Prepare Complement's Prerequisites run: synapse/.ci/scripts/setup_complement_prerequisites.sh - - uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0 + - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: cache-dependency-path: complement/go.sum go-version-file: complement/go.mod diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml index 11b7bfe143..a252816e05 100644 --- a/.github/workflows/twisted_trunk.yml +++ b/.github/workflows/twisted_trunk.yml @@ -182,7 +182,7 @@ jobs: - name: Prepare Complement's Prerequisites run: synapse/.ci/scripts/setup_complement_prerequisites.sh - - uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0 + - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: cache-dependency-path: complement/go.sum go-version-file: complement/go.mod From 9fb2b1731bff61fb326d78e5f6ace268b48e70b8 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Nov 2025 16:37:55 +0000 Subject: [PATCH 08/47] Bump actions/checkout from 5.0.0 to 6.0.0 (#19213) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [actions/checkout](https://github.com/actions/checkout) from 5.0.0 to 6.0.0.
Release notes

Sourced from actions/checkout's releases.

v6.0.0

What's Changed

Full Changelog: https://github.com/actions/checkout/compare/v5.0.0...v6.0.0

v6-beta

What's Changed

Updated persist-credentials to store the credentials under $RUNNER_TEMP instead of directly in the local git config.

This requires a minimum Actions Runner version of v2.329.0 to access the persisted credentials for Docker container action scenarios.
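
For illustration, a minimal workflow step sketch: `persist-credentials: false` is checkout's long-standing opt-out input rather than anything new in v6, and disabling persistence entirely is one way to sidestep the new credential-storage behaviour for jobs that never push.

      steps:
        # v6 stores persisted credentials under $RUNNER_TEMP instead of
        # writing them into the local git config.
        - uses: actions/checkout@v6
          with:
            # Opt out entirely when the job only needs read access.
            persist-credentials: false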

v5.0.1

What's Changed

Full Changelog: https://github.com/actions/checkout/compare/v5...v5.0.1

Changelog

Sourced from actions/checkout's changelog.

Version entries: V6.0.0, V5.0.1, V5.0.0, V4.3.1, V4.3.0, v4.2.2, v4.2.1, v4.2.0, v4.1.7, v4.1.6, v4.1.5, ... (truncated; per-version details not shown)

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=actions/checkout&package-manager=github_actions&previous-version=5.0.0&new-version=6.0.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:

  • `@dependabot rebase` will rebase this PR
  • `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
  • `@dependabot merge` will merge this PR after your CI passes on it
  • `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
  • `@dependabot cancel merge` will cancel a previously requested merge and block automerging
  • `@dependabot reopen` will reopen this PR if it is closed
  • `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
  • `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
  • `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
  • `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
  • `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/docker.yml | 2 +- .github/workflows/docs-pr.yaml | 4 +- .github/workflows/docs.yaml | 2 +- .github/workflows/fix_lint.yaml | 2 +- .github/workflows/latest_deps.yml | 10 ++--- .github/workflows/poetry_lockfile.yaml | 2 +- .github/workflows/push_complement_image.yml | 6 +-- .github/workflows/release-artifacts.yml | 8 ++-- .github/workflows/schema.yaml | 4 +- .github/workflows/tests.yml | 44 ++++++++++----------- .github/workflows/triage_labelled.yml | 2 +- .github/workflows/twisted_trunk.yml | 10 ++--- 12 files changed, 48 insertions(+), 48 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 52a0762efc..d0cdd3acaf 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -31,7 +31,7 @@ jobs: uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1 - name: Checkout repository - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Extract version from pyproject.toml # Note: explicitly requesting bash will mean bash is invoked with `-eo pipefail`, see diff --git a/.github/workflows/docs-pr.yaml b/.github/workflows/docs-pr.yaml index 6a61dd5fb1..d59e069171 100644 --- a/.github/workflows/docs-pr.yaml +++ b/.github/workflows/docs-pr.yaml @@ -13,7 +13,7 @@ jobs: name: GitHub Pages runs-on: ubuntu-latest steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: # Fetch all history so that the schema_versions script works. fetch-depth: 0 @@ -50,7 +50,7 @@ jobs: name: Check links in documentation runs-on: ubuntu-latest steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Setup mdbook uses: peaceiris/actions-mdbook@ee69d230fe19748b7abf22df32acaa93833fad08 # v2.0.0 diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index f260a4f804..d02428db19 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -50,7 +50,7 @@ jobs: needs: - pre steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: # Fetch all history so that the schema_versions script works. 
fetch-depth: 0 diff --git a/.github/workflows/fix_lint.yaml b/.github/workflows/fix_lint.yaml index c33481a51e..a5469be56c 100644 --- a/.github/workflows/fix_lint.yaml +++ b/.github/workflows/fix_lint.yaml @@ -18,7 +18,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Install Rust uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml index 276b6cd383..eee8dc7e0b 100644 --- a/.github/workflows/latest_deps.yml +++ b/.github/workflows/latest_deps.yml @@ -42,7 +42,7 @@ jobs: if: needs.check_repo.outputs.should_run_workflow == 'true' runs-on: ubuntu-latest steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Install Rust uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master with: @@ -77,7 +77,7 @@ jobs: postgres-version: "14" steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Install Rust uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master @@ -152,7 +152,7 @@ jobs: BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }} steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Install Rust uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master @@ -202,7 +202,7 @@ jobs: steps: - name: Check out synapse codebase - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: path: synapse @@ -234,7 +234,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - uses: JasonEtco/create-an-issue@1b14a70e4d8dc185e5cc76d3bec9eab20257b2c5 # v2.9.2 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/poetry_lockfile.yaml b/.github/workflows/poetry_lockfile.yaml index 19468c2d92..62b796287c 100644 --- a/.github/workflows/poetry_lockfile.yaml +++ b/.github/workflows/poetry_lockfile.yaml @@ -16,7 +16,7 @@ jobs: name: "Check locked dependencies have sdists" runs-on: ubuntu-latest steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: '3.x' diff --git a/.github/workflows/push_complement_image.yml b/.github/workflows/push_complement_image.yml index e08775e588..c562275a38 100644 --- a/.github/workflows/push_complement_image.yml +++ b/.github/workflows/push_complement_image.yml @@ -33,17 +33,17 @@ jobs: packages: write steps: - name: Checkout specific branch (debug build) - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 if: github.event_name == 'workflow_dispatch' with: ref: ${{ inputs.branch }} - name: Checkout clean copy of develop (scheduled build) - uses: 
actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 if: github.event_name == 'schedule' with: ref: develop - name: Checkout clean copy of master (on-push) - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 if: github.event_name == 'push' with: ref: master diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index c88546c3bf..e63d65fdf3 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -27,7 +27,7 @@ jobs: name: "Calculate list of debian distros" runs-on: ubuntu-latest steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: "3.x" @@ -55,7 +55,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: path: src @@ -132,7 +132,7 @@ jobs: os: "ubuntu-24.04-arm" steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: @@ -170,7 +170,7 @@ jobs: if: ${{ !startsWith(github.ref, 'refs/pull/') }} steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: "3.10" diff --git a/.github/workflows/schema.yaml b/.github/workflows/schema.yaml index 6c416e762d..52b5cd4c53 100644 --- a/.github/workflows/schema.yaml +++ b/.github/workflows/schema.yaml @@ -14,7 +14,7 @@ jobs: name: Ensure Synapse config schema is valid runs-on: ubuntu-latest steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: "3.x" @@ -40,7 +40,7 @@ jobs: name: Ensure generated documentation is up-to-date runs-on: ubuntu-latest steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: "3.x" diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index ce01be153c..771474f74e 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -86,7 +86,7 @@ jobs: if: ${{ needs.changes.outputs.linting == 'true' }} steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Install Rust uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master with: @@ -106,7 +106,7 @@ jobs: if: ${{ needs.changes.outputs.linting == 'true' }} steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - uses: 
actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: "3.x" @@ -116,7 +116,7 @@ jobs: check-lockfile: runs-on: ubuntu-latest steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: "3.x" @@ -129,7 +129,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Setup Poetry uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0 @@ -151,7 +151,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Install Rust uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master @@ -187,7 +187,7 @@ jobs: lint-crlf: runs-on: ubuntu-latest steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Check line endings run: scripts-dev/check_line_terminators.sh @@ -195,7 +195,7 @@ jobs: if: ${{ (github.base_ref == 'develop' || contains(github.base_ref, 'release-')) && github.actor != 'dependabot[bot]' }} runs-on: ubuntu-latest steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: ref: ${{ github.event.pull_request.head.sha }} fetch-depth: 0 @@ -213,7 +213,7 @@ jobs: if: ${{ needs.changes.outputs.rust == 'true' }} steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Install Rust uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master @@ -232,7 +232,7 @@ jobs: if: ${{ needs.changes.outputs.rust == 'true' }} steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Install Rust uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master @@ -250,7 +250,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Install Rust uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master @@ -286,7 +286,7 @@ jobs: if: ${{ needs.changes.outputs.rust == 'true' }} steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Install Rust uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master @@ -306,7 +306,7 @@ jobs: needs: changes if: ${{ needs.changes.outputs.linting_readme == 'true' }} steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: "3.x" @@ -354,7 +354,7 @@ jobs: needs: linting-done runs-on: ubuntu-latest steps: - - uses: 
actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: "3.x" @@ -375,7 +375,7 @@ jobs: job: ${{ fromJson(needs.calculate-test-jobs.outputs.trial_test_matrix) }} steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - run: sudo apt-get -qq install xmlsec1 - name: Set up PostgreSQL ${{ matrix.job.postgres-version }} if: ${{ matrix.job.postgres-version }} @@ -431,7 +431,7 @@ jobs: - changes runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Install Rust uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master @@ -496,7 +496,7 @@ jobs: extras: ["all"] steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 # Install libs necessary for PyPy to build binary wheels for dependencies - run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0 @@ -546,7 +546,7 @@ jobs: job: ${{ fromJson(needs.calculate-test-jobs.outputs.sytest_test_matrix) }} steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Prepare test blacklist run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers @@ -593,7 +593,7 @@ jobs: --health-retries 5 steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - run: sudo apt-get -qq install xmlsec1 postgresql-client - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0 with: @@ -637,7 +637,7 @@ jobs: --health-retries 5 steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Add PostgreSQL apt repository # We need a version of pg_dump that can handle the version of # PostgreSQL being tested against. 
The Ubuntu package repository lags @@ -692,7 +692,7 @@ jobs: steps: - name: Checkout synapse codebase - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: path: synapse @@ -728,7 +728,7 @@ jobs: - changes steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Install Rust uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master @@ -748,7 +748,7 @@ jobs: - changes steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Install Rust uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master diff --git a/.github/workflows/triage_labelled.yml b/.github/workflows/triage_labelled.yml index d291eea3a1..34222b7d1b 100644 --- a/.github/workflows/triage_labelled.yml +++ b/.github/workflows/triage_labelled.yml @@ -22,7 +22,7 @@ jobs: # This field is case-sensitive. TARGET_STATUS: Needs info steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: # Only clone the script file we care about, instead of the whole repo. sparse-checkout: .ci/scripts/triage_labelled_issue.sh diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml index a252816e05..b07f98b1cb 100644 --- a/.github/workflows/twisted_trunk.yml +++ b/.github/workflows/twisted_trunk.yml @@ -43,7 +43,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Install Rust uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master @@ -70,7 +70,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - run: sudo apt-get -qq install xmlsec1 - name: Install Rust @@ -117,7 +117,7 @@ jobs: - ${{ github.workspace }}:/src steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Install Rust uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master @@ -175,7 +175,7 @@ jobs: steps: - name: Run actions/checkout@v4 for synapse - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: path: synapse @@ -217,7 +217,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - uses: JasonEtco/create-an-issue@1b14a70e4d8dc185e5cc76d3bec9eab20257b2c5 # v2.9.2 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} From 8b795836439dab16281e84a9404b22ff2438b7c1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 24 Nov 2025 18:04:07 +0000 Subject: [PATCH 09/47] Bump sentry-sdk from 2.44.0 to 2.46.0 (#19218) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [sentry-sdk](https://github.com/getsentry/sentry-python) 
from 2.44.0 to 2.46.0.
Release notes

Sourced from sentry-sdk's releases.

2.46.0

Various fixes & improvements

2.45.0

Various fixes & improvements

  • OTLPIntegration (#4877) by @​sl0thentr0py

    Enable the new OTLP integration with the code snippet below, and your OpenTelemetry instrumentation will be automatically sent to Sentry's OTLP ingestion endpoint.

      import sentry_sdk
      from sentry_sdk.integrations.otlp import OTLPIntegration

      sentry_sdk.init(
          dsn="<your-dsn>",
          # Add data like inputs and responses;
          # see https://docs.sentry.io/platforms/python/data-management/data-collected/ for more info
          send_default_pii=True,
          integrations=[
              OTLPIntegration(),
          ],
      )

    Under the hood, this will set up:

    • A SpanExporter that will automatically set up the OTLP ingestion endpoint from your DSN
    • A Propagator that ensures Distributed Tracing works
    • Trace/Span linking for all other Sentry events such as Errors, Logs, Crons and Metrics

    If you were using the SentrySpanProcessor before, we recommend migrating over to OTLPIntegration since it's a much simpler setup (a minimal usage sketch follows this list).

  • feat(integrations): implement context management for invoke_agent spans (#5089) by @​constantinius

  • feat(loguru): Capture extra (#5096) by @​sentrivana

  • feat: Attach server.address to metrics (#5113) by @​alexander-alderman-webb

  • fix: Cast message and detail attributes before appending exception notes (#5114) by @​alexander-alderman-webb

  • fix(integrations): ensure that GEN_AI_AGENT_NAME is properly set for GEN_AI spans under an invoke_agent span (#5030) by @​constantinius

  • fix(logs): Update sentry.origin (#5112) by @​sentrivana

  • chore: Deprecate description truncation option for Redis spans (#5073) by @​alexander-alderman-webb

  • chore: Deprecate max_spans LangChain parameter (#5074) by @​alexander-alderman-webb

  • chore(toxgen): Check availability of pip and add detail to exceptions (#5076) by @​alexander-alderman-webb

... (truncated)
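
As a rough sketch of the OTLPIntegration item above (assuming the standard `opentelemetry-api` package is installed alongside `sentry-sdk`), spans created through the plain OpenTelemetry API would flow to Sentry once the integration from the earlier snippet is enabled:

      from opentelemetry import trace

      tracer = trace.get_tracer(__name__)

      # Spans created via the plain OpenTelemetry API are picked up by the
      # SpanExporter the integration installs and sent to Sentry's OTLP
      # ingestion endpoint.
      with tracer.start_as_current_span("process-job"):
          ...  # instrumented work here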


Commits
  • d3375bc Update CHANGELOG.md
  • 23abfe2 release: 2.46.0
  • ca19d63 feat: Preserve metadata on wrapped coroutines (#5105)
  • cf165e3 build(deps): bump actions/checkout from 5.0.0 to 6.0.0 (#5136)
  • b8d6a57 build(deps): bump actions/create-github-app-token from 2.1.4 to 2.2.0 (#5137)
  • c0c28b8 build(deps): bump supercharge/redis-github-action from 1.8.0 to 1.8.1 (#5138)
  • fb18c21 fix(pydantic-ai): Make imports defensive to avoid ModuleNotFoundError (#5135)
  • f945e38 Fix openai-agents import (#5132)
  • 8596f89 fix(integrations): enhance input handling for embeddings in LiteLLM integrati...
  • 0e6e808 test(openai-agents): Remove MagicMock from mocked ModelResponse (#5126)
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=sentry-sdk&package-manager=pip&previous-version=2.44.0&new-version=2.46.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR:

  • `@dependabot rebase` will rebase this PR
  • `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it
  • `@dependabot merge` will merge this PR after your CI passes on it
  • `@dependabot squash and merge` will squash and merge this PR after your CI passes on it
  • `@dependabot cancel merge` will cancel a previously requested merge and block automerging
  • `@dependabot reopen` will reopen this PR if it is closed
  • `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually
  • `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency
  • `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself)
  • `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself)
  • `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index 5a79b932a5..f723322a55 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2543,15 +2543,15 @@ doc = ["Sphinx", "sphinx-rtd-theme"] [[package]] name = "sentry-sdk" -version = "2.44.0" +version = "2.46.0" description = "Python client for Sentry (https://sentry.io)" optional = true python-versions = ">=3.6" groups = ["main"] markers = "extra == \"all\" or extra == \"sentry\"" files = [ - {file = "sentry_sdk-2.44.0-py2.py3-none-any.whl", hash = "sha256:9e36a0372b881e8f92fdbff4564764ce6cec4b7f25424d0a3a8d609c9e4651a7"}, - {file = "sentry_sdk-2.44.0.tar.gz", hash = "sha256:5b1fe54dfafa332e900b07dd8f4dfe35753b64e78e7d9b1655a28fd3065e2493"}, + {file = "sentry_sdk-2.46.0-py2.py3-none-any.whl", hash = "sha256:4eeeb60198074dff8d066ea153fa6f241fef1668c10900ea53a4200abc8da9b1"}, + {file = "sentry_sdk-2.46.0.tar.gz", hash = "sha256:91821a23460725734b7741523021601593f35731808afc0bb2ba46c27b8acd91"}, ] [package.dependencies] @@ -2590,6 +2590,7 @@ openai = ["openai (>=1.0.0)", "tiktoken (>=0.3.0)"] openfeature = ["openfeature-sdk (>=0.7.1)"] opentelemetry = ["opentelemetry-distro (>=0.35b0)"] opentelemetry-experimental = ["opentelemetry-distro"] +opentelemetry-otlp = ["opentelemetry-distro[otlp] (>=0.35b0)"] pure-eval = ["asttokens", "executing", "pure_eval"] pydantic-ai = ["pydantic-ai (>=1.0.0)"] pymongo = ["pymongo (>=3.1)"] From db975ea10d58a89862d15e99170b0f76f409efd6 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 25 Nov 2025 10:20:47 +0000 Subject: [PATCH 10/47] Expire sliding sync connections (#19211) We add some logic to expire sliding sync connections if they get old or if there is too much pending data to return. The values of the constants are picked fairly arbitrarily, these are currently: 1. More than 100 rooms with pending events if the connection hasn't been used in over an hour 2. The connection hasn't been used for over a week Reviewable commit-by-commit --------- Co-authored-by: Eric Eastwood --- changelog.d/19211.misc | 1 + synapse/handlers/sliding_sync/room_lists.py | 59 +++++++++- synapse/handlers/sliding_sync/store.py | 2 +- .../storage/databases/main/sliding_sync.py | 68 ++++++++++- synapse/storage/databases/main/stream.py | 14 ++- .../main/delta/93/03_sss_pos_last_used.sql | 27 +++++ synapse/types/handlers/sliding_sync.py | 8 ++ synapse/util/constants.py | 1 + .../sliding_sync/test_connection_tracking.py | 108 ++++++++++++++++++ .../client/sliding_sync/test_sliding_sync.py | 28 ++++- 10 files changed, 304 insertions(+), 12 deletions(-) create mode 100644 changelog.d/19211.misc create mode 100644 synapse/storage/schema/main/delta/93/03_sss_pos_last_used.sql diff --git a/changelog.d/19211.misc b/changelog.d/19211.misc new file mode 100644 index 0000000000..d8a4a44662 --- /dev/null +++ b/changelog.d/19211.misc @@ -0,0 +1 @@ +Expire sliding sync connections that are too old or have too much pending data. 
diff --git a/synapse/handlers/sliding_sync/room_lists.py b/synapse/handlers/sliding_sync/room_lists.py index 3d11902236..fa4ff22b64 100644 --- a/synapse/handlers/sliding_sync/room_lists.py +++ b/synapse/handlers/sliding_sync/room_lists.py @@ -34,10 +34,12 @@ EventTypes, Membership, ) +from synapse.api.errors import SlidingSyncUnknownPosition from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.events import StrippedStateEvent from synapse.events.utils import parse_stripped_state_event from synapse.logging.opentracing import start_active_span, trace +from synapse.storage.databases.main.sliding_sync import UPDATE_INTERVAL_LAST_USED_TS_MS from synapse.storage.databases.main.state import ( ROOM_UNKNOWN_SENTINEL, Sentinel as StateSentinel, @@ -68,6 +70,7 @@ ) from synapse.types.state import StateFilter from synapse.util import MutableOverlayMapping +from synapse.util.constants import MILLISECONDS_PER_SECOND, ONE_HOUR_SECONDS from synapse.util.sentinel import Sentinel if TYPE_CHECKING: @@ -77,6 +80,27 @@ logger = logging.getLogger(__name__) +# Minimum time in milliseconds since the last sync before we consider expiring +# the connection due to too many rooms to send. This stops from getting into +# tight loops with clients that request lots of data at once. +# +# c.f. `NUM_ROOMS_THRESHOLD`. These values are somewhat arbitrary picked. +MINIMUM_NOT_USED_AGE_EXPIRY_MS = ONE_HOUR_SECONDS * MILLISECONDS_PER_SECOND + +# How many rooms with updates we allow before we consider the connection expired +# due to too many rooms to send. +# +# c.f. `MINIMUM_NOT_USED_AGE_EXPIRY_MS`. These values are somewhat arbitrary +# picked. +NUM_ROOMS_THRESHOLD = 100 + +# Sanity check that our minimum age is sensible compared to the update interval, +# i.e. if `MINIMUM_NOT_USED_AGE_EXPIRY_MS` is too small then we might expire the +# connection even if it is actively being used (and we're just not updating the +# DB frequently enough). We arbitrarily double the update interval to give some +# wiggle room. +assert 2 * UPDATE_INTERVAL_LAST_USED_TS_MS < MINIMUM_NOT_USED_AGE_EXPIRY_MS + # Helper definition for the types that we might return. We do this to avoid # copying data between types (which can be expensive for many rooms). RoomsForUserType = RoomsForUserStateReset | RoomsForUser | RoomsForUserSlidingSync @@ -176,6 +200,7 @@ def __init__(self, hs: "HomeServer"): self.storage_controllers = hs.get_storage_controllers() self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync self.is_mine_id = hs.is_mine_id + self._clock = hs.get_clock() async def compute_interested_rooms( self, @@ -857,11 +882,41 @@ async def _filter_relevant_rooms_to_send( # We only need to check for new events since any state changes # will also come down as new events. - rooms_that_have_updates = ( - self.store.get_rooms_that_might_have_updates( + + rooms_that_have_updates = await ( + self.store.get_rooms_that_have_updates_since_sliding_sync_table( relevant_room_map.keys(), from_token.room_key ) ) + + # Check if we have lots of updates to send, if so then its + # better for us to tell the client to do a full resync + # instead (to try and avoid long SSS response times when + # there is new data). + # + # Due to the construction of the SSS API, the client is in + # charge of setting the range of rooms to request updates + # for. Generally, it will start with a small range and then + # expand (and occasionally it may contract the range again + # if its been offline for a while). 
If we know there are a + # lot of updates, it's better to reset the connection and + # wait for the client to start again (with a much smaller + # range) than to try and send down a large number of updates + # (which can take a long time). + # + # We only do this if the last sync was over + # `MINIMUM_NOT_USED_AGE_EXPIRY_MS` to ensure we don't get + # into tight loops with clients that keep requesting large + # sliding sync windows. + if len(rooms_that_have_updates) > NUM_ROOMS_THRESHOLD: + last_sync_ts = previous_connection_state.last_used_ts + if ( + last_sync_ts is not None + and (self._clock.time_msec() - last_sync_ts) + > MINIMUM_NOT_USED_AGE_EXPIRY_MS + ): + raise SlidingSyncUnknownPosition() + rooms_should_send.update(rooms_that_have_updates) relevant_rooms_to_send_map = { room_id: room_sync_config diff --git a/synapse/handlers/sliding_sync/store.py b/synapse/handlers/sliding_sync/store.py index 7bcd5f27ea..d01fab271f 100644 --- a/synapse/handlers/sliding_sync/store.py +++ b/synapse/handlers/sliding_sync/store.py @@ -75,7 +75,7 @@ async def get_and_clear_connection_positions( """ # If this is our first request, there is no previous connection state to fetch out of the database if from_token is None or from_token.connection_position == 0: - return PerConnectionState() + return PerConnectionState(last_used_ts=None) conn_id = sync_config.conn_id or "" diff --git a/synapse/storage/databases/main/sliding_sync.py b/synapse/storage/databases/main/sliding_sync.py index 2b67e75ac4..8cd3de8f40 100644 --- a/synapse/storage/databases/main/sliding_sync.py +++ b/synapse/storage/databases/main/sliding_sync.py @@ -20,6 +20,7 @@ from synapse.api.errors import SlidingSyncUnknownPosition from synapse.logging.opentracing import log_kv +from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage._base import SQLBaseStore, db_to_json from synapse.storage.database import ( DatabasePool, @@ -36,6 +37,12 @@ RoomSyncConfig, ) from synapse.util.caches.descriptors import cached +from synapse.util.constants import ( + MILLISECONDS_PER_SECOND, + ONE_DAY_SECONDS, + ONE_HOUR_SECONDS, + ONE_MINUTE_SECONDS, +) from synapse.util.json import json_encoder if TYPE_CHECKING: @@ -45,6 +52,21 @@ logger = logging.getLogger(__name__) +# How often to update the `last_used_ts` column on +# `sliding_sync_connection_positions` when the client uses a connection +# position. We don't want to update it on every use to avoid excessive +# writes, but we want it to be reasonably up-to-date to help with +# cleaning up old connection positions. +UPDATE_INTERVAL_LAST_USED_TS_MS = 5 * ONE_MINUTE_SECONDS * MILLISECONDS_PER_SECOND + +# Time in milliseconds the connection hasn't been used before we consider it +# expired and delete it. +CONNECTION_EXPIRY_MS = 7 * ONE_DAY_SECONDS * MILLISECONDS_PER_SECOND + +# How often we run the background process to delete old sliding sync connections. 
+CONNECTION_EXPIRY_FREQUENCY_MS = ONE_HOUR_SECONDS * MILLISECONDS_PER_SECOND + + class SlidingSyncStore(SQLBaseStore): def __init__( self, @@ -76,6 +98,12 @@ def __init__( replaces_index="sliding_sync_membership_snapshots_user_id", ) + if self.hs.config.worker.run_background_tasks: + self.clock.looping_call( + self.delete_old_sliding_sync_connections, + CONNECTION_EXPIRY_FREQUENCY_MS, + ) + async def get_latest_bump_stamp_for_room( self, room_id: str, @@ -202,6 +230,7 @@ def persist_per_connection_state_txn( "effective_device_id": device_id, "conn_id": conn_id, "created_ts": self.clock.time_msec(), + "last_used_ts": self.clock.time_msec(), }, returning=("connection_key",), ) @@ -384,7 +413,7 @@ def _get_and_clear_connection_positions_txn( # The `previous_connection_position` is a user-supplied value, so we # need to make sure that the one they supplied is actually theirs. sql = """ - SELECT connection_key + SELECT connection_key, last_used_ts FROM sliding_sync_connection_positions INNER JOIN sliding_sync_connections USING (connection_key) WHERE @@ -396,7 +425,20 @@ def _get_and_clear_connection_positions_txn( if row is None: raise SlidingSyncUnknownPosition() - (connection_key,) = row + (connection_key, last_used_ts) = row + + # Update the `last_used_ts` if it's due to be updated. We don't update + # every time to avoid excessive writes. + now = self.clock.time_msec() + if last_used_ts is None or now - last_used_ts > UPDATE_INTERVAL_LAST_USED_TS_MS: + self.db_pool.simple_update_txn( + txn, + table="sliding_sync_connections", + keyvalues={ + "connection_key": connection_key, + }, + updatevalues={"last_used_ts": now}, + ) # Now that we have seen the client has received and used the connection # position, we can delete all the other connection positions. @@ -480,12 +522,30 @@ def _get_and_clear_connection_positions_txn( logger.warning("Unrecognized sliding sync stream in DB %r", stream) return PerConnectionStateDB( + last_used_ts=last_used_ts, rooms=RoomStatusMap(rooms), receipts=RoomStatusMap(receipts), account_data=RoomStatusMap(account_data), room_configs=room_configs, ) + @wrap_as_background_process("delete_old_sliding_sync_connections") + async def delete_old_sliding_sync_connections(self) -> None: + """Delete sliding sync connections that have not been used for a long time.""" + cutoff_ts = self.clock.time_msec() - CONNECTION_EXPIRY_MS + + def delete_old_sliding_sync_connections_txn(txn: LoggingTransaction) -> None: + sql = """ + DELETE FROM sliding_sync_connections + WHERE last_used_ts IS NOT NULL AND last_used_ts < ? + """ + txn.execute(sql, (cutoff_ts,)) + + await self.db_pool.runInteraction( + "delete_old_sliding_sync_connections", + delete_old_sliding_sync_connections_txn, + ) + @attr.s(auto_attribs=True, frozen=True) class PerConnectionStateDB: @@ -498,6 +558,8 @@ class PerConnectionStateDB: When persisting this *only* contains updates to the state. 
""" + last_used_ts: int | None + rooms: "RoomStatusMap[str]" receipts: "RoomStatusMap[str]" account_data: "RoomStatusMap[str]" @@ -553,6 +615,7 @@ async def from_state( ) return PerConnectionStateDB( + last_used_ts=per_connection_state.last_used_ts, rooms=RoomStatusMap(rooms), receipts=RoomStatusMap(receipts), account_data=RoomStatusMap(account_data), @@ -596,6 +659,7 @@ async def to_state(self, store: "DataStore") -> "PerConnectionState": } return PerConnectionState( + last_used_ts=self.last_used_ts, rooms=RoomStatusMap(rooms), receipts=RoomStatusMap(receipts), account_data=RoomStatusMap(account_data), diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 8644ff412e..8fa1e2e5a9 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -740,7 +740,14 @@ async def get_rooms_that_have_updates_since_sliding_sync_table( from_key: RoomStreamToken, ) -> StrCollection: """Return the rooms that probably have had updates since the given - token (changes that are > `from_key`).""" + token (changes that are > `from_key`). + + May return false positives, but must not return false negatives. + + If `have_finished_sliding_sync_background_jobs` is False, then we return + all the room IDs, as we can't be sure that the sliding sync table is + fully populated. + """ # If the stream change cache is valid for the stream token, we can just # use the result of that. if from_key.stream >= self._events_stream_cache.get_earliest_known_position(): @@ -748,6 +755,11 @@ async def get_rooms_that_have_updates_since_sliding_sync_table( room_ids, from_key.stream ) + if not self.have_finished_sliding_sync_background_jobs(): + # If the table hasn't been populated yet, we have to assume all rooms + # have updates. + return room_ids + def get_rooms_that_have_updates_since_sliding_sync_table_txn( txn: LoggingTransaction, ) -> StrCollection: diff --git a/synapse/storage/schema/main/delta/93/03_sss_pos_last_used.sql b/synapse/storage/schema/main/delta/93/03_sss_pos_last_used.sql new file mode 100644 index 0000000000..747ba7a144 --- /dev/null +++ b/synapse/storage/schema/main/delta/93/03_sss_pos_last_used.sql @@ -0,0 +1,27 @@ +-- +-- This file is licensed under the Affero General Public License (AGPL) version 3. +-- +-- Copyright (C) 2025 Element Creations, Ltd +-- +-- This program is free software: you can redistribute it and/or modify +-- it under the terms of the GNU Affero General Public License as +-- published by the Free Software Foundation, either version 3 of the +-- License, or (at your option) any later version. +-- +-- See the GNU Affero General Public License for more details: +-- . + +-- Add a timestamp for when the sliding sync connection position was last used, +-- only updated with a small granularity. +-- +-- This should be NOT NULL, but we need to consider existing rows. In future we +-- may want to either backfill this or delete all rows with a NULL value (and +-- then make it NOT NULL). +ALTER TABLE sliding_sync_connections ADD COLUMN last_used_ts BIGINT; + +-- Note: We don't add an index on this column to allow HOT updates on PostgreSQL +-- to reduce the cost of the updates to the column. c.f. +-- https://www.postgresql.org/docs/current/storage-hot.html +-- +-- We do query this column directly to find expired connections, but we expect +-- that to be an infrequent operation and a sequential scan should be fine. 
diff --git a/synapse/types/handlers/sliding_sync.py b/synapse/types/handlers/sliding_sync.py index 494e3570d0..03b3bcb3ca 100644 --- a/synapse/types/handlers/sliding_sync.py +++ b/synapse/types/handlers/sliding_sync.py @@ -850,12 +850,16 @@ class PerConnectionState: since the last time you made a sync request. Attributes: + last_used_ts: The time this connection was last used, in milliseconds. + This is only accurate to `UPDATE_INTERVAL_LAST_USED_TS_MS`. rooms: The status of each room for the events stream. receipts: The status of each room for the receipts stream. room_configs: Map from room_id to the `RoomSyncConfig` of all rooms that we have previously sent down. """ + last_used_ts: int | None = None + rooms: RoomStatusMap[RoomStreamToken] = attr.Factory(RoomStatusMap) receipts: RoomStatusMap[MultiWriterStreamToken] = attr.Factory(RoomStatusMap) account_data: RoomStatusMap[int] = attr.Factory(RoomStatusMap) @@ -867,6 +871,7 @@ def get_mutable(self) -> "MutablePerConnectionState": room_configs = cast(MutableMapping[str, RoomSyncConfig], self.room_configs) return MutablePerConnectionState( + last_used_ts=self.last_used_ts, rooms=self.rooms.get_mutable(), receipts=self.receipts.get_mutable(), account_data=self.account_data.get_mutable(), @@ -875,6 +880,7 @@ def copy(self) -> "PerConnectionState": return PerConnectionState( + last_used_ts=self.last_used_ts, rooms=self.rooms.copy(), receipts=self.receipts.copy(), account_data=self.account_data.copy(), @@ -889,6 +895,8 @@ def __len__(self) -> int: class MutablePerConnectionState(PerConnectionState): """A mutable version of `PerConnectionState`""" + last_used_ts: int | None + rooms: MutableRoomStatusMap[RoomStreamToken] receipts: MutableRoomStatusMap[MultiWriterStreamToken] account_data: MutableRoomStatusMap[int] diff --git a/synapse/util/constants.py b/synapse/util/constants.py index 7a3d073df5..f4491b5885 100644 --- a/synapse/util/constants.py +++ b/synapse/util/constants.py @@ -18,5 +18,6 @@ # readability and catching bugs. ONE_MINUTE_SECONDS = 60 ONE_HOUR_SECONDS = 60 * ONE_MINUTE_SECONDS +ONE_DAY_SECONDS = 24 * ONE_HOUR_SECONDS MILLISECONDS_PER_SECOND = 1000 diff --git a/tests/rest/client/sliding_sync/test_connection_tracking.py b/tests/rest/client/sliding_sync/test_connection_tracking.py index 16d13fcc86..cdf63317e3 100644 --- a/tests/rest/client/sliding_sync/test_connection_tracking.py +++ b/tests/rest/client/sliding_sync/test_connection_tracking.py @@ -12,6 +12,7 @@ # .
# import logging +from unittest.mock import patch from parameterized import parameterized, parameterized_class @@ -19,8 +20,11 @@ import synapse.rest.admin from synapse.api.constants import EventTypes +from synapse.api.errors import Codes +from synapse.handlers.sliding_sync import room_lists from synapse.rest.client import login, room, sync from synapse.server import HomeServer +from synapse.storage.databases.main.sliding_sync import CONNECTION_EXPIRY_MS from synapse.util.clock import Clock from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase @@ -395,3 +399,107 @@ def test_rooms_timeline_incremental_sync_NEVER(self) -> None: ) self.assertEqual(response_body["rooms"][room_id1]["limited"], True) self.assertEqual(response_body["rooms"][room_id1]["initial"], True) + + @patch("synapse.handlers.sliding_sync.room_lists.NUM_ROOMS_THRESHOLD", new=5) + def test_sliding_sync_connection_expires_with_too_much_data(self) -> None: + """ + Test that if we have too much data to send down for incremental sync, + we expire the connection and ask the client to do a full resync. + + Connections are only expired if they have not been used for a minimum + amount of time (MINIMUM_NOT_USED_AGE_EXPIRY_MS) to avoid expiring + connections that are actively being used. + """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Create enough rooms that we can later trigger the too much data case + room_ids = [] + for _ in range(room_lists.NUM_ROOMS_THRESHOLD + 2): + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id, user1_id, tok=user1_tok) + room_ids.append(room_id) + + # Make sure we don't hit ratelimits + self.reactor.advance(60 * 1000) + + # Make the Sliding Sync request + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1000]], + "required_state": [], + "timeline_limit": 1, + } + } + } + + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Check we got all the rooms down + for room_id in room_ids: + self.assertIn(room_id, response_body["rooms"]) + + # Send a lot of events to cause the connection to expire + for room_id in room_ids: + self.helper.send(room_id, "msg", tok=user2_tok) + + # If we don't advance the clock then we won't expire the connection. + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Send some more events. + for room_id in room_ids: + self.helper.send(room_id, "msg", tok=user2_tok) + + # Advance the clock to ensure that the last_used_ts is old enough + self.reactor.advance(2 * room_lists.MINIMUM_NOT_USED_AGE_EXPIRY_MS / 1000) + + # This sync should now raise SlidingSyncUnknownPosition + channel = self.make_sync_request(sync_body, since=from_token, tok=user1_tok) + self.assertEqual(channel.code, 400) + self.assertEqual(channel.json_body["errcode"], Codes.UNKNOWN_POS) + + def test_sliding_sync_connection_expires_after_time(self) -> None: + """ + Test that if we don't use a sliding sync connection for a long time, + we expire the connection and ask the client to do a full resync. 
+ """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id, user1_id, tok=user1_tok) + + # Make the Sliding Sync request + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1000]], + "required_state": [], + "timeline_limit": 1, + } + } + } + + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # We can keep syncing so long as the interval between requests is less + # than CONNECTION_EXPIRY_MS + for _ in range(5): + self.reactor.advance(0.5 * CONNECTION_EXPIRY_MS / 1000) + + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # ... but if we wait too long, the connection expires + self.reactor.advance(1 + CONNECTION_EXPIRY_MS / 1000) + + # This sync should now raise SlidingSyncUnknownPosition + channel = self.make_sync_request(sync_body, since=from_token, tok=user1_tok) + self.assertEqual(channel.code, 400) + self.assertEqual(channel.json_body["errcode"], Codes.UNKNOWN_POS) diff --git a/tests/rest/client/sliding_sync/test_sliding_sync.py b/tests/rest/client/sliding_sync/test_sliding_sync.py index c27a712088..bcd22d15ca 100644 --- a/tests/rest/client/sliding_sync/test_sliding_sync.py +++ b/tests/rest/client/sliding_sync/test_sliding_sync.py @@ -46,7 +46,7 @@ from synapse.util.stringutils import random_string from tests import unittest -from tests.server import TimedOutException +from tests.server import FakeChannel, TimedOutException from tests.test_utils.event_injection import create_event logger = logging.getLogger(__name__) @@ -80,12 +80,10 @@ def default_config(self) -> JsonDict: config["experimental_features"] = {"msc3575_enabled": True} return config - def do_sync( + def make_sync_request( self, sync_body: JsonDict, *, since: str | None = None, tok: str - ) -> tuple[JsonDict, str]: - """Do a sliding sync request with given body. - - Asserts the request was successful. + ) -> FakeChannel: + """Make a sliding sync request with given body. Attributes: sync_body: The full request body to use @@ -106,6 +104,24 @@ def do_sync( content=sync_body, access_token=tok, ) + return channel + + def do_sync( + self, sync_body: JsonDict, *, since: str | None = None, tok: str + ) -> tuple[JsonDict, str]: + """Do a sliding sync request with given body. + + Asserts the request was successful. + + Attributes: + sync_body: The full request body to use + since: Optional since token + tok: Access token to use + + Returns: + A tuple of the response body and the `pos` field. + """ + channel = self.make_sync_request(sync_body, since=since, tok=tok) self.assertEqual(channel.code, 200, channel.json_body) return channel.json_body, channel.json_body["pos"] From b7e592a88c0f8c0b2ceaa9e88c53de661d22e6f2 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 25 Nov 2025 14:09:48 +0000 Subject: [PATCH 11/47] Allow `ruff` to auto-fix trailing spaces in multi-line comments (#19221) --- changelog.d/19221.misc | 1 + pyproject.toml | 2 ++ 2 files changed, 3 insertions(+) create mode 100644 changelog.d/19221.misc diff --git a/changelog.d/19221.misc b/changelog.d/19221.misc new file mode 100644 index 0000000000..d1faf9cb72 --- /dev/null +++ b/changelog.d/19221.misc @@ -0,0 +1 @@ +Auto-fix trailing spaces in multi-line strings and comments when running the lint script. 
\ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index 3e23823362..b795cba238 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -269,6 +269,8 @@ extend-safe-fixes = [ "UP007", # pyupgrade rules compatible with Python >= 3.10 "UP045", + # Allow ruff to automatically fix trailing spaces within a multi-line string/comment. + "W293" ] [tool.ruff.lint.isort] From ba65d8c351245090b87f3004ab64d708e6a08e67 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Tue, 25 Nov 2025 18:03:33 +0000 Subject: [PATCH 12/47] Put MSC2666 endpoint behind an experimental flag (#19219) --- changelog.d/19219.misc | 1 + docs/upgrade.md | 11 +++++++++++ synapse/config/experimental.py | 3 +++ synapse/rest/client/mutual_rooms.py | 3 ++- synapse/rest/client/versions.py | 2 +- tests/rest/client/test_mutual_rooms.py | 21 +++++++++++++++++++++ 6 files changed, 39 insertions(+), 2 deletions(-) create mode 100644 changelog.d/19219.misc diff --git a/changelog.d/19219.misc b/changelog.d/19219.misc new file mode 100644 index 0000000000..8355729358 --- /dev/null +++ b/changelog.d/19219.misc @@ -0,0 +1 @@ +Require an experimental feature flag to be enabled in order for the unstable [MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666) endpoint (`/_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms`) to be available. \ No newline at end of file diff --git a/docs/upgrade.md b/docs/upgrade.md index 20b7e952b2..350b71fe47 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -117,6 +117,17 @@ each upgrade are complete before moving on to the next upgrade, to avoid stacking them up. You can monitor the currently running background updates with [the Admin API](usage/administration/admin_api/background_updates.html#status). +# Upgrading to v1.144.0 + +## Unstable mutual rooms endpoint is now behind an experimental feature flag + +The unstable mutual rooms endpoint from +[MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666) +(`/_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms`) is now +disabled by default. If you rely on this unstable endpoint, you must now set +`experimental_features.msc2666_enabled: true` in your configuration to keep +using it. + # Upgrading to v1.143.0 ## Dropping support for PostgreSQL 13 diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 52c3ec0da2..566071eef3 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -438,6 +438,9 @@ def read_config( # previously calculated push actions. self.msc2654_enabled: bool = experimental.get("msc2654_enabled", False) + # MSC2666: Query mutual rooms between two users. 
+ self.msc2666_enabled: bool = experimental.get("msc2666_enabled", False) + # MSC2815 (allow room moderators to view redacted event content) self.msc2815_enabled: bool = experimental.get("msc2815_enabled", False) diff --git a/synapse/rest/client/mutual_rooms.py b/synapse/rest/client/mutual_rooms.py index bda6ed1f70..3e5316c4b7 100644 --- a/synapse/rest/client/mutual_rooms.py +++ b/synapse/rest/client/mutual_rooms.py @@ -90,4 +90,5 @@ async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: - UserMutualRoomsServlet(hs).register(http_server) + if hs.config.experimental.msc2666_enabled: + UserMutualRoomsServlet(hs).register(http_server) diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index dee2cdb637..a0178e473d 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -124,7 +124,7 @@ async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: # Implements additional endpoints as described in MSC2432 "org.matrix.msc2432": True, # Implements additional endpoints as described in MSC2666 - "uk.half-shot.msc2666.query_mutual_rooms": True, + "uk.half-shot.msc2666.query_mutual_rooms": self.config.experimental.msc2666_enabled, # Whether new rooms will be set to encrypted or not (based on presets). "io.element.e2ee_forced.public": self.e2ee_forced_public, "io.element.e2ee_forced.private": self.e2ee_forced_private, diff --git a/tests/rest/client/test_mutual_rooms.py b/tests/rest/client/test_mutual_rooms.py index 8580d09006..ea063707aa 100644 --- a/tests/rest/client/test_mutual_rooms.py +++ b/tests/rest/client/test_mutual_rooms.py @@ -43,6 +43,12 @@ class UserMutualRoomsTest(unittest.HomeserverTestCase): mutual_rooms.register_servlets, ] + def default_config(self) -> dict: + config = super().default_config() + experimental = config.setdefault("experimental_features", {}) + experimental.setdefault("msc2666_enabled", True) + return config + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config = self.default_config() return self.setup_test_homeserver(config=config) @@ -58,6 +64,21 @@ def _get_mutual_rooms(self, token: str, other_user: str) -> FakeChannel: access_token=token, ) + @unittest.override_config({"experimental_features": {"msc2666_enabled": False}}) + def test_mutual_rooms_no_experimental_flag(self) -> None: + """ + The endpoint should 404 if the experimental flag is not enabled. + """ + # Register a user. + u1 = self.register_user("user1", "pass") + u1_token = self.login(u1, "pass") + + # Check that we're unable to query the endpoint due to the endpoint + # being unrecognised. 
+ channel = self._get_mutual_rooms(u1_token, "@not-used:test") + self.assertEqual(404, channel.code, channel.result) + self.assertEqual("M_UNRECOGNIZED", channel.json_body["errcode"], channel.result) + def test_shared_room_list_public(self) -> None: """ A room should show up in the shared list of rooms between two users From 2741ead56903dafd33126a7b970222f9bc9c202f Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 26 Nov 2025 10:32:39 +0000 Subject: [PATCH 13/47] Stop building wheels for MacOS (#19225) --- changelog.d/19225.removal | 1 + pyproject.toml | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelog.d/19225.removal diff --git a/changelog.d/19225.removal b/changelog.d/19225.removal new file mode 100644 index 0000000000..bed5db07e8 --- /dev/null +++ b/changelog.d/19225.removal @@ -0,0 +1 @@ +Stop building release wheels for MacOS. \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index b483c903f6..fabc483b3c 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -395,7 +395,8 @@ build-backend = "poetry.core.masonry.api" # We skip: # - free-threaded cpython builds: these are not currently supported. # - i686: We don't support 32-bit platforms. -skip = "cp3??t-* *i686*" +# - *macosx*: we don't support building wheels for MacOS. +skip = "cp3??t-* *i686* *macosx*" # Enable non-default builds. See the list of available options: # https://cibuildwheel.pypa.io/en/stable/options#enable # From b74c29f6948d9f2de971fe4bf61798a58b2796dc Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Wed, 26 Nov 2025 10:56:59 +0000 Subject: [PATCH 14/47] Move towards a dedicated `Duration` class (#19223) We have various constants to try and avoid mistyping of durations, e.g. `ONE_HOUR_SECONDS * MILLISECONDS_PER_SECOND`, however this can get a little verbose and doesn't help with typing. Instead, let's move towards a dedicated `Duration` class (basically a [`timedelta`](https://docs.python.org/3/library/datetime.html#timedelta-objects) with helper methods). This PR introduces the new types and converts all usages of the existing constants with it. Future PRs may work to move the clock methods to also use it (e.g. `call_later` and `looping_call`). Reviewable commit-by-commit. --- changelog.d/19223.misc | 1 + synapse/app/phone_stats_home.py | 18 ++++----- synapse/handlers/sliding_sync/room_lists.py | 10 ++--- synapse/handlers/worker_lock.py | 6 +-- synapse/rest/client/transactions.py | 7 ++-- synapse/storage/databases/main/deviceinbox.py | 13 +++--- .../storage/databases/main/sliding_sync.py | 22 +++++----- synapse/util/__init__.py | 9 ----- synapse/util/background_queue.py | 6 +-- synapse/util/constants.py | 23 ----------- synapse/util/duration.py | 40 +++++++++++++++++++ tests/metrics/test_phone_home_stats.py | 4 +- .../sliding_sync/test_connection_tracking.py | 12 +++--- tests/rest/client/test_transactions.py | 9 ++--- .../databases/main/test_deviceinbox.py | 8 ++-- 15 files changed, 95 insertions(+), 93 deletions(-) create mode 100644 changelog.d/19223.misc delete mode 100644 synapse/util/constants.py create mode 100644 synapse/util/duration.py diff --git a/changelog.d/19223.misc b/changelog.d/19223.misc new file mode 100644 index 0000000000..8caebead72 --- /dev/null +++ b/changelog.d/19223.misc @@ -0,0 +1 @@ +Move towards using a dedicated `Duration` type. 
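To see what call sites gain from this, here is a small usage sketch of the new type (the `Duration` API matches the `synapse/util/duration.py` file added below; the asserts are illustrative rather than taken from the patch):

```
from datetime import timedelta

from synapse.util.duration import Duration

CONNECTION_EXPIRY = Duration(days=7)
UPDATE_INTERVAL = Duration(minutes=5)

# Millisecond-based APIs such as `clock.looping_call` get an explicit,
# self-describing conversion instead of a bare
# `7 * ONE_DAY_SECONDS * MILLISECONDS_PER_SECOND` product.
assert CONNECTION_EXPIRY.as_millis() == 7 * 24 * 60 * 60 * 1000
assert UPDATE_INTERVAL.as_secs() == 300

# Because `Duration` subclasses `timedelta`, arithmetic and comparisons keep
# working (note that `2 * UPDATE_INTERVAL` returns a plain `timedelta`), which
# is what lets sanity checks such as the module-level assert in `room_lists.py`
# below compare durations directly.
assert 2 * UPDATE_INTERVAL < Duration(hours=1)
assert isinstance(2 * UPDATE_INTERVAL, timedelta)
```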
diff --git a/synapse/app/phone_stats_home.py b/synapse/app/phone_stats_home.py index 13a0e3db7c..d278e30850 100644 --- a/synapse/app/phone_stats_home.py +++ b/synapse/app/phone_stats_home.py @@ -30,24 +30,20 @@ from synapse.metrics import SERVER_NAME_LABEL from synapse.types import JsonDict -from synapse.util.constants import ( - MILLISECONDS_PER_SECOND, - ONE_HOUR_SECONDS, - ONE_MINUTE_SECONDS, -) +from synapse.util.duration import Duration if TYPE_CHECKING: from synapse.server import HomeServer logger = logging.getLogger("synapse.app.homeserver") -INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME_SECONDS = 5 * ONE_MINUTE_SECONDS +INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME = Duration(minutes=5) """ We wait 5 minutes to send the first set of stats as the server can be quite busy the first few minutes """ -PHONE_HOME_INTERVAL_SECONDS = 3 * ONE_HOUR_SECONDS +PHONE_HOME_INTERVAL = Duration(hours=3) """ Phone home stats are sent every 3 hours """ @@ -222,13 +218,13 @@ def performance_stats_init() -> None: # table will decrease clock.looping_call( hs.get_datastores().main.generate_user_daily_visits, - 5 * ONE_MINUTE_SECONDS * MILLISECONDS_PER_SECOND, + Duration(minutes=5).as_millis(), ) # monthly active user limiting functionality clock.looping_call( hs.get_datastores().main.reap_monthly_active_users, - ONE_HOUR_SECONDS * MILLISECONDS_PER_SECOND, + Duration(hours=1).as_millis(), ) hs.get_datastores().main.reap_monthly_active_users() @@ -274,7 +270,7 @@ async def _generate_monthly_active_users() -> None: logger.info("Scheduling stats reporting for 3 hour intervals") clock.looping_call( phone_stats_home, - PHONE_HOME_INTERVAL_SECONDS * MILLISECONDS_PER_SECOND, + PHONE_HOME_INTERVAL.as_millis(), hs, stats, ) @@ -289,7 +285,7 @@ async def _generate_monthly_active_users() -> None: # We wait 5 minutes to send the first set of stats as the server can # be quite busy the first few minutes clock.call_later( - INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME_SECONDS, + INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME.as_secs(), phone_stats_home, hs, stats, diff --git a/synapse/handlers/sliding_sync/room_lists.py b/synapse/handlers/sliding_sync/room_lists.py index fa4ff22b64..8969d91583 100644 --- a/synapse/handlers/sliding_sync/room_lists.py +++ b/synapse/handlers/sliding_sync/room_lists.py @@ -39,7 +39,7 @@ from synapse.events import StrippedStateEvent from synapse.events.utils import parse_stripped_state_event from synapse.logging.opentracing import start_active_span, trace -from synapse.storage.databases.main.sliding_sync import UPDATE_INTERVAL_LAST_USED_TS_MS +from synapse.storage.databases.main.sliding_sync import UPDATE_INTERVAL_LAST_USED_TS from synapse.storage.databases.main.state import ( ROOM_UNKNOWN_SENTINEL, Sentinel as StateSentinel, @@ -70,7 +70,7 @@ ) from synapse.types.state import StateFilter from synapse.util import MutableOverlayMapping -from synapse.util.constants import MILLISECONDS_PER_SECOND, ONE_HOUR_SECONDS +from synapse.util.duration import Duration from synapse.util.sentinel import Sentinel if TYPE_CHECKING: @@ -85,7 +85,7 @@ # tight loops with clients that request lots of data at once. # # c.f. `NUM_ROOMS_THRESHOLD`. These values are somewhat arbitrary picked. -MINIMUM_NOT_USED_AGE_EXPIRY_MS = ONE_HOUR_SECONDS * MILLISECONDS_PER_SECOND +MINIMUM_NOT_USED_AGE_EXPIRY = Duration(hours=1) # How many rooms with updates we allow before we consider the connection expired # due to too many rooms to send. 
@@ -99,7 +99,7 @@ # connection even if it is actively being used (and we're just not updating the # DB frequently enough). We arbitrarily double the update interval to give some # wiggle room. -assert 2 * UPDATE_INTERVAL_LAST_USED_TS_MS < MINIMUM_NOT_USED_AGE_EXPIRY_MS +assert 2 * UPDATE_INTERVAL_LAST_USED_TS < MINIMUM_NOT_USED_AGE_EXPIRY # Helper definition for the types that we might return. We do this to avoid # copying data between types (which can be expensive for many rooms). @@ -913,7 +913,7 @@ async def _filter_relevant_rooms_to_send( if ( last_sync_ts is not None and (self._clock.time_msec() - last_sync_ts) - > MINIMUM_NOT_USED_AGE_EXPIRY_MS + > MINIMUM_NOT_USED_AGE_EXPIRY.as_millis() ): raise SlidingSyncUnknownPosition() diff --git a/synapse/handlers/worker_lock.py b/synapse/handlers/worker_lock.py index 0e3fab292f..4f9c632f5c 100644 --- a/synapse/handlers/worker_lock.py +++ b/synapse/handlers/worker_lock.py @@ -39,7 +39,7 @@ from synapse.storage.databases.main.lock import Lock, LockStore from synapse.util.async_helpers import timeout_deferred from synapse.util.clock import Clock -from synapse.util.constants import ONE_MINUTE_SECONDS +from synapse.util.duration import Duration if TYPE_CHECKING: from synapse.logging.opentracing import opentracing @@ -276,7 +276,7 @@ async def __aexit__( def _get_next_retry_interval(self) -> float: next = self._retry_interval self._retry_interval = max(5, next * 2) - if self._retry_interval > 10 * ONE_MINUTE_SECONDS: # >7 iterations + if self._retry_interval > Duration(minutes=10).as_secs(): # >7 iterations logger.warning( "Lock timeout is getting excessive: %ss. There may be a deadlock.", self._retry_interval, @@ -363,7 +363,7 @@ async def __aexit__( def _get_next_retry_interval(self) -> float: next = self._retry_interval self._retry_interval = max(5, next * 2) - if self._retry_interval > 10 * ONE_MINUTE_SECONDS: # >7 iterations + if self._retry_interval > Duration(minutes=10).as_secs(): # >7 iterations logger.warning( "Lock timeout is getting excessive: %ss. There may be a deadlock.", self._retry_interval, diff --git a/synapse/rest/client/transactions.py b/synapse/rest/client/transactions.py index 4b3656a597..0c1ac1f11b 100644 --- a/synapse/rest/client/transactions.py +++ b/synapse/rest/client/transactions.py @@ -34,13 +34,14 @@ from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.types import JsonDict, Requester from synapse.util.async_helpers import ObservableDeferred +from synapse.util.duration import Duration if TYPE_CHECKING: from synapse.server import HomeServer logger = logging.getLogger(__name__) -CLEANUP_PERIOD_MS = 1000 * 60 * 30 # 30 mins +CLEANUP_PERIOD = Duration(minutes=30) P = ParamSpec("P") @@ -56,7 +57,7 @@ def __init__(self, hs: "HomeServer"): ] = {} # Try to clean entries every 30 mins. This means entries will exist # for at *LEAST* 30 mins, and at *MOST* 60 mins. 
- self.clock.looping_call(self._cleanup, CLEANUP_PERIOD_MS) + self.clock.looping_call(self._cleanup, CLEANUP_PERIOD.as_millis()) def _get_transaction_key(self, request: IRequest, requester: Requester) -> Hashable: """A helper function which returns a transaction key that can be used @@ -145,5 +146,5 @@ def _cleanup(self) -> None: now = self.clock.time_msec() for key in list(self.transactions): ts = self.transactions[key][1] - if now > (ts + CLEANUP_PERIOD_MS): # after cleanup period + if now > (ts + CLEANUP_PERIOD.as_millis()): # after cleanup period del self.transactions[key] diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index a12411d723..28e706d5c3 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -48,9 +48,9 @@ ) from synapse.storage.util.id_generators import MultiWriterIdGenerator from synapse.types import JsonDict, StrCollection -from synapse.util import Duration from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.caches.stream_change_cache import StreamChangeCache +from synapse.util.duration import Duration from synapse.util.iterutils import batch_iter from synapse.util.json import json_encoder from synapse.util.stringutils import parse_and_validate_server_name @@ -62,10 +62,10 @@ # How long to keep messages in the device federation inbox before deleting them. -DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS = 7 * Duration.DAY_MS +DEVICE_FEDERATION_INBOX_CLEANUP_DELAY = Duration(days=7) # How often to run the task to clean up old device_federation_inbox rows. -DEVICE_FEDERATION_INBOX_CLEANUP_INTERVAL_MS = 5 * Duration.MINUTE_MS +DEVICE_FEDERATION_INBOX_CLEANUP_INTERVAL = Duration(minutes=5) # Update name for the device federation inbox received timestamp index. DEVICE_FEDERATION_INBOX_RECEIVED_INDEX_UPDATE = ( @@ -152,7 +152,7 @@ def __init__( if hs.config.worker.run_background_tasks: self.clock.looping_call( run_as_background_process, - DEVICE_FEDERATION_INBOX_CLEANUP_INTERVAL_MS, + DEVICE_FEDERATION_INBOX_CLEANUP_INTERVAL.as_millis(), "_delete_old_federation_inbox_rows", self.server_name, self._delete_old_federation_inbox_rows, @@ -996,9 +996,10 @@ async def _delete_old_federation_inbox_rows(self, batch_size: int = 1000) -> Non def _delete_old_federation_inbox_rows_txn(txn: LoggingTransaction) -> bool: # We delete at most 100 rows that are older than - # DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS + # DEVICE_FEDERATION_INBOX_CLEANUP_DELAY delete_before_ts = ( - self.clock.time_msec() - DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS + self.clock.time_msec() + - DEVICE_FEDERATION_INBOX_CLEANUP_DELAY.as_millis() ) sql = """ WITH to_delete AS ( diff --git a/synapse/storage/databases/main/sliding_sync.py b/synapse/storage/databases/main/sliding_sync.py index 8cd3de8f40..6f87308cde 100644 --- a/synapse/storage/databases/main/sliding_sync.py +++ b/synapse/storage/databases/main/sliding_sync.py @@ -37,12 +37,7 @@ RoomSyncConfig, ) from synapse.util.caches.descriptors import cached -from synapse.util.constants import ( - MILLISECONDS_PER_SECOND, - ONE_DAY_SECONDS, - ONE_HOUR_SECONDS, - ONE_MINUTE_SECONDS, -) +from synapse.util.duration import Duration from synapse.util.json import json_encoder if TYPE_CHECKING: @@ -57,14 +52,14 @@ # How often to update the `last_used_ts` column on # `sliding_sync_connections` when the client uses a connection # position. We don't want to update it on every use to avoid excessive # writes, but we want it to be reasonably up-to-date to help with # cleaning up old connection positions.
-UPDATE_INTERVAL_LAST_USED_TS_MS = 5 * ONE_MINUTE_SECONDS * MILLISECONDS_PER_SECOND +UPDATE_INTERVAL_LAST_USED_TS = Duration(minutes=5) # Time in milliseconds the connection hasn't been used before we consider it # expired and delete it. -CONNECTION_EXPIRY_MS = 7 * ONE_DAY_SECONDS * MILLISECONDS_PER_SECOND +CONNECTION_EXPIRY = Duration(days=7) # How often we run the background process to delete old sliding sync connections. -CONNECTION_EXPIRY_FREQUENCY_MS = ONE_HOUR_SECONDS * MILLISECONDS_PER_SECOND +CONNECTION_EXPIRY_FREQUENCY = Duration(hours=1) class SlidingSyncStore(SQLBaseStore): @@ -101,7 +96,7 @@ def __init__( if self.hs.config.worker.run_background_tasks: self.clock.looping_call( self.delete_old_sliding_sync_connections, - CONNECTION_EXPIRY_FREQUENCY_MS, + CONNECTION_EXPIRY_FREQUENCY.as_millis(), ) async def get_latest_bump_stamp_for_room( @@ -430,7 +425,10 @@ def _get_and_clear_connection_positions_txn( # Update the `last_used_ts` if it's due to be updated. We don't update # every time to avoid excessive writes. now = self.clock.time_msec() - if last_used_ts is None or now - last_used_ts > UPDATE_INTERVAL_LAST_USED_TS_MS: + if ( + last_used_ts is None + or now - last_used_ts > UPDATE_INTERVAL_LAST_USED_TS.as_millis() + ): self.db_pool.simple_update_txn( txn, table="sliding_sync_connections", @@ -532,7 +530,7 @@ def _get_and_clear_connection_positions_txn( @wrap_as_background_process("delete_old_sliding_sync_connections") async def delete_old_sliding_sync_connections(self) -> None: """Delete sliding sync connections that have not been used for a long time.""" - cutoff_ts = self.clock.time_msec() - CONNECTION_EXPIRY_MS + cutoff_ts = self.clock.time_msec() - CONNECTION_EXPIRY.as_millis() def delete_old_sliding_sync_connections_txn(txn: LoggingTransaction) -> None: sql = """ diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index f937080f9e..fbd01914d5 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -41,15 +41,6 @@ logger = logging.getLogger(__name__) -class Duration: - """Helper class that holds constants for common time durations in - milliseconds.""" - - MINUTE_MS = 60 * 1000 - HOUR_MS = 60 * MINUTE_MS - DAY_MS = 24 * HOUR_MS - - def unwrapFirstError(failure: Failure) -> Failure: # Deprecated: you probably just want to catch defer.FirstError and reraise # the subFailure's value, which will do a better job of preserving stacktraces. diff --git a/synapse/util/background_queue.py b/synapse/util/background_queue.py index 93ffd9f271..dfea7247f4 100644 --- a/synapse/util/background_queue.py +++ b/synapse/util/background_queue.py @@ -27,7 +27,7 @@ from twisted.internet import defer from synapse.util.async_helpers import DeferredEvent -from synapse.util.constants import MILLISECONDS_PER_SECOND +from synapse.util.duration import Duration if TYPE_CHECKING: from synapse.server import HomeServer @@ -67,7 +67,7 @@ def __init__( self._hs = hs self._name = name self._callback = callback - self._timeout_ms = timeout_ms + self._timeout_ms = Duration(milliseconds=timeout_ms) # The queue of items to process. self._queue: collections.deque[T] = collections.deque() @@ -125,7 +125,7 @@ async def _process_queue(self) -> None: # just loop round, clear the event, recheck the queue, and then # wait here again. 
new_data = await self._wakeup_event.wait( - timeout_seconds=self._timeout_ms / MILLISECONDS_PER_SECOND + timeout_seconds=self._timeout_ms.as_secs() ) if not new_data: # Timed out waiting for new data, so exit the loop diff --git a/synapse/util/constants.py b/synapse/util/constants.py deleted file mode 100644 index f4491b5885..0000000000 --- a/synapse/util/constants.py +++ /dev/null @@ -1,23 +0,0 @@ -# -# This file is licensed under the Affero General Public License (AGPL) version 3. -# -# Copyright (C) 2025 New Vector, Ltd -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as -# published by the Free Software Foundation, either version 3 of the -# License, or (at your option) any later version. -# -# See the GNU Affero General Public License for more details: -# . -# - -# Time-based constants. -# -# Laying these out incrementally, even if only some are required, helps with -# readability and catching bugs. -ONE_MINUTE_SECONDS = 60 -ONE_HOUR_SECONDS = 60 * ONE_MINUTE_SECONDS -ONE_DAY_SECONDS = 24 * ONE_HOUR_SECONDS - -MILLISECONDS_PER_SECOND = 1000 diff --git a/synapse/util/duration.py b/synapse/util/duration.py new file mode 100644 index 0000000000..3419f6dda6 --- /dev/null +++ b/synapse/util/duration.py @@ -0,0 +1,40 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2025 Element Creations Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# . +# + +from datetime import timedelta + +# Constant so we don't keep creating new timedelta objects when calling +# `.as_millis()`. +_ONE_MILLISECOND = timedelta(milliseconds=1) + + +class Duration(timedelta): + """A subclass of timedelta that adds a convenience method for getting + the duration in milliseconds. + + Examples: + + ``` + duration = Duration(hours=2) + print(duration.as_millis()) # Outputs: 7200000 + ``` + """ + + def as_millis(self) -> int: + """Returns the duration in milliseconds.""" + return int(self / _ONE_MILLISECOND) + + def as_secs(self) -> int: + """Returns the duration in seconds.""" + return int(self.total_seconds()) diff --git a/tests/metrics/test_phone_home_stats.py b/tests/metrics/test_phone_home_stats.py index 4462385dae..dfb88588cd 100644 --- a/tests/metrics/test_phone_home_stats.py +++ b/tests/metrics/test_phone_home_stats.py @@ -17,7 +17,7 @@ from twisted.internet.testing import MemoryReactor from synapse.app.phone_stats_home import ( - PHONE_HOME_INTERVAL_SECONDS, + PHONE_HOME_INTERVAL, start_phone_stats_home, ) from synapse.rest import admin, login, register, room @@ -78,7 +78,7 @@ def prepare( def _get_latest_phone_home_stats(self) -> JsonDict: # Wait for `phone_stats_home` to be called again + a healthy margin (50s). 
- self.reactor.advance(2 * PHONE_HOME_INTERVAL_SECONDS + 50) + self.reactor.advance(2 * PHONE_HOME_INTERVAL.as_secs() + 50) # Extract the reported stats from our http client mock mock_calls = self.put_json_mock.call_args_list diff --git a/tests/rest/client/sliding_sync/test_connection_tracking.py b/tests/rest/client/sliding_sync/test_connection_tracking.py index cdf63317e3..44e7fa4726 100644 --- a/tests/rest/client/sliding_sync/test_connection_tracking.py +++ b/tests/rest/client/sliding_sync/test_connection_tracking.py @@ -24,7 +24,7 @@ from synapse.handlers.sliding_sync import room_lists from synapse.rest.client import login, room, sync from synapse.server import HomeServer -from synapse.storage.databases.main.sliding_sync import CONNECTION_EXPIRY_MS +from synapse.storage.databases.main.sliding_sync import CONNECTION_EXPIRY from synapse.util.clock import Clock from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase @@ -407,7 +407,7 @@ def test_sliding_sync_connection_expires_with_too_much_data(self) -> None: we expire the connection and ask the client to do a full resync. Connections are only expired if they have not been used for a minimum - amount of time (MINIMUM_NOT_USED_AGE_EXPIRY_MS) to avoid expiring + amount of time (MINIMUM_NOT_USED_AGE_EXPIRY) to avoid expiring connections that are actively being used. """ @@ -455,7 +455,7 @@ def test_sliding_sync_connection_expires_with_too_much_data(self) -> None: self.helper.send(room_id, "msg", tok=user2_tok) # Advance the clock to ensure that the last_used_ts is old enough - self.reactor.advance(2 * room_lists.MINIMUM_NOT_USED_AGE_EXPIRY_MS / 1000) + self.reactor.advance(2 * room_lists.MINIMUM_NOT_USED_AGE_EXPIRY.as_secs()) # This sync should now raise SlidingSyncUnknownPosition channel = self.make_sync_request(sync_body, since=from_token, tok=user1_tok) @@ -490,14 +490,14 @@ def test_sliding_sync_connection_expires_after_time(self) -> None: _, from_token = self.do_sync(sync_body, tok=user1_tok) # We can keep syncing so long as the interval between requests is less - # than CONNECTION_EXPIRY_MS + # than CONNECTION_EXPIRY for _ in range(5): - self.reactor.advance(0.5 * CONNECTION_EXPIRY_MS / 1000) + self.reactor.advance(0.5 * CONNECTION_EXPIRY.as_secs()) _, from_token = self.do_sync(sync_body, tok=user1_tok) # ... but if we wait too long, the connection expires - self.reactor.advance(1 + CONNECTION_EXPIRY_MS / 1000) + self.reactor.advance(1 + CONNECTION_EXPIRY.as_secs()) # This sync should now raise SlidingSyncUnknownPosition channel = self.make_sync_request(sync_body, since=from_token, tok=user1_tok) diff --git a/tests/rest/client/test_transactions.py b/tests/rest/client/test_transactions.py index 64d22d485a..0407bb5347 100644 --- a/tests/rest/client/test_transactions.py +++ b/tests/rest/client/test_transactions.py @@ -26,12 +26,9 @@ from twisted.internet import defer, reactor as _reactor from synapse.logging.context import SENTINEL_CONTEXT, LoggingContext, current_context -from synapse.rest.client.transactions import CLEANUP_PERIOD_MS, HttpTransactionCache +from synapse.rest.client.transactions import CLEANUP_PERIOD, HttpTransactionCache from synapse.types import ISynapseReactor, JsonDict from synapse.util.clock import Clock -from synapse.util.constants import ( - MILLISECONDS_PER_SECOND, -) from tests import unittest from tests.server import get_clock @@ -187,7 +184,7 @@ def test_cleans_up(self) -> Generator["defer.Deferred[Any]", object, None]: ) # Advance time just under the cleanup period. 
# Should NOT have cleaned up yet - self.reactor.advance((CLEANUP_PERIOD_MS - 1) / MILLISECONDS_PER_SECOND) + self.reactor.advance(CLEANUP_PERIOD.as_secs() - 1) yield self.cache.fetch_or_execute_request( self.mock_request, self.mock_requester, cb, "an arg" @@ -196,7 +193,7 @@ def test_cleans_up(self) -> Generator["defer.Deferred[Any]", object, None]: cb.assert_called_once_with("an arg") # Advance time just after the cleanup period. - self.reactor.advance(2 / MILLISECONDS_PER_SECOND) + self.reactor.advance(2) yield self.cache.fetch_or_execute_request( self.mock_request, self.mock_requester, cb, "an arg" diff --git a/tests/storage/databases/main/test_deviceinbox.py b/tests/storage/databases/main/test_deviceinbox.py index dbf362a3cc..be585068fe 100644 --- a/tests/storage/databases/main/test_deviceinbox.py +++ b/tests/storage/databases/main/test_deviceinbox.py @@ -28,7 +28,7 @@ from synapse.rest.client import devices from synapse.server import HomeServer from synapse.storage.databases.main.deviceinbox import ( - DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS, + DEVICE_FEDERATION_INBOX_CLEANUP_DELAY, ) from synapse.util.clock import Clock @@ -191,7 +191,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.db_pool = self.store.db_pool # Advance time to ensure we are past the cleanup delay - self.reactor.advance(DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS * 2 / 1000) + self.reactor.advance(DEVICE_FEDERATION_INBOX_CLEANUP_DELAY.as_secs() * 2) def test_delete_old_federation_inbox_rows_skips_if_no_index(self) -> None: """Test that we don't delete rows if the index hasn't been created yet.""" @@ -245,7 +245,7 @@ def test_delete_old_federation_inbox_rows(self) -> None: ) ) - self.reactor.advance(2 * DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS / 1000) + self.reactor.advance(2 * DEVICE_FEDERATION_INBOX_CLEANUP_DELAY.as_secs()) # Insert new messages for i in range(5): @@ -293,7 +293,7 @@ def test_delete_old_federation_inbox_rows_batch_limit(self) -> None: ) # Advance time to ensure we are past the cleanup delay - self.reactor.advance(2 * DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS / 1000) + self.reactor.advance(2 * DEVICE_FEDERATION_INBOX_CLEANUP_DELAY.as_millis()) # Run the cleanup - it should delete in batches and sleep between them deferred = defer.ensureDeferred( From c928347779cfd1dd028354c4d50674514903b7df Mon Sep 17 00:00:00 2001 From: Richard van der Hoff <1389908+richvdh@users.noreply.github.com> Date: Wed, 26 Nov 2025 16:12:14 +0000 Subject: [PATCH 15/47] Implement MSC4380: Invite blocking (#19203) MSC4380 aims to be a simplified implementation of MSC4155; the hope is that we can get it specced and rolled out rapidly, so that we can resolve the fact that `matrix.org` has enabled MSC4155. The implementation leans heavily on what's already there for MSC4155. It has its own `experimental_features` flag. If both MSC4155 and MSC4380 are enabled, and a user has both configurations set, then we prioritise the MSC4380 one. Contributed wearing my :tophat: Spec Core Team hat. 
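As a rough sketch of the rule resolution described above (standalone Python; `msc4380_rule` is a hypothetical helper condensing `MSC4380InviteRulesConfig.from_account_data` and `get_invite_rule` from the diff that follows, not the code as merged):

```
from enum import Enum


class InviteRule(Enum):
    ALLOW = "allow"
    BLOCK = "block"
    IGNORE = "ignore"


def msc4380_rule(account_data: dict) -> InviteRule:
    # Only the exact string "block" blocks; a missing `default_action` or a
    # non-string value falls back to allowing invites. When a user has both
    # configs set, this MSC4380 setting is consulted before any MSC4155 one.
    if account_data.get("default_action") == "block":
        return InviteRule.BLOCK
    return InviteRule.ALLOW


assert msc4380_rule({"default_action": "block"}) is InviteRule.BLOCK
assert msc4380_rule({"default_action": 1}) is InviteRule.ALLOW  # non-string value
assert msc4380_rule({}) is InviteRule.ALLOW  # unset
```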
--- changelog.d/19203.feature | 1 + synapse/api/constants.py | 4 + synapse/api/errors.py | 2 +- synapse/config/experimental.py | 3 + synapse/rest/client/versions.py | 2 + .../storage/databases/main/account_data.py | 36 +++-- synapse/storage/invite_rule.py | 44 +++++- tests/handlers/test_room_member.py | 146 +++++++++++++++++- tests/storage/test_invite_rule.py | 28 ++-- 9 files changed, 239 insertions(+), 27 deletions(-) create mode 100644 changelog.d/19203.feature diff --git a/changelog.d/19203.feature b/changelog.d/19203.feature new file mode 100644 index 0000000000..d192781b20 --- /dev/null +++ b/changelog.d/19203.feature @@ -0,0 +1 @@ +Add experimental implementation of [MSC4380](https://github.com/matrix-org/matrix-spec-proposals/pull/4380) (invite blocking). diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 7a8f546d6b..d41e44b154 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -307,6 +307,10 @@ class AccountDataTypes: MSC4155_INVITE_PERMISSION_CONFIG: Final = ( "org.matrix.msc4155.invite_permission_config" ) + # MSC4380: Invite blocking + MSC4380_INVITE_PERMISSION_CONFIG: Final = ( + "org.matrix.msc4380.invite_permission_config" + ) # Synapse-specific behaviour. See "Client-Server API Extensions" documentation # in Admin API for more information. SYNAPSE_ADMIN_CLIENT_CONFIG: Final = "io.element.synapse.admin_client_config" diff --git a/synapse/api/errors.py b/synapse/api/errors.py index c4339ebef8..37b909a1a7 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -137,7 +137,7 @@ class Codes(str, Enum): PROFILE_TOO_LARGE = "M_PROFILE_TOO_LARGE" KEY_TOO_LARGE = "M_KEY_TOO_LARGE" - # Part of MSC4155 + # Part of MSC4155/MSC4380 INVITE_BLOCKED = "ORG.MATRIX.MSC4155.M_INVITE_BLOCKED" # Part of MSC4190 diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 566071eef3..dc5e096791 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -596,3 +596,6 @@ def read_config( # MSC4306: Thread Subscriptions # (and MSC4308: Thread Subscriptions extension to Sliding Sync) self.msc4306_enabled: bool = experimental.get("msc4306_enabled", False) + + # MSC4380: Invite blocking + self.msc4380_enabled: bool = experimental.get("msc4380_enabled", False) diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index a0178e473d..75f27c98de 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -182,6 +182,8 @@ async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: "org.matrix.msc4306": self.config.experimental.msc4306_enabled, # MSC4169: Backwards-compatible redaction sending using `/send` "com.beeper.msc4169": self.config.experimental.msc4169_enabled, + # MSC4380: Invite blocking + "org.matrix.msc4380": self.config.experimental.msc4380_enabled, }, }, ) diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index 15728cf618..71182cdab2 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -40,7 +40,12 @@ ) from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore from synapse.storage.databases.main.push_rule import PushRulesWorkerStore -from synapse.storage.invite_rule import InviteRulesConfig +from synapse.storage.invite_rule import ( + AllowAllInviteRulesConfig, + InviteRulesConfig, + MSC4155InviteRulesConfig, + MSC4380InviteRulesConfig, +) from synapse.storage.util.id_generators import
MultiWriterIdGenerator from synapse.types import JsonDict, JsonMapping from synapse.util.caches.descriptors import cached @@ -104,6 +109,7 @@ def __init__( ) self._msc4155_enabled = hs.config.experimental.msc4155_enabled + self._msc4380_enabled = hs.config.experimental.msc4380_enabled def get_max_account_data_stream_id(self) -> int: """Get the current max stream ID for account data stream @@ -562,20 +568,28 @@ async def ignored_users(self, user_id: str) -> frozenset[str]: async def get_invite_config_for_user(self, user_id: str) -> InviteRulesConfig: """ - Get the invite configuration for the current user. + Get the invite configuration for the given user. Args: - user_id: + user_id: The user whose invite configuration should be returned. """ + if self._msc4380_enabled: + data = await self.get_global_account_data_by_type_for_user( + user_id, AccountDataTypes.MSC4380_INVITE_PERMISSION_CONFIG + ) + # If the user has an MSC4380-style config setting, prioritise that + # above an MSC4155 one + if data is not None: + return MSC4380InviteRulesConfig.from_account_data(data) + + if self._msc4155_enabled: + data = await self.get_global_account_data_by_type_for_user( + user_id, AccountDataTypes.MSC4155_INVITE_PERMISSION_CONFIG + ) + if data is not None: + return MSC4155InviteRulesConfig(data) - if not self._msc4155_enabled: - # This equates to allowing all invites, as if the setting was off. - return InviteRulesConfig(None) - - data = await self.get_global_account_data_by_type_for_user( - user_id, AccountDataTypes.MSC4155_INVITE_PERMISSION_CONFIG - ) - return InviteRulesConfig(data) + return AllowAllInviteRulesConfig() async def get_admin_client_config_for_user(self, user_id: str) -> AdminClientConfig: """ diff --git a/synapse/storage/invite_rule.py b/synapse/storage/invite_rule.py index 3de77e8c21..489533a9f4 100644 --- a/synapse/storage/invite_rule.py +++ b/synapse/storage/invite_rule.py @@ -1,7 +1,9 @@ import logging +from abc import abstractmethod from enum import Enum from typing import Pattern +import attr from matrix_common.regex import glob_to_regex from synapse.types import JsonMapping, UserID @@ -18,9 +20,29 @@ class InviteRule(Enum): class InviteRulesConfig: - """Class to determine if a given user permits an invite from another user, and the action to take.""" + """An object encapsulating a given user's choices about whether to accept invites.""" - def __init__(self, account_data: JsonMapping | None): + @abstractmethod + def get_invite_rule(self, inviter_user_id: str) -> InviteRule: + """Get the invite rule that matches this user. Will return InviteRule.ALLOW if no rules match + + Args: + inviter_user_id: The user ID of the inviting user. 
+ """ + + +@attr.s(slots=True) +class AllowAllInviteRulesConfig(InviteRulesConfig): + """An `InviteRulesConfig` implementation which will accept all invites.""" + + def get_invite_rule(self, inviter_user_id: str) -> InviteRule: + return InviteRule.ALLOW + + +class MSC4155InviteRulesConfig(InviteRulesConfig): + """An object encapsulating [MSC4155](https://github.com/matrix-org/matrix-spec-proposals/pull/4155) invite rules.""" + + def __init__(self, account_data: JsonMapping): self.allowed_users: list[Pattern[str]] = [] self.ignored_users: list[Pattern[str]] = [] self.blocked_users: list[Pattern[str]] = [] @@ -110,3 +132,21 @@ def get_invite_rule(self, user_id: str) -> InviteRule: return rule return InviteRule.ALLOW + + +@attr.s(slots=True, auto_attribs=True) +class MSC4380InviteRulesConfig(InviteRulesConfig): + default_invite_rule: InviteRule + """The invite rule to apply to all invites.""" + + @classmethod + def from_account_data(cls, data: JsonMapping) -> "MSC4380InviteRulesConfig": + default = data.get("default_action") + + default_invite_rule = ( + InviteRule.BLOCK if default == "block" else InviteRule.ALLOW + ) + return cls(default_invite_rule=default_invite_rule) + + def get_invite_rule(self, inviter_user_id: str) -> InviteRule: + return self.default_invite_rule diff --git a/tests/handlers/test_room_member.py b/tests/handlers/test_room_member.py index 8f9e27603e..d8d7caaf1b 100644 --- a/tests/handlers/test_room_member.py +++ b/tests/handlers/test_room_member.py @@ -458,7 +458,9 @@ def test_deduplicate_joins(self) -> None: self.assertEqual(initial_count, new_count) -class TestInviteFiltering(FederatingHomeserverTestCase): +class TestMSC4155InviteFiltering(FederatingHomeserverTestCase): + """Tests for MSC4155-style invite filtering.""" + servlets = [ synapse.rest.admin.register_servlets, synapse.rest.client.login.register_servlets, @@ -618,3 +620,145 @@ def test_msc4155_block_invite_remote_server(self) -> None: ).value self.assertEqual(f.code, 403) self.assertEqual(f.errcode, "ORG.MATRIX.MSC4155.M_INVITE_BLOCKED") + + +class TestMSC4380InviteBlocking(FederatingHomeserverTestCase): + """Tests for MSC4380-style invite filtering.""" + + servlets = [ + synapse.rest.admin.register_servlets, + synapse.rest.client.login.register_servlets, + synapse.rest.client.room.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.handler = hs.get_room_member_handler() + self.fed_handler = hs.get_federation_handler() + self.store = hs.get_datastores().main + + # Create two users. 
+ self.alice = self.register_user("alice", "pass") + self.alice_token = self.login("alice", "pass") + self.bob = self.register_user("bob", "pass") + self.bob_token = self.login("bob", "pass") + + @override_config({"experimental_features": {"msc4380_enabled": True}}) + def test_msc4380_block_invite_local(self) -> None: + """Test that MSC4380 will block a user from being invited to a room""" + room_id = self.helper.create_room_as(self.alice, tok=self.alice_token) + + self.get_success( + self.store.add_account_data_for_user( + self.bob, + AccountDataTypes.MSC4380_INVITE_PERMISSION_CONFIG, + { + "default_action": "block", + }, + ) + ) + + f = self.get_failure( + self.handler.update_membership( + requester=create_requester(self.alice), + target=UserID.from_string(self.bob), + room_id=room_id, + action=Membership.INVITE, + ), + SynapseError, + ).value + self.assertEqual(f.code, 403) + self.assertEqual(f.errcode, "ORG.MATRIX.MSC4155.M_INVITE_BLOCKED") + + @override_config({"experimental_features": {"msc4380_enabled": True}}) + def test_msc4380_non_string_setting(self) -> None: + """Test that `default_action` being set to something non-stringy is the same as "accept".""" + room_id = self.helper.create_room_as(self.alice, tok=self.alice_token) + + self.get_success( + self.store.add_account_data_for_user( + self.bob, + AccountDataTypes.MSC4380_INVITE_PERMISSION_CONFIG, + { + "default_action": 1, + }, + ) + ) + + self.get_success( + self.handler.update_membership( + requester=create_requester(self.alice), + target=UserID.from_string(self.bob), + room_id=room_id, + action=Membership.INVITE, + ) + ) + + @override_config({"experimental_features": {"msc4380_enabled": False}}) + def test_msc4380_disabled_allow_invite_local(self) -> None: + """Test that, when MSC4380 is not enabled, invites are accepted as normal""" + room_id = self.helper.create_room_as(self.alice, tok=self.alice_token) + + self.get_success( + self.store.add_account_data_for_user( + self.bob, + AccountDataTypes.MSC4380_INVITE_PERMISSION_CONFIG, + { + "default_action": "block", + }, + ) + ) + + self.get_success( + self.handler.update_membership( + requester=create_requester(self.alice), + target=UserID.from_string(self.bob), + room_id=room_id, + action=Membership.INVITE, + ), + ) + + @override_config({"experimental_features": {"msc4380_enabled": True}}) + def test_msc4380_block_invite_remote(self) -> None: + """Test that MSC4380 will block a user from being invited to a room by a remote user.""" + # A remote user who sends the invite + remote_server = "otherserver" + remote_user = "@otheruser:" + remote_server + + self.get_success( + self.store.add_account_data_for_user( + self.bob, + AccountDataTypes.MSC4380_INVITE_PERMISSION_CONFIG, + {"default_action": "block"}, + ) + ) + + room_id = self.helper.create_room_as( + room_creator=self.alice, tok=self.alice_token + ) + room_version = self.get_success(self.store.get_room_version(room_id)) + + invite_event = event_from_pdu_json( + { + "type": EventTypes.Member, + "content": {"membership": "invite"}, + "room_id": room_id, + "sender": remote_user, + "state_key": self.bob, + "depth": 32, + "prev_events": [], + "auth_events": [], + "origin_server_ts": self.clock.time_msec(), + }, + room_version, + ) + + f = self.get_failure( + self.fed_handler.on_invite_request( + remote_server, + invite_event, + invite_event.room_version, + ), + SynapseError, + ).value + self.assertEqual(f.code, 403) + self.assertEqual(f.errcode, "ORG.MATRIX.MSC4155.M_INVITE_BLOCKED") diff --git
a/tests/storage/test_invite_rule.py b/tests/storage/test_invite_rule.py index 38c97ecaa3..ae99907704 100644 --- a/tests/storage/test_invite_rule.py +++ b/tests/storage/test_invite_rule.py @@ -1,4 +1,8 @@ -from synapse.storage.invite_rule import InviteRule, InviteRulesConfig +from synapse.storage.invite_rule import ( + AllowAllInviteRulesConfig, + InviteRule, + MSC4155InviteRulesConfig, +) from synapse.types import UserID from tests import unittest @@ -10,23 +14,23 @@ class InviteFilterTestCase(unittest.TestCase): - def test_empty(self) -> None: + def test_allow_all(self) -> None: """Permit by default""" - config = InviteRulesConfig(None) + config = AllowAllInviteRulesConfig() self.assertEqual( config.get_invite_rule(regular_user.to_string()), InviteRule.ALLOW ) def test_ignore_invalid(self) -> None: """Invalid strings are ignored""" - config = InviteRulesConfig({"blocked_users": ["not a user"]}) + config = MSC4155InviteRulesConfig({"blocked_users": ["not a user"]}) self.assertEqual( config.get_invite_rule(blocked_user.to_string()), InviteRule.ALLOW ) def test_user_blocked(self) -> None: """Permit all, except explicitly blocked users""" - config = InviteRulesConfig({"blocked_users": [blocked_user.to_string()]}) + config = MSC4155InviteRulesConfig({"blocked_users": [blocked_user.to_string()]}) self.assertEqual( config.get_invite_rule(blocked_user.to_string()), InviteRule.BLOCK ) @@ -36,7 +40,7 @@ def test_user_blocked(self) -> None: def test_user_ignored(self) -> None: """Permit all, except explicitly ignored users""" - config = InviteRulesConfig({"ignored_users": [ignored_user.to_string()]}) + config = MSC4155InviteRulesConfig({"ignored_users": [ignored_user.to_string()]}) self.assertEqual( config.get_invite_rule(ignored_user.to_string()), InviteRule.IGNORE ) @@ -46,7 +50,7 @@ def test_user_ignored(self) -> None: def test_user_precedence(self) -> None: """Always take allowed over ignored, ignored over blocked, and then block.""" - config = InviteRulesConfig( + config = MSC4155InviteRulesConfig( { "allowed_users": [allowed_user.to_string()], "ignored_users": [allowed_user.to_string(), ignored_user.to_string()], @@ -70,7 +74,7 @@ def test_user_precedence(self) -> None: def test_server_blocked(self) -> None: """Block all users on the server except those allowed.""" user_on_same_server = UserID("blocked", allowed_user.domain) - config = InviteRulesConfig( + config = MSC4155InviteRulesConfig( { "allowed_users": [allowed_user.to_string()], "blocked_servers": [allowed_user.domain], @@ -86,7 +90,7 @@ def test_server_blocked(self) -> None: def test_server_ignored(self) -> None: """Ignore all users on the server except those allowed.""" user_on_same_server = UserID("ignored", allowed_user.domain) - config = InviteRulesConfig( + config = MSC4155InviteRulesConfig( { "allowed_users": [allowed_user.to_string()], "ignored_servers": [allowed_user.domain], @@ -104,7 +108,7 @@ def test_server_allow(self) -> None: blocked_user_on_same_server = UserID("blocked", allowed_user.domain) ignored_user_on_same_server = UserID("ignored", allowed_user.domain) allowed_user_on_same_server = UserID("another", allowed_user.domain) - config = InviteRulesConfig( + config = MSC4155InviteRulesConfig( { "ignored_users": [ignored_user_on_same_server.to_string()], "blocked_users": [blocked_user_on_same_server.to_string()], @@ -129,7 +133,7 @@ def test_server_allow(self) -> None: def test_server_precedence(self) -> None: """Always take allowed over ignored, ignored over blocked, and then block.""" - config = InviteRulesConfig( + 
config = MSC4155InviteRulesConfig( { "allowed_servers": [allowed_user.domain], "ignored_servers": [allowed_user.domain, ignored_user.domain], @@ -152,7 +156,7 @@ def test_server_precedence(self) -> None: def test_server_glob(self) -> None: """Test that glob patterns match""" - config = InviteRulesConfig({"blocked_servers": ["*.example.org"]}) + config = MSC4155InviteRulesConfig({"blocked_servers": ["*.example.org"]}) self.assertEqual( config.get_invite_rule(allowed_user.to_string()), InviteRule.BLOCK ) From 703464c1f78418c5f145a22e3a2f58d197be7270 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Wed, 26 Nov 2025 17:17:04 +0000 Subject: [PATCH 16/47] Fix case where `get_partial_current_state_deltas` could return >100 rows (#18960) --- changelog.d/18960.bugfix | 1 + synapse/storage/controllers/state.py | 2 +- .../storage/databases/main/state_deltas.py | 97 ++++-- tests/storage/test_state.py | 314 ++++++++++++++++++ 4 files changed, 380 insertions(+), 34 deletions(-) create mode 100644 changelog.d/18960.bugfix diff --git a/changelog.d/18960.bugfix b/changelog.d/18960.bugfix new file mode 100644 index 0000000000..909089f809 --- /dev/null +++ b/changelog.d/18960.bugfix @@ -0,0 +1 @@ +Fix a bug in the database function for fetching state deltas that could result in unnecessarily long query times. \ No newline at end of file diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index 9c5e837ab0..4885268305 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -683,7 +683,7 @@ async def get_current_state_deltas( # https://github.com/matrix-org/synapse/issues/13008 return await self.stores.main.get_partial_current_state_deltas( - prev_stream_id, max_stream_id + prev_stream_id, max_stream_id, limit=100 ) @trace diff --git a/synapse/storage/databases/main/state_deltas.py b/synapse/storage/databases/main/state_deltas.py index cd8f286d08..a5d5407327 100644 --- a/synapse/storage/databases/main/state_deltas.py +++ b/synapse/storage/databases/main/state_deltas.py @@ -78,27 +78,41 @@ def __init__( ) async def get_partial_current_state_deltas( - self, prev_stream_id: int, max_stream_id: int + self, prev_stream_id: int, max_stream_id: int, limit: int = 100 ) -> tuple[int, list[StateDelta]]: - """Fetch a list of room state changes since the given stream id + """Fetch a list of room state changes since the given stream id. This may be the partial state if we're lazy joining the room. + This method takes care to handle state deltas that share the same + `stream_id`. That can happen when persisting state in a batch, + potentially as the result of state resolution (both adding new state and + undoing previous state). + + State deltas are grouped by `stream_id`. When hitting the given `limit` + would return only part of a "group" of state deltas, that entire group + is omitted. Thus, this function may return *up to* `limit` state deltas, + or slightly more when a single group itself exceeds `limit`. + Args: prev_stream_id: point to get changes since (exclusive) max_stream_id: the point that we know has been correctly persisted - ie, an upper limit to return changes from. + limit: the maximum number of rows to return. Returns: A tuple consisting of: - the stream id which these results go up to - list of current_state_delta_stream rows. If it is empty, we are up to date. - - A maximum of 100 rows will be returned.
""" prev_stream_id = int(prev_stream_id) + if limit <= 0: + raise ValueError( + "Invalid `limit` passed to `get_partial_current_state_deltas`" + ) + # check we're not going backwards assert prev_stream_id <= max_stream_id, ( f"New stream id {max_stream_id} is smaller than prev stream id {prev_stream_id}" @@ -115,45 +129,62 @@ async def get_partial_current_state_deltas( def get_current_state_deltas_txn( txn: LoggingTransaction, ) -> tuple[int, list[StateDelta]]: - # First we calculate the max stream id that will give us less than - # N results. - # We arbitrarily limit to 100 stream_id entries to ensure we don't - # select toooo many. - sql = """ - SELECT stream_id, count(*) + # First we group state deltas by `stream_id` and calculate which + # groups can be returned without exceeding the provided `limit`. + sql_grouped = """ + SELECT stream_id, COUNT(*) AS c FROM current_state_delta_stream WHERE stream_id > ? AND stream_id <= ? GROUP BY stream_id - ORDER BY stream_id ASC - LIMIT 100 + ORDER BY stream_id + LIMIT ? """ - txn.execute(sql, (prev_stream_id, max_stream_id)) - - total = 0 - - for stream_id, count in txn: - total += count - if total > 100: - # We arbitrarily limit to 100 entries to ensure we don't - # select toooo many. - logger.debug( - "Clipping current_state_delta_stream rows to stream_id %i", - stream_id, - ) - clipped_stream_id = stream_id + group_limit = limit + 1 + txn.execute(sql_grouped, (prev_stream_id, max_stream_id, group_limit)) + grouped_rows = txn.fetchall() + + if not grouped_rows: + # Nothing to return in the range; we are up to date through max_stream_id. + return max_stream_id, [] + + # Always retrieve the first group, at the bare minimum. This ensures the + # caller always makes progress, even if a single group exceeds `limit`. + fetch_upto_stream_id, included_rows = grouped_rows[0] + + # Determine which other groups we can retrieve at the same time, + # without blowing the budget. + included_all_groups = True + for stream_id, count in grouped_rows[1:]: + if included_rows + count > limit: + included_all_groups = False break - else: - # if there's no problem, we may as well go right up to the max_stream_id - clipped_stream_id = max_stream_id + included_rows += count + fetch_upto_stream_id = stream_id + + # If we retrieved fewer groups than the limit *and* we didn't hit the + # `LIMIT ?` cap on the grouping query, we know we've caught up with + # the stream. + caught_up_with_stream = ( + included_all_groups and len(grouped_rows) < group_limit + ) + + # At this point we should have advanced, or bailed out early above. + assert fetch_upto_stream_id != prev_stream_id - # Now actually get the deltas - sql = """ + # Now fetch the actual rows for only the included stream_id groups. + sql_rows = """ SELECT stream_id, room_id, type, state_key, event_id, prev_event_id FROM current_state_delta_stream WHERE ? < stream_id AND stream_id <= ?
ORDER BY stream_id ASC """ - txn.execute(sql, (prev_stream_id, clipped_stream_id)) + txn.execute(sql_rows, (prev_stream_id, fetch_upto_stream_id)) + rows = txn.fetchall() + + clipped_stream_id = ( + max_stream_id if caught_up_with_stream else fetch_upto_stream_id + ) + return clipped_stream_id, [ StateDelta( stream_id=row[0], @@ -163,7 +194,7 @@ def get_current_state_deltas_txn( event_id=row[4], prev_event_id=row[5], ) - for row in txn.fetchall() + for row in rows ] return await self.db_pool.runInteraction( diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py index 8e821c6d18..dbbede812d 100644 --- a/tests/storage/test_state.py +++ b/tests/storage/test_state.py @@ -19,6 +19,7 @@ # # +import json import logging from typing import cast @@ -33,6 +34,7 @@ from synapse.types import JsonDict, RoomID, StateMap, UserID from synapse.types.state import StateFilter from synapse.util.clock import Clock +from synapse.util.stringutils import random_string from tests.unittest import HomeserverTestCase @@ -643,3 +645,315 @@ def test_batched_state_group_storing(self) -> None: ), ) self.assertEqual(context.state_group_before_event, groups[0][0]) + + +class CurrentStateDeltaStreamTestCase(HomeserverTestCase): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + super().prepare(reactor, clock, hs) + self.store = hs.get_datastores().main + self.storage = hs.get_storage_controllers() + self.state_datastore = self.storage.state.stores.state + self.event_creation_handler = hs.get_event_creation_handler() + self.event_builder_factory = hs.get_event_builder_factory() + + # Create a made-up room and a user. + self.alice_user_id = UserID.from_string("@alice:test") + self.room = RoomID.from_string("!abc1234:test") + + self.get_success( + self.store.store_room( + self.room.to_string(), + room_creator_user_id="@creator:test", + is_public=True, + room_version=RoomVersions.V1, + ) + ) + + def inject_state_event( + self, room: RoomID, sender: UserID, typ: str, state_key: str, content: JsonDict + ) -> EventBase: + builder = self.event_builder_factory.for_room_version( + RoomVersions.V1, + { + "type": typ, + "sender": sender.to_string(), + "state_key": state_key, + "room_id": room.to_string(), + "content": content, + }, + ) + + event, unpersisted_context = self.get_success( + self.event_creation_handler.create_new_client_event(builder) + ) + + context = self.get_success(unpersisted_context.persist(event)) + + assert self.storage.persistence is not None + self.get_success(self.storage.persistence.persist_event(event, context)) + + return event + + def test_get_partial_current_state_deltas_limit(self) -> None: + """ + Tests that `get_partial_current_state_deltas` returns at most `limit` rows. + + Regression test for https://github.com/element-hq/synapse/pull/18960. + """ + # Inject a create event which other events can auth with. + self.inject_state_event( + self.room, self.alice_user_id, EventTypes.Create, "", {} + ) + + limit = 2 + + # Make N*2 state changes in the room, resulting in 2N+1 total state + # events (including the create event) in the room. + for i in range(limit * 2): + self.inject_state_event( + self.room, + self.alice_user_id, + EventTypes.Name, + "", + {"name": f"rename #{i}"}, + ) + + # Call the function under test. This must return <= `limit` rows.
+ max_stream_id = self.store.get_room_max_stream_ordering() + clipped_stream_id, deltas = self.get_success( + self.store.get_partial_current_state_deltas( + prev_stream_id=0, + max_stream_id=max_stream_id, + limit=limit, + ) + ) + + self.assertLessEqual( + len(deltas), limit, f"Returned {len(deltas)} rows, expected at most {limit}" + ) + + # Advancing from the clipped point should eventually drain the remainder. + # Make sure we make progress and don't get stuck. + if deltas: + next_prev = clipped_stream_id + next_clipped, next_deltas = self.get_success( + self.store.get_partial_current_state_deltas( + prev_stream_id=next_prev, max_stream_id=max_stream_id, limit=limit + ) + ) + self.assertNotEqual( + next_clipped, clipped_stream_id, "Did not advance clipped_stream_id" + ) + # Still should respect the limit. + self.assertLessEqual(len(next_deltas), limit) + + def test_non_unique_stream_ids_in_current_state_delta_stream(self) -> None: + """ + Tests that `get_partial_current_state_deltas` always returns entire + groups of state deltas (grouped by `stream_id`), and never part of one. + + We check by passing a `limit` to the function that, if followed + blindly, would split a group of state deltas that share a `stream_id`. + The test passes if that group is not returned at all (because doing so + would overshoot the limit of returned state deltas). + + Regression test for https://github.com/element-hq/synapse/pull/18960. + """ + # Inject a create event to start with. + self.inject_state_event( + self.room, self.alice_user_id, EventTypes.Create, "", {} + ) + + # Then inject one "real" m.room.name event. This will give us a stream_id that + # we can create some more (fake) events with. + self.inject_state_event( + self.room, + self.alice_user_id, + EventTypes.Name, + "", + {"name": "rename #1"}, + ) + + # Get the stream_id of the last-inserted event. + max_stream_id = self.store.get_room_max_stream_ordering() + + # Make 3 more state changes in the room, resulting in 5 total state + # events (including the create event, and the first name update) in + # the room. + # + # All of these state deltas have the same `stream_id` as the original name event. + # We do so by editing the table directly, as that's the simplest way to have + # them all share the same `stream_id`. + self.get_success( + self.store.db_pool.simple_insert_many( + "current_state_delta_stream", + keys=( + "stream_id", + "room_id", + "type", + "state_key", + "event_id", + "prev_event_id", + "instance_name", + ), + values=[ + ( + max_stream_id, + self.room.to_string(), + EventTypes.Name, + "", + f"${random_string(5)}:test", + json.dumps({"name": f"rename #{i}"}), + "master", + ) + for i in range(3) + ], + desc="inject_room_name_state_events", + ) + ) + + # Call the function under test with a limit of 4. Without the limit, we + # would return 5 state deltas: + # + # C N N N N + # 1 2 3 4 5 + # + # C = m.room.create + # N = m.room.name + # + # With the limit, we should return only the create event, as returning 4 + # state deltas would result in splitting a group: + # + # 2 3 3 3 3 - stream IDs/groups + # C N N N N + # 1 2 3 4 X + + clipped_stream_id, deltas = self.get_success( + self.store.get_partial_current_state_deltas( + prev_stream_id=0, + max_stream_id=max_stream_id, + limit=4, + ) + ) + + # 2 is the stream ID of the m.room.create event.
+ self.assertEqual(clipped_stream_id, 2) + self.assertEqual( + len(deltas), + 1, + f"Returned {len(deltas)} rows, expected only one (the create event): {deltas}", + ) + + # Advance once more with our limit of 4. We should now get all 4 + # `m.room.name` state deltas as they can fit under the limit. + clipped_stream_id, next_deltas = self.get_success( + self.store.get_partial_current_state_deltas( + prev_stream_id=clipped_stream_id, max_stream_id=max_stream_id, limit=4 + ) + ) + self.assertEqual( + clipped_stream_id, 3 + ) # The stream ID of the 4 m.room.name events. + + self.assertEqual( + len(next_deltas), + 4, + f"Returned {len(next_deltas)} rows, expected all 4 m.room.name events: {next_deltas}", + ) + + def test_get_partial_current_state_deltas_does_not_enter_infinite_loop( + self, + ) -> None: + """ + Tests that `get_partial_current_state_deltas` does not repeatedly return + zero entries due to the passed `limit` parameter being less than the + size of the next group of state deltas from the given `prev_stream_id`. + """ + # Inject a create event to start with. + self.inject_state_event( + self.room, self.alice_user_id, EventTypes.Create, "", {} + ) + + # Then inject one "real" m.room.name event. This will give us a stream_id that + # we can create some more (fake) events with. + self.inject_state_event( + self.room, + self.alice_user_id, + EventTypes.Name, + "", + {"name": "rename #1"}, + ) + + # Get the stream_id of the last-inserted event. + max_stream_id = self.store.get_room_max_stream_ordering() + + # Make 3 more state changes in the room, resulting in 5 total state + # events (including the create event, and the first name update) in + # the room. + # + # All of these state deltas have the same `stream_id` as the original name event. + # We do so by editing the table directly, as that's the simplest way to have + # them all share the same `stream_id`. + self.get_success( + self.store.db_pool.simple_insert_many( + "current_state_delta_stream", + keys=( + "stream_id", + "room_id", + "type", + "state_key", + "event_id", + "prev_event_id", + "instance_name", + ), + values=[ + ( + max_stream_id, + self.room.to_string(), + EventTypes.Name, + "", + f"${random_string(5)}:test", + json.dumps({"name": f"rename #{i}"}), + "master", + ) + for i in range(3) + ], + desc="inject_room_name_state_events", + ) + ) + + # Call the function under test with a limit of 2, starting just after + # the create event. The next group is the 4 m.room.name state deltas, + # which all share a single stream_id: + # + # 2 3 3 3 3 - stream IDs/groups + # C N N N N + # + # C = m.room.create + # N = m.room.name + # + # That group (4 rows) is larger than the limit. Rather than return zero + # rows (which would leave the caller stuck), the function must return + # the whole group so that the caller always makes progress. + + clipped_stream_id, deltas = self.get_success( + self.store.get_partial_current_state_deltas( + prev_stream_id=2, # Start after the create event (which has stream_id 2). + max_stream_id=max_stream_id, + limit=2, # Less than the size of the next group (which is 4). + ) + ) + + self.assertEqual( + clipped_stream_id, 3 + ) # The stream ID of the 4 m.room.name events. + + # We should get all 4 `m.room.name` state deltas, instead of 0, which + # would result in the caller entering an infinite loop.
+ self.assertEqual( + len(deltas), + 4, + f"Returned {len(deltas)} rows, expected 4 even though it broke our limit: {deltas}", + ) From 52089f1f790174908963233922329c9afe558ccc Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 27 Nov 2025 16:15:06 +0000 Subject: [PATCH 17/47] Prevent `lint-newsfile` job activating when fixing dependabot PR branches (#19220) --- .github/workflows/tests.yml | 3 ++- changelog.d/19220.misc | 1 + 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelog.d/19220.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 771474f74e..dd183840ef 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -192,7 +192,8 @@ jobs: run: scripts-dev/check_line_terminators.sh lint-newsfile: - if: ${{ (github.base_ref == 'develop' || contains(github.base_ref, 'release-')) && github.actor != 'dependabot[bot]' }} + # Only run on pull_request events, targeting develop/release branches, and skip when the PR author is dependabot[bot]. + if: ${{ github.event_name == 'pull_request' && (github.base_ref == 'develop' || contains(github.base_ref, 'release-')) && github.event.pull_request.user.login != 'dependabot[bot]' }} runs-on: ubuntu-latest steps: - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 diff --git a/changelog.d/19220.misc b/changelog.d/19220.misc new file mode 100644 index 0000000000..e98f5ade61 --- /dev/null +++ b/changelog.d/19220.misc @@ -0,0 +1 @@ +Prevent changelog check CI running on @dependabot's PRs even when a human has modified the branch. \ No newline at end of file From 566670c363915691826b5b435c4aa7acde61b408 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Thu, 27 Nov 2025 16:44:17 +0000 Subject: [PATCH 18/47] Move `RestartDelayedEventServlet` to workers (#19207) --- changelog.d/19207.feature | 1 + docker/configure_workers_and_start.py | 1 + docs/upgrade.md | 8 ++++++ docs/workers.md | 5 +++- synapse/handlers/delayed_events.py | 25 ++++++++++++------- synapse/rest/client/delayed_events.py | 4 +-- .../storage/databases/main/delayed_events.py | 19 ++++++++++++-- 7 files changed, 49 insertions(+), 14 deletions(-) create mode 100644 changelog.d/19207.feature diff --git a/changelog.d/19207.feature b/changelog.d/19207.feature new file mode 100644 index 0000000000..e64562c350 --- /dev/null +++ b/changelog.d/19207.feature @@ -0,0 +1 @@ +Allow restarting delayed event timeouts on workers. diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py index e19b0a0039..e7cbd701b8 100755 --- a/docker/configure_workers_and_start.py +++ b/docker/configure_workers_and_start.py @@ -196,6 +196,7 @@ "^/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload", "^/_matrix/client/(api/v1|r0|v3|unstable)/keys/device_signing/upload$", "^/_matrix/client/(api/v1|r0|v3|unstable)/keys/signatures/upload$", + "^/_matrix/client/unstable/org.matrix.msc4140/delayed_events(/.*/restart)?$", ], "shared_extra_conf": {}, "worker_extra_conf": "", diff --git a/docs/upgrade.md b/docs/upgrade.md index 350b71fe47..5e7fa31580 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -119,6 +119,14 @@ stacking them up. 
You can monitor the currently running background updates with # Upgrading to v1.144.0 +## Worker support for unstable MSC4140 `/restart` endpoint + +The following unstable endpoint pattern may now be routed to worker processes: + +``` +^/_matrix/client/unstable/org.matrix.msc4140/delayed_events/.*/restart$ +``` + ## Unstable mutual rooms endpoint is now behind an experimental feature flag The unstable mutual rooms endpoint from diff --git a/docs/workers.md b/docs/workers.md index f766b40251..2bc8afa74f 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -285,10 +285,13 @@ information. # User directory search requests ^/_matrix/client/(r0|v3|unstable)/user_directory/search$ + # Unstable MSC4140 support + ^/_matrix/client/unstable/org.matrix.msc4140/delayed_events(/.*/restart)?$ + Additionally, the following REST endpoints can be handled for GET requests: + # Push rules requests ^/_matrix/client/(api/v1|r0|v3|unstable)/pushrules/ - ^/_matrix/client/unstable/org.matrix.msc4140/delayed_events # Account data requests ^/_matrix/client/(r0|v3|unstable)/.*/tags diff --git a/synapse/handlers/delayed_events.py b/synapse/handlers/delayed_events.py index de21e3abbb..8817b65316 100644 --- a/synapse/handlers/delayed_events.py +++ b/synapse/handlers/delayed_events.py @@ -96,16 +96,18 @@ async def _schedule_db_events() -> None: self.notify_new_event, ) - # Delayed events that are already marked as processed on startup might not have been - # sent properly on the last run of the server, so unmark them to send them again. + # Now process any delayed events that are due to be sent. + # + # We set `reprocess_events` to True in case any events had been + # marked as processed, but had not yet actually been sent, + # before the homeserver stopped. + # # Caveat: this will double-send delayed events that successfully persisted, but failed # to be removed from the DB table of delayed events. # TODO: To avoid double-sending, scan the timeline to find which of these events were # already sent. To do so, must store delay_ids in sent events to retrieve them later. - await self._store.unprocess_delayed_events() - events, next_send_ts = await self._store.process_timeout_delayed_events( - self._get_current_ts() + self._get_current_ts(), reprocess_events=True ) if next_send_ts: @@ -423,18 +425,23 @@ async def restart(self, request: SynapseRequest, delay_id: str) -> None: Raises: NotFoundError: if no matching delayed event could be found. """ - assert self._is_master await self._delayed_event_mgmt_ratelimiter.ratelimit( None, request.getClientAddress().host ) - await make_deferred_yieldable(self._initialized_from_db) + + # Note: We don't need to wait on `self._initialized_from_db` here as the + # events that this method deals with are already marked as processed. + # + # `restart_delayed_event` will skip over such events entirely. next_send_ts = await self._store.restart_delayed_event( delay_id, self._get_current_ts() ) - if self._next_send_ts_changed(next_send_ts): - self._schedule_next_at(next_send_ts) + # Only the main process handles sending delayed events.
+ + if self._is_master: + if self._next_send_ts_changed(next_send_ts): + self._schedule_next_at(next_send_ts) async def send(self, request: SynapseRequest, delay_id: str) -> None: """ diff --git a/synapse/rest/client/delayed_events.py b/synapse/rest/client/delayed_events.py index 69d1013e72..7afecffe2d 100644 --- a/synapse/rest/client/delayed_events.py +++ b/synapse/rest/client/delayed_events.py @@ -156,10 +156,10 @@ async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: - # The following can't currently be instantiated on workers. + # Most of the following can't currently be instantiated on workers. if hs.config.worker.worker_app is None: UpdateDelayedEventServlet(hs).register(http_server) CancelDelayedEventServlet(hs).register(http_server) - RestartDelayedEventServlet(hs).register(http_server) SendDelayedEventServlet(hs).register(http_server) + RestartDelayedEventServlet(hs).register(http_server) DelayedEventsServlet(hs).register(http_server) diff --git a/synapse/storage/databases/main/delayed_events.py b/synapse/storage/databases/main/delayed_events.py index 7f72be46f5..5547150515 100644 --- a/synapse/storage/databases/main/delayed_events.py +++ b/synapse/storage/databases/main/delayed_events.py @@ -259,7 +259,7 @@ async def get_all_delayed_events_for_user( ] async def process_timeout_delayed_events( - self, current_ts: Timestamp + self, current_ts: Timestamp, reprocess_events: bool = False ) -> tuple[ list[DelayedEventDetails], Timestamp | None, @@ -268,6 +268,16 @@ async def process_timeout_delayed_events( Marks for processing all delayed events that should have been sent prior to the provided time that haven't already been marked as such. + Args: + current_ts: The current timestamp. + reprocess_events: Whether to reprocess already-processed delayed + events. If set to True, events which are marked as processed + will have their `send_ts` re-checked. + + This is mainly useful for recovering from a server restart, + which could have occurred between an event being marked as + processed and the event actually being sent. + Returns: The details of all newly-processed delayed events, and the send time of the next delayed event to be sent, if any. """ @@ -292,7 +302,12 @@ def process_timeout_delayed_events_txn( ) ) sql_update = "UPDATE delayed_events SET is_processed = TRUE" - sql_where = "WHERE send_ts <= ? AND NOT is_processed" + sql_where = "WHERE send_ts <= ?" + + if not reprocess_events: + # Skip already-processed events. + sql_where += " AND NOT is_processed" + sql_args = (current_ts,) sql_order = "ORDER BY send_ts" if isinstance(self.database_engine, PostgresEngine): From 78ec3043d645a546411ef994ace7370653baa77c Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Fri, 28 Nov 2025 15:49:15 +0000 Subject: [PATCH 19/47] Use sqlglot to properly check SQL delta files (#19224) Rather than using dodgy regexes which keep breaking. Also fixes a regression where it looks like we didn't fail CI if the delta was in the wrong place.
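As a rough illustration of the new approach, here is a minimal standalone sketch of an AST-based check. The sample SQL, variable names, and prints are illustrative only and not part of the patch; the sqlglot expression-walking calls mirror the ones used by `check_schema_delta` in the diff below.

    import sqlglot
    import sqlglot.expressions

    DELTA_SQL = """
    CREATE TABLE foo (id BIGINT);
    CREATE INDEX foo_idx ON foo (id);  -- allowed: foo is created in this delta
    CREATE INDEX bar_idx ON bar (id);  -- flagged: bar is a pre-existing table
    DROP INDEX old_idx;                -- flagged: index deletion is always banned
    """

    created_tables = set()
    for stmt in sqlglot.parse(DELTA_SQL, read="postgres"):
        if isinstance(stmt, sqlglot.expressions.Create):
            if stmt.kind == "TABLE":
                # Track tables created in this delta; indices on them are fine.
                created_tables.add(stmt.this.this.name)
            elif stmt.kind == "INDEX":
                # Index creation is only allowed on tables created above.
                table_name = stmt.this.args["table"].name
                if table_name not in created_tables:
                    print(f"index creation on existing table: {stmt.sql()}")
        elif isinstance(stmt, sqlglot.expressions.Drop) and stmt.kind == "INDEX":
            print(f"index deletion: {stmt.sql()}")

Parsing into an AST makes table and index names unambiguous, which is exactly where the old regexes kept breaking on formatting variations.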
--- .github/workflows/tests.yml | 2 +- changelog.d/19224.misc | 1 + poetry.lock | 62 ++++++++----- pyproject.toml | 3 + scripts-dev/check_schema_delta.py | 145 ++++++++++++++++++------------ 5 files changed, 131 insertions(+), 82 deletions(-) create mode 100644 changelog.d/19224.misc diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index dd183840ef..c32d018a64 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -110,7 +110,7 @@ jobs: - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 with: python-version: "3.x" - - run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'" + - run: "pip install 'click==8.1.1' 'GitPython>=3.1.20' 'sqlglot>=28.0.0'" - run: scripts-dev/check_schema_delta.py --force-colors check-lockfile: diff --git a/changelog.d/19224.misc b/changelog.d/19224.misc new file mode 100644 index 0000000000..3f8f630c5e --- /dev/null +++ b/changelog.d/19224.misc @@ -0,0 +1 @@ +Improve robustness of the SQL schema linting in CI. diff --git a/poetry.lock b/poetry.lock index f723322a55..35c642fdeb 100644 --- a/poetry.lock +++ b/poetry.lock @@ -31,7 +31,7 @@ description = "The ultimate Python library in building OAuth and OpenID Connect optional = true python-versions = ">=3.9" groups = ["main"] -markers = "extra == \"all\" or extra == \"jwt\" or extra == \"oidc\"" +markers = "extra == \"oidc\" or extra == \"jwt\" or extra == \"all\"" files = [ {file = "authlib-1.6.5-py2.py3-none-any.whl", hash = "sha256:3e0e0507807f842b02175507bdee8957a1d5707fd4afb17c32fb43fee90b6e3a"}, {file = "authlib-1.6.5.tar.gz", hash = "sha256:6aaf9c79b7cc96c900f0b284061691c5d4e61221640a948fe690b556a6d6d10b"}, @@ -446,7 +446,7 @@ description = "XML bomb protection for Python stdlib modules" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" groups = ["main"] -markers = "extra == \"all\" or extra == \"saml2\"" +markers = "extra == \"saml2\" or extra == \"all\"" files = [ {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"}, {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"}, @@ -471,7 +471,7 @@ description = "XPath 1.0/2.0/3.0/3.1 parsers and selectors for ElementTree and l optional = true python-versions = ">=3.7" groups = ["main"] -markers = "extra == \"all\" or extra == \"saml2\"" +markers = "extra == \"saml2\" or extra == \"all\"" files = [ {file = "elementpath-4.1.5-py3-none-any.whl", hash = "sha256:2ac1a2fb31eb22bbbf817f8cf6752f844513216263f0e3892c8e79782fe4bb55"}, {file = "elementpath-4.1.5.tar.gz", hash = "sha256:c2d6dc524b29ef751ecfc416b0627668119d8812441c555d7471da41d4bacb8d"}, @@ -521,7 +521,7 @@ description = "Python wrapper for hiredis" optional = true python-versions = ">=3.8" groups = ["main"] -markers = "extra == \"all\" or extra == \"redis\"" +markers = "extra == \"redis\" or extra == \"all\"" files = [ {file = "hiredis-3.3.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:9937d9b69321b393fbace69f55423480f098120bc55a3316e1ca3508c4dbbd6f"}, {file = "hiredis-3.3.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:50351b77f89ba6a22aff430b993653847f36b71d444509036baa0f2d79d1ebf4"}, @@ -844,7 +844,7 @@ description = "Jaeger Python OpenTracing Tracer implementation" optional = true python-versions = ">=3.7" groups = ["main"] -markers = "extra == \"all\" or extra == \"opentracing\"" +markers = "extra == \"opentracing\" or extra == \"all\"" files 
= [ {file = "jaeger-client-4.8.0.tar.gz", hash = "sha256:3157836edab8e2c209bd2d6ae61113db36f7ee399e66b1dcbb715d87ab49bfe0"}, ] @@ -982,7 +982,7 @@ description = "A strictly RFC 4510 conforming LDAP V3 pure Python client library optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\"" +markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\"" files = [ {file = "ldap3-2.9.1-py2.py3-none-any.whl", hash = "sha256:5869596fc4948797020d3f03b7939da938778a0f9e2009f7a072ccf92b8e8d70"}, {file = "ldap3-2.9.1.tar.gz", hash = "sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f"}, @@ -998,7 +998,7 @@ description = "Powerful and Pythonic XML processing library combining libxml2/li optional = true python-versions = ">=3.8" groups = ["main"] -markers = "extra == \"all\" or extra == \"url-preview\"" +markers = "extra == \"url-preview\" or extra == \"all\"" files = [ {file = "lxml-6.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e77dd455b9a16bbd2a5036a63ddbd479c19572af81b624e79ef422f929eef388"}, {file = "lxml-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5d444858b9f07cefff6455b983aea9a67f7462ba1f6cbe4a21e8bf6791bf2153"}, @@ -1284,7 +1284,7 @@ description = "An LDAP3 auth provider for Synapse" optional = true python-versions = ">=3.7" groups = ["main"] -markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\"" +markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\"" files = [ {file = "matrix-synapse-ldap3-0.3.0.tar.gz", hash = "sha256:8bb6517173164d4b9cc44f49de411d8cebdb2e705d5dd1ea1f38733c4a009e1d"}, {file = "matrix_synapse_ldap3-0.3.0-py3-none-any.whl", hash = "sha256:8b4d701f8702551e98cc1d8c20dbed532de5613584c08d0df22de376ba99159d"}, @@ -1526,7 +1526,7 @@ description = "OpenTracing API for Python. See documentation at http://opentraci optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"all\" or extra == \"opentracing\"" +markers = "extra == \"opentracing\" or extra == \"all\"" files = [ {file = "opentracing-2.4.0.tar.gz", hash = "sha256:a173117e6ef580d55874734d1fa7ecb6f3655160b8b8974a2a1e98e5ec9c840d"}, ] @@ -1716,7 +1716,7 @@ description = "psycopg2 - Python-PostgreSQL Database Adapter" optional = true python-versions = ">=3.9" groups = ["main"] -markers = "extra == \"all\" or extra == \"postgres\"" +markers = "extra == \"postgres\" or extra == \"all\"" files = [ {file = "psycopg2-2.9.11-cp310-cp310-win_amd64.whl", hash = "sha256:103e857f46bb76908768ead4e2d0ba1d1a130e7b8ed77d3ae91e8b33481813e8"}, {file = "psycopg2-2.9.11-cp311-cp311-win_amd64.whl", hash = "sha256:210daed32e18f35e3140a1ebe059ac29209dd96468f2f7559aa59f75ee82a5cb"}, @@ -1734,7 +1734,7 @@ description = ".. 
image:: https://travis-ci.org/chtd/psycopg2cffi.svg?branch=mas optional = true python-versions = "*" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")" +markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")" files = [ {file = "psycopg2cffi-2.9.0.tar.gz", hash = "sha256:7e272edcd837de3a1d12b62185eb85c45a19feda9e62fa1b120c54f9e8d35c52"}, ] @@ -1750,7 +1750,7 @@ description = "A Simple library to enable psycopg2 compatability" optional = true python-versions = "*" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")" +markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")" files = [ {file = "psycopg2cffi-compat-1.1.tar.gz", hash = "sha256:d25e921748475522b33d13420aad5c2831c743227dc1f1f2585e0fdb5c914e05"}, ] @@ -2031,7 +2031,7 @@ description = "A development tool to measure, monitor and analyze the memory beh optional = true python-versions = ">=3.6" groups = ["main"] -markers = "extra == \"all\" or extra == \"cache-memory\"" +markers = "extra == \"cache-memory\" or extra == \"all\"" files = [ {file = "Pympler-1.0.1-py3-none-any.whl", hash = "sha256:d260dda9ae781e1eab6ea15bacb84015849833ba5555f141d2d9b7b7473b307d"}, {file = "Pympler-1.0.1.tar.gz", hash = "sha256:993f1a3599ca3f4fcd7160c7545ad06310c9e12f70174ae7ae8d4e25f6c5d3fa"}, @@ -2091,7 +2091,7 @@ description = "Python implementation of SAML Version 2 Standard" optional = true python-versions = ">=3.9,<4.0" groups = ["main"] -markers = "extra == \"all\" or extra == \"saml2\"" +markers = "extra == \"saml2\" or extra == \"all\"" files = [ {file = "pysaml2-7.5.0-py3-none-any.whl", hash = "sha256:bc6627cc344476a83c757f440a73fda1369f13b6fda1b4e16bca63ffbabb5318"}, {file = "pysaml2-7.5.0.tar.gz", hash = "sha256:f36871d4e5ee857c6b85532e942550d2cf90ea4ee943d75eb681044bbc4f54f7"}, @@ -2116,7 +2116,7 @@ description = "Extensions to the standard Python datetime module" optional = true python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" groups = ["main"] -markers = "extra == \"all\" or extra == \"saml2\"" +markers = "extra == \"saml2\" or extra == \"all\"" files = [ {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, @@ -2144,7 +2144,7 @@ description = "World timezone definitions, modern and historical" optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"all\" or extra == \"saml2\"" +markers = "extra == \"saml2\" or extra == \"all\"" files = [ {file = "pytz-2022.7.1-py2.py3-none-any.whl", hash = "sha256:78f4f37d8198e0627c5f1143240bb0206b8691d8d7ac6d78fee88b78733f8c4a"}, {file = "pytz-2022.7.1.tar.gz", hash = "sha256:01a0681c4b9684a28304615eba55d1ab31ae00bf68ec157ec3708a8182dbbcd0"}, @@ -2548,7 +2548,7 @@ description = "Python client for Sentry (https://sentry.io)" optional = true python-versions = ">=3.6" groups = ["main"] -markers = "extra == \"all\" or extra == \"sentry\"" +markers = "extra == \"sentry\" or extra == \"all\"" files = [ {file = "sentry_sdk-2.46.0-py2.py3-none-any.whl", hash = "sha256:4eeeb60198074dff8d066ea153fa6f241fef1668c10900ea53a4200abc8da9b1"}, {file = "sentry_sdk-2.46.0.tar.gz", hash = "sha256:91821a23460725734b7741523021601593f35731808afc0bb2ba46c27b8acd91"}, @@ -2723,6 
+2723,22 @@ files = [ {file = "sortedcontainers-2.4.0.tar.gz", hash = "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88"}, ] +[[package]] +name = "sqlglot" +version = "28.0.0" +description = "An easily customizable SQL parser and transpiler" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "sqlglot-28.0.0-py3-none-any.whl", hash = "sha256:ac1778e7fa4812f4f7e5881b260632fc167b00ca4c1226868891fb15467122e4"}, + {file = "sqlglot-28.0.0.tar.gz", hash = "sha256:cc9a651ef4182e61dac58aa955e5fb21845a5865c6a4d7d7b5a7857450285ad4"}, +] + +[package.extras] +dev = ["duckdb (>=0.6)", "maturin (>=1.4,<2.0)", "mypy", "pandas", "pandas-stubs", "pdoc", "pre-commit", "pyperf", "python-dateutil", "pytz", "ruff (==0.7.2)", "types-python-dateutil", "types-pytz", "typing_extensions"] +rs = ["sqlglotrs (==0.7.3)"] + [[package]] name = "systemd-python" version = "235" @@ -2742,7 +2758,7 @@ description = "Tornado IOLoop Backed Concurrent Futures" optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"all\" or extra == \"opentracing\"" +markers = "extra == \"opentracing\" or extra == \"all\"" files = [ {file = "threadloop-1.0.2-py2-none-any.whl", hash = "sha256:5c90dbefab6ffbdba26afb4829d2a9df8275d13ac7dc58dccb0e279992679599"}, {file = "threadloop-1.0.2.tar.gz", hash = "sha256:8b180aac31013de13c2ad5c834819771992d350267bddb854613ae77ef571944"}, @@ -2758,7 +2774,7 @@ description = "Python bindings for the Apache Thrift RPC system" optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"all\" or extra == \"opentracing\"" +markers = "extra == \"opentracing\" or extra == \"all\"" files = [ {file = "thrift-0.16.0.tar.gz", hash = "sha256:2b5b6488fcded21f9d312aa23c9ff6a0195d0f6ae26ddbd5ad9e3e25dfc14408"}, ] @@ -2831,7 +2847,7 @@ description = "Tornado is a Python web framework and asynchronous networking lib optional = true python-versions = ">=3.9" groups = ["main"] -markers = "extra == \"all\" or extra == \"opentracing\"" +markers = "extra == \"opentracing\" or extra == \"all\"" files = [ {file = "tornado-6.5-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:f81067dad2e4443b015368b24e802d0083fecada4f0a4572fdb72fc06e54a9a6"}, {file = "tornado-6.5-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:9ac1cbe1db860b3cbb251e795c701c41d343f06a96049d6274e7c77559117e41"}, @@ -2965,7 +2981,7 @@ description = "non-blocking redis client for python" optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"all\" or extra == \"redis\"" +markers = "extra == \"redis\" or extra == \"all\"" files = [ {file = "txredisapi-1.4.11-py3-none-any.whl", hash = "sha256:ac64d7a9342b58edca13ef267d4fa7637c1aa63f8595e066801c1e8b56b22d0b"}, {file = "txredisapi-1.4.11.tar.gz", hash = "sha256:3eb1af99aefdefb59eb877b1dd08861efad60915e30ad5bf3d5bf6c5cedcdbc6"}, @@ -3211,7 +3227,7 @@ description = "An XML Schema validator and decoder" optional = true python-versions = ">=3.7" groups = ["main"] -markers = "extra == \"all\" or extra == \"saml2\"" +markers = "extra == \"saml2\" or extra == \"all\"" files = [ {file = "xmlschema-2.4.0-py3-none-any.whl", hash = "sha256:dc87be0caaa61f42649899189aab2fd8e0d567f2cf548433ba7b79278d231a4a"}, {file = "xmlschema-2.4.0.tar.gz", hash = "sha256:d74cd0c10866ac609e1ef94a5a69b018ad16e39077bc6393408b40c6babee793"}, @@ -3346,4 +3362,4 @@ url-preview = ["lxml"] [metadata] lock-version = "2.1" python-versions = ">=3.10.0,<4.0.0" -content-hash = 
"4f8d98723236eaf3d13f440dce95ec6cc3c4dc49ba3a0e45bf9cfbb51aca899c" +content-hash = "98b9062f48205a3bcc99b43ae665083d360a15d4a208927fa978df9c36fd5315" diff --git a/pyproject.toml b/pyproject.toml index fabc483b3c..5ee843365d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -370,6 +370,9 @@ towncrier = ">=18.6.0rc1" # Used for checking the Poetry lockfile tomli = ">=1.2.3" +# Used for checking the schema delta files +sqlglot = ">=28.0.0" + [build-system] # The upper bounds here are defensive, intended to prevent situations like diff --git a/scripts-dev/check_schema_delta.py b/scripts-dev/check_schema_delta.py index dd96c904bb..d344083148 100755 --- a/scripts-dev/check_schema_delta.py +++ b/scripts-dev/check_schema_delta.py @@ -9,15 +9,11 @@ import click import git +import sqlglot +import sqlglot.expressions SCHEMA_FILE_REGEX = re.compile(r"^synapse/storage/schema/(.*)/delta/(.*)/(.*)$") -INDEX_CREATION_REGEX = re.compile( - r"CREATE .*INDEX .*ON ([a-z_0-9]+)", flags=re.IGNORECASE -) -INDEX_DELETION_REGEX = re.compile(r"DROP .*INDEX ([a-z_0-9]+)", flags=re.IGNORECASE) -TABLE_CREATION_REGEX = re.compile( - r"CREATE .*TABLE.* ([a-z_0-9]+)\s*\(", flags=re.IGNORECASE -) + # The base branch we want to check against. We use the main development branch # on the assumption that is what we are developing against. @@ -141,6 +137,9 @@ def main(force_colors: bool) -> None: color=force_colors, ) + # Mark this run as not successful, but continue so that we report *all* + # errors. + return_code = 1 else: click.secho( f"All deltas are in the correct folder: {current_schema_version}!", @@ -153,60 +152,90 @@ def main(force_colors: bool) -> None: # and delta files are also numbered in order. changed_delta_files.sort() - # Now check that we're not trying to create or drop indices. If we want to - # do that they should be in background updates. The exception is when we - # create indices on tables we've just created. - created_tables = set() - for delta_file in changed_delta_files: - with open(delta_file) as fd: - delta_lines = fd.readlines() - - for line in delta_lines: - # Strip SQL comments - line = line.split("--", maxsplit=1)[0] - - # Check and track any tables we create - match = TABLE_CREATION_REGEX.search(line) - if match: - table_name = match.group(1) - created_tables.add(table_name) - - # Check for dropping indices, these are always banned - match = INDEX_DELETION_REGEX.search(line) - if match: - clause = match.group() - - click.secho( - f"Found delta with index deletion: '{clause}' in {delta_file}", - fg="red", - bold=True, - color=force_colors, - ) - click.secho( - " ↪ These should be in background updates.", - ) - return_code = 1 - - # Check for index creation, which is only allowed for tables we've - # created. - match = INDEX_CREATION_REGEX.search(line) - if match: - clause = match.group() - table_name = match.group(1) - if table_name not in created_tables: - click.secho( - f"Found delta with index creation for existing table: '{clause}' in {delta_file}", - fg="red", - bold=True, - color=force_colors, - ) - click.secho( - " ↪ These should be in background updates (or the table should be created in the same delta).", - ) - return_code = 1 + success = check_schema_delta(changed_delta_files, force_colors) + if not success: + return_code = 1 click.get_current_context().exit(return_code) +def check_schema_delta(delta_files: list[str], force_colors: bool) -> bool: + """Check that the given schema delta files do not create or drop indices + inappropriately. 
+ + Index creation is only allowed on tables created in the same set of deltas. + + Index deletion is never allowed and should be done in background updates. + + Returns: + True if all checks succeeded, False if at least one failed. + """ + + # The tables created in these deltas + created_tables = set[str]() + + # The indices created in these deltas, each a tuple of (table_name, sql) + created_indices = list[tuple[str, str]]() + + # The indices dropped in these deltas, just the SQL + dropped_indices = list[str]() + + for delta_file in delta_files: + with open(delta_file) as fd: + delta_contents = fd.read() + + # Assume the SQL dialect from the file extension, defaulting to Postgres. + sql_lang = "postgres" + if delta_file.endswith(".sqlite"): + sql_lang = "sqlite" + + statements = sqlglot.parse(delta_contents, read=sql_lang) + + for statement in statements: + if isinstance(statement, sqlglot.expressions.Create): + if statement.kind == "TABLE": + assert isinstance(statement.this, sqlglot.expressions.Schema) + assert isinstance(statement.this.this, sqlglot.expressions.Table) + + table_name = statement.this.this.name + created_tables.add(table_name) + elif statement.kind == "INDEX": + assert isinstance(statement.this, sqlglot.expressions.Index) + + table_name = statement.this.args["table"].name + created_indices.append((table_name, statement.sql())) + elif isinstance(statement, sqlglot.expressions.Drop): + if statement.kind == "INDEX": + dropped_indices.append(statement.sql()) + + success = True + for table_name, clause in created_indices: + if table_name not in created_tables: + click.secho( + f"Found delta with index creation for existing table: '{clause}'", + fg="red", + bold=True, + color=force_colors, + ) + click.secho( + " ↪ These should be in background updates (or the table should be created in the same delta).", + ) + success = False + + for clause in dropped_indices: + click.secho( + f"Found delta with index deletion: '{clause}'", + fg="red", + bold=True, + color=force_colors, + ) + click.secho( + " ↪ These should be in background updates.", + ) + success = False + + return success + + if __name__ == "__main__": main() From 778897a4e9a43522a1698a205f68feb3c601ee2d Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 28 Nov 2025 17:01:15 +0000 Subject: [PATCH 20/47] Add a unit test that ensures that deleting a device purges the associated refresh token (#19230) --- changelog.d/19230.misc | 1 + tests/handlers/test_device.py | 27 +++++++++++++++++++++++++++ 2 files changed, 28 insertions(+) create mode 100644 changelog.d/19230.misc diff --git a/changelog.d/19230.misc b/changelog.d/19230.misc new file mode 100644 index 0000000000..06704db25b --- /dev/null +++ b/changelog.d/19230.misc @@ -0,0 +1 @@ +Add a unit test for ensuring associated refresh tokens are erased when a device is deleted.
\ No newline at end of file diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index acd37a1c71..fa6bb4970b 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -449,6 +449,33 @@ def test_on_federation_query_user_devices_appservice(self) -> None: ], ) + def test_delete_device_removes_refresh_tokens(self) -> None: + """Deleting a device should also purge any refresh tokens for it.""" + self._record_users() + + self.get_success( + self.store.add_refresh_token_to_user( + user_id=user1, + token="refresh_token", + device_id="abc", + expiry_ts=None, + ultimate_session_expiry_ts=None, + ) + ) + + self.get_success(self.handler.delete_devices(user1, ["abc"])) + + remaining_refresh_token = self.get_success( + self.store.db_pool.simple_select_one( + table="refresh_tokens", + keyvalues={"user_id": user1, "device_id": "abc"}, + retcols=("id",), + desc="get_refresh_token_for_device", + allow_none=True, + ) + ) + self.assertIsNone(remaining_refresh_token) + class DehydrationTestCase(unittest.HomeserverTestCase): servlets = [ From 034c5e625c0ae63033a49a5288ee0b65b53dc0f4 Mon Sep 17 00:00:00 2001 From: Andrew Morgan <1342360+anoadragon453@users.noreply.github.com> Date: Fri, 28 Nov 2025 17:41:56 +0000 Subject: [PATCH 21/47] Move call invite filtering logic to `filter_events_for_client` (#17782) --- changelog.d/17782.misc | 1 + synapse/handlers/sliding_sync/__init__.py | 2 -- synapse/handlers/sync.py | 12 +------ synapse/visibility.py | 38 +++++++++++++++++++++-- 4 files changed, 38 insertions(+), 15 deletions(-) create mode 100644 changelog.d/17782.misc diff --git a/changelog.d/17782.misc b/changelog.d/17782.misc new file mode 100644 index 0000000000..d7321470d0 --- /dev/null +++ b/changelog.d/17782.misc @@ -0,0 +1 @@ +Improve event filtering for Simplified Sliding Sync. 
\ No newline at end of file diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index 6a5d5c7b3c..68135e9cd3 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -761,8 +761,6 @@ async def get_room_sync_data( != Membership.JOIN, filter_send_to_client=True, ) - # TODO: Filter out `EventTypes.CallInvite` in public rooms, - # see https://github.com/element-hq/synapse/issues/17359 # TODO: Handle timeline gaps (`get_timeline_gaps()`) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index b534e24698..60d8827425 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -36,7 +36,6 @@ Direction, EventContentFields, EventTypes, - JoinRules, Membership, ) from synapse.api.filtering import FilterCollection @@ -790,22 +789,13 @@ async def _load_filtered_recents( ) ) - filtered_recents = await filter_events_for_client( + loaded_recents = await filter_events_for_client( self._storage_controllers, sync_config.user.to_string(), loaded_recents, always_include_ids=current_state_ids, ) - loaded_recents = [] - for event in filtered_recents: - if event.type == EventTypes.CallInvite: - room_info = await self.store.get_room_with_stats(event.room_id) - assert room_info is not None - if room_info.join_rules == JoinRules.PUBLIC: - continue - loaded_recents.append(event) - log_kv({"loaded_recents_after_client_filtering": len(loaded_recents)}) loaded_recents.extend(recents) diff --git a/synapse/visibility.py b/synapse/visibility.py index 16b39e6200..bfa0db5670 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -33,6 +33,7 @@ EventTypes, EventUnsignedContentFields, HistoryVisibility, + JoinRules, Membership, ) from synapse.events import EventBase @@ -111,7 +112,17 @@ async def filter_events_for_client( # happen within the function. events_before_filtering = events.copy() # Default case is to *exclude* soft-failed events - events = [e for e in events if not e.internal_metadata.is_soft_failed()] + events = [] + found_call_invite = False + for event in events_before_filtering: + if event.internal_metadata.is_soft_failed(): + continue + + if event.type == EventTypes.CallInvite and not event.is_state(): + found_call_invite = True + + events.append(event) + client_config = await storage.main.get_admin_client_config_for_user(user_id) if filter_send_to_client and await storage.main.is_server_admin(user_id): if client_config.return_soft_failed_events: @@ -139,7 +150,11 @@ async def filter_events_for_client( [event.event_id for event in events], ) - types = (_HISTORY_VIS_KEY, (EventTypes.Member, user_id)) + types = [_HISTORY_VIS_KEY, (EventTypes.Member, user_id)] + if found_call_invite: + # We need to fetch the room's join rules state to determine + # whether to allow call invites in public rooms. + types.append((EventTypes.JoinRules, "")) # we exclude outliers at this point, and then handle them separately later event_id_to_state = await storage.state.get_state_for_events( @@ -178,6 +193,25 @@ def allowed(event: EventBase) -> EventBase | None: if filtered is None: return None + # Filter out call invites in public rooms, as this would potentially + # ring a lot of users. + if event.type == EventTypes.CallInvite and not event.is_state(): + # `state_after_event` should only be None if the event is an outlier, + # and earlier code should filter out outliers entirely. 
+ # + # In addition, we only create outliers locally for out-of-band + # invite rejections, invites received over federation, or state + # events needed to authorise other events. None of this applies to + # call invites. + assert state_after_event is not None + + room_join_rules = state_after_event.get((EventTypes.JoinRules, "")) + if ( + room_join_rules is not None + and room_join_rules.content.get("join_rule") == JoinRules.PUBLIC + ): + return None + # Annotate the event with the user's membership after the event. # # Normally we just look in `state_after_event`, but if the event is an outlier From d143276bda3b14cf1f9a05dfcb26a2380d472847 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 1 Dec 2025 13:34:21 +0000 Subject: [PATCH 22/47] Fix rust source check when using .egg-info (#19251) We have checks to try and catch the case where Synapse is being run from a source directory, but the compiled Rust code is out-of-date. This commonly happens when Synapse is updated without running `poetry install` (or equivalent). These checks did not correctly handle `.egg-info` installs, and so were not run. Currently, the `.egg-info` directory is created automatically by poetry (due to using setuptools to build Rust). --- changelog.d/19251.misc | 1 + synapse/util/rust.py | 33 ++++++++++++++++++++++++++++++++- 2 files changed, 33 insertions(+), 1 deletion(-) create mode 100644 changelog.d/19251.misc diff --git a/changelog.d/19251.misc b/changelog.d/19251.misc new file mode 100644 index 0000000000..9d0501c3d4 --- /dev/null +++ b/changelog.d/19251.misc @@ -0,0 +1 @@ +Fix check of the Rust compiled code being outdated when using source checkout and `.egg-info`. diff --git a/synapse/util/rust.py b/synapse/util/rust.py index 63b53b917f..d1e1a259e4 100644 --- a/synapse/util/rust.py +++ b/synapse/util/rust.py @@ -111,7 +111,38 @@ def get_synapse_source_directory() -> str | None: # c.f. https://packaging.python.org/en/latest/specifications/direct-url/ direct_url_json = package.read_text("direct_url.json") if direct_url_json is None: - return None + # No direct url metadata. Check if this is an egg-info install. + # + # An egg-info install is when there exists a `matrix_synapse.egg-info` + # directory alongside the source tree, containing the package metadata. + # This allows discovering packages in the current directory, without + # installing them properly to the environment wide `site-packages` + # directory. + # + # When searching for a package, Python will look for `.egg-info` files + # in the current working directory before looking in `site-packages`. + # This means that when running Synapse (or the tests) from the source + # tree Python will pick up the synapse package from the egg-info + # install. + # + # Poetry will create an egg-info install when running `poetry install`. + # + # The combination of the above means that it is very common for + # developers (e.g. running tests) to encounter egg-info installs. + # + # In this case we can find the source tree by looking for the + # `matrix_synapse.egg-info/PKG-INFO` file, and going up two directories + # from there. + + metadata_path = package.locate_file("matrix_synapse.egg-info/PKG-INFO") + if not os.path.exists(str(metadata_path)): + # Not an egg-info install. + return None + + # `metadata_path` points to the egg-info/PKG-INFO file, so go up two + # directories to get the root of the source tree. + source_dir = metadata_path.parent.parent + return os.fspath(source_dir) # c.f. 
https://packaging.python.org/en/latest/specifications/direct-url/ for # the format From 1bddd25a85d82b2ef4a2a42f6ecd476108d7dd96 Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Mon, 1 Dec 2025 13:55:06 +0000 Subject: [PATCH 23/47] Port `Clock` functions to use `Duration` class (#19229) This changes the arguments in clock functions to be `Duration` and converts call sites and constants into `Duration`. There are still some more functions around that should be converted (e.g. `timeout_deferred`), but we leave that to another PR. We also change `.as_secs()` to return a float, as the rounding broke things subtly. The only reason to keep it (it's the same as `timedelta.total_seconds()`) is for symmetry with `as_millis()`. Follows on from https://github.com/element-hq/synapse/pull/19223 --- changelog.d/19229.misc | 1 + rust/src/duration.rs | 56 +++++++++++++ rust/src/lib.rs | 1 + rust/src/rendezvous/mod.rs | 5 +- synapse/api/ratelimiting.py | 3 +- synapse/app/phone_stats_home.py | 12 +-- synapse/appservice/scheduler.py | 3 +- synapse/federation/federation_client.py | 3 +- synapse/federation/federation_server.py | 7 +- synapse/federation/send_queue.py | 3 +- synapse/federation/sender/__init__.py | 13 +-- synapse/handlers/account_validity.py | 3 +- synapse/handlers/auth.py | 3 +- synapse/handlers/delayed_events.py | 9 ++- synapse/handlers/device.py | 13 ++- synapse/handlers/e2e_keys.py | 3 +- synapse/handlers/federation.py | 5 +- synapse/handlers/federation_event.py | 3 +- synapse/handlers/message.py | 18 ++--- synapse/handlers/pagination.py | 3 +- synapse/handlers/presence.py | 17 ++-- synapse/handlers/profile.py | 3 +- synapse/handlers/room.py | 3 +- synapse/handlers/room_member.py | 9 ++- synapse/handlers/stats.py | 3 +- synapse/handlers/typing.py | 26 +++--- synapse/handlers/user_directory.py | 19 +++-- synapse/handlers/worker_lock.py | 4 +- synapse/http/client.py | 5 +- .../http/federation/well_known_resolver.py | 3 +- synapse/http/server.py | 3 +- synapse/media/media_repository.py | 9 ++- synapse/media/media_storage.py | 5 +- synapse/media/url_previewer.py | 5 +- synapse/metrics/common_usage_metrics.py | 3 +- synapse/module_api/__init__.py | 8 +- synapse/notifier.py | 11 ++- synapse/push/emailpusher.py | 3 +- synapse/push/httppusher.py | 5 +- synapse/replication/http/_base.py | 5 +- synapse/replication/tcp/client.py | 3 +- synapse/replication/tcp/protocol.py | 5 +- synapse/replication/tcp/redis.py | 3 +- synapse/replication/tcp/resource.py | 3 +- synapse/rest/client/account.py | 13 ++- synapse/rest/client/register.py | 9 ++- synapse/rest/client/transactions.py | 2 +- synapse/state/__init__.py | 3 +- synapse/state/v2.py | 15 ++-- synapse/storage/background_updates.py | 3 +- synapse/storage/controllers/purge_events.py | 3 +- synapse/storage/database.py | 7 +- synapse/storage/databases/main/cache.py | 13 +-- .../storage/databases/main/censor_events.py | 3 +- synapse/storage/databases/main/client_ips.py | 5 +- synapse/storage/databases/main/deviceinbox.py | 4 +- synapse/storage/databases/main/devices.py | 3 +- .../databases/main/event_federation.py | 7 +- .../databases/main/event_push_actions.py | 11 ++- .../storage/databases/main/events_worker.py | 3 +- synapse/storage/databases/main/lock.py | 11 +-- synapse/storage/databases/main/metrics.py | 3 +- .../storage/databases/main/registration.py | 9 +-- synapse/storage/databases/main/roommember.py | 5 +- synapse/storage/databases/main/session.py | 3 +- .../storage/databases/main/sliding_sync.py | 2 +- .../storage/databases/main/transactions.py |
3 +- synapse/util/async_helpers.py | 7 +- synapse/util/batching_queue.py | 3 +- synapse/util/caches/expiringcache.py | 3 +- synapse/util/caches/lrucache.py | 7 +- synapse/util/caches/response_cache.py | 7 +- synapse/util/clock.py | 33 ++++---- synapse/util/duration.py | 81 ++++++++++++++++++- synapse/util/ratelimitutils.py | 7 +- synapse/util/task_scheduler.py | 15 ++-- synmark/suites/logging.py | 3 +- .../federation/transport/server/test__base.py | 5 +- tests/handlers/test_device.py | 2 +- tests/handlers/test_typing.py | 2 +- tests/http/test_servlet.py | 5 +- tests/logging/test_opentracing.py | 9 ++- tests/replication/http/test__base.py | 5 +- tests/rest/admin/test_background_updates.py | 3 +- tests/rest/admin/test_room.py | 7 +- tests/rest/client/test_transactions.py | 3 +- tests/server_notices/__init__.py | 3 +- tests/state/test_v2.py | 3 +- tests/state/test_v21.py | 3 +- tests/storage/databases/main/test_lock.py | 4 +- tests/storage/test_background_update.py | 5 +- tests/test_server.py | 9 ++- tests/util/caches/test_response_cache.py | 5 +- tests/util/test_logcontext.py | 55 +++++++------ tests/util/test_task_scheduler.py | 7 +- 95 files changed, 511 insertions(+), 260 deletions(-) create mode 100644 changelog.d/19229.misc create mode 100644 rust/src/duration.rs diff --git a/changelog.d/19229.misc b/changelog.d/19229.misc new file mode 100644 index 0000000000..8caebead72 --- /dev/null +++ b/changelog.d/19229.misc @@ -0,0 +1 @@ +Move towards using a dedicated `Duration` type. diff --git a/rust/src/duration.rs b/rust/src/duration.rs new file mode 100644 index 0000000000..a3dbe919b2 --- /dev/null +++ b/rust/src/duration.rs @@ -0,0 +1,56 @@ +/* + * This file is licensed under the Affero General Public License (AGPL) version 3. + * + * Copyright (C) 2025 Element Creations, Ltd + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * See the GNU Affero General Public License for more details: + * <https://www.gnu.org/licenses/agpl-3.0.html>. + */ + +use once_cell::sync::OnceCell; +use pyo3::{ + types::{IntoPyDict, PyAnyMethods}, + Bound, BoundObject, IntoPyObject, Py, PyAny, PyErr, PyResult, Python, +}; + +/// A reference to the `synapse.util.duration` module. +static DURATION: OnceCell<Py<PyAny>> = OnceCell::new(); + +/// Access to the `synapse.util.duration` module. +fn duration_module(py: Python<'_>) -> PyResult<&Bound<'_, PyAny>> { + Ok(DURATION + .get_or_try_init(|| py.import("synapse.util.duration").map(Into::into))? + .bind(py)) +} + +/// Mirrors the `synapse.util.duration.Duration` Python class. +pub struct SynapseDuration { + microseconds: u64, +} + +impl SynapseDuration { + /// For now we only need to create durations from milliseconds.
+ pub fn from_milliseconds(milliseconds: u64) -> Self { + Self { + microseconds: milliseconds * 1_000, + } + } +} + +impl<'py> IntoPyObject<'py> for &SynapseDuration { + type Target = PyAny; + type Output = Bound<'py, Self::Target>; + type Error = PyErr; + + fn into_pyobject(self, py: Python<'py>) -> Result<Self::Output, Self::Error> { + let duration_module = duration_module(py)?; + let kwargs = [("microseconds", self.microseconds)].into_py_dict(py)?; + let duration_instance = duration_module.call_method("Duration", (), Some(&kwargs))?; + Ok(duration_instance.into_bound()) + } +} diff --git a/rust/src/lib.rs b/rust/src/lib.rs index 6522148fa1..fe880af2ea 100644 --- a/rust/src/lib.rs +++ b/rust/src/lib.rs @@ -5,6 +5,7 @@ use pyo3::prelude::*; use pyo3_log::ResetHandle; pub mod acl; +pub mod duration; pub mod errors; pub mod events; pub mod http; diff --git a/rust/src/rendezvous/mod.rs b/rust/src/rendezvous/mod.rs index 848b5035bb..9a6da9fcc3 100644 --- a/rust/src/rendezvous/mod.rs +++ b/rust/src/rendezvous/mod.rs @@ -35,6 +35,7 @@ use ulid::Ulid; use self::session::Session; use crate::{ + duration::SynapseDuration, errors::{NotFoundError, SynapseError}, http::{http_request_from_twisted, http_response_to_twisted, HeaderMapPyExt}, UnwrapInfallible, @@ -132,6 +133,8 @@ impl RendezvousHandler { .unwrap_infallible() .unbind(); + let eviction_duration = SynapseDuration::from_milliseconds(eviction_interval); + // Construct a Python object so that we can get a reference to the // evict method and schedule it to run. let self_ = Py::new( @@ -149,7 +152,7 @@ impl RendezvousHandler { let evict = self_.getattr(py, "_evict")?; homeserver.call_method0("get_clock")?.call_method( "looping_call", - (evict, eviction_interval), + (evict, &eviction_duration), None, )?; diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py index df884d47d7..d6cc3d26b5 100644 --- a/synapse/api/ratelimiting.py +++ b/synapse/api/ratelimiting.py @@ -27,6 +27,7 @@ from synapse.storage.databases.main import DataStore from synapse.types import Requester from synapse.util.clock import Clock +from synapse.util.duration import Duration from synapse.util.wheel_timer import WheelTimer if TYPE_CHECKING: @@ -100,7 +101,7 @@ def __init__( # and doesn't affect correctness.
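Before going further into the Python hunks, it helps to pin down the `Duration` surface this commit leans on. The sketch below is reconstructed purely from the call sites in this patch (unit keyword arguments, including the `microseconds` kwarg the Rust shim above passes; `as_millis()`; the float-returning `as_secs()`; addition; and integer multiplication); the real class lives in `synapse/util/duration.py` and will differ in detail:

class Duration:
    """Sketch only: the subset of synapse.util.duration.Duration used in this patch."""

    def __init__(
        self,
        *,
        days: int = 0,
        hours: int = 0,
        minutes: int = 0,
        seconds: float = 0,
        milliseconds: float = 0,
        microseconds: int = 0,
    ) -> None:
        # Normalise everything to integer microseconds internally.
        self._us = int(
            microseconds
            + milliseconds * 1_000
            + seconds * 1_000_000
            + minutes * 60_000_000
            + hours * 3_600_000_000
            + days * 86_400_000_000
        )

    def as_millis(self) -> int:
        return self._us // 1_000

    def as_secs(self) -> float:
        # A float on purpose: rounding to whole seconds broke callers subtly.
        return self._us / 1_000_000

    def __add__(self, other: "Duration") -> "Duration":
        return Duration(microseconds=self._us + other._us)

    def __mul__(self, factor: int) -> "Duration":
        return Duration(microseconds=self._us * factor)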
self._timer: WheelTimer[Hashable] = WheelTimer() - self.clock.looping_call(self._prune_message_counts, 15 * 1000) + self.clock.looping_call(self._prune_message_counts, Duration(seconds=15)) def _get_key(self, requester: Requester | None, key: Hashable | None) -> Hashable: """Use the requester's MXID as a fallback key if no key is provided.""" diff --git a/synapse/app/phone_stats_home.py b/synapse/app/phone_stats_home.py index d278e30850..7b4bf25c28 100644 --- a/synapse/app/phone_stats_home.py +++ b/synapse/app/phone_stats_home.py @@ -218,13 +218,13 @@ def performance_stats_init() -> None: # table will decrease clock.looping_call( hs.get_datastores().main.generate_user_daily_visits, - Duration(minutes=5).as_millis(), + Duration(minutes=5), ) # monthly active user limiting functionality clock.looping_call( hs.get_datastores().main.reap_monthly_active_users, - Duration(hours=1).as_millis(), + Duration(hours=1), ) hs.get_datastores().main.reap_monthly_active_users() @@ -263,14 +263,14 @@ async def _generate_monthly_active_users() -> None: if hs.config.server.limit_usage_by_mau or hs.config.server.mau_stats_only: generate_monthly_active_users() - clock.looping_call(generate_monthly_active_users, 5 * 60 * 1000) + clock.looping_call(generate_monthly_active_users, Duration(minutes=5)) # End of monthly active user settings if hs.config.metrics.report_stats: logger.info("Scheduling stats reporting for 3 hour intervals") clock.looping_call( phone_stats_home, - PHONE_HOME_INTERVAL.as_millis(), + PHONE_HOME_INTERVAL, hs, stats, ) @@ -278,14 +278,14 @@ async def _generate_monthly_active_users() -> None: # We need to defer this init for the cases that we daemonize # otherwise the process ID we get is that of the non-daemon process clock.call_later( - 0, + Duration(seconds=0), performance_stats_init, ) # We wait 5 minutes to send the first set of stats as the server can # be quite busy the first few minutes clock.call_later( - INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME.as_secs(), + INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME, phone_stats_home, hs, stats, diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index 250f84d644..befb4ae44b 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -77,6 +77,7 @@ from synapse.storage.databases.main import DataStore from synapse.types import DeviceListUpdates, JsonMapping from synapse.util.clock import Clock, DelayedCallWrapper +from synapse.util.duration import Duration if TYPE_CHECKING: from synapse.server import HomeServer @@ -504,7 +505,7 @@ def __init__( self.scheduled_recovery: DelayedCallWrapper | None = None def recover(self) -> None: - delay = 2**self.backoff_counter + delay = Duration(seconds=2**self.backoff_counter) logger.info("Scheduling retries on %s in %fs", self.service.id, delay.as_secs()) self.scheduled_recovery = self.clock.call_later( delay, diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 4110a90ed6..ba738ad65e 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -75,6 +75,7 @@ from synapse.types.handlers.policy_server import RECOMMENDATION_OK, RECOMMENDATION_SPAM from synapse.util.async_helpers import concurrently_execute from synapse.util.caches.expiringcache import ExpiringCache +from synapse.util.duration import Duration from synapse.util.retryutils import NotRetryingDestination if TYPE_CHECKING: @@ -132,7 +133,7 @@ def __init__(self, hs: "HomeServer"): super().__init__(hs) self.pdu_destination_tried:
dict[str, dict[str, int]] = {} - self._clock.looping_call(self._clear_tried_cache, 60 * 1000) + self._clock.looping_call(self._clear_tried_cache, Duration(minutes=1)) self.state = hs.get_state_handler() self.transport_layer = hs.get_federation_transport_client() diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 34abac1cec..b909f1e595 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -89,6 +89,7 @@ from synapse.util import unwrapFirstError from synapse.util.async_helpers import Linearizer, concurrently_execute, gather_results from synapse.util.caches.response_cache import ResponseCache +from synapse.util.duration import Duration from synapse.util.stringutils import parse_server_name if TYPE_CHECKING: @@ -226,7 +227,7 @@ async def _handle_old_staged_events(self) -> None: ) # We pause a bit so that we don't start handling all rooms at once. - await self._clock.sleep(random.uniform(0, 0.1)) + await self._clock.sleep(Duration(seconds=random.uniform(0, 0.1))) async def on_backfill_request( self, origin: str, room_id: str, versions: list[str], limit: int @@ -301,7 +302,9 @@ async def on_incoming_transaction( # Start a periodic check for old staged events. This is to handle # the case where locks time out, e.g. if another process gets killed # without dropping its locks. - self._clock.looping_call(self._handle_old_staged_events, 60 * 1000) + self._clock.looping_call( + self._handle_old_staged_events, Duration(minutes=1) + ) # keep this as early as possible to make the calculated origin ts as # accurate as possible. diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index cf70e10a58..4a6d155217 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -53,6 +53,7 @@ from synapse.metrics import SERVER_NAME_LABEL, LaterGauge from synapse.replication.tcp.streams.federation import FederationStream from synapse.types import JsonDict, ReadReceipt, RoomStreamToken, StrCollection +from synapse.util.duration import Duration from synapse.util.metrics import Measure from .units import Edu @@ -137,7 +138,7 @@ def register(queue_name: QueueNames, queue: Sized) -> None: assert isinstance(queue, Sized) register(queue_name, queue=queue) - self.clock.looping_call(self._clear_queue, 30 * 1000) + self.clock.looping_call(self._clear_queue, Duration(seconds=30)) def shutdown(self) -> None: """Stops this federation sender instance from sending further transactions.""" diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index 0bd97c25df..f7240c2f7f 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -174,6 +174,7 @@ get_domain_from_id, ) from synapse.util.clock import Clock +from synapse.util.duration import Duration from synapse.util.metrics import Measure from synapse.util.retryutils import filter_destinations_by_retry_limiter @@ -218,12 +219,12 @@ # Please note that rate limiting still applies, so while the loop is # executed every X seconds the destinations may not be woken up because # they are being rate limited following previous attempt failures. -WAKEUP_RETRY_PERIOD_SEC = 60 +WAKEUP_RETRY_PERIOD = Duration(minutes=1) -# Time (in s) to wait in between waking up each destination, i.e. one destination +# Time to wait in between waking up each destination, i.e. 
one destination # will be woken up every <WAKEUP_INTERVAL_BETWEEN_DESTINATIONS_SEC> seconds until we have woken every destination # that has outstanding catch-up. -WAKEUP_INTERVAL_BETWEEN_DESTINATIONS_SEC = 5 +WAKEUP_INTERVAL_BETWEEN_DESTINATIONS = Duration(seconds=5) class AbstractFederationSender(metaclass=abc.ABCMeta): @@ -379,7 +380,7 @@ async def _handle(self) -> None: queue.attempt_new_transaction() - await self.clock.sleep(current_sleep_seconds) + await self.clock.sleep(Duration(seconds=current_sleep_seconds)) if not self.queue: break @@ -468,7 +469,7 @@ def __init__(self, hs: "HomeServer"): # Regularly wake up destinations that have outstanding PDUs to be caught up self.clock.looping_call_now( self.hs.run_as_background_process, - WAKEUP_RETRY_PERIOD_SEC * 1000.0, + WAKEUP_RETRY_PERIOD, "wake_destinations_needing_catchup", self._wake_destinations_needing_catchup, ) @@ -1161,4 +1162,4 @@ async def _wake_destinations_needing_catchup(self) -> None: last_processed, ) self.wake_destination(destination) - await self.clock.sleep(WAKEUP_INTERVAL_BETWEEN_DESTINATIONS_SEC) + await self.clock.sleep(WAKEUP_INTERVAL_BETWEEN_DESTINATIONS) diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py index bc50efa1a7..ba40d5763e 100644 --- a/synapse/handlers/account_validity.py +++ b/synapse/handlers/account_validity.py @@ -28,6 +28,7 @@ from synapse.types import UserID from synapse.util import stringutils from synapse.util.async_helpers import delay_cancellation +from synapse.util.duration import Duration if TYPE_CHECKING: from synapse.server import HomeServer @@ -73,7 +74,7 @@ def __init__(self, hs: "HomeServer"): # Check the renewal emails to send and send them every 30min. if hs.config.worker.run_background_tasks: - self.clock.looping_call(self._send_renewal_emails, 30 * 60 * 1000) + self.clock.looping_call(self._send_renewal_emails, Duration(minutes=30)) async def is_user_expired(self, user_id: str) -> bool: """Checks if a user has expired against third-party modules. diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index d9355d33da..b5c0cbdba2 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -74,6 +74,7 @@ from synapse.types import JsonDict, Requester, StrCollection, UserID from synapse.util import stringutils as stringutils from synapse.util.async_helpers import delay_cancellation, maybe_awaitable +from synapse.util.duration import Duration from synapse.util.msisdn import phone_number_to_msisdn from synapse.util.stringutils import base62_encode from synapse.util.threepids import canonicalise_email @@ -242,7 +243,7 @@ def __init__(self, hs: "HomeServer"): if hs.config.worker.run_background_tasks: self._clock.looping_call( run_as_background_process, - 5 * 60 * 1000, + Duration(minutes=5), "expire_old_sessions", self.server_name, self._expire_old_sessions, diff --git a/synapse/handlers/delayed_events.py b/synapse/handlers/delayed_events.py index 8817b65316..cb0a4dd6b2 100644 --- a/synapse/handlers/delayed_events.py +++ b/synapse/handlers/delayed_events.py @@ -42,6 +42,7 @@ UserID, create_requester, ) +from synapse.util.duration import Duration from synapse.util.events import generate_fake_event_id from synapse.util.metrics import Measure from synapse.util.sentinel import Sentinel @@ -92,7 +93,7 @@ async def _schedule_db_events() -> None: # Kick off again (without blocking) to catch any missed notifications # that may have fired before the callback was added.
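The handler hunks here and below all funnel through the same three `Clock` entry points, which now accept a `Duration` instead of raw seconds or milliseconds. Roughly, and only as a sketch assuming the `Duration` sketch above (the real `synapse.util.clock.Clock` also manages logcontexts and wraps the returned delayed calls), the conversion to Twisted's float-seconds world happens once, at the edge:

from twisted.internet import defer, task

class Clock:
    # Sketch only: unit conversion at the Twisted boundary.
    def __init__(self, reactor):
        self._reactor = reactor

    def call_later(self, delay: "Duration", callback, *args, **kwargs):
        # Twisted's IReactorTime.callLater speaks float seconds.
        return self._reactor.callLater(delay.as_secs(), callback, *args, **kwargs)

    def looping_call(self, f, interval: "Duration", *args, **kwargs):
        # LoopingCall.start() also takes float seconds.
        call = task.LoopingCall(f, *args, **kwargs)
        call.clock = self._reactor
        call.start(interval.as_secs(), now=False)
        return call

    async def sleep(self, duration: "Duration") -> None:
        d: "defer.Deferred[None]" = defer.Deferred()
        self._reactor.callLater(duration.as_secs(), d.callback, None)
        await d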
self._clock.call_later( - 0, + Duration(seconds=0), self.notify_new_event, ) @@ -508,17 +509,17 @@ def _schedule_next_at_or_none(self, next_send_ts: Timestamp | None) -> None: def _schedule_next_at(self, next_send_ts: Timestamp) -> None: delay = next_send_ts - self._get_current_ts() - delay_sec = delay / 1000 if delay > 0 else 0 + delay_duration = Duration(milliseconds=max(delay, 0)) if self._next_delayed_event_call is None: self._next_delayed_event_call = self._clock.call_later( - delay_sec, + delay_duration, self.hs.run_as_background_process, "_send_on_timeout", self._send_on_timeout, ) else: - self._next_delayed_event_call.reset(delay_sec) + self._next_delayed_event_call.reset(delay_duration.as_secs()) async def get_all_for_user(self, requester: Requester) -> list[JsonDict]: """Return all pending delayed events requested by the given user.""" diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 3f1a5fe6d6..1b7de57ab9 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -71,6 +71,7 @@ from synapse.util.async_helpers import Linearizer from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.cancellation import cancellable +from synapse.util.duration import Duration from synapse.util.metrics import measure_func from synapse.util.retryutils import ( NotRetryingDestination, @@ -85,7 +86,7 @@ DELETE_DEVICE_MSGS_TASK_NAME = "delete_device_messages" MAX_DEVICE_DISPLAY_NAME_LEN = 100 -DELETE_STALE_DEVICES_INTERVAL_MS = 24 * 60 * 60 * 1000 +DELETE_STALE_DEVICES_INTERVAL = Duration(days=1) def _check_device_name_length(name: str | None) -> None: @@ -186,7 +187,7 @@ def __init__(self, hs: "HomeServer"): ): self.clock.looping_call( self.hs.run_as_background_process, - DELETE_STALE_DEVICES_INTERVAL_MS, + DELETE_STALE_DEVICES_INTERVAL, desc="delete_stale_devices", func=self._delete_stale_devices, ) @@ -915,7 +916,7 @@ async def handle_new_device_update(self) -> None: ) DEVICE_MSGS_DELETE_BATCH_LIMIT = 1000 - DEVICE_MSGS_DELETE_SLEEP_MS = 100 + DEVICE_MSGS_DELETE_SLEEP = Duration(milliseconds=100) async def _delete_device_messages( self, @@ -941,9 +942,7 @@ async def _delete_device_messages( if from_stream_id is None: return TaskStatus.COMPLETE, None, None - await self.clock.sleep( - DeviceWriterHandler.DEVICE_MSGS_DELETE_SLEEP_MS / 1000.0 - ) + await self.clock.sleep(DeviceWriterHandler.DEVICE_MSGS_DELETE_SLEEP) class DeviceWriterHandler(DeviceHandler): @@ -1469,7 +1468,7 @@ def __init__(self, hs: "HomeServer", device_handler: DeviceWriterHandler): self._resync_retry_lock = Lock() self.clock.looping_call( self.hs.run_as_background_process, - 30 * 1000, + Duration(seconds=30), func=self._maybe_retry_device_resync, desc="_maybe_retry_device_resync", ) diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 41d27d47da..64f705a3da 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -46,6 +46,7 @@ ) from synapse.util.async_helpers import Linearizer, concurrently_execute from synapse.util.cancellation import cancellable +from synapse.util.duration import Duration from synapse.util.json import json_decoder from synapse.util.retryutils import ( NotRetryingDestination, @@ -1634,7 +1635,7 @@ async def _delete_old_one_time_keys_task( # matrix.org has about 15M users in the e2e_one_time_keys_json table # (comprising 20M devices). We want this to take about a week, so we need # to do about one batch of 100 users every 4 seconds. 
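One asymmetry in the delayed-events hunk above is worth calling out: scheduling takes a `Duration`, but resetting a pending timer goes through Twisted's `IDelayedCall.reset`, which still expects float seconds, hence the explicit `delay_duration.as_secs()`. The same schedule-or-reset pattern, as a hypothetical helper (not part of the patch):

def schedule_or_reset(clock, pending_call, delay: "Duration", callback):
    # Hypothetical helper: reuse a pending timer if there is one, otherwise
    # schedule a fresh one.
    if pending_call is None:
        return clock.call_later(delay, callback)
    # IDelayedCall.reset() takes float seconds, not a Duration.
    pending_call.reset(delay.as_secs())
    return pending_call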
- await self.clock.sleep(4) + await self.clock.sleep(Duration(seconds=4)) def _check_cross_signing_key( diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 1bba3fc758..7808f8928b 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -72,6 +72,7 @@ from synapse.types import JsonDict, StrCollection, get_domain_from_id from synapse.types.state import StateFilter from synapse.util.async_helpers import Linearizer +from synapse.util.duration import Duration from synapse.util.retryutils import NotRetryingDestination from synapse.visibility import filter_events_for_server @@ -1972,7 +1973,9 @@ async def _sync_partial_state_room( logger.warning( "%s; waiting for %d ms...", e, e.retry_after_ms ) - await self.clock.sleep(e.retry_after_ms / 1000) + await self.clock.sleep( + Duration(milliseconds=e.retry_after_ms) + ) # Success, no need to try the rest of the destinations. break diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 01e98f60ad..e314180e12 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -91,6 +91,7 @@ ) from synapse.types.state import StateFilter from synapse.util.async_helpers import Linearizer, concurrently_execute +from synapse.util.duration import Duration from synapse.util.iterutils import batch_iter, partition, sorted_topologically from synapse.util.retryutils import NotRetryingDestination from synapse.util.stringutils import shortstr @@ -1802,7 +1803,7 @@ async def prep(event: EventBase) -> None: # the reactor. For large rooms let's yield to the reactor # occasionally to ensure we don't block other work. if (i + 1) % 1000 == 0: - await self._clock.sleep(0) + await self._clock.sleep(Duration(seconds=0)) # Also persist the new event in batches for similar reasons as above. for batch in batch_iter(events_and_contexts_to_persist, 1000): diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index 7679303a36..bac4bd9361 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -83,6 +83,7 @@ from synapse.util import log_failure, unwrapFirstError from synapse.util.async_helpers import Linearizer, gather_results from synapse.util.caches.expiringcache import ExpiringCache +from synapse.util.duration import Duration from synapse.util.json import json_decoder, json_encoder from synapse.util.metrics import measure_func from synapse.visibility import get_effective_room_visibility_from_state @@ -433,14 +434,11 @@ def _schedule_expiry_for_event(self, event_id: str, expiry_ts: int) -> None: # Figure out how many seconds we need to wait before expiring the event. now_ms = self.clock.time_msec() - delay = (expiry_ts - now_ms) / 1000 + delay = Duration(milliseconds=max(expiry_ts - now_ms, 0)) - # callLater doesn't support negative delays, so trim the delay to 0 if we're - # in that case. 
- if delay < 0: - delay = 0 - - logger.info("Scheduling expiry for event %s in %.3fs", event_id, delay) + logger.info( + "Scheduling expiry for event %s in %.3fs", event_id, delay.as_secs() + ) self._scheduled_expiry = self.clock.call_later( delay, @@ -551,7 +549,7 @@ def __init__(self, hs: "HomeServer"): "send_dummy_events_to_fill_extremities", self._send_dummy_events_to_fill_extremities, ), - 5 * 60 * 1000, + Duration(minutes=5), ) self._message_handler = hs.get_message_handler() @@ -1012,7 +1010,7 @@ async def create_and_send_nonmember_event( if not ignore_shadow_ban and requester.shadow_banned: # We randomly sleep a bit just to annoy the requester. - await self.clock.sleep(random.randint(1, 10)) + await self.clock.sleep(Duration(seconds=random.randint(1, 10))) raise ShadowBanError() room_version = None @@ -1515,7 +1513,7 @@ async def handle_new_client_event( and requester.shadow_banned ): # We randomly sleep a bit just to annoy the requester. - await self.clock.sleep(random.randint(1, 10)) + await self.clock.sleep(Duration(seconds=random.randint(1, 10))) raise ShadowBanError() if event.is_state(): diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index a90ed3193c..f869a41c5e 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -42,6 +42,7 @@ from synapse.types.handlers import ShutdownRoomParams, ShutdownRoomResponse from synapse.types.state import StateFilter from synapse.util.async_helpers import ReadWriteLock +from synapse.util.duration import Duration from synapse.visibility import filter_events_for_client if TYPE_CHECKING: @@ -116,7 +117,7 @@ def __init__(self, hs: "HomeServer"): self.clock.looping_call( self.hs.run_as_background_process, - job.interval, + Duration(milliseconds=job.interval), "purge_history_for_rooms_in_range", self.purge_history_for_rooms_in_range, job.shortest_max_lifetime, diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index ca5002cab3..4c3adca46e 100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -121,6 +121,7 @@ get_domain_from_id, ) from synapse.util.async_helpers import Linearizer +from synapse.util.duration import Duration from synapse.util.metrics import Measure from synapse.util.wheel_timer import WheelTimer @@ -203,7 +204,7 @@ # Delay before a worker tells the presence handler that a user has stopped # syncing. -UPDATE_SYNCING_USERS_MS = 10 * 1000 +UPDATE_SYNCING_USERS = Duration(seconds=10) assert LAST_ACTIVE_GRANULARITY < IDLE_TIMER @@ -528,7 +529,7 @@ def __init__(self, hs: "HomeServer"): self._bump_active_client = ReplicationBumpPresenceActiveTime.make_client(hs) self._set_state_client = ReplicationPresenceSetState.make_client(hs) - self.clock.looping_call(self.send_stop_syncing, UPDATE_SYNCING_USERS_MS) + self.clock.looping_call(self.send_stop_syncing, UPDATE_SYNCING_USERS) hs.register_async_shutdown_handler( phase="before", @@ -581,7 +582,7 @@ def send_stop_syncing(self) -> None: for (user_id, device_id), last_sync_ms in list( self._user_devices_going_offline.items() ): - if now - last_sync_ms > UPDATE_SYNCING_USERS_MS: + if now - last_sync_ms > UPDATE_SYNCING_USERS.as_millis(): self._user_devices_going_offline.pop((user_id, device_id), None) self.send_user_sync(user_id, device_id, False, last_sync_ms) @@ -861,20 +862,20 @@ def __init__(self, hs: "HomeServer"): # The initial delay is to allow disconnected clients a chance to # reconnect before we treat them as offline. 
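The delayed-events and message-expiry hunks above share a pattern: instead of trimming negative delays after the fact, the delay is clamped to zero when the `Duration` is built, since Twisted's `callLater` rejects negative values. As a hypothetical helper:

def duration_until(clock, deadline_ms: int) -> "Duration":
    # Hypothetical helper: clamp at zero so a deadline already in the past
    # fires as soon as possible rather than crashing callLater.
    return Duration(milliseconds=max(deadline_ms - clock.time_msec(), 0))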
self.clock.call_later( - 30, + Duration(seconds=30), self.clock.looping_call, self._handle_timeouts, - 5000, + Duration(seconds=5), ) # Presence information is persisted, whether or not it is being tracked # internally. if self._presence_enabled: self.clock.call_later( - 60, + Duration(minutes=1), self.clock.looping_call, self._persist_unpersisted_changes, - 60 * 1000, + Duration(minutes=1), ) presence_wheel_timer_size_gauge.register_hook( @@ -2430,7 +2431,7 @@ class PresenceFederationQueue: _KEEP_ITEMS_IN_QUEUE_FOR_MS = 5 * 60 * 1000 # How often to check if we can expire entries from the queue. - _CLEAR_ITEMS_EVERY_MS = 60 * 1000 + _CLEAR_ITEMS_EVERY_MS = Duration(minutes=1) def __init__(self, hs: "HomeServer", presence_handler: BasePresenceHandler): self._clock = hs.get_clock() diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 59904cd995..8f16ae6dec 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -34,6 +34,7 @@ from synapse.storage.databases.main.media_repository import LocalMedia, RemoteMedia from synapse.types import JsonDict, JsonValue, Requester, UserID, create_requester from synapse.util.caches.descriptors import cached +from synapse.util.duration import Duration from synapse.util.stringutils import parse_and_validate_mxc_uri if TYPE_CHECKING: @@ -583,7 +584,7 @@ async def _update_join_states( # Do not actually update the room state for shadow-banned users. if requester.shadow_banned: # We randomly sleep a bit just to annoy the requester. - await self.clock.sleep(random.randint(1, 10)) + await self.clock.sleep(Duration(seconds=random.randint(1, 10))) return room_ids = await self.store.get_rooms_for_user(target_user.to_string()) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index d62ad5393f..1026bfd876 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -92,6 +92,7 @@ from synapse.util import stringutils from synapse.util.async_helpers import concurrently_execute from synapse.util.caches.response_cache import ResponseCache +from synapse.util.duration import Duration from synapse.util.iterutils import batch_iter from synapse.util.stringutils import parse_and_validate_server_name from synapse.visibility import filter_events_for_client @@ -1179,7 +1180,7 @@ async def create_room( if (invite_list or invite_3pid_list) and requester.shadow_banned: # We randomly sleep a bit just to annoy the requester. - await self.clock.sleep(random.randint(1, 10)) + await self.clock.sleep(Duration(seconds=random.randint(1, 10))) # Allow the request to go through, but remove any associated invites. invite_3pid_list = [] diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index d5f72c1732..6f8481de9a 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -66,6 +66,7 @@ from synapse.types.state import StateFilter from synapse.util.async_helpers import Linearizer from synapse.util.distributor import user_left_room +from synapse.util.duration import Duration if TYPE_CHECKING: from synapse.server import HomeServer @@ -642,7 +643,7 @@ async def update_membership( if action == Membership.INVITE and requester.shadow_banned: # We randomly sleep a bit just to annoy the requester. - await self.clock.sleep(random.randint(1, 10)) + await self.clock.sleep(Duration(seconds=random.randint(1, 10))) raise ShadowBanError() key = (room_id,) @@ -1647,7 +1648,7 @@ async def do_3pid_invite( if requester.shadow_banned: # We randomly sleep a bit just to annoy the requester. 
- await self.clock.sleep(random.randint(1, 10)) + await self.clock.sleep(Duration(seconds=random.randint(1, 10))) raise ShadowBanError() # We need to rate limit *before* we send out any 3PID invites, so we @@ -2190,7 +2191,7 @@ def __init__(self, hs: "HomeServer"): # We kick this off to pick up outstanding work from before the last restart. self._clock.call_later( - 0, + Duration(seconds=0), self.notify_new_event, ) @@ -2232,7 +2233,7 @@ async def _unsafe_process(self) -> None: # # We wait for a short time so that we don't "tight" loop just # keeping the table up to date. - await self._clock.sleep(0.5) + await self._clock.sleep(Duration(milliseconds=500)) self.pos = self._store.get_room_max_stream_ordering() await self._store.update_room_forgetter_stream_pos(self.pos) diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py index 6d661453ac..c87b5f854a 100644 --- a/synapse/handlers/stats.py +++ b/synapse/handlers/stats.py @@ -32,6 +32,7 @@ from synapse.metrics import SERVER_NAME_LABEL, event_processing_positions from synapse.storage.databases.main.state_deltas import StateDelta from synapse.types import JsonDict +from synapse.util.duration import Duration from synapse.util.events import get_plain_text_topic_from_event_content if TYPE_CHECKING: @@ -72,7 +73,7 @@ def __init__(self, hs: "HomeServer"): # We kick this off so that we don't have to wait for a change before # we start populating stats self.clock.call_later( - 0, + Duration(seconds=0), self.notify_new_event, ) diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 8b577d5d58..e66396fecc 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -41,6 +41,7 @@ UserID, ) from synapse.util.caches.stream_change_cache import StreamChangeCache +from synapse.util.duration import Duration from synapse.util.metrics import Measure from synapse.util.retryutils import filter_destinations_by_retry_limiter from synapse.util.wheel_timer import WheelTimer @@ -60,15 +61,15 @@ class RoomMember: # How often we expect remote servers to resend us presence. -FEDERATION_TIMEOUT = 60 * 1000 +FEDERATION_TIMEOUT = Duration(minutes=1) # How often to resend typing across federation. -FEDERATION_PING_INTERVAL = 40 * 1000 +FEDERATION_PING_INTERVAL = Duration(seconds=40) # How long to remember a typing notification happened in a room before # forgetting about it. -FORGET_TIMEOUT = 10 * 60 * 1000 +FORGET_TIMEOUT = Duration(minutes=10) class FollowerTypingHandler: @@ -106,7 +107,7 @@ def __init__(self, hs: "HomeServer"): self._rooms_updated: set[str] = set() - self.clock.looping_call(self._handle_timeouts, 5000) + self.clock.looping_call(self._handle_timeouts, Duration(seconds=5)) self.clock.looping_call(self._prune_old_typing, FORGET_TIMEOUT) def _reset(self) -> None: @@ -141,7 +142,10 @@ def _handle_timeout_for_member(self, now: int, member: RoomMember) -> None: # user. 
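The typing-handler changes around here show where the raw millisecond domain survives: wheel-timer entries and `time_msec()` stamps remain plain ints, so `Duration` constants cross over explicitly via `as_millis()` at each use site rather than through overloaded mixed arithmetic. Schematically (helper name is illustrative):

def next_ping_deadline_ms(clock, interval: "Duration") -> int:
    # Timestamps stay int milliseconds; the Duration is converted exactly
    # once, mirroring `now + FEDERATION_PING_INTERVAL.as_millis()` below.
    return clock.time_msec() + interval.as_millis()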
if self.federation and self.is_mine_id(member.user_id): last_fed_poke = self._member_last_federation_poke.get(member, None) - if not last_fed_poke or last_fed_poke + FEDERATION_PING_INTERVAL <= now: + if ( + not last_fed_poke + or last_fed_poke + FEDERATION_PING_INTERVAL.as_millis() <= now + ): self.hs.run_as_background_process( "typing._push_remote", self._push_remote, @@ -165,7 +169,7 @@ async def _push_remote(self, member: RoomMember, typing: bool) -> None: now = self.clock.time_msec() self.wheel_timer.insert( - now=now, obj=member, then=now + FEDERATION_PING_INTERVAL + now=now, obj=member, then=now + FEDERATION_PING_INTERVAL.as_millis() ) hosts: StrCollection = ( @@ -315,7 +319,7 @@ async def started_typing( if requester.shadow_banned: # We randomly sleep a bit just to annoy the requester. - await self.clock.sleep(random.randint(1, 10)) + await self.clock.sleep(Duration(seconds=random.randint(1, 10))) raise ShadowBanError() await self.auth.check_user_in_room(room_id, requester) @@ -350,7 +354,7 @@ async def stopped_typing( if requester.shadow_banned: # We randomly sleep a bit just to annoy the requester. - await self.clock.sleep(random.randint(1, 10)) + await self.clock.sleep(Duration(seconds=random.randint(1, 10))) raise ShadowBanError() await self.auth.check_user_in_room(room_id, requester) @@ -428,8 +432,10 @@ async def _recv_edu(self, origin: str, content: JsonDict) -> None: if user.domain in domains: logger.info("Got typing update from %s: %r", user_id, content) now = self.clock.time_msec() - self._member_typing_until[member] = now + FEDERATION_TIMEOUT - self.wheel_timer.insert(now=now, obj=member, then=now + FEDERATION_TIMEOUT) + self._member_typing_until[member] = now + FEDERATION_TIMEOUT.as_millis() + self.wheel_timer.insert( + now=now, obj=member, then=now + FEDERATION_TIMEOUT.as_millis() + ) self._push_update_local(member=member, typing=content["typing"]) def _push_update_local(self, member: RoomMember, typing: bool) -> None: diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index e5210a3e97..36b037e8e1 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -40,6 +40,7 @@ from synapse.storage.databases.main.user_directory import SearchResult from synapse.storage.roommember import ProfileInfo from synapse.types import UserID +from synapse.util.duration import Duration from synapse.util.metrics import Measure from synapse.util.retryutils import NotRetryingDestination from synapse.util.stringutils import non_null_str_or_none @@ -52,7 +53,7 @@ # Don't refresh a stale user directory entry, using a Federation /profile request, # for 60 seconds. This gives time for other state events to arrive (which will # then be coalesced such that only one /profile request is made). -USER_DIRECTORY_STALE_REFRESH_TIME_MS = 60 * 1000 +USER_DIRECTORY_STALE_REFRESH_TIME = Duration(minutes=1) # Maximum number of remote servers that we will attempt to refresh profiles for # in one go. @@ -60,7 +61,7 @@ # As long as we have servers to refresh (without backoff), keep adding more # every 15 seconds. 
-INTERVAL_TO_ADD_MORE_SERVERS_TO_REFRESH_PROFILES = 15 +INTERVAL_TO_ADD_MORE_SERVERS_TO_REFRESH_PROFILES = Duration(seconds=15) def calculate_time_of_next_retry(now_ts: int, retry_count: int) -> int: @@ -137,13 +138,13 @@ def __init__(self, hs: "HomeServer"): # We kick this off so that we don't have to wait for a change before # we start populating the user directory self.clock.call_later( - 0, + Duration(seconds=0), self.notify_new_event, ) # Kick off the profile refresh process on startup self._refresh_remote_profiles_call_later = self.clock.call_later( - 10, + Duration(seconds=10), self.kick_off_remote_profile_refresh_process, ) @@ -550,7 +551,7 @@ async def _handle_possible_remote_profile_change( now_ts = self.clock.time_msec() await self.store.set_remote_user_profile_in_user_dir_stale( user_id, - next_try_at_ms=now_ts + USER_DIRECTORY_STALE_REFRESH_TIME_MS, + next_try_at_ms=now_ts + USER_DIRECTORY_STALE_REFRESH_TIME.as_millis(), retry_counter=0, ) # Schedule a wake-up to refresh the user directory for this server. @@ -558,13 +559,13 @@ async def _handle_possible_remote_profile_change( # other servers ahead of it in the queue to get in the way of updating # the profile if the server only just sent us an event. self.clock.call_later( - USER_DIRECTORY_STALE_REFRESH_TIME_MS // 1000 + 1, + USER_DIRECTORY_STALE_REFRESH_TIME + Duration(seconds=1), self.kick_off_remote_profile_refresh_process_for_remote_server, UserID.from_string(user_id).domain, ) # Schedule a wake-up to handle any backoffs that may occur in the future. self.clock.call_later( - 2 * USER_DIRECTORY_STALE_REFRESH_TIME_MS // 1000 + 1, + USER_DIRECTORY_STALE_REFRESH_TIME * 2 + Duration(seconds=1), self.kick_off_remote_profile_refresh_process, ) return @@ -656,7 +657,9 @@ async def _unsafe_refresh_remote_profiles(self) -> None: if not users: return _, _, next_try_at_ts = users[0] - delay = ((next_try_at_ts - self.clock.time_msec()) // 1000) + 2 + delay = Duration( + milliseconds=next_try_at_ts - self.clock.time_msec() + ) + Duration(seconds=2) self._refresh_remote_profiles_call_later = self.clock.call_later( delay, self.kick_off_remote_profile_refresh_process, diff --git a/synapse/handlers/worker_lock.py b/synapse/handlers/worker_lock.py index 4f9c632f5c..1537a18cc0 100644 --- a/synapse/handlers/worker_lock.py +++ b/synapse/handlers/worker_lock.py @@ -72,7 +72,7 @@ def __init__(self, hs: "HomeServer") -> None: # that lock. self._locks: dict[tuple[str, str], WeakSet[WaitingLock | WaitingMultiLock]] = {} - self._clock.looping_call(self._cleanup_locks, 30_000) + self._clock.looping_call(self._cleanup_locks, Duration(seconds=30)) self._notifier.add_lock_released_callback(self._on_lock_released) @@ -187,7 +187,7 @@ def _wake_all_locks( lock.release_lock() self._clock.call_later( - 0, + Duration(seconds=0), _wake_all_locks, locks, ) diff --git a/synapse/http/client.py b/synapse/http/client.py index cb9b8cd683..f0b9201086 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -87,6 +87,7 @@ from synapse.types import ISynapseReactor, StrSequence from synapse.util.async_helpers import timeout_deferred from synapse.util.clock import Clock +from synapse.util.duration import Duration from synapse.util.json import json_decoder if TYPE_CHECKING: @@ -161,7 +162,9 @@ def _is_ip_blocked( return False -_EPSILON = 0.00000001 +# The delay used by the scheduler to schedule tasks "as soon as possible", while +# still allowing other tasks to run between runs. 
+_EPSILON = Duration(microseconds=1) def _make_scheduler(clock: Clock) -> Callable[[Callable[[], object]], IDelayedCall]: diff --git a/synapse/http/federation/well_known_resolver.py b/synapse/http/federation/well_known_resolver.py index ec72e178c9..303b3856a2 100644 --- a/synapse/http/federation/well_known_resolver.py +++ b/synapse/http/federation/well_known_resolver.py @@ -37,6 +37,7 @@ from synapse.types import ISynapseThreadlessReactor from synapse.util.caches.ttlcache import TTLCache from synapse.util.clock import Clock +from synapse.util.duration import Duration from synapse.util.json import json_decoder from synapse.util.metrics import Measure @@ -315,7 +316,7 @@ async def _make_well_known_request( logger.info("Error fetching %s: %s. Retrying", uri_str, e) # Sleep briefly in the hopes that they come back up - await self._clock.sleep(0.5) + await self._clock.sleep(Duration(milliseconds=500)) def _cache_period_from_headers( diff --git a/synapse/http/server.py b/synapse/http/server.py index 5f4e7484fd..226cb00831 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -76,6 +76,7 @@ from synapse.util.caches import intern_dict from synapse.util.cancellation import is_function_cancellable from synapse.util.clock import Clock +from synapse.util.duration import Duration from synapse.util.iterutils import chunk_seq from synapse.util.json import json_encoder @@ -334,7 +335,7 @@ async def _async_render_wrapper(self, request: "SynapseRequest") -> None: callback_return = await self._async_render(request) except LimitExceededError as e: if e.pause: - await self._clock.sleep(e.pause) + await self._clock.sleep(Duration(seconds=e.pause)) raise if callback_return is not None: diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py index 7b4408b2bc..29c5e66ec4 100644 --- a/synapse/media/media_repository.py +++ b/synapse/media/media_repository.py @@ -70,6 +70,7 @@ from synapse.storage.databases.main.media_repository import LocalMedia, RemoteMedia from synapse.types import UserID from synapse.util.async_helpers import Linearizer +from synapse.util.duration import Duration from synapse.util.retryutils import NotRetryingDestination from synapse.util.stringutils import random_string @@ -80,10 +81,10 @@ # How often to run the background job to update the "recently accessed" # attribute of local and remote media. -UPDATE_RECENTLY_ACCESSED_TS = 60 * 1000 # 1 minute +UPDATE_RECENTLY_ACCESSED_TS = Duration(minutes=1) # How often to run the background job to check for local and remote media # that should be purged according to the configured media retention settings. -MEDIA_RETENTION_CHECK_PERIOD_MS = 60 * 60 * 1000 # 1 hour +MEDIA_RETENTION_CHECK_PERIOD = Duration(hours=1) class MediaRepository: @@ -166,7 +167,7 @@ def __init__(self, hs: "HomeServer"): # with the duration between runs dictated by the homeserver config. 
self.clock.looping_call( self._start_apply_media_retention_rules, - MEDIA_RETENTION_CHECK_PERIOD_MS, + MEDIA_RETENTION_CHECK_PERIOD, ) if hs.config.media.url_preview_enabled: @@ -485,7 +486,7 @@ async def get_local_media_info( if now >= wait_until: break - await self.clock.sleep(0.5) + await self.clock.sleep(Duration(milliseconds=500)) logger.info("Media %s has not yet been uploaded", media_id) self.respond_not_yet_uploaded(request) diff --git a/synapse/media/media_storage.py b/synapse/media/media_storage.py index bc12212c46..e83869bf4d 100644 --- a/synapse/media/media_storage.py +++ b/synapse/media/media_storage.py @@ -51,6 +51,7 @@ from synapse.logging.opentracing import start_active_span, trace, trace_with_opname from synapse.media._base import ThreadedFileSender from synapse.util.clock import Clock +from synapse.util.duration import Duration from synapse.util.file_consumer import BackgroundFileConsumer from ..types import JsonDict @@ -457,7 +458,7 @@ async def write_chunks_to(self, callback: Callable[[bytes], object]) -> None: callback(chunk) # We yield to the reactor by sleeping for 0 seconds. - await self.clock.sleep(0) + await self.clock.sleep(Duration(seconds=0)) @implementer(interfaces.IConsumer) @@ -652,7 +653,7 @@ async def _resumeProducingRepeatedly(self) -> None: self.paused = False while not self.paused: producer.resumeProducing() - await self.clock.sleep(0) + await self.clock.sleep(Duration(seconds=0)) class Header: diff --git a/synapse/media/url_previewer.py b/synapse/media/url_previewer.py index bbd8017b13..2c5e518918 100644 --- a/synapse/media/url_previewer.py +++ b/synapse/media/url_previewer.py @@ -47,6 +47,7 @@ from synapse.types import JsonDict, UserID from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches.expiringcache import ExpiringCache +from synapse.util.duration import Duration from synapse.util.json import json_encoder from synapse.util.stringutils import random_string @@ -208,7 +209,9 @@ def __init__( ) if self._worker_run_media_background_jobs: - self.clock.looping_call(self._start_expire_url_cache_data, 10 * 1000) + self.clock.looping_call( + self._start_expire_url_cache_data, Duration(seconds=10) + ) async def preview(self, url: str, user: UserID, ts: int) -> bytes: # the in-memory cache: diff --git a/synapse/metrics/common_usage_metrics.py b/synapse/metrics/common_usage_metrics.py index 3f38412fa7..bd27a9ca9f 100644 --- a/synapse/metrics/common_usage_metrics.py +++ b/synapse/metrics/common_usage_metrics.py @@ -23,6 +23,7 @@ import attr from synapse.metrics import SERVER_NAME_LABEL +from synapse.util.duration import Duration if TYPE_CHECKING: from synapse.server import HomeServer @@ -70,7 +71,7 @@ def setup(self) -> None: ) self._clock.looping_call( self._hs.run_as_background_process, - 5 * 60 * 1000, + Duration(minutes=5), desc="common_usage_metrics_update_gauges", func=self._update_gauges, ) diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 6a2d152e3f..0580f3665c 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -158,6 +158,7 @@ from synapse.util.async_helpers import maybe_awaitable from synapse.util.caches.descriptors import CachedFunction, cached as _cached from synapse.util.clock import Clock +from synapse.util.duration import Duration from synapse.util.frozenutils import freeze if TYPE_CHECKING: @@ -1389,7 +1390,7 @@ def looping_background_call( if self._hs.config.worker.run_background_tasks or run_on_all_instances: self._clock.looping_call( 
self._hs.run_as_background_process, - msec, + Duration(milliseconds=msec), desc, lambda: maybe_awaitable(f(*args, **kwargs)), ) @@ -1444,8 +1445,7 @@ def delayed_background_call( desc = f.__name__ return self._clock.call_later( - # convert ms to seconds as needed by call_later. - msec * 0.001, + Duration(milliseconds=msec), self._hs.run_as_background_process, desc, lambda: maybe_awaitable(f(*args, **kwargs)), @@ -1457,7 +1457,7 @@ async def sleep(self, seconds: float) -> None: Added in Synapse v1.49.0. """ - await self._clock.sleep(seconds) + await self._clock.sleep(Duration(seconds=seconds)) async def send_http_push_notification( self, diff --git a/synapse/notifier.py b/synapse/notifier.py index 260a2c0d87..d8d2db17f1 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -61,6 +61,7 @@ from synapse.util.async_helpers import ( timeout_deferred, ) +from synapse.util.duration import Duration from synapse.util.stringutils import shortstr from synapse.visibility import filter_events_for_client @@ -235,7 +236,7 @@ class Notifier: Primarily used from the /events stream. """ - UNUSED_STREAM_EXPIRY_MS = 10 * 60 * 1000 + UNUSED_STREAM_EXPIRY = Duration(minutes=10) def __init__(self, hs: "HomeServer"): self.user_to_user_stream: dict[str, _NotifierUserStream] = {} @@ -269,9 +270,7 @@ def __init__(self, hs: "HomeServer"): self.state_handler = hs.get_state_handler() - self.clock.looping_call( - self.remove_expired_streams, self.UNUSED_STREAM_EXPIRY_MS - ) + self.clock.looping_call(self.remove_expired_streams, self.UNUSED_STREAM_EXPIRY) # This is not a very cheap test to perform, but it's only executed # when rendering the metrics page, which is likely once per minute at @@ -861,7 +860,7 @@ async def wait_for_stream_token(self, stream_token: StreamToken) -> bool: logged = True # TODO: be better - await self.clock.sleep(0.5) + await self.clock.sleep(Duration(milliseconds=500)) async def _get_room_ids( self, user: UserID, explicit_room_id: str | None @@ -889,7 +888,7 @@ async def _is_world_readable(self, room_id: str) -> bool: def remove_expired_streams(self) -> None: time_now_ms = self.clock.time_msec() expired_streams = [] - expire_before_ts = time_now_ms - self.UNUSED_STREAM_EXPIRY_MS + expire_before_ts = time_now_ms - self.UNUSED_STREAM_EXPIRY.as_millis() for stream in self.user_to_user_stream.values(): if stream.count_listeners(): continue diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index 36dc9bf6fc..ce4a2102e4 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -29,6 +29,7 @@ from synapse.push.mailer import Mailer from synapse.push.push_types import EmailReason from synapse.storage.databases.main.event_push_actions import EmailPushAction +from synapse.util.duration import Duration from synapse.util.threepids import validate_email if TYPE_CHECKING: @@ -229,7 +230,7 @@ async def _unsafe_process(self) -> None: if soonest_due_at is not None: delay = self.seconds_until(soonest_due_at) self.timed_call = self.hs.get_clock().call_later( - delay, + Duration(seconds=delay), self.on_timer, ) diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index edcabf0c29..1e7e742ddd 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -40,6 +40,7 @@ if TYPE_CHECKING: from synapse.server import HomeServer +from synapse.util.duration import Duration logger = logging.getLogger(__name__) @@ -336,7 +337,7 @@ async def _unsafe_process(self) -> None: else: logger.info("Push failed: delaying for %ds", self.backoff_delay) 
self.timed_call = self.hs.get_clock().call_later( - self.backoff_delay, + Duration(seconds=self.backoff_delay), self.on_timer, ) self.backoff_delay = min( @@ -371,7 +372,7 @@ async def _process_one(self, push_action: HttpPushAction) -> bool: delay_ms = random.randint(1, self.push_jitter_delay_ms) diff_ms = event.origin_server_ts + delay_ms - self.clock.time_msec() if diff_ms > 0: - await self.clock.sleep(diff_ms / 1000) + await self.clock.sleep(Duration(milliseconds=diff_ms)) rejected = await self.dispatch_push_event(event, tweaks, badge) if rejected is False: diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index d76b40cf39..2bab9c2d71 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -42,6 +42,7 @@ from synapse.types import JsonDict from synapse.util.caches.response_cache import ResponseCache from synapse.util.cancellation import is_function_cancellable +from synapse.util.duration import Duration from synapse.util.stringutils import random_string if TYPE_CHECKING: @@ -317,7 +318,7 @@ async def send_request( # If we timed out we probably don't need to worry about backing # off too much, but lets just wait a little anyway. - await clock.sleep(1) + await clock.sleep(Duration(seconds=1)) except (ConnectError, DNSLookupError) as e: if not cls.RETRY_ON_CONNECT_ERROR: raise @@ -332,7 +333,7 @@ async def send_request( e, ) - await clock.sleep(delay) + await clock.sleep(Duration(seconds=delay)) attempts += 1 except HttpResponseException as e: # We convert to SynapseError as we know that it was a SynapseError diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 297feb0049..fdda932ead 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -55,6 +55,7 @@ ) from synapse.types import PersistedEventPosition, ReadReceipt, StreamKeyType, UserID from synapse.util.async_helpers import Linearizer, timeout_deferred +from synapse.util.duration import Duration from synapse.util.iterutils import batch_iter from synapse.util.metrics import Measure @@ -173,7 +174,7 @@ async def on_rdata( ) # Yield to reactor so that we don't block. - await self._clock.sleep(0) + await self._clock.sleep(Duration(seconds=0)) elif stream_name == PushersStream.NAME: for row in rows: if row.deleted: diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py index 3068e60af0..489a2c76a6 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py @@ -55,6 +55,7 @@ parse_command_from_line, ) from synapse.util.clock import Clock +from synapse.util.duration import Duration from synapse.util.stringutils import random_string if TYPE_CHECKING: @@ -193,7 +194,9 @@ def connectionMade(self) -> None: self._send_pending_commands() # Starts sending pings - self._send_ping_loop = self.clock.looping_call(self.send_ping, 5000) + self._send_ping_loop = self.clock.looping_call( + self.send_ping, Duration(seconds=5) + ) # Always send the initial PING so that the other side knows that they # can time us out. 
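A pattern that recurs throughout this commit (the replication client above, media storage earlier, and the state-resolution loops later in the diff) is a zero-length sleep whose only purpose is to yield control back to the reactor inside long CPU-bound loops. A sketch of the idiom, with assumed names:

_YIELD_EVERY = 1000  # assumed batch size; e.g. federation_event yields every 1000 events

async def process_cooperatively(clock, items, handle) -> None:
    for idx, item in enumerate(items, start=1):
        handle(item)
        if idx % _YIELD_EVERY == 0:
            # sleep(Duration(seconds=0)) schedules a callLater(0), letting
            # other reactor work run between batches.
            await clock.sleep(Duration(seconds=0))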
diff --git a/synapse/replication/tcp/redis.py b/synapse/replication/tcp/redis.py index 27d43e6fba..93ba48b406 100644 --- a/synapse/replication/tcp/redis.py +++ b/synapse/replication/tcp/redis.py @@ -53,6 +53,7 @@ tcp_inbound_commands_counter, tcp_outbound_commands_counter, ) +from synapse.util.duration import Duration if TYPE_CHECKING: from synapse.replication.tcp.handler import ReplicationCommandHandler @@ -317,7 +318,7 @@ def __init__( self.hs = hs # nb must be called this for @wrap_as_background_process self.server_name = hs.hostname - hs.get_clock().looping_call(self._send_ping, 30 * 1000) + hs.get_clock().looping_call(self._send_ping, Duration(seconds=30)) @wrap_as_background_process("redis_ping") async def _send_ping(self) -> None: diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index 134d8d921f..36dd39ed67 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -34,6 +34,7 @@ from synapse.replication.tcp.protocol import ServerReplicationStreamProtocol from synapse.replication.tcp.streams import EventsStream from synapse.replication.tcp.streams._base import CachesStream, StreamRow, Token +from synapse.util.duration import Duration from synapse.util.metrics import Measure if TYPE_CHECKING: @@ -116,7 +117,7 @@ def __init__(self, hs: "HomeServer"): # # Note that if the position hasn't advanced then we won't send anything. if any(EventsStream.NAME == s.NAME for s in self.streams): - self.clock.looping_call(self.on_notifier_poke, 1000) + self.clock.looping_call(self.on_notifier_poke, Duration(seconds=1)) def on_notifier_poke(self) -> None: """Checks if there is actually any new data and sends it to the diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index b052052be0..3cb1e09f44 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -58,6 +58,7 @@ EmailRequestTokenBody, MsisdnRequestTokenBody, ) +from synapse.util.duration import Duration from synapse.util.msisdn import phone_number_to_msisdn from synapse.util.stringutils import assert_valid_client_secret, random_string from synapse.util.threepids import check_3pid_allowed, validate_email @@ -125,7 +126,9 @@ async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: # comments for request_token_inhibit_3pid_errors. # Also wait for some random amount of time between 100ms and 1s to make it # look like we did something. - await self.hs.get_clock().sleep(random.randint(1, 10) / 10) + await self.hs.get_clock().sleep( + Duration(milliseconds=random.randint(100, 1000)) + ) return 200, {"sid": random_string(16)} raise SynapseError(400, "Email not found", Codes.THREEPID_NOT_FOUND) @@ -383,7 +386,9 @@ async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: # comments for request_token_inhibit_3pid_errors. # Also wait for some random amount of time between 100ms and 1s to make it # look like we did something. - await self.hs.get_clock().sleep(random.randint(1, 10) / 10) + await self.hs.get_clock().sleep( + Duration(milliseconds=random.randint(100, 1000)) + ) return 200, {"sid": random_string(16)} raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) @@ -449,7 +454,9 @@ async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: # comments for request_token_inhibit_3pid_errors. # Also wait for some random amount of time between 100ms and 1s to make it # look like we did something. 
- await self.hs.get_clock().sleep(random.randint(1, 10) / 10) + await self.hs.get_clock().sleep( + Duration(milliseconds=random.randint(100, 1000)) + ) return 200, {"sid": random_string(16)} logger.info("MSISDN %s is already in use by %s", msisdn, existing_user_id) diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index 9503446b92..fdd2f1985a 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -59,6 +59,7 @@ from synapse.metrics import SERVER_NAME_LABEL, threepid_send_requests from synapse.push.mailer import Mailer from synapse.types import JsonDict +from synapse.util.duration import Duration from synapse.util.msisdn import phone_number_to_msisdn from synapse.util.ratelimitutils import FederationRateLimiter from synapse.util.stringutils import assert_valid_client_secret, random_string @@ -150,7 +151,9 @@ async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: # Also wait for some random amount of time between 100ms and 1s to make it # look like we did something. await self.already_in_use_mailer.send_already_in_use_mail(email) - await self.hs.get_clock().sleep(random.randint(1, 10) / 10) + await self.hs.get_clock().sleep( + Duration(milliseconds=random.randint(100, 1000)) + ) return 200, {"sid": random_string(16)} raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) @@ -219,7 +222,9 @@ async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: # comments for request_token_inhibit_3pid_errors. # Also wait for some random amount of time between 100ms and 1s to make it # look like we did something. - await self.hs.get_clock().sleep(random.randint(1, 10) / 10) + await self.hs.get_clock().sleep( + Duration(milliseconds=random.randint(100, 1000)) + ) return 200, {"sid": random_string(16)} raise SynapseError( diff --git a/synapse/rest/client/transactions.py b/synapse/rest/client/transactions.py index 0c1ac1f11b..43c7b6f993 100644 --- a/synapse/rest/client/transactions.py +++ b/synapse/rest/client/transactions.py @@ -57,7 +57,7 @@ def __init__(self, hs: "HomeServer"): ] = {} # Try to clean entries every 30 mins. This means entries will exist # for at *LEAST* 30 mins, and at *MOST* 60 mins. 
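The request-token hunks above keep their anti-enumeration timing: the old `random.randint(1, 10) / 10` seconds and the new `Duration(milliseconds=random.randint(100, 1000))` both draw uniformly from the same 0.1 to 1.0 s range; only the units are now explicit. The shared idiom, as a hypothetical helper:

import random

async def sleep_to_mask_result(clock) -> None:
    # Hypothetical helper: sleep 100-1000 ms so that response timing does
    # not reveal whether the requested 3PID exists.
    await clock.sleep(Duration(milliseconds=random.randint(100, 1000)))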
- self.clock.looping_call(self._cleanup, CLEANUP_PERIOD.as_millis()) + self.clock.looping_call(self._cleanup, CLEANUP_PERIOD) def _get_transaction_key(self, request: IRequest, requester: Requester) -> Hashable: """A helper function which returns a transaction key that can be used diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 9fc49be4b1..a92233c863 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -54,6 +54,7 @@ from synapse.types.state import StateFilter from synapse.util.async_helpers import Linearizer from synapse.util.caches.expiringcache import ExpiringCache +from synapse.util.duration import Duration from synapse.util.metrics import Measure, measure_func from synapse.util.stringutils import shortstr @@ -663,7 +664,7 @@ def __init__(self, hs: "HomeServer"): _StateResMetrics ) - self.clock.looping_call(self._report_metrics, 120 * 1000) + self.clock.looping_call(self._report_metrics, Duration(minutes=2)) async def resolve_state_groups( self, diff --git a/synapse/state/v2.py b/synapse/state/v2.py index c410c3a7ec..1241a4d66e 100644 --- a/synapse/state/v2.py +++ b/synapse/state/v2.py @@ -40,6 +40,7 @@ from synapse.events import EventBase, is_creator from synapse.storage.databases.main.event_federation import StateDifference from synapse.types import MutableStateMap, StateMap, StrCollection +from synapse.util.duration import Duration logger = logging.getLogger(__name__) @@ -48,7 +49,7 @@ class Clock(Protocol): # This is usually synapse.util.Clock, but it's replaced with a FakeClock in tests. # We only ever sleep(0) though, so that other async functions can make forward # progress without waiting for stateres to complete. - async def sleep(self, duration_ms: float) -> None: ... + async def sleep(self, duration: Duration) -> None: ... class StateResolutionStore(Protocol): @@ -639,7 +640,7 @@ async def _reverse_topological_power_sort( # We await occasionally when we're working with large data sets to # ensure that we don't block the reactor loop for too long. if idx % _AWAIT_AFTER_ITERATIONS == 0: - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) event_to_pl = {} for idx, event_id in enumerate(graph, start=1): @@ -651,7 +652,7 @@ async def _reverse_topological_power_sort( # We await occasionally when we're working with large data sets to # ensure that we don't block the reactor loop for too long. if idx % _AWAIT_AFTER_ITERATIONS == 0: - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) def _get_power_order(event_id: str) -> tuple[int, int, str]: ev = event_map[event_id] @@ -745,7 +746,7 @@ async def _iterative_auth_checks( # We await occasionally when we're working with large data sets to # ensure that we don't block the reactor loop for too long. if idx % _AWAIT_AFTER_ITERATIONS == 0: - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) return resolved_state @@ -796,7 +797,7 @@ async def _mainline_sort( # We await occasionally when we're working with large data sets to # ensure that we don't block the reactor loop for too long. if idx != 0 and idx % _AWAIT_AFTER_ITERATIONS == 0: - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) idx += 1 @@ -814,7 +815,7 @@ async def _mainline_sort( # We await occasionally when we're working with large data sets to # ensure that we don't block the reactor loop for too long. 
if idx % _AWAIT_AFTER_ITERATIONS == 0: - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) event_ids.sort(key=lambda ev_id: order_map[ev_id]) @@ -865,7 +866,7 @@ async def _get_mainline_depth_for_event( idx += 1 if idx % _AWAIT_AFTER_ITERATIONS == 0: - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) # Didn't find a power level auth event, so we just return 0 return 0 diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index c71bcdb7fb..311534c5e7 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -40,6 +40,7 @@ from synapse.storage.types import Connection, Cursor from synapse.types import JsonDict, StrCollection from synapse.util.clock import Clock +from synapse.util.duration import Duration from synapse.util.json import json_encoder from . import engines @@ -162,7 +163,7 @@ def __init__( async def __aenter__(self) -> int: if self._sleep: - await self._clock.sleep(self._sleep_duration_ms / 1000) + await self._clock.sleep(Duration(milliseconds=self._sleep_duration_ms)) return self._update_duration_ms diff --git a/synapse/storage/controllers/purge_events.py b/synapse/storage/controllers/purge_events.py index 4ca3f8f4e1..8a2053d25a 100644 --- a/synapse/storage/controllers/purge_events.py +++ b/synapse/storage/controllers/purge_events.py @@ -32,6 +32,7 @@ from synapse.storage.database import LoggingTransaction from synapse.storage.databases import Databases from synapse.types.storage import _BackgroundUpdates +from synapse.util.duration import Duration from synapse.util.stringutils import shortstr if TYPE_CHECKING: @@ -50,7 +51,7 @@ def __init__(self, hs: "HomeServer", stores: Databases): if hs.config.worker.run_background_tasks: self._delete_state_loop_call = hs.get_clock().looping_call( - self._delete_state_groups_loop, 60 * 1000 + self._delete_state_groups_loop, Duration(minutes=1) ) self.stores.state.db_pool.updates.register_background_update_handler( diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 18f0eac585..2d5e1d3c48 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -62,6 +62,7 @@ from synapse.storage.types import Connection, Cursor, SQLQueryParameters from synapse.types import StrCollection from synapse.util.async_helpers import delay_cancellation +from synapse.util.duration import Duration from synapse.util.iterutils import batch_iter if TYPE_CHECKING: @@ -631,7 +632,7 @@ def __init__( # Check ASAP (and then later, every 1s) to see if we have finished # background updates of tables that aren't safe to update. self._clock.call_later( - 0.0, + Duration(seconds=0), self.hs.run_as_background_process, "upsert_safety_check", self._check_safe_to_upsert, @@ -679,7 +680,7 @@ async def _check_safe_to_upsert(self) -> None: # If there's any updates still running, reschedule to run. 
if background_update_names: self._clock.call_later( - 15.0, + Duration(seconds=15), self.hs.run_as_background_process, "upsert_safety_check", self._check_safe_to_upsert, @@ -706,7 +707,7 @@ def loop() -> None: "Total database time: %.3f%% {%s}", ratio * 100, top_three_counters ) - self._clock.looping_call(loop, 10000) + self._clock.looping_call(loop, Duration(seconds=10)) def new_transaction( self, diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index b7b9b42461..a4530796f2 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -45,6 +45,7 @@ from synapse.storage.engines import PostgresEngine from synapse.storage.util.id_generators import MultiWriterIdGenerator from synapse.util.caches.descriptors import CachedFunction +from synapse.util.duration import Duration from synapse.util.iterutils import batch_iter if TYPE_CHECKING: @@ -71,11 +72,11 @@ # How long between cache invalidation table cleanups, once we have caught up # with the backlog. -REGULAR_CLEANUP_INTERVAL_MS = Config.parse_duration("1h") +REGULAR_CLEANUP_INTERVAL = Duration(hours=1) # How long between cache invalidation table cleanups, before we have caught # up with the backlog. -CATCH_UP_CLEANUP_INTERVAL_MS = Config.parse_duration("1m") +CATCH_UP_CLEANUP_INTERVAL = Duration(minutes=1) # Maximum number of cache invalidation rows to delete at once. CLEAN_UP_MAX_BATCH_SIZE = 20_000 @@ -139,7 +140,7 @@ def __init__( self.database_engine, PostgresEngine ): self.hs.get_clock().call_later( - CATCH_UP_CLEANUP_INTERVAL_MS / 1000, + CATCH_UP_CLEANUP_INTERVAL, self._clean_up_cache_invalidation_wrapper, ) @@ -825,12 +826,12 @@ async def _clean_up_cache_invalidation_wrapper(self) -> None: # Vary how long we wait before calling again depending on whether we # are still sifting through backlog or we have caught up. 
if in_backlog: - next_interval = CATCH_UP_CLEANUP_INTERVAL_MS + next_interval = CATCH_UP_CLEANUP_INTERVAL else: - next_interval = REGULAR_CLEANUP_INTERVAL_MS + next_interval = REGULAR_CLEANUP_INTERVAL self.hs.get_clock().call_later( - next_interval / 1000, + next_interval, self._clean_up_cache_invalidation_wrapper, ) diff --git a/synapse/storage/databases/main/censor_events.py b/synapse/storage/databases/main/censor_events.py index 5d667a5345..a5ae4bf506 100644 --- a/synapse/storage/databases/main/censor_events.py +++ b/synapse/storage/databases/main/censor_events.py @@ -32,6 +32,7 @@ ) from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore from synapse.storage.databases.main.events_worker import EventsWorkerStore +from synapse.util.duration import Duration from synapse.util.json import json_encoder if TYPE_CHECKING: @@ -54,7 +55,7 @@ def __init__( hs.config.worker.run_background_tasks and self.hs.config.server.redaction_retention_period is not None ): - hs.get_clock().looping_call(self._censor_redactions, 5 * 60 * 1000) + hs.get_clock().looping_call(self._censor_redactions, Duration(minutes=5)) @wrap_as_background_process("_censor_redactions") async def _censor_redactions(self) -> None: diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py index 4948d0c286..7cd3667a2b 100644 --- a/synapse/storage/databases/main/client_ips.py +++ b/synapse/storage/databases/main/client_ips.py @@ -42,6 +42,7 @@ ) from synapse.types import JsonDict, UserID from synapse.util.caches.lrucache import LruCache +from synapse.util.duration import Duration if TYPE_CHECKING: from synapse.server import HomeServer @@ -437,7 +438,7 @@ def __init__( ) if hs.config.worker.run_background_tasks and self.user_ips_max_age: - self.clock.looping_call(self._prune_old_user_ips, 5 * 1000) + self.clock.looping_call(self._prune_old_user_ips, Duration(seconds=5)) if self._update_on_this_worker: # This is the designated worker that can write to the client IP @@ -448,7 +449,7 @@ def __init__( tuple[str, str, str], tuple[str, str | None, int] ] = {} - self.clock.looping_call(self._update_client_ips_batch, 5 * 1000) + self.clock.looping_call(self._update_client_ips_batch, Duration(seconds=5)) hs.register_async_shutdown_handler( phase="before", eventType="shutdown", diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index 28e706d5c3..fc61f46c1c 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -152,7 +152,7 @@ def __init__( if hs.config.worker.run_background_tasks: self.clock.looping_call( run_as_background_process, - DEVICE_FEDERATION_INBOX_CLEANUP_INTERVAL.as_millis(), + DEVICE_FEDERATION_INBOX_CLEANUP_INTERVAL, "_delete_old_federation_inbox_rows", self.server_name, self._delete_old_federation_inbox_rows, @@ -1029,7 +1029,7 @@ def _delete_old_federation_inbox_rows_txn(txn: LoggingTransaction) -> bool: # We sleep a bit so that we don't hammer the database in a tight # loop first time we run this. 
- await self.clock.sleep(1) + await self.clock.sleep(Duration(seconds=1)) async def get_devices_with_messages( self, user_id: str, device_ids: StrCollection diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index caae2a0648..cbad40faf7 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -62,6 +62,7 @@ from synapse.util.caches.descriptors import cached, cachedList from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.util.cancellation import cancellable +from synapse.util.duration import Duration from synapse.util.iterutils import batch_iter from synapse.util.json import json_decoder, json_encoder from synapse.util.stringutils import shortstr @@ -191,7 +192,7 @@ def __init__( if hs.config.worker.run_background_tasks: self.clock.looping_call( - self._prune_old_outbound_device_pokes, 60 * 60 * 1000 + self._prune_old_outbound_device_pokes, Duration(hours=1) ) def process_replication_rows( diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index b2f0aeaf58..cc7083b605 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -56,6 +56,7 @@ from synapse.util.caches.descriptors import cached from synapse.util.caches.lrucache import LruCache from synapse.util.cancellation import cancellable +from synapse.util.duration import Duration from synapse.util.iterutils import batch_iter from synapse.util.json import json_encoder @@ -155,7 +156,7 @@ def __init__( if hs.config.worker.run_background_tasks: hs.get_clock().looping_call( - self._delete_old_forward_extrem_cache, 60 * 60 * 1000 + self._delete_old_forward_extrem_cache, Duration(hours=1) ) # Cache of event ID to list of auth event IDs and their depths. @@ -171,7 +172,9 @@ def __init__( # index. 
self.tests_allow_no_chain_cover_index = True - self.clock.looping_call(self._get_stats_for_federation_staging, 30 * 1000) + self.clock.looping_call( + self._get_stats_for_federation_staging, Duration(seconds=30) + ) if isinstance(self.database_engine, PostgresEngine): self.db_pool.updates.register_background_validate_constraint_and_delete_rows( diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index 2e99d7314e..a66caa672c 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -105,6 +105,7 @@ from synapse.storage.databases.main.stream import StreamWorkerStore from synapse.types import JsonDict, StrCollection from synapse.util.caches.descriptors import cached +from synapse.util.duration import Duration from synapse.util.json import json_encoder if TYPE_CHECKING: @@ -270,15 +271,17 @@ def __init__( self._find_stream_orderings_for_times_txn(cur) cur.close() - self.clock.looping_call(self._find_stream_orderings_for_times, 10 * 60 * 1000) + self.clock.looping_call( + self._find_stream_orderings_for_times, Duration(minutes=10) + ) self._rotate_count = 10000 self._doing_notif_rotation = False if hs.config.worker.run_background_tasks: - self.clock.looping_call(self._rotate_notifs, 30 * 1000) + self.clock.looping_call(self._rotate_notifs, Duration(seconds=30)) self.clock.looping_call( - self._clear_old_push_actions_staging, 30 * 60 * 1000 + self._clear_old_push_actions_staging, Duration(minutes=30) ) self.db_pool.updates.register_background_index_update( @@ -1817,7 +1820,7 @@ def _clear_old_push_actions_staging_txn(txn: LoggingTransaction) -> bool: return # We sleep to ensure that we don't overwhelm the DB. - await self.clock.sleep(1.0) + await self.clock.sleep(Duration(seconds=1)) async def get_push_actions_for_user( self, diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 4cf708442d..ae6ee50dc2 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -92,6 +92,7 @@ from synapse.util.caches.lrucache import AsyncLruCache from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.util.cancellation import cancellable +from synapse.util.duration import Duration from synapse.util.iterutils import batch_iter from synapse.util.metrics import Measure @@ -278,7 +279,7 @@ def __init__( # We periodically clean out old transaction ID mappings self.clock.looping_call( self._cleanup_old_transaction_ids, - 5 * 60 * 1000, + Duration(minutes=5), ) self._get_event_cache: AsyncLruCache[tuple[str], EventCacheEntry] = ( diff --git a/synapse/storage/databases/main/lock.py b/synapse/storage/databases/main/lock.py index 51f04acbcb..dd49f98366 100644 --- a/synapse/storage/databases/main/lock.py +++ b/synapse/storage/databases/main/lock.py @@ -38,6 +38,7 @@ ) from synapse.types import ISynapseReactor from synapse.util.clock import Clock +from synapse.util.duration import Duration from synapse.util.stringutils import random_string if TYPE_CHECKING: @@ -49,11 +50,13 @@ # How often to renew an acquired lock by updating the `last_renewed_ts` time in # the lock table. -_RENEWAL_INTERVAL_MS = 30 * 1000 +_RENEWAL_INTERVAL = Duration(seconds=30) # How long before an acquired lock times out. 
_LOCK_TIMEOUT_MS = 2 * 60 * 1000 +_LOCK_REAP_INTERVAL = Duration(milliseconds=_LOCK_TIMEOUT_MS / 10.0) + class LockStore(SQLBaseStore): """Provides a best effort distributed lock between worker instances. @@ -106,9 +109,7 @@ def __init__( self._acquiring_locks: set[tuple[str, str]] = set() - self.clock.looping_call( - self._reap_stale_read_write_locks, _LOCK_TIMEOUT_MS / 10.0 - ) + self.clock.looping_call(self._reap_stale_read_write_locks, _LOCK_REAP_INTERVAL) @wrap_as_background_process("LockStore._on_shutdown") async def _on_shutdown(self) -> None: @@ -410,7 +411,7 @@ def __init__( def _setup_looping_call(self) -> None: self._looping_call = self._clock.looping_call( self._renew, - _RENEWAL_INTERVAL_MS, + _RENEWAL_INTERVAL, self._server_name, self._store, self._hs, diff --git a/synapse/storage/databases/main/metrics.py b/synapse/storage/databases/main/metrics.py index dc8e2c1616..b2b4561247 100644 --- a/synapse/storage/databases/main/metrics.py +++ b/synapse/storage/databases/main/metrics.py @@ -34,6 +34,7 @@ from synapse.storage.databases.main.event_push_actions import ( EventPushActionsWorkerStore, ) +from synapse.util.duration import Duration if TYPE_CHECKING: from synapse.server import HomeServer @@ -78,7 +79,7 @@ def __init__( # Read the extrems every 60 minutes if hs.config.worker.run_background_tasks: - self.clock.looping_call(self._read_forward_extremities, 60 * 60 * 1000) + self.clock.looping_call(self._read_forward_extremities, Duration(hours=1)) # Used in _generate_user_daily_visits to keep track of progress self._last_user_visit_update = self._get_start_of_day() diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 545b0f11c4..9a9c0fffc7 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -49,13 +49,12 @@ from synapse.storage.util.sequence import build_sequence_generator from synapse.types import JsonDict, StrCollection, UserID, UserInfo from synapse.util.caches.descriptors import cached +from synapse.util.duration import Duration from synapse.util.iterutils import batch_iter if TYPE_CHECKING: from synapse.server import HomeServer -THIRTY_MINUTES_IN_MS = 30 * 60 * 1000 - logger = logging.getLogger(__name__) @@ -213,7 +212,7 @@ def __init__( if hs.config.worker.run_background_tasks: self.clock.call_later( - 0.0, + Duration(seconds=0), self._set_expiration_date_when_missing, ) @@ -227,7 +226,7 @@ def __init__( # Create a background job for culling expired 3PID validity tokens if hs.config.worker.run_background_tasks: self.clock.looping_call( - self.cull_expired_threepid_validation_tokens, THIRTY_MINUTES_IN_MS + self.cull_expired_threepid_validation_tokens, Duration(minutes=30) ) async def register_user( @@ -2739,7 +2738,7 @@ def __init__( # Create a background job for removing expired login tokens if hs.config.worker.run_background_tasks: self.clock.looping_call( - self._delete_expired_login_tokens, THIRTY_MINUTES_IN_MS + self._delete_expired_login_tokens, Duration(minutes=30) ) async def add_access_token_to_user( diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 4fb7779d38..9b06ab69fe 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -63,6 +63,7 @@ get_domain_from_id, ) from synapse.util.caches.descriptors import _CacheContext, cached, cachedList +from synapse.util.duration import Duration from synapse.util.iterutils import batch_iter 
from synapse.util.metrics import Measure @@ -110,10 +111,10 @@ def __init__( self._known_servers_count = 1 self.hs.get_clock().looping_call( self._count_known_servers, - 60 * 1000, + Duration(minutes=1), ) self.hs.get_clock().call_later( - 1, + Duration(seconds=1), self._count_known_servers, ) federation_known_servers_gauge.register_hook( diff --git a/synapse/storage/databases/main/session.py b/synapse/storage/databases/main/session.py index 1154bb2d59..f088a8d88c 100644 --- a/synapse/storage/databases/main/session.py +++ b/synapse/storage/databases/main/session.py @@ -30,6 +30,7 @@ LoggingTransaction, ) from synapse.types import JsonDict +from synapse.util.duration import Duration from synapse.util.json import json_encoder if TYPE_CHECKING: @@ -55,7 +56,7 @@ def __init__( # Create a background job for culling expired sessions. if hs.config.worker.run_background_tasks: - self.clock.looping_call(self._delete_expired_sessions, 30 * 60 * 1000) + self.clock.looping_call(self._delete_expired_sessions, Duration(minutes=30)) async def create_session( self, session_type: str, value: JsonDict, expiry_ms: int diff --git a/synapse/storage/databases/main/sliding_sync.py b/synapse/storage/databases/main/sliding_sync.py index 6f87308cde..828eed3a73 100644 --- a/synapse/storage/databases/main/sliding_sync.py +++ b/synapse/storage/databases/main/sliding_sync.py @@ -96,7 +96,7 @@ def __init__( if self.hs.config.worker.run_background_tasks: self.clock.looping_call( self.delete_old_sliding_sync_connections, - CONNECTION_EXPIRY_FREQUENCY.as_millis(), + CONNECTION_EXPIRY_FREQUENCY, ) async def get_latest_bump_stamp_for_room( diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py index 70c5b928fd..2fdd27d3da 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py @@ -37,6 +37,7 @@ from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore from synapse.types import JsonDict, StrCollection from synapse.util.caches.descriptors import cached, cachedList +from synapse.util.duration import Duration if TYPE_CHECKING: from synapse.server import HomeServer @@ -81,7 +82,7 @@ def __init__( super().__init__(database, db_conn, hs) if hs.config.worker.run_background_tasks: - self.clock.looping_call(self._cleanup_transactions, 30 * 60 * 1000) + self.clock.looping_call(self._cleanup_transactions, Duration(minutes=30)) @wrap_as_background_process("cleanup_transactions") async def _cleanup_transactions(self) -> None: diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 6f9bbcac67..818f8b1a69 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -58,6 +58,7 @@ run_in_background, ) from synapse.util.clock import Clock +from synapse.util.duration import Duration logger = logging.getLogger(__name__) @@ -640,7 +641,7 @@ async def _acquire_lock(self, key: Hashable) -> _LinearizerEntry: # This needs to happen while we hold the lock. We could put it on the # exit path, but that would slow down the uncontended case. try: - await self._clock.sleep(0) + await self._clock.sleep(Duration(seconds=0)) except CancelledError: self._release_lock(key, entry) raise @@ -818,7 +819,9 @@ def time_it_out() -> None: # We don't track these calls since they are short. 
delayed_call = clock.call_later( - timeout, time_it_out, call_later_cancel_on_shutdown=cancel_on_shutdown + Duration(seconds=timeout), + time_it_out, + call_later_cancel_on_shutdown=cancel_on_shutdown, ) def convert_cancelled(value: Failure) -> Failure: diff --git a/synapse/util/batching_queue.py b/synapse/util/batching_queue.py index 514abcbec1..43eefcb7f1 100644 --- a/synapse/util/batching_queue.py +++ b/synapse/util/batching_queue.py @@ -36,6 +36,7 @@ from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable from synapse.metrics import SERVER_NAME_LABEL from synapse.util.clock import Clock +from synapse.util.duration import Duration if TYPE_CHECKING: from synapse.server import HomeServer @@ -175,7 +176,7 @@ async def _process_queue(self, key: Hashable) -> None: # pattern is to call `add_to_queue` multiple times at once, and # deferring to the next reactor tick allows us to batch all of # those up. - await self._clock.sleep(0) + await self._clock.sleep(Duration(seconds=0)) next_values = self._next_values.pop(key, []) if not next_values: diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index 528e4bb852..87870f4223 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -38,6 +38,7 @@ from synapse.config import cache as cache_config from synapse.util.caches import EvictionReason, register_cache from synapse.util.clock import Clock +from synapse.util.duration import Duration if TYPE_CHECKING: from synapse.server import HomeServer @@ -112,7 +113,7 @@ def __init__( def f() -> "defer.Deferred[None]": return hs.run_as_background_process("prune_cache", self._prune_cache) - self._clock.looping_call(f, self._expiry_ms / 2) + self._clock.looping_call(f, Duration(milliseconds=self._expiry_ms / 2)) def __setitem__(self, key: KT, value: VT) -> None: now = self._clock.time_msec() diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index d304e804e9..a3e7bd4d03 100644 --- a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -50,6 +50,7 @@ iterate_tree_cache_items, ) from synapse.util.clock import Clock +from synapse.util.duration import Duration from synapse.util.linked_list import ListNode if TYPE_CHECKING: @@ -202,9 +203,9 @@ async def _internal_expire_old_entries( if (i + 1) % 10000 == 0: logger.debug("Waiting during drop") if node.last_access_ts_secs > now - expiry_seconds: - await clock.sleep(0.5) + await clock.sleep(Duration(milliseconds=500)) else: - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) logger.debug("Waking during drop") node = next_node @@ -248,7 +249,7 @@ def setup_expire_lru_cache_entries(hs: "HomeServer") -> None: clock = hs.get_clock() clock.looping_call( _expire_old_entries, - 30 * 1000, + Duration(seconds=30), server_name, hs, clock, diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py index b1cdc81dda..0289e13f6a 100644 --- a/synapse/util/caches/response_cache.py +++ b/synapse/util/caches/response_cache.py @@ -42,6 +42,7 @@ from synapse.util.async_helpers import AbstractObservableDeferred, ObservableDeferred from synapse.util.caches import EvictionReason, register_cache from synapse.util.clock import Clock +from synapse.util.duration import Duration logger = logging.getLogger(__name__) @@ -120,7 +121,7 @@ def __init__( self._result_cache: dict[KV, ResponseCacheEntry] = {} self.clock = clock - self.timeout_sec = timeout_ms / 1000.0 + self.timeout = 
Duration(milliseconds=timeout_ms) self._name = name self._metrics = register_cache( @@ -195,9 +196,9 @@ def on_complete(r: RV) -> RV: # if this cache has a non-zero timeout, and the callback has not cleared # the should_cache bit, we leave it in the cache for now and schedule # its removal later. - if self.timeout_sec and context.should_cache: + if self.timeout and context.should_cache: self.clock.call_later( - self.timeout_sec, + self.timeout, self._entry_timeout, key, # We don't need to track these calls since they don't hold any strong diff --git a/synapse/util/clock.py b/synapse/util/clock.py index 65f7164896..6fd31864b7 100644 --- a/synapse/util/clock.py +++ b/synapse/util/clock.py @@ -31,6 +31,7 @@ from synapse.logging import context from synapse.types import ISynapseThreadlessReactor from synapse.util import log_failure +from synapse.util.duration import Duration from synapse.util.stringutils import random_string_insecure_fast P = ParamSpec("P") @@ -84,14 +85,14 @@ def shutdown(self) -> None: self.cancel_all_looping_calls() self.cancel_all_delayed_calls() - async def sleep(self, seconds: float) -> None: + async def sleep(self, duration: Duration) -> None: d: defer.Deferred[float] = defer.Deferred() # Start task in the `sentinel` logcontext, to avoid leaking the current context # into the reactor once it finishes. with context.PreserveLoggingContext(): # We can ignore the lint here since this class is the one location callLater should # be called. - self._reactor.callLater(seconds, d.callback, seconds) # type: ignore[call-later-not-tracked] + self._reactor.callLater(duration.as_secs(), d.callback, duration.as_secs()) # type: ignore[call-later-not-tracked] await d def time(self) -> float: @@ -105,13 +106,13 @@ def time_msec(self) -> int: def looping_call( self, f: Callable[P, object], - msec: float, + duration: Duration, *args: P.args, **kwargs: P.kwargs, ) -> LoopingCall: """Call a function repeatedly. - Waits `msec` initially before calling `f` for the first time. + Waits `duration` initially before calling `f` for the first time. If the function given to `looping_call` returns an awaitable/deferred, the next call isn't scheduled until after the returned awaitable has finished. We get @@ -124,16 +125,16 @@ def looping_call( Args: f: The function to call repeatedly. - msec: How long to wait between calls in milliseconds. + duration: How long to wait between calls. *args: Positional arguments to pass to function. **kwargs: Key arguments to pass to function. """ - return self._looping_call_common(f, msec, False, *args, **kwargs) + return self._looping_call_common(f, duration, False, *args, **kwargs) def looping_call_now( self, f: Callable[P, object], - msec: float, + duration: Duration, *args: P.args, **kwargs: P.kwargs, ) -> LoopingCall: @@ -148,16 +149,16 @@ def looping_call_now( Args: f: The function to call repeatedly. - msec: How long to wait between calls in milliseconds. + duration: How long to wait between calls. *args: Positional arguments to pass to function. **kwargs: Key arguments to pass to function. 
""" - return self._looping_call_common(f, msec, True, *args, **kwargs) + return self._looping_call_common(f, duration, True, *args, **kwargs) def _looping_call_common( self, f: Callable[P, object], - msec: float, + duration: Duration, now: bool, *args: P.args, **kwargs: P.kwargs, @@ -217,7 +218,7 @@ def wrapped_f(*args: P.args, **kwargs: P.kwargs) -> Deferred: # We want to start the task in the `sentinel` logcontext, to avoid leaking the # current context into the reactor after the function finishes. with context.PreserveLoggingContext(): - d = call.start(msec / 1000.0, now=now) + d = call.start(duration.as_secs(), now=now) d.addErrback(log_failure, "Looping call died", consumeErrors=False) self._looping_calls.append(call) @@ -225,7 +226,7 @@ def wrapped_f(*args: P.args, **kwargs: P.kwargs) -> Deferred: "%s(%s): Scheduled looping call every %sms later", looping_call_context_string, instance_id, - msec, + duration.as_millis(), # Find out who is scheduling the call which makes it easy to follow in the # logs. stack_info=True, @@ -251,7 +252,7 @@ def cancel_all_looping_calls(self, consumeErrors: bool = True) -> None: def call_later( self, - delay: float, + delay: Duration, callback: Callable, *args: Any, call_later_cancel_on_shutdown: bool = True, @@ -264,7 +265,7 @@ def call_later( `run_as_background_process` to give it more specific label and track metrics. Args: - delay: How long to wait in seconds. + delay: How long to wait. callback: Function to call *args: Postional arguments to pass to function. call_later_cancel_on_shutdown: Whether this call should be tracked for cleanup during @@ -322,7 +323,9 @@ def wrapped_callback(*args: Any, **kwargs: Any) -> None: # We can ignore the lint here since this class is the one location callLater should # be called. - call = self._reactor.callLater(delay, wrapped_callback, *args, **kwargs) # type: ignore[call-later-not-tracked] + call = self._reactor.callLater( + delay.as_secs(), wrapped_callback, *args, **kwargs + ) # type: ignore[call-later-not-tracked] logger.debug( "call_later(%s): Scheduled call for %ss later (tracked for shutdown: %s)", diff --git a/synapse/util/duration.py b/synapse/util/duration.py index 3419f6dda6..135b980852 100644 --- a/synapse/util/duration.py +++ b/synapse/util/duration.py @@ -13,6 +13,7 @@ # from datetime import timedelta +from typing import overload # Constant so we don't keep creating new timedelta objects when calling # `.as_millis()`. 
@@ -35,6 +36,82 @@ def as_millis(self) -> int: """Returns the duration in milliseconds.""" return int(self / _ONE_MILLISECOND) - def as_secs(self) -> int: + def as_secs(self) -> float: """Returns the duration in seconds.""" - return int(self.total_seconds()) + return self.total_seconds() + + # Override arithmetic operations to return Duration instances + + def __add__(self, other: timedelta) -> "Duration": + """Add two durations together, returning a Duration.""" + result = super().__add__(other) + return Duration(seconds=result.total_seconds()) + + def __radd__(self, other: timedelta) -> "Duration": + """Add two durations together (reversed), returning a Duration.""" + result = super().__radd__(other) + return Duration(seconds=result.total_seconds()) + + def __sub__(self, other: timedelta) -> "Duration": + """Subtract two durations, returning a Duration.""" + result = super().__sub__(other) + return Duration(seconds=result.total_seconds()) + + def __rsub__(self, other: timedelta) -> "Duration": + """Subtract two durations (reversed), returning a Duration.""" + result = super().__rsub__(other) + return Duration(seconds=result.total_seconds()) + + def __mul__(self, other: float) -> "Duration": + """Multiply a duration by a scalar, returning a Duration.""" + result = super().__mul__(other) + return Duration(seconds=result.total_seconds()) + + def __rmul__(self, other: float) -> "Duration": + """Multiply a duration by a scalar (reversed), returning a Duration.""" + result = super().__rmul__(other) + return Duration(seconds=result.total_seconds()) + + @overload + def __truediv__(self, other: timedelta) -> float: ... + + @overload + def __truediv__(self, other: float) -> "Duration": ... + + def __truediv__(self, other: float | timedelta) -> "Duration | float": + """Divide a duration by a scalar or another duration. + + If dividing by a scalar, returns a Duration. + If dividing by a timedelta, returns a float ratio. + """ + result = super().__truediv__(other) + if isinstance(other, timedelta): + # Dividing by a timedelta gives a float ratio + assert isinstance(result, float) + return result + else: + # Dividing by a scalar gives a Duration + assert isinstance(result, timedelta) + return Duration(seconds=result.total_seconds()) + + @overload + def __floordiv__(self, other: timedelta) -> int: ... + + @overload + def __floordiv__(self, other: int) -> "Duration": ... + + def __floordiv__(self, other: int | timedelta) -> "Duration | int": + """Floor divide a duration by a scalar or another duration. + + If dividing by a scalar, returns a Duration. + If dividing by a timedelta, returns an int ratio. 
+ """ + result = super().__floordiv__(other) + if isinstance(other, timedelta): + # Dividing by a timedelta gives an int ratio + assert isinstance(result, int) + return result + else: + # Dividing by a scalar gives a Duration + assert isinstance(result, timedelta) + return Duration(seconds=result.total_seconds()) diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index 024706d9cf..d1053d227b 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -48,6 +48,7 @@ from synapse.logging.opentracing import start_active_span from synapse.metrics import SERVER_NAME_LABEL, Histogram, LaterGauge from synapse.util.clock import Clock +from synapse.util.duration import Duration if typing.TYPE_CHECKING: from contextlib import _GeneratorContextManager @@ -353,7 +354,9 @@ def queue_request() -> "defer.Deferred[None]": rate_limiter_name=self.metrics_name, **{SERVER_NAME_LABEL: self.our_server_name}, ).inc() - ret_defer = run_in_background(self.clock.sleep, self.sleep_sec) + ret_defer = run_in_background( + self.clock.sleep, Duration(seconds=self.sleep_sec) + ) self.sleeping_requests.add(request_id) @@ -414,6 +417,6 @@ def start_next_request() -> None: pass self.clock.call_later( - 0.0, + Duration(seconds=0), start_next_request, ) diff --git a/synapse/util/task_scheduler.py b/synapse/util/task_scheduler.py index 3b4423a1ff..353ddb70bc 100644 --- a/synapse/util/task_scheduler.py +++ b/synapse/util/task_scheduler.py @@ -35,6 +35,7 @@ wrap_as_background_process, ) from synapse.types import JsonMapping, ScheduledTask, TaskStatus +from synapse.util.duration import Duration from synapse.util.stringutils import random_string if TYPE_CHECKING: @@ -92,8 +93,8 @@ class TaskScheduler: """ # Precision of the scheduler, evaluation of tasks to run will only happen - # every `SCHEDULE_INTERVAL_MS` ms - SCHEDULE_INTERVAL_MS = 1 * 60 * 1000 # 1mn + # every `SCHEDULE_INTERVAL` + SCHEDULE_INTERVAL = Duration(minutes=1) # How often to clean up old tasks. CLEANUP_INTERVAL_MS = 30 * 60 * 1000 # Time before a complete or failed task is deleted from the DB @@ -103,7 +104,7 @@ class TaskScheduler: # Time from the last task update after which we will log a warning LAST_UPDATE_BEFORE_WARNING_MS = 24 * 60 * 60 * 1000 # 24hrs # Report a running task's status and usage every so often. - OCCASIONAL_REPORT_INTERVAL_MS = 5 * 60 * 1000 # 5 minutes + OCCASIONAL_REPORT_INTERVAL = Duration(minutes=5) def __init__(self, hs: "HomeServer"): self.hs = hs # nb must be called this for @wrap_as_background_process @@ -127,11 +128,11 @@ def __init__(self, hs: "HomeServer"): if self._run_background_tasks: self._clock.looping_call( self._launch_scheduled_tasks, - TaskScheduler.SCHEDULE_INTERVAL_MS, + TaskScheduler.SCHEDULE_INTERVAL, ) self._clock.looping_call( self._clean_scheduled_tasks, - TaskScheduler.SCHEDULE_INTERVAL_MS, + TaskScheduler.SCHEDULE_INTERVAL, ) running_tasks_gauge.register_hook( @@ -433,7 +434,7 @@ async def wrapper() -> None: start_time = self._clock.time() occasional_status_call = self._clock.looping_call( _occasional_report, - TaskScheduler.OCCASIONAL_REPORT_INTERVAL_MS, + TaskScheduler.OCCASIONAL_REPORT_INTERVAL, log_context, start_time, ) @@ -468,7 +469,7 @@ async def wrapper() -> None: # Try launch a new task since we've finished with this one. 
self._clock.call_later( - 0.1, + Duration(milliseconds=100), self._launch_scheduled_tasks, ) diff --git a/synmark/suites/logging.py b/synmark/suites/logging.py index d89f487d3d..243f9dbca0 100644 --- a/synmark/suites/logging.py +++ b/synmark/suites/logging.py @@ -37,6 +37,7 @@ from synapse.synapse_rust import reset_logging_config from synapse.types import ISynapseReactor from synapse.util.clock import Clock +from synapse.util.duration import Duration class LineCounter(LineOnlyReceiver): @@ -141,7 +142,7 @@ class _logging: if len(handler._buffer) == handler.maximum_buffer: while len(handler._buffer) > handler.maximum_buffer / 2: - await clock.sleep(0.01) + await clock.sleep(Duration(milliseconds=10)) await logger_factory.on_done diff --git a/tests/federation/transport/server/test__base.py b/tests/federation/transport/server/test__base.py index 3c553e6e40..00a9c2064c 100644 --- a/tests/federation/transport/server/test__base.py +++ b/tests/federation/transport/server/test__base.py @@ -30,6 +30,7 @@ from synapse.server import HomeServer from synapse.types import JsonDict from synapse.util.cancellation import cancellable +from synapse.util.duration import Duration from synapse.util.ratelimitutils import FederationRateLimiter from tests import unittest @@ -53,13 +54,13 @@ def __init__( async def on_GET( self, origin: str, content: None, query: dict[bytes, list[bytes]] ) -> tuple[int, JsonDict]: - await self.clock.sleep(1.0) + await self.clock.sleep(Duration(seconds=1)) return HTTPStatus.OK, {"result": True} async def on_POST( self, origin: str, content: JsonDict, query: dict[bytes, list[bytes]] ) -> tuple[int, JsonDict]: - await self.clock.sleep(1.0) + await self.clock.sleep(Duration(seconds=1)) return HTTPStatus.OK, {"result": True} diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index fa6bb4970b..183234b8a0 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -250,7 +250,7 @@ def test_delete_device_and_big_device_inbox(self) -> None: self.assertEqual(10, len(res)) # wait for the task scheduler to do a second delete pass - self.reactor.advance(TaskScheduler.SCHEDULE_INTERVAL_MS / 1000) + self.reactor.advance(TaskScheduler.SCHEDULE_INTERVAL.as_secs()) # remaining messages should now be deleted res = self.get_success( diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index 70557a4a5f..623eef0ecb 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -544,7 +544,7 @@ def test_prune_typing_replication(self) -> None: ) self.assertEqual(rows, [(2, [ROOM_ID, []])]) - self.reactor.advance(FORGET_TIMEOUT) + self.reactor.advance(FORGET_TIMEOUT.as_secs()) rows, _, _ = self.get_success( self.handler.get_all_typing_updates( diff --git a/tests/http/test_servlet.py b/tests/http/test_servlet.py index 5bf8305d05..2f1c8f03c6 100644 --- a/tests/http/test_servlet.py +++ b/tests/http/test_servlet.py @@ -34,6 +34,7 @@ from synapse.server import HomeServer from synapse.types import JsonDict from synapse.util.cancellation import cancellable +from synapse.util.duration import Duration from tests import unittest from tests.http.server._base import test_disconnect @@ -108,11 +109,11 @@ def __init__(self, hs: HomeServer): @cancellable async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: - await self.clock.sleep(1.0) + await self.clock.sleep(Duration(seconds=1)) return HTTPStatus.OK, {"result": True} async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: - await 
self.clock.sleep(1.0) + await self.clock.sleep(Duration(seconds=1)) return HTTPStatus.OK, {"result": True} diff --git a/tests/logging/test_opentracing.py b/tests/logging/test_opentracing.py index 3aaa743265..d5e643585d 100644 --- a/tests/logging/test_opentracing.py +++ b/tests/logging/test_opentracing.py @@ -37,6 +37,7 @@ ) from synapse.metrics.background_process_metrics import run_as_background_process from synapse.util.clock import Clock +from synapse.util.duration import Duration from tests.server import get_clock @@ -184,7 +185,7 @@ async def task(i: int) -> None: scopes.append(scope) self.assertEqual(self._tracer.active_span, scope.span) - await clock.sleep(4) + await clock.sleep(Duration(seconds=4)) self.assertEqual(self._tracer.active_span, scope.span) scope.close() @@ -194,7 +195,7 @@ async def root() -> None: scopes.append(root_scope) d1 = run_in_background(task, 1) - await clock.sleep(2) + await clock.sleep(Duration(seconds=2)) d2 = run_in_background(task, 2) # because we did run_in_background, the active span should still be the @@ -351,7 +352,7 @@ async def bg_task() -> None: # Now wait for the background process to finish while not callback_finished: - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self.assertTrue( callback_finished, @@ -418,7 +419,7 @@ async def bg_task() -> None: # Now wait for the background process to finish while not callback_finished: - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self.assertTrue( callback_finished, diff --git a/tests/replication/http/test__base.py b/tests/replication/http/test__base.py index b757c6428a..1c7e7e997b 100644 --- a/tests/replication/http/test__base.py +++ b/tests/replication/http/test__base.py @@ -30,6 +30,7 @@ from synapse.server import HomeServer from synapse.types import JsonDict from synapse.util.cancellation import cancellable +from synapse.util.duration import Duration from tests import unittest from tests.http.server._base import test_disconnect @@ -52,7 +53,7 @@ async def _serialize_payload(**kwargs: ReplicationEndpoint) -> JsonDict: async def _handle_request( # type: ignore[override] self, request: Request, content: JsonDict ) -> tuple[int, JsonDict]: - await self.clock.sleep(1.0) + await self.clock.sleep(Duration(seconds=1)) return HTTPStatus.OK, {"result": True} @@ -73,7 +74,7 @@ async def _serialize_payload(**kwargs: ReplicationEndpoint) -> JsonDict: async def _handle_request( # type: ignore[override] self, request: Request, content: JsonDict ) -> tuple[int, JsonDict]: - await self.clock.sleep(1.0) + await self.clock.sleep(Duration(seconds=1)) return HTTPStatus.OK, {"result": True} diff --git a/tests/rest/admin/test_background_updates.py b/tests/rest/admin/test_background_updates.py index 25112baaa2..a4a3112e20 100644 --- a/tests/rest/admin/test_background_updates.py +++ b/tests/rest/admin/test_background_updates.py @@ -31,6 +31,7 @@ from synapse.storage.background_updates import BackgroundUpdater from synapse.types import JsonDict from synapse.util.clock import Clock +from synapse.util.duration import Duration from tests import unittest @@ -105,7 +106,7 @@ def _register_bg_update(self) -> None: "Adds a bg update but doesn't start it" async def _fake_update(progress: JsonDict, batch_size: int) -> int: - await self.clock.sleep(0.2) + await self.clock.sleep(Duration(milliseconds=200)) return batch_size self.store.db_pool.updates.register_background_update_handler( diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index 7daf13ad22..1c340efa0c 100644 --- 
a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -44,6 +44,7 @@ ) from synapse.types import UserID from synapse.util.clock import Clock +from synapse.util.duration import Duration from synapse.util.task_scheduler import TaskScheduler from tests import unittest @@ -1161,7 +1162,7 @@ def test_delete_same_room_twice(self) -> None: # Mock PaginationHandler.purge_room to sleep for 100s, so we have time to do a second call # before the purge is over. Note that it doesn't purge anymore, but we don't care. async def purge_room(room_id: str, force: bool) -> None: - await self.hs.get_clock().sleep(100) + await self.hs.get_clock().sleep(Duration(seconds=100)) self.pagination_handler.purge_room = AsyncMock(side_effect=purge_room) # type: ignore[method-assign] @@ -1464,7 +1465,7 @@ def test_scheduled_purge_room(self) -> None: self._is_purged(room_id) # Wait for next scheduler run - self.reactor.advance(TaskScheduler.SCHEDULE_INTERVAL_MS) + self.reactor.advance(TaskScheduler.SCHEDULE_INTERVAL.as_secs()) self._is_purged(room_id) @@ -1501,7 +1502,7 @@ def test_schedule_shutdown_room(self) -> None: self._is_purged(room_id) # Wait for next scheduler run - self.reactor.advance(TaskScheduler.SCHEDULE_INTERVAL_MS) + self.reactor.advance(TaskScheduler.SCHEDULE_INTERVAL.as_secs()) # Test that all users has been kicked (room is shutdown) self._has_no_members(room_id) diff --git a/tests/rest/client/test_transactions.py b/tests/rest/client/test_transactions.py index 0407bb5347..31586a451f 100644 --- a/tests/rest/client/test_transactions.py +++ b/tests/rest/client/test_transactions.py @@ -29,6 +29,7 @@ from synapse.rest.client.transactions import CLEANUP_PERIOD, HttpTransactionCache from synapse.types import ISynapseReactor, JsonDict from synapse.util.clock import Clock +from synapse.util.duration import Duration from tests import unittest from tests.server import get_clock @@ -93,7 +94,7 @@ def cb() -> Generator["defer.Deferred[object]", object, tuple[int, JsonDict]]: # Ignore `multiple-internal-clocks` linter error here since we are creating a `Clock` # for testing purposes. yield defer.ensureDeferred( - Clock(reactor, server_name="test_server").sleep(0) # type: ignore[multiple-internal-clocks] + Clock(reactor, server_name="test_server").sleep(Duration(seconds=0)) # type: ignore[multiple-internal-clocks] ) return 1, {} diff --git a/tests/server_notices/__init__.py b/tests/server_notices/__init__.py index eca52930db..19bda218e3 100644 --- a/tests/server_notices/__init__.py +++ b/tests/server_notices/__init__.py @@ -20,6 +20,7 @@ from synapse.server import HomeServer from synapse.types import JsonDict from synapse.util.clock import Clock +from synapse.util.duration import Duration from tests import unittest from tests.unittest import override_config @@ -131,7 +132,7 @@ def _check_user_received_server_notice( break # Sleep and try again. - self.get_success(self.clock.sleep(0.1)) + self.get_success(self.clock.sleep(Duration(milliseconds=100))) else: self.fail( f"Failed to join the server notices room. 
No 'join' field in sync_body['rooms']: {sync_body['rooms']}" diff --git a/tests/state/test_v2.py b/tests/state/test_v2.py index 7db710846d..85ce5bede2 100644 --- a/tests/state/test_v2.py +++ b/tests/state/test_v2.py @@ -42,6 +42,7 @@ ) from synapse.storage.databases.main.event_federation import StateDifference from synapse.types import EventID, StateMap +from synapse.util.duration import Duration from tests import unittest @@ -61,7 +62,7 @@ class FakeClock: - async def sleep(self, msec: float) -> None: + async def sleep(self, duration: Duration) -> None: return None diff --git a/tests/state/test_v21.py b/tests/state/test_v21.py index b17773fb56..58d800f921 100644 --- a/tests/state/test_v21.py +++ b/tests/state/test_v21.py @@ -39,6 +39,7 @@ ) from synapse.types import StateMap from synapse.util.clock import Clock +from synapse.util.duration import Duration from tests import unittest from tests.state.test_v2 import TestStateResolutionStore @@ -66,7 +67,7 @@ def monotonic_timestamp() -> int: class FakeClock: - async def sleep(self, duration_ms: float) -> None: + async def sleep(self, duration: Duration) -> None: defer.succeed(None) diff --git a/tests/storage/databases/main/test_lock.py b/tests/storage/databases/main/test_lock.py index 3743a4a386..622eb96ded 100644 --- a/tests/storage/databases/main/test_lock.py +++ b/tests/storage/databases/main/test_lock.py @@ -26,7 +26,7 @@ from twisted.internet.testing import MemoryReactor from synapse.server import HomeServer -from synapse.storage.databases.main.lock import _LOCK_TIMEOUT_MS, _RENEWAL_INTERVAL_MS +from synapse.storage.databases.main.lock import _LOCK_TIMEOUT_MS, _RENEWAL_INTERVAL from synapse.util.clock import Clock from tests import unittest @@ -377,7 +377,7 @@ def test_maintain_lock(self) -> None: # Wait for ages with the lock, we should not be able to get the lock. 
for _ in range(10): - self.reactor.advance((_RENEWAL_INTERVAL_MS / 1000)) + self.reactor.advance((_RENEWAL_INTERVAL.as_secs())) lock2 = self.get_success( self.store.try_acquire_read_write_lock("name", "key", write=True) diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py index 3505423691..e3f79d7670 100644 --- a/tests/storage/test_background_update.py +++ b/tests/storage/test_background_update.py @@ -38,6 +38,7 @@ from synapse.storage.engines import PostgresEngine, Sqlite3Engine from synapse.types import JsonDict from synapse.util.clock import Clock +from synapse.util.duration import Duration from tests import unittest from tests.unittest import override_config @@ -59,7 +60,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: async def update(self, progress: JsonDict, count: int) -> int: duration_ms = 10 - await self.clock.sleep((count * duration_ms) / 1000) + await self.clock.sleep(Duration(milliseconds=count * duration_ms)) progress = {"my_key": progress["my_key"] + 1} await self.store.db_pool.runInteraction( "update_progress", @@ -309,7 +310,7 @@ def test_background_update_min_batch_set_in_config(self) -> None: # Run the update with the long-running update item async def update_long(progress: JsonDict, count: int) -> int: - await self.clock.sleep((count * duration_ms) / 1000) + await self.clock.sleep(Duration(milliseconds=count * duration_ms)) progress = {"my_key": progress["my_key"] + 1} await self.store.db_pool.runInteraction( "update_progress", diff --git a/tests/test_server.py b/tests/test_server.py index 2df6bdfa44..ec31b6cc5f 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -38,6 +38,7 @@ from synapse.types import JsonDict from synapse.util.cancellation import cancellable from synapse.util.clock import Clock +from synapse.util.duration import Duration from tests import unittest from tests.http.server._base import test_disconnect @@ -406,11 +407,11 @@ def __init__(self, clock: Clock): @cancellable async def _async_render_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: - await self.clock.sleep(1.0) + await self.clock.sleep(Duration(seconds=1)) return HTTPStatus.OK, {"result": True} async def _async_render_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: - await self.clock.sleep(1.0) + await self.clock.sleep(Duration(seconds=1)) return HTTPStatus.OK, {"result": True} @@ -423,11 +424,11 @@ def __init__(self, clock: Clock): @cancellable async def _async_render_GET(self, request: SynapseRequest) -> tuple[int, bytes]: - await self.clock.sleep(1.0) + await self.clock.sleep(Duration(seconds=1)) return HTTPStatus.OK, b"ok" async def _async_render_POST(self, request: SynapseRequest) -> tuple[int, bytes]: - await self.clock.sleep(1.0) + await self.clock.sleep(Duration(seconds=1)) return HTTPStatus.OK, b"ok" diff --git a/tests/util/caches/test_response_cache.py b/tests/util/caches/test_response_cache.py index 30cd6ef0e4..def5c817db 100644 --- a/tests/util/caches/test_response_cache.py +++ b/tests/util/caches/test_response_cache.py @@ -26,6 +26,7 @@ from twisted.internet import defer from synapse.util.caches.response_cache import ResponseCache, ResponseCacheContext +from synapse.util.duration import Duration from tests.server import get_clock from tests.unittest import TestCase @@ -55,7 +56,7 @@ async def instant_return(o: str) -> str: return o async def delayed_return(self, o: str) -> str: - await self.clock.sleep(1) + await self.clock.sleep(Duration(seconds=1)) return o def 
test_cache_hit(self) -> None: @@ -182,7 +183,7 @@ def test_cache_context_nocache(self, should_cache: bool) -> None: async def non_caching(o: str, cache_context: ResponseCacheContext[int]) -> str: nonlocal call_count call_count += 1 - await self.clock.sleep(1) + await self.clock.sleep(Duration(seconds=1)) cache_context.should_cache = should_cache return o diff --git a/tests/util/test_logcontext.py b/tests/util/test_logcontext.py index ca805bb20a..a4114cdfcc 100644 --- a/tests/util/test_logcontext.py +++ b/tests/util/test_logcontext.py @@ -37,6 +37,7 @@ ) from synapse.types import ISynapseReactor from synapse.util.clock import Clock +from synapse.util.duration import Duration from tests import unittest from tests.unittest import logcontext_clean @@ -82,7 +83,7 @@ async def competing_callback() -> None: self._check_test_key("sentinel") with LoggingContext(name="competing", server_name="test_server"): - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self._check_test_key("competing") self._check_test_key("sentinel") @@ -94,9 +95,9 @@ async def competing_callback() -> None: reactor.callLater(0, lambda: defer.ensureDeferred(competing_callback())) # type: ignore[call-later-not-tracked] with LoggingContext(name="foo", server_name="test_server"): - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self._check_test_key("foo") - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self._check_test_key("foo") self.assertTrue( @@ -128,7 +129,7 @@ async def competing_callback() -> None: self._check_test_key("looping_call") with LoggingContext(name="competing", server_name="test_server"): - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self._check_test_key("competing") self._check_test_key("looping_call") @@ -139,12 +140,12 @@ async def competing_callback() -> None: with LoggingContext(name="foo", server_name="test_server"): lc = clock.looping_call( - lambda: defer.ensureDeferred(competing_callback()), 0 + lambda: defer.ensureDeferred(competing_callback()), Duration(seconds=0) ) self._check_test_key("foo") - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self._check_test_key("foo") - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self._check_test_key("foo") self.assertTrue( @@ -179,7 +180,7 @@ async def competing_callback() -> None: self._check_test_key("looping_call") with LoggingContext(name="competing", server_name="test_server"): - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self._check_test_key("competing") self._check_test_key("looping_call") @@ -190,10 +191,10 @@ async def competing_callback() -> None: with LoggingContext(name="foo", server_name="test_server"): lc = clock.looping_call_now( - lambda: defer.ensureDeferred(competing_callback()), 0 + lambda: defer.ensureDeferred(competing_callback()), Duration(seconds=0) ) self._check_test_key("foo") - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self._check_test_key("foo") self.assertTrue( @@ -228,7 +229,7 @@ async def competing_callback() -> None: self._check_test_key("call_later") with LoggingContext(name="competing", server_name="test_server"): - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self._check_test_key("competing") self._check_test_key("call_later") @@ -238,11 +239,13 @@ async def competing_callback() -> None: callback_finished = True with LoggingContext(name="foo", server_name="test_server"): - clock.call_later(0, lambda: defer.ensureDeferred(competing_callback())) + clock.call_later( + Duration(seconds=0), 
lambda: defer.ensureDeferred(competing_callback()) + ) self._check_test_key("foo") - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self._check_test_key("foo") - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self._check_test_key("foo") self.assertTrue( @@ -280,7 +283,7 @@ async def competing_callback() -> None: self._check_test_key("foo") with LoggingContext(name="competing", server_name="test_server"): - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self._check_test_key("competing") self._check_test_key("foo") @@ -303,7 +306,7 @@ async def competing_callback() -> None: await d self._check_test_key("foo") - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self.assertTrue( callback_finished, @@ -338,7 +341,7 @@ async def competing_callback() -> None: self._check_test_key("sentinel") with LoggingContext(name="competing", server_name="test_server"): - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self._check_test_key("competing") self._check_test_key("sentinel") @@ -364,7 +367,7 @@ async def competing_callback() -> None: d.callback(None) self._check_test_key("foo") - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self.assertTrue( callback_finished, @@ -400,7 +403,7 @@ async def competing_callback() -> None: self._check_test_key("foo") with LoggingContext(name="competing", server_name="test_server"): - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self._check_test_key("competing") self._check_test_key("foo") @@ -446,7 +449,7 @@ async def competing_callback() -> None: run_in_background(lambda: (d.callback(None), d)[1]) # type: ignore[call-overload, func-returns-value] self._check_test_key("foo") - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self.assertTrue( callback_finished, @@ -486,7 +489,7 @@ def callback(result: object) -> object: # Now wait for the function under test to have run, and check that # the logcontext is left in a sane state. while not callback_finished: - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self._check_test_key("foo") self.assertTrue( @@ -501,7 +504,7 @@ def callback(result: object) -> object: async def test_run_in_background_with_blocking_fn(self) -> None: async def blocking_function() -> None: # Ignore linter error since we are creating a `Clock` for testing purposes. - await Clock(reactor, server_name="test_server").sleep(0) # type: ignore[multiple-internal-clocks] + await Clock(reactor, server_name="test_server").sleep(Duration(seconds=0)) # type: ignore[multiple-internal-clocks] await self._test_run_in_background(blocking_function) @@ -535,7 +538,9 @@ async def test_run_in_background_with_coroutine(self) -> None: async def testfunc() -> None: self._check_test_key("foo") # Ignore linter error since we are creating a `Clock` for testing purposes. 
- d = defer.ensureDeferred(Clock(reactor, server_name="test_server").sleep(0)) # type: ignore[multiple-internal-clocks] + d = defer.ensureDeferred( + Clock(reactor, server_name="test_server").sleep(Duration(seconds=0)) # type: ignore[multiple-internal-clocks] + ) self.assertIs(current_context(), SENTINEL_CONTEXT) await d self._check_test_key("foo") @@ -579,7 +584,7 @@ async def competing_callback() -> None: self._check_test_key("foo") with LoggingContext(name="competing", server_name="test_server"): - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self._check_test_key("competing") self._check_test_key("foo") @@ -591,7 +596,7 @@ async def competing_callback() -> None: with LoggingContext(name="foo", server_name="test_server"): run_coroutine_in_background(competing_callback()) self._check_test_key("foo") - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self._check_test_key("foo") self.assertTrue( diff --git a/tests/util/test_task_scheduler.py b/tests/util/test_task_scheduler.py index e33ded8a7f..2c8e21b339 100644 --- a/tests/util/test_task_scheduler.py +++ b/tests/util/test_task_scheduler.py @@ -26,6 +26,7 @@ from synapse.server import HomeServer from synapse.types import JsonMapping, ScheduledTask, TaskStatus from synapse.util.clock import Clock +from synapse.util.duration import Duration from synapse.util.task_scheduler import TaskScheduler from tests.replication._base import BaseMultiWorkerStreamTestCase @@ -68,7 +69,7 @@ def test_schedule_task(self) -> None: # The timestamp being 30s after now the task should been executed # after the first scheduling loop is run - self.reactor.advance(TaskScheduler.SCHEDULE_INTERVAL_MS / 1000) + self.reactor.advance(TaskScheduler.SCHEDULE_INTERVAL.as_secs()) task = self.get_success(self.task_scheduler.get_task(task_id)) assert task is not None @@ -87,7 +88,7 @@ async def _sleeping_task( self, task: ScheduledTask ) -> tuple[TaskStatus, JsonMapping | None, str | None]: # Sleep for a second - await self.hs.get_clock().sleep(1) + await self.hs.get_clock().sleep(Duration(seconds=1)) return TaskStatus.COMPLETE, None, None def test_schedule_lot_of_tasks(self) -> None: @@ -187,7 +188,7 @@ def test_schedule_resumable_task(self) -> None: # Simulate a synapse restart by emptying the list of running tasks self.task_scheduler._running_tasks = set() - self.reactor.advance((TaskScheduler.SCHEDULE_INTERVAL_MS / 1000)) + self.reactor.advance((TaskScheduler.SCHEDULE_INTERVAL.as_secs())) task = self.get_success(self.task_scheduler.get_task(task_id)) assert task is not None From 119f02e3b385652c85c8acbf9d125c1f406e914e Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Mon, 1 Dec 2025 15:24:26 +0000 Subject: [PATCH 24/47] Return 400 when canonical_alias content invalid (#19240) Fixes #19198 Returns HTTP 400 when `alias` or `alt_alias` inside of `m.room.canonical_alias` `content` are not of type string. Previously this resulted in HTTP 500 errors as Synapse assumed they were strings and would raise an exception when it tried to treat them as such if they actually weren't. With the changes implemented: Screenshot from 2025-11-28 16-48-06 Screenshot from 2025-11-28 16-47-42 ### Pull Request Checklist * [X] Pull request is based on the develop branch * [X] Pull request includes a [changelog file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog). The entry should: - Be a short description of your change which makes sense to users. "Fixed a bug that prevented receiving messages from other servers." 
instead of "Moved X method from `EventStore` to `EventWorkerStore`.". - Use markdown where necessary, mostly for `code blocks`. - End with either a period (.) or an exclamation mark (!). - Start with a capital letter. - Feel free to credit yourself, by adding a sentence "Contributed by @github_username." or "Contributed by [Your Name]." to the end of the entry. * [X] [Code style](https://element-hq.github.io/synapse/latest/code_style.html) is correct (run the [linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters)) --- changelog.d/19240.bugfix | 1 + synapse/handlers/message.py | 12 ++++++++++++ tests/rest/client/test_rooms.py | 2 ++ 3 files changed, 15 insertions(+) create mode 100644 changelog.d/19240.bugfix diff --git a/changelog.d/19240.bugfix b/changelog.d/19240.bugfix new file mode 100644 index 0000000000..d8490bcc1f --- /dev/null +++ b/changelog.d/19240.bugfix @@ -0,0 +1 @@ +Fix bug where invalid `canonical_alias` content would return 500 instead of 400. diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index bac4bd9361..a6499de3a8 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -1955,6 +1955,12 @@ async def persist_and_notify_client_events( room_alias_str = event.content.get("alias", None) directory_handler = self.hs.get_directory_handler() if room_alias_str and room_alias_str != original_alias: + if not isinstance(room_alias_str, str): + raise SynapseError( + 400, + "The alias must be of type string.", + Codes.INVALID_PARAM, + ) await self._validate_canonical_alias( directory_handler, room_alias_str, event.room_id ) @@ -1978,6 +1984,12 @@ async def persist_and_notify_client_events( new_alt_aliases = set(alt_aliases) - set(original_alt_aliases) if new_alt_aliases: for alias_str in new_alt_aliases: + if not isinstance(alias_str, str): + raise SynapseError( + 400, + "Each alt_alias must be of type string.", + Codes.INVALID_PARAM, + ) await self._validate_canonical_alias( directory_handler, alias_str, event.room_id ) diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index 68e09afc54..926560afd6 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -3880,9 +3880,11 @@ def test_bad_data(self) -> None: self._set_canonical_alias({"alt_aliases": False}, expected_code=400) self._set_canonical_alias({"alt_aliases": True}, expected_code=400) self._set_canonical_alias({"alt_aliases": {}}, expected_code=400) + self._set_canonical_alias({"alt_aliases": [0]}, expected_code=400) def test_bad_alias(self) -> None: """An alias which does not point to the room raises a SynapseError.""" + self._set_canonical_alias({"alias": {"@unknown:test": "a"}}, expected_code=400) self._set_canonical_alias({"alias": "@unknown:test"}, expected_code=400) self._set_canonical_alias({"alt_aliases": ["@unknown:test"]}, expected_code=400) From b4ee0bf71e7082abda612a36299e9a73f8e99ac0 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Dec 2025 15:38:19 +0000 Subject: [PATCH 25/47] Bump actions/setup-python from 6.0.0 to 6.1.0 (#19245) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [actions/setup-python](https://github.com/actions/setup-python) from 6.0.0 to 6.1.0.
Release notes

Sourced from actions/setup-python's releases.

v6.1.0

Full Changelog: https://github.com/actions/setup-python/compare/v6...v6.1.0

Commits
  • 83679a8 Bump @​types/node from 24.1.0 to 24.9.1 and update macos-13 to macos-15-intel ...
  • bfc4944 Bump prettier from 3.5.3 to 3.6.2 (#1234)
  • 97aeb3e Bump requests from 2.32.2 to 2.32.4 in /tests/data (#1130)
  • 443da59 Bump actions/publish-action from 0.3.0 to 0.4.0 & Documentation update for pi...
  • cfd55ca graalpy: add graalpy early-access and windows builds (#880)
  • bba65e5 Bump typescript from 5.4.2 to 5.9.3 and update docs/advanced-usage.md (#1094)
  • 18566f8 Improve wording and "fix example" (remove 3.13) on testing against pre-releas...
  • 2e3e4b1 Add support for pip-install input (#1201)
  • 4267e28 Bump urllib3 from 1.26.19 to 2.5.0 in /tests/data and document breaking c...
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=actions/setup-python&package-manager=github_actions&previous-version=6.0.0&new-version=6.1.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/docs-pr.yaml | 2 +- .github/workflows/docs.yaml | 2 +- .github/workflows/latest_deps.yml | 2 +- .github/workflows/poetry_lockfile.yaml | 2 +- .github/workflows/release-artifacts.yml | 8 ++++---- .github/workflows/schema.yaml | 4 ++-- .github/workflows/tests.yml | 12 ++++++------ 7 files changed, 16 insertions(+), 16 deletions(-) diff --git a/.github/workflows/docs-pr.yaml b/.github/workflows/docs-pr.yaml index d59e069171..4d28533a27 100644 --- a/.github/workflows/docs-pr.yaml +++ b/.github/workflows/docs-pr.yaml @@ -24,7 +24,7 @@ jobs: mdbook-version: '0.4.17' - name: Setup python - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: "3.x" diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index d02428db19..51944b13e8 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -64,7 +64,7 @@ jobs: run: echo 'window.SYNAPSE_VERSION = "${{ needs.pre.outputs.branch-version }}";' > ./docs/website_files/version.js - name: Setup python - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: "3.x" diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml index eee8dc7e0b..a4865fea81 100644 --- a/.github/workflows/latest_deps.yml +++ b/.github/workflows/latest_deps.yml @@ -93,7 +93,7 @@ jobs: -e POSTGRES_PASSWORD=postgres \ -e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \ postgres:${{ matrix.postgres-version }} - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: "3.x" - run: pip install .[all,test] diff --git a/.github/workflows/poetry_lockfile.yaml b/.github/workflows/poetry_lockfile.yaml index 62b796287c..5c139bf574 100644 --- a/.github/workflows/poetry_lockfile.yaml +++ b/.github/workflows/poetry_lockfile.yaml @@ -17,7 +17,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.x' - run: pip install tomli diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index e63d65fdf3..03c2b0a326 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -28,7 +28,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: "3.x" - id: set-distros @@ -74,7 +74,7 @@ jobs: ${{ runner.os }}-buildx- - name: Set up python - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: "3.x" @@ -134,7 +134,7 @@ jobs: steps: - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - - uses: 
actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: # setup-python@v4 doesn't impose a default python version. Need to use 3.x # here, because `python` on osx points to Python 2.7. @@ -171,7 +171,7 @@ jobs: steps: - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: "3.10" diff --git a/.github/workflows/schema.yaml b/.github/workflows/schema.yaml index 52b5cd4c53..0755a5f023 100644 --- a/.github/workflows/schema.yaml +++ b/.github/workflows/schema.yaml @@ -15,7 +15,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: "3.x" - name: Install check-jsonschema @@ -41,7 +41,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: "3.x" - name: Install PyYAML diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index c32d018a64..7138268a2b 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -107,7 +107,7 @@ jobs: steps: - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: "3.x" - run: "pip install 'click==8.1.1' 'GitPython>=3.1.20' 'sqlglot>=28.0.0'" @@ -117,7 +117,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: "3.x" - run: .ci/scripts/check_lockfile.py @@ -200,7 +200,7 @@ jobs: with: ref: ${{ github.event.pull_request.head.sha }} fetch-depth: 0 - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: "3.x" - run: "pip install 'towncrier>=18.6.0rc1'" @@ -308,7 +308,7 @@ jobs: if: ${{ needs.changes.outputs.linting_readme == 'true' }} steps: - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: "3.x" - run: "pip install rstcheck" @@ -356,7 +356,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: "3.x" - id: get-matrix @@ -447,7 +447,7 @@ jobs: sudo apt-get -qq install build-essential libffi-dev python3-dev \ libxml2-dev libxslt-dev xmlsec1 zlib1g-dev 
libjpeg-dev libwebp-dev - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.10' From bf6163c8bf43c5d1e33ee48ae06ec44ce43681c9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Dec 2025 15:38:50 +0000 Subject: [PATCH 26/47] Bump docker/metadata-action from 5.9.0 to 5.10.0 (#19246) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [docker/metadata-action](https://github.com/docker/metadata-action) from 5.9.0 to 5.10.0.
Release notes

Sourced from docker/metadata-action's releases.

v5.10.0

Full Changelog: https://github.com/docker/metadata-action/compare/v5.9.0...v5.10.0

Commits
  • c299e40 Merge pull request #569 from docker/dependabot/npm_and_yarn/docker/actions-to...
  • f015d79 chore: update generated content
  • 121bcc2 chore(deps): Bump @​docker/actions-toolkit from 0.67.0 to 0.68.0
  • f7b6bf4 Merge pull request #564 from docker/dependabot/npm_and_yarn/js-yaml-3.14.2
  • 0b95c6b Merge pull request #565 from docker/dependabot/github_actions/actions/checkout-6
  • 17f70d7 Merge pull request #568 from motoki317/docs/fix-to-24h-schedule-pattern
  • afd7e6d docs(README): Fix date format from 12h to 24h in schedule pattern
  • 602aff8 chore(deps): Bump actions/checkout from 5 to 6
  • aecb1a4 chore(deps): Bump js-yaml from 3.14.1 to 3.14.2
  • 8d8c7c1 Merge pull request #559 from docker/dependabot/npm_and_yarn/docker/actions-to...
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=docker/metadata-action&package-manager=github_actions&previous-version=5.9.0&new-version=5.10.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/docker.yml | 2 +- .github/workflows/push_complement_image.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index d0cdd3acaf..aaf1e22d3c 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -123,7 +123,7 @@ jobs: uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0 - name: Calculate docker image tag - uses: docker/metadata-action@318604b99e75e41977312d83839a89be02ca4893 # v5.9.0 + uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0 with: images: ${{ matrix.repository }} flavor: | diff --git a/.github/workflows/push_complement_image.yml b/.github/workflows/push_complement_image.yml index c562275a38..ed82482505 100644 --- a/.github/workflows/push_complement_image.yml +++ b/.github/workflows/push_complement_image.yml @@ -55,7 +55,7 @@ jobs: password: ${{ secrets.GITHUB_TOKEN }} - name: Work out labels for complement image id: meta - uses: docker/metadata-action@318604b99e75e41977312d83839a89be02ca4893 # v5.9.0 + uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0 with: images: ghcr.io/${{ github.repository }}/complement-synapse tags: | From 58dd25976cfb8f485c4b106c3edab28309bdbcfa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Dec 2025 15:41:40 +0000 Subject: [PATCH 27/47] Bump http from 1.3.1 to 1.4.0 (#19249) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [http](https://github.com/hyperium/http) from 1.3.1 to 1.4.0.
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/docker.yml | 2 +- .github/workflows/push_complement_image.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index d0cdd3acaf..aaf1e22d3c 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -123,7 +123,7 @@ jobs: uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0 - name: Calculate docker image tag - uses: docker/metadata-action@318604b99e75e41977312d83839a89be02ca4893 # v5.9.0 + uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0 with: images: ${{ matrix.repository }} flavor: | diff --git a/.github/workflows/push_complement_image.yml b/.github/workflows/push_complement_image.yml index c562275a38..ed82482505 100644 --- a/.github/workflows/push_complement_image.yml +++ b/.github/workflows/push_complement_image.yml @@ -55,7 +55,7 @@ jobs: password: ${{ secrets.GITHUB_TOKEN }} - name: Work out labels for complement image id: meta - uses: docker/metadata-action@318604b99e75e41977312d83839a89be02ca4893 # v5.9.0 + uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0 with: images: ghcr.io/${{ github.repository }}/complement-synapse tags: | From 58dd25976cfb8f485c4b106c3edab28309bdbcfa Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Dec 2025 15:41:40 +0000 Subject: [PATCH 27/47] Bump http from 1.3.1 to 1.4.0 (#19249) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [http](https://github.com/hyperium/http) from 1.3.1 to 1.4.0.
Release notes

Sourced from http's releases.

v1.4.0

Highlights

  • Add StatusCode::EARLY_HINTS constant for 103 Early Hints.
  • Make StatusCode::from_u16 now a const fn.
  • Make Authority::from_static now a const fn.
  • Make PathAndQuery::from_static now a const fn.
  • MSRV increased to 1.57 (allows legible const fn panic messages).

Full Changelog: https://github.com/hyperium/http/compare/v1.3.1...v1.4.0

Changelog

Sourced from http's changelog.

1.4.0 (November 24, 2025)

  • Add StatusCode::EARLY_HINTS constant for 103 Early Hints.
  • Make StatusCode::from_u16 now a const fn.
  • Make Authority::from_static now a const fn.
  • Make PathAndQuery::from_static now a const fn.
  • MSRV increased to 1.57 (allows legible const fn panic messages).
Commits
  • b9625d8 v1.4.0
  • 50b009c refactor(header): inline FNV hasher to reduce dependencies (#796)
  • b370d36 feat(uri): make Authority/PathAndQuery::from_static const (#786)
  • 0d74251 chore(ci): update to actions/checkout@v5 (#800)
  • a760767 docs: remove unnecessary extern crate sentence (#799)
  • fb1d457 refactor(header): use better panic message in const HeaderName and HeaderValu...
  • 20dbd6e feat(status): Add 103 EARLY_HINTS status code (#758)
  • e7a7337 chore: bump MSRV to 1.57
  • 1888e28 tests: downgrade rand back to 0.8 for now
  • 918bbc3 chore: minor improvement for docs (#790)
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=http&package-manager=cargo&previous-version=1.3.1&new-version=1.4.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c89d0829ba..007428a380 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -374,12 +374,11 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "http" -version = "1.3.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" +checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a" dependencies = [ "bytes", - "fnv", "itoa", ] From d435cfc125f49ccf4c8897923b498b4fb49b4e54 Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Mon, 1 Dec 2025 15:47:36 +0000 Subject: [PATCH 28/47] Add mention of future deprecations to release script (#19239) Small improvement to the release script to prompt the user to consider upcoming deprecations that should be mentioned in the changelog. ### Pull Request Checklist * [X] Pull request is based on the develop branch * [X] Pull request includes a [changelog file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog). The entry should: - Be a short description of your change which makes sense to users. "Fixed a bug that prevented receiving messages from other servers." instead of "Moved X method from `EventStore` to `EventWorkerStore`.". - Use markdown where necessary, mostly for `code blocks`. - End with either a period (.) or an exclamation mark (!). - Start with a capital letter. - Feel free to credit yourself, by adding a sentence "Contributed by @github_username." or "Contributed by [Your Name]." to the end of the entry. * [X] [Code style](https://element-hq.github.io/synapse/latest/code_style.html) is correct (run the [linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters)) --------- Co-authored-by: Olivier 'reivilibre' --- changelog.d/19239.misc | 1 + scripts-dev/release.py | 6 ++++++ 2 files changed, 7 insertions(+) create mode 100644 changelog.d/19239.misc diff --git a/changelog.d/19239.misc b/changelog.d/19239.misc new file mode 100644 index 0000000000..fd5757eb77 --- /dev/null +++ b/changelog.d/19239.misc @@ -0,0 +1 @@ +Prompt user to consider adding future deprecations to the changelog in release script. diff --git a/scripts-dev/release.py b/scripts-dev/release.py index ba95a19382..17eadbf6c3 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -291,6 +291,12 @@ def _prepare() -> None: synapse_repo.git.add("-u") subprocess.run("git diff --cached", shell=True) + print( + "Consider any upcoming platform deprecations that should be mentioned in the changelog. (e.g. upcoming Python, PostgreSQL or SQLite deprecations)" + ) + print( + "Platform deprecations should be mentioned at least 1 release prior to being unsupported." 
+ ) if click.confirm("Edit changelog?", default=False): click.edit(filename="CHANGES.md") From c20dd888bd7e15d84e98ef59fd3228d3578d0db2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Andre=20Kl=C3=A4rner?= Date: Mon, 1 Dec 2025 17:05:07 +0100 Subject: [PATCH 29/47] Document how merging config files works - see #11203 (#19243) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --------- Signed-off-by: Andre Klärner Co-authored-by: Olivier 'reivilibre' --- changelog.d/19243.doc | 1 + synapse/config/_base.py | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) create mode 100644 changelog.d/19243.doc diff --git a/changelog.d/19243.doc b/changelog.d/19243.doc new file mode 100644 index 0000000000..3a396c88d5 --- /dev/null +++ b/changelog.d/19243.doc @@ -0,0 +1 @@ +Document in the `--config-path` help how multiple files are merged - by merging them shallowly. diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 95a00c6718..43dece4a08 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -672,7 +672,8 @@ def load_or_generate_config( action="append", metavar="CONFIG_FILE", help="Specify config file. Can be given multiple times and" - " may specify directories containing *.yaml files.", + " may specify directories containing *.yaml files." + " Top-level keys in later files overwrite ones in earlier files.", ) parser.add_argument( "--no-secrets-in-config", From 38588f9462c06adcc4b020e0da48164af01a58a6 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Dec 2025 16:25:31 +0000 Subject: [PATCH 30/47] Bump Swatinem/rust-cache from 2.8.1 to 2.8.2 (#19244) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [Swatinem/rust-cache](https://github.com/swatinem/rust-cache) from 2.8.1 to 2.8.2.
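Returning briefly to the config-merge documentation in #19243 above: a minimal sketch of the shallow merge it describes, assuming each `--config-path` file parses to a plain YAML mapping. `shallow_merge` is a hypothetical helper for illustration, not Synapse's actual loader (which additionally expands directories of `*.yaml` files).

```python
import yaml

def shallow_merge(paths: list[str]) -> dict:
    """Merge config files shallowly: later files win per top-level key."""
    merged: dict = {}
    for path in paths:
        with open(path) as f:
            loaded = yaml.safe_load(f) or {}
        # dict.update() replaces whole top-level keys; nested keys under a
        # replaced key are NOT combined recursively.
        merged.update(loaded)
    return merged
```

The practical consequence: if two files both define `listeners`, the later file's entire `listeners` block takes effect rather than a key-by-key combination.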
Release notes

Sourced from Swatinem/rust-cache's releases.

v2.8.2

Full Changelog: https://github.com/Swatinem/rust-cache/compare/v2.8.1...v2.8.2

Changelog

Sourced from Swatinem/rust-cache's changelog.

Changelog

2.8.2

  • Don't overwrite env for cargo-metadata call

2.8.1

  • Set empty CARGO_ENCODED_RUSTFLAGS when retrieving metadata
  • Various dependency updates

2.8.0

  • Add support for warpbuild cache provider
  • Add new cache-workspace-crates feature

2.7.8

  • Include CPU arch in the cache key

2.7.7

  • Also cache cargo install metadata

2.7.6

  • Allow opting out of caching $CARGO_HOME/bin
  • Add runner OS in cache key
  • Adds an option to do lookup-only of the cache

2.7.5

  • Support Cargo.lock format cargo-lock v4
  • Only run macOsWorkaround() on macOS

2.7.3

  • Work around upstream problem that causes cache saving to hang for minutes.

2.7.2

  • Only key by Cargo.toml and Cargo.lock files of workspace members.

2.7.1

  • Update toml parser to fix parsing errors.

2.7.0

  • Properly cache trybuild tests.

... (truncated)

Commits
  • 779680d 2.8.2
  • 2ea64ef Bump smol-toml from 1.4.2 to 1.5.2 in the prd-minor group (#287)
  • 8930d9c Bump the actions group with 3 updates (#288)
  • c071727 Bump @​actions/io from 1.1.3 to 2.0.0 in the prd-major group (#281)
  • f2a41b7 Bump @​types/node from 24.9.0 to 24.10.0 in the dev-minor group (#282)
  • e306f83 Don't overwrite env for cargo-metadata call (#285)
  • c911900 Merge pull request #284 from Swatinem/dependabot/github_actions/actions-baeb0...
  • 3aaed55 Bump the actions group with 2 updates
  • 972b315 Merge pull request #283 from Swatinem/dependabot/github_actions/actions-b360d...
  • 07caf06 Bump taiki-e/install-action from 2.62.45 to 2.62.49 in the actions group
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=Swatinem/rust-cache&package-manager=github_actions&previous-version=2.8.1&new-version=2.8.2)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/fix_lint.yaml | 2 +- .github/workflows/latest_deps.yml | 6 +++--- .github/workflows/tests.yml | 24 ++++++++++++------------ .github/workflows/twisted_trunk.yml | 6 +++--- 4 files changed, 19 insertions(+), 19 deletions(-) diff --git a/.github/workflows/fix_lint.yaml b/.github/workflows/fix_lint.yaml index a5469be56c..9daea3f378 100644 --- a/.github/workflows/fix_lint.yaml +++ b/.github/workflows/fix_lint.yaml @@ -25,7 +25,7 @@ jobs: with: toolchain: ${{ env.RUST_VERSION }} components: clippy, rustfmt - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 - name: Setup Poetry uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0 diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml index a4865fea81..c356ee8e3d 100644 --- a/.github/workflows/latest_deps.yml +++ b/.github/workflows/latest_deps.yml @@ -47,7 +47,7 @@ jobs: uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master with: toolchain: ${{ env.RUST_VERSION }} - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 # The dev dependencies aren't exposed in the wheel metadata (at least with current # poetry-core versions), so we install with poetry. @@ -83,7 +83,7 @@ jobs: uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master with: toolchain: ${{ env.RUST_VERSION }} - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 - run: sudo apt-get -qq install xmlsec1 - name: Set up PostgreSQL ${{ matrix.postgres-version }} @@ -158,7 +158,7 @@ jobs: uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master with: toolchain: ${{ env.RUST_VERSION }} - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 - name: Ensure sytest runs `pip install` # Delete the lockfile so sytest will `pip install` rather than `poetry install` diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index 7138268a2b..aff2832b94 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -91,7 +91,7 @@ jobs: uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master with: toolchain: ${{ env.RUST_VERSION }} - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0 with: python-version: "3.x" @@ -157,7 +157,7 @@ jobs: uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master with: toolchain: ${{ env.RUST_VERSION }} - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 - name: Setup Poetry uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0 @@ -221,7 +221,7 @@ jobs: with: components: clippy toolchain: ${{ env.RUST_VERSION }} - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + 
- uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 - run: cargo clippy -- -D warnings @@ -240,7 +240,7 @@ jobs: with: toolchain: nightly-2025-04-23 components: clippy - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 - run: cargo clippy --all-features -- -D warnings @@ -257,7 +257,7 @@ jobs: uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master with: toolchain: ${{ env.RUST_VERSION }} - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 - name: Setup Poetry uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0 @@ -296,7 +296,7 @@ jobs: # `.rustfmt.toml`. toolchain: nightly-2025-04-23 components: rustfmt - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 - run: cargo fmt --check @@ -394,7 +394,7 @@ jobs: uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master with: toolchain: ${{ env.RUST_VERSION }} - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0 with: @@ -438,7 +438,7 @@ jobs: uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master with: toolchain: ${{ env.RUST_VERSION }} - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 # There aren't wheels for some of the older deps, so we need to install # their build dependencies @@ -555,7 +555,7 @@ jobs: uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master with: toolchain: ${{ env.RUST_VERSION }} - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 - name: Run SyTest run: /bootstrap.sh synapse @@ -701,7 +701,7 @@ jobs: uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master with: toolchain: ${{ env.RUST_VERSION }} - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 - name: Prepare Complement's Prerequisites run: synapse/.ci/scripts/setup_complement_prerequisites.sh @@ -735,7 +735,7 @@ jobs: uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master with: toolchain: ${{ env.RUST_VERSION }} - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 - run: cargo test @@ -755,7 +755,7 @@ jobs: uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master with: toolchain: nightly-2022-12-01 - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 - run: cargo bench --no-run diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml index b07f98b1cb..325902f131 100644 --- a/.github/workflows/twisted_trunk.yml +++ b/.github/workflows/twisted_trunk.yml @@ -49,7 +49,7 @@ jobs: uses: 
dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master with: toolchain: ${{ env.RUST_VERSION }} - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0 with: @@ -77,7 +77,7 @@ jobs: uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master with: toolchain: ${{ env.RUST_VERSION }} - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0 with: @@ -123,7 +123,7 @@ jobs: uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master with: toolchain: ${{ env.RUST_VERSION }} - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 - name: Patch dependencies # Note: The poetry commands want to create a virtualenv in /src/.venv/, From c09298eeaf40438980b76f46ce0ef9c883963160 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Dec 2025 16:45:41 +0000 Subject: [PATCH 31/47] Bump pydantic from 2.12.4 to 2.12.5 (#19250) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [pydantic](https://github.com/pydantic/pydantic) from 2.12.4 to 2.12.5.
Release notes

Sourced from pydantic's releases.

v2.12.5 2025-11-26

v2.12.5 (2025-11-26)

This is the fifth 2.12 patch release, addressing an issue with the MISSING sentinel and providing several documentation improvements.

The next 2.13 minor release will be published in a couple weeks, and will include a new polymorphic serialization feature addressing the remaining unexpected changes to the serialize as any behavior.

  • Fix pickle error when using model_construct() on a model with MISSING as a default value by @​ornariece in #12522.
  • Several updates to the documentation by @​Viicos.

Full Changelog: https://github.com/pydantic/pydantic/compare/v2.12.4...v2.12.5
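For context on the `model_construct()` bullet above: `model_construct()` builds a model instance without running validation and fills unset fields from their defaults, which is the code path where a `MISSING` sentinel default previously broke pickling. A minimal sketch of that API under ordinary defaults (the `MISSING` sentinel itself is an experimental pydantic feature, elided here):

```python
import pickle

from pydantic import BaseModel

class Task(BaseModel):
    name: str
    retries: int = 3

# model_construct() skips validation entirely and copies defaults for
# fields that were not passed -- the code path patched in 2.12.5.
task = Task.model_construct(name="sync")
assert task.retries == 3
assert pickle.loads(pickle.dumps(task)) == task
```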

Changelog

Sourced from pydantic's changelog.

v2.12.5 (2025-11-26)

GitHub release

This is the fifth 2.12 patch release, addressing an issue with the MISSING sentinel and providing several documentation improvements.

The next 2.13 minor release will be published in a couple weeks, and will include a new polymorphic serialization feature addressing the remaining unexpected changes to the serialize as any behavior.

  • Fix pickle error when using model_construct() on a model with MISSING as a default value by @​ornariece in #12522.
  • Several updates to the documentation by @​Viicos.
Commits
  • bd2d0dd Prepare release v2.12.5
  • 7d0302e Document security implications when using create_model()
  • e9ef980 Fix typo in Standard Library Types documentation
  • f2c20c0 Add pydantic-docs dev dependency, make use of versioning blocks
  • a76c1aa Update documentation about JSON Schema
  • 8cbc72c Add documentation about custom __init__()
  • 99eba59 Add additional test for FieldInfo.get_default()
  • c710769 Special case MISSING sentinel in smart_deepcopy()
  • 20a9d77 Do not delete mock validator/serializer in rebuild_dataclass()
  • c86515a Update parts of the model and revalidate_instances documentation
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=pydantic&package-manager=pip&previous-version=2.12.4&new-version=2.12.5)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 50 +++++++++++++++++++++++++------------------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/poetry.lock b/poetry.lock index 35c642fdeb..ee45f3e464 100644 --- a/poetry.lock +++ b/poetry.lock @@ -31,7 +31,7 @@ description = "The ultimate Python library in building OAuth and OpenID Connect optional = true python-versions = ">=3.9" groups = ["main"] -markers = "extra == \"oidc\" or extra == \"jwt\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"jwt\" or extra == \"oidc\"" files = [ {file = "authlib-1.6.5-py2.py3-none-any.whl", hash = "sha256:3e0e0507807f842b02175507bdee8957a1d5707fd4afb17c32fb43fee90b6e3a"}, {file = "authlib-1.6.5.tar.gz", hash = "sha256:6aaf9c79b7cc96c900f0b284061691c5d4e61221640a948fe690b556a6d6d10b"}, @@ -446,7 +446,7 @@ description = "XML bomb protection for Python stdlib modules" optional = true python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" groups = ["main"] -markers = "extra == \"saml2\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"saml2\"" files = [ {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"}, {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"}, @@ -471,7 +471,7 @@ description = "XPath 1.0/2.0/3.0/3.1 parsers and selectors for ElementTree and l optional = true python-versions = ">=3.7" groups = ["main"] -markers = "extra == \"saml2\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"saml2\"" files = [ {file = "elementpath-4.1.5-py3-none-any.whl", hash = "sha256:2ac1a2fb31eb22bbbf817f8cf6752f844513216263f0e3892c8e79782fe4bb55"}, {file = "elementpath-4.1.5.tar.gz", hash = "sha256:c2d6dc524b29ef751ecfc416b0627668119d8812441c555d7471da41d4bacb8d"}, @@ -521,7 +521,7 @@ description = "Python wrapper for hiredis" optional = true python-versions = ">=3.8" groups = ["main"] -markers = "extra == \"redis\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"redis\"" files = [ {file = "hiredis-3.3.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:9937d9b69321b393fbace69f55423480f098120bc55a3316e1ca3508c4dbbd6f"}, {file = "hiredis-3.3.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:50351b77f89ba6a22aff430b993653847f36b71d444509036baa0f2d79d1ebf4"}, @@ -844,7 +844,7 @@ description = "Jaeger Python OpenTracing Tracer implementation" optional = true python-versions = ">=3.7" groups = ["main"] -markers = "extra == \"opentracing\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"opentracing\"" files = [ {file = "jaeger-client-4.8.0.tar.gz", hash = "sha256:3157836edab8e2c209bd2d6ae61113db36f7ee399e66b1dcbb715d87ab49bfe0"}, ] @@ -982,7 +982,7 @@ description = "A strictly RFC 4510 conforming LDAP V3 pure Python client library optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\"" files = [ {file = "ldap3-2.9.1-py2.py3-none-any.whl", hash = "sha256:5869596fc4948797020d3f03b7939da938778a0f9e2009f7a072ccf92b8e8d70"}, {file = "ldap3-2.9.1.tar.gz", hash = "sha256:f3e7fc4718e3f09dda568b57100095e0ce58633bcabbed8667ce3f8fbaa4229f"}, @@ -998,7 +998,7 @@ description = "Powerful and Pythonic XML processing library combining libxml2/li optional = 
true python-versions = ">=3.8" groups = ["main"] -markers = "extra == \"url-preview\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"url-preview\"" files = [ {file = "lxml-6.0.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e77dd455b9a16bbd2a5036a63ddbd479c19572af81b624e79ef422f929eef388"}, {file = "lxml-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:5d444858b9f07cefff6455b983aea9a67f7462ba1f6cbe4a21e8bf6791bf2153"}, @@ -1284,7 +1284,7 @@ description = "An LDAP3 auth provider for Synapse" optional = true python-versions = ">=3.7" groups = ["main"] -markers = "extra == \"matrix-synapse-ldap3\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"matrix-synapse-ldap3\"" files = [ {file = "matrix-synapse-ldap3-0.3.0.tar.gz", hash = "sha256:8bb6517173164d4b9cc44f49de411d8cebdb2e705d5dd1ea1f38733c4a009e1d"}, {file = "matrix_synapse_ldap3-0.3.0-py3-none-any.whl", hash = "sha256:8b4d701f8702551e98cc1d8c20dbed532de5613584c08d0df22de376ba99159d"}, @@ -1526,7 +1526,7 @@ description = "OpenTracing API for Python. See documentation at http://opentraci optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"opentracing\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"opentracing\"" files = [ {file = "opentracing-2.4.0.tar.gz", hash = "sha256:a173117e6ef580d55874734d1fa7ecb6f3655160b8b8974a2a1e98e5ec9c840d"}, ] @@ -1716,7 +1716,7 @@ description = "psycopg2 - Python-PostgreSQL Database Adapter" optional = true python-versions = ">=3.9" groups = ["main"] -markers = "extra == \"postgres\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"postgres\"" files = [ {file = "psycopg2-2.9.11-cp310-cp310-win_amd64.whl", hash = "sha256:103e857f46bb76908768ead4e2d0ba1d1a130e7b8ed77d3ae91e8b33481813e8"}, {file = "psycopg2-2.9.11-cp311-cp311-win_amd64.whl", hash = "sha256:210daed32e18f35e3140a1ebe059ac29209dd96468f2f7559aa59f75ee82a5cb"}, @@ -1734,7 +1734,7 @@ description = ".. 
image:: https://travis-ci.org/chtd/psycopg2cffi.svg?branch=mas optional = true python-versions = "*" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")" +markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")" files = [ {file = "psycopg2cffi-2.9.0.tar.gz", hash = "sha256:7e272edcd837de3a1d12b62185eb85c45a19feda9e62fa1b120c54f9e8d35c52"}, ] @@ -1750,7 +1750,7 @@ description = "A Simple library to enable psycopg2 compatability" optional = true python-versions = "*" groups = ["main"] -markers = "platform_python_implementation == \"PyPy\" and (extra == \"postgres\" or extra == \"all\")" +markers = "platform_python_implementation == \"PyPy\" and (extra == \"all\" or extra == \"postgres\")" files = [ {file = "psycopg2cffi-compat-1.1.tar.gz", hash = "sha256:d25e921748475522b33d13420aad5c2831c743227dc1f1f2585e0fdb5c914e05"}, ] @@ -1799,14 +1799,14 @@ files = [ [[package]] name = "pydantic" -version = "2.12.4" +version = "2.12.5" description = "Data validation using Python type hints" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "pydantic-2.12.4-py3-none-any.whl", hash = "sha256:92d3d202a745d46f9be6df459ac5a064fdaa3c1c4cd8adcfa332ccf3c05f871e"}, - {file = "pydantic-2.12.4.tar.gz", hash = "sha256:0f8cb9555000a4b5b617f66bfd2566264c4984b27589d3b845685983e8ea85ac"}, + {file = "pydantic-2.12.5-py3-none-any.whl", hash = "sha256:e561593fccf61e8a20fc46dfc2dfe075b8be7d0188df33f221ad1f0139180f9d"}, + {file = "pydantic-2.12.5.tar.gz", hash = "sha256:4d351024c75c0f085a9febbb665ce8c0c6ec5d30e903bdb6394b7ede26aebb49"}, ] [package.dependencies] @@ -2031,7 +2031,7 @@ description = "A development tool to measure, monitor and analyze the memory beh optional = true python-versions = ">=3.6" groups = ["main"] -markers = "extra == \"cache-memory\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"cache-memory\"" files = [ {file = "Pympler-1.0.1-py3-none-any.whl", hash = "sha256:d260dda9ae781e1eab6ea15bacb84015849833ba5555f141d2d9b7b7473b307d"}, {file = "Pympler-1.0.1.tar.gz", hash = "sha256:993f1a3599ca3f4fcd7160c7545ad06310c9e12f70174ae7ae8d4e25f6c5d3fa"}, @@ -2091,7 +2091,7 @@ description = "Python implementation of SAML Version 2 Standard" optional = true python-versions = ">=3.9,<4.0" groups = ["main"] -markers = "extra == \"saml2\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"saml2\"" files = [ {file = "pysaml2-7.5.0-py3-none-any.whl", hash = "sha256:bc6627cc344476a83c757f440a73fda1369f13b6fda1b4e16bca63ffbabb5318"}, {file = "pysaml2-7.5.0.tar.gz", hash = "sha256:f36871d4e5ee857c6b85532e942550d2cf90ea4ee943d75eb681044bbc4f54f7"}, @@ -2116,7 +2116,7 @@ description = "Extensions to the standard Python datetime module" optional = true python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" groups = ["main"] -markers = "extra == \"saml2\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"saml2\"" files = [ {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, @@ -2144,7 +2144,7 @@ description = "World timezone definitions, modern and historical" optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"saml2\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"saml2\"" files = [ {file = 
"pytz-2022.7.1-py2.py3-none-any.whl", hash = "sha256:78f4f37d8198e0627c5f1143240bb0206b8691d8d7ac6d78fee88b78733f8c4a"}, {file = "pytz-2022.7.1.tar.gz", hash = "sha256:01a0681c4b9684a28304615eba55d1ab31ae00bf68ec157ec3708a8182dbbcd0"}, @@ -2548,7 +2548,7 @@ description = "Python client for Sentry (https://sentry.io)" optional = true python-versions = ">=3.6" groups = ["main"] -markers = "extra == \"sentry\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"sentry\"" files = [ {file = "sentry_sdk-2.46.0-py2.py3-none-any.whl", hash = "sha256:4eeeb60198074dff8d066ea153fa6f241fef1668c10900ea53a4200abc8da9b1"}, {file = "sentry_sdk-2.46.0.tar.gz", hash = "sha256:91821a23460725734b7741523021601593f35731808afc0bb2ba46c27b8acd91"}, @@ -2758,7 +2758,7 @@ description = "Tornado IOLoop Backed Concurrent Futures" optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"opentracing\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"opentracing\"" files = [ {file = "threadloop-1.0.2-py2-none-any.whl", hash = "sha256:5c90dbefab6ffbdba26afb4829d2a9df8275d13ac7dc58dccb0e279992679599"}, {file = "threadloop-1.0.2.tar.gz", hash = "sha256:8b180aac31013de13c2ad5c834819771992d350267bddb854613ae77ef571944"}, @@ -2774,7 +2774,7 @@ description = "Python bindings for the Apache Thrift RPC system" optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"opentracing\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"opentracing\"" files = [ {file = "thrift-0.16.0.tar.gz", hash = "sha256:2b5b6488fcded21f9d312aa23c9ff6a0195d0f6ae26ddbd5ad9e3e25dfc14408"}, ] @@ -2847,7 +2847,7 @@ description = "Tornado is a Python web framework and asynchronous networking lib optional = true python-versions = ">=3.9" groups = ["main"] -markers = "extra == \"opentracing\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"opentracing\"" files = [ {file = "tornado-6.5-cp39-abi3-macosx_10_9_universal2.whl", hash = "sha256:f81067dad2e4443b015368b24e802d0083fecada4f0a4572fdb72fc06e54a9a6"}, {file = "tornado-6.5-cp39-abi3-macosx_10_9_x86_64.whl", hash = "sha256:9ac1cbe1db860b3cbb251e795c701c41d343f06a96049d6274e7c77559117e41"}, @@ -2981,7 +2981,7 @@ description = "non-blocking redis client for python" optional = true python-versions = "*" groups = ["main"] -markers = "extra == \"redis\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"redis\"" files = [ {file = "txredisapi-1.4.11-py3-none-any.whl", hash = "sha256:ac64d7a9342b58edca13ef267d4fa7637c1aa63f8595e066801c1e8b56b22d0b"}, {file = "txredisapi-1.4.11.tar.gz", hash = "sha256:3eb1af99aefdefb59eb877b1dd08861efad60915e30ad5bf3d5bf6c5cedcdbc6"}, @@ -3227,7 +3227,7 @@ description = "An XML Schema validator and decoder" optional = true python-versions = ">=3.7" groups = ["main"] -markers = "extra == \"saml2\" or extra == \"all\"" +markers = "extra == \"all\" or extra == \"saml2\"" files = [ {file = "xmlschema-2.4.0-py3-none-any.whl", hash = "sha256:dc87be0caaa61f42649899189aab2fd8e0d567f2cf548433ba7b79278d231a4a"}, {file = "xmlschema-2.4.0.tar.gz", hash = "sha256:d74cd0c10866ac609e1ef94a5a69b018ad16e39077bc6393408b40c6babee793"}, From e0e7a44fe99a96ef264d4594f9606e659f5c637a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Dec 2025 16:55:16 +0000 Subject: [PATCH 32/47] Bump pyopenssl from 25.1.0 to 25.3.0 (#19248) --- poetry.lock | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/poetry.lock 
b/poetry.lock index ee45f3e464..d5e5942119 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2066,18 +2066,18 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"] [[package]] name = "pyopenssl" -version = "25.1.0" +version = "25.3.0" description = "Python wrapper module around the OpenSSL library" optional = false python-versions = ">=3.7" groups = ["main"] files = [ - {file = "pyopenssl-25.1.0-py3-none-any.whl", hash = "sha256:2b11f239acc47ac2e5aca04fd7fa829800aeee22a2eb30d744572a157bd8a1ab"}, - {file = "pyopenssl-25.1.0.tar.gz", hash = "sha256:8d031884482e0c67ee92bf9a4d8cceb08d92aba7136432ffb0703c5280fc205b"}, + {file = "pyopenssl-25.3.0-py3-none-any.whl", hash = "sha256:1fda6fc034d5e3d179d39e59c1895c9faeaf40a79de5fc4cbbfbe0d36f4a77b6"}, + {file = "pyopenssl-25.3.0.tar.gz", hash = "sha256:c981cb0a3fd84e8602d7afc209522773b94c1c2446a3c710a75b06fe1beae329"}, ] [package.dependencies] -cryptography = ">=41.0.5,<46" +cryptography = ">=45.0.7,<47" typing-extensions = {version = ">=4.9", markers = "python_version < \"3.13\" and python_version >= \"3.8\""} [package.extras] From 3cf21bc64979d7b0cb56270dd641f01b5ff86984 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Dec 2025 16:55:36 +0000 Subject: [PATCH 33/47] Bump rpds-py from 0.29.0 to 0.30.0 (#19247) --- poetry.lock | 232 ++++++++++++++++++++++++++-------------------------- 1 file changed, 116 insertions(+), 116 deletions(-) diff --git a/poetry.lock b/poetry.lock index d5e5942119..ed0374be32 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2356,127 +2356,127 @@ jupyter = ["ipywidgets (>=7.5.1,<9)"] [[package]] name = "rpds-py" -version = "0.29.0" +version = "0.30.0" description = "Python bindings to Rust's persistent data structures (rpds)" optional = false python-versions = ">=3.10" groups = ["main", "dev"] files = [ - {file = "rpds_py-0.29.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:4ae4b88c6617e1b9e5038ab3fccd7bac0842fdda2b703117b2aa99bc85379113"}, - {file = "rpds_py-0.29.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:7d9128ec9d8cecda6f044001fde4fb71ea7c24325336612ef8179091eb9596b9"}, - {file = "rpds_py-0.29.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d37812c3da8e06f2bb35b3cf10e4a7b68e776a706c13058997238762b4e07f4f"}, - {file = "rpds_py-0.29.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:66786c3fb1d8de416a7fa8e1cb1ec6ba0a745b2b0eee42f9b7daa26f1a495545"}, - {file = "rpds_py-0.29.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b58f5c77f1af888b5fd1876c9a0d9858f6f88a39c9dd7c073a88e57e577da66d"}, - {file = "rpds_py-0.29.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:799156ef1f3529ed82c36eb012b5d7a4cf4b6ef556dd7cc192148991d07206ae"}, - {file = "rpds_py-0.29.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:453783477aa4f2d9104c4b59b08c871431647cb7af51b549bbf2d9eb9c827756"}, - {file = "rpds_py-0.29.0-cp310-cp310-manylinux_2_31_riscv64.whl", hash = "sha256:24a7231493e3c4a4b30138b50cca089a598e52c34cf60b2f35cebf62f274fdea"}, - {file = "rpds_py-0.29.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7033c1010b1f57bb44d8067e8c25aa6fa2e944dbf46ccc8c92b25043839c3fd2"}, - {file = "rpds_py-0.29.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0248b19405422573621172ab8e3a1f29141362d13d9f72bafa2e28ea0cdca5a2"}, - {file = "rpds_py-0.29.0-cp310-cp310-musllinux_1_2_i686.whl", hash = 
"sha256:f9f436aee28d13b9ad2c764fc273e0457e37c2e61529a07b928346b219fcde3b"}, - {file = "rpds_py-0.29.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:24a16cb7163933906c62c272de20ea3c228e4542c8c45c1d7dc2b9913e17369a"}, - {file = "rpds_py-0.29.0-cp310-cp310-win32.whl", hash = "sha256:1a409b0310a566bfd1be82119891fefbdce615ccc8aa558aff7835c27988cbef"}, - {file = "rpds_py-0.29.0-cp310-cp310-win_amd64.whl", hash = "sha256:c5523b0009e7c3c1263471b69d8da1c7d41b3ecb4cb62ef72be206b92040a950"}, - {file = "rpds_py-0.29.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:9b9c764a11fd637e0322a488560533112837f5334ffeb48b1be20f6d98a7b437"}, - {file = "rpds_py-0.29.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3fd2164d73812026ce970d44c3ebd51e019d2a26a4425a5dcbdfa93a34abc383"}, - {file = "rpds_py-0.29.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a097b7f7f7274164566ae90a221fd725363c0e9d243e2e9ed43d195ccc5495c"}, - {file = "rpds_py-0.29.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:7cdc0490374e31cedefefaa1520d5fe38e82fde8748cbc926e7284574c714d6b"}, - {file = "rpds_py-0.29.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:89ca2e673ddd5bde9b386da9a0aac0cab0e76f40c8f0aaf0d6311b6bbf2aa311"}, - {file = "rpds_py-0.29.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a5d9da3ff5af1ca1249b1adb8ef0573b94c76e6ae880ba1852f033bf429d4588"}, - {file = "rpds_py-0.29.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8238d1d310283e87376c12f658b61e1ee23a14c0e54c7c0ce953efdbdc72deed"}, - {file = "rpds_py-0.29.0-cp311-cp311-manylinux_2_31_riscv64.whl", hash = "sha256:2d6fb2ad1c36f91c4646989811e84b1ea5e0c3cf9690b826b6e32b7965853a63"}, - {file = "rpds_py-0.29.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:534dc9df211387547267ccdb42253aa30527482acb38dd9b21c5c115d66a96d2"}, - {file = "rpds_py-0.29.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d456e64724a075441e4ed648d7f154dc62e9aabff29bcdf723d0c00e9e1d352f"}, - {file = "rpds_py-0.29.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:a738f2da2f565989401bd6fd0b15990a4d1523c6d7fe83f300b7e7d17212feca"}, - {file = "rpds_py-0.29.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:a110e14508fd26fd2e472bb541f37c209409876ba601cf57e739e87d8a53cf95"}, - {file = "rpds_py-0.29.0-cp311-cp311-win32.whl", hash = "sha256:923248a56dd8d158389a28934f6f69ebf89f218ef96a6b216a9be6861804d3f4"}, - {file = "rpds_py-0.29.0-cp311-cp311-win_amd64.whl", hash = "sha256:539eb77eb043afcc45314d1be09ea6d6cafb3addc73e0547c171c6d636957f60"}, - {file = "rpds_py-0.29.0-cp311-cp311-win_arm64.whl", hash = "sha256:bdb67151ea81fcf02d8f494703fb728d4d34d24556cbff5f417d74f6f5792e7c"}, - {file = "rpds_py-0.29.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a0891cfd8db43e085c0ab93ab7e9b0c8fee84780d436d3b266b113e51e79f954"}, - {file = "rpds_py-0.29.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:3897924d3f9a0361472d884051f9a2460358f9a45b1d85a39a158d2f8f1ad71c"}, - {file = "rpds_py-0.29.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2a21deb8e0d1571508c6491ce5ea5e25669b1dd4adf1c9d64b6314842f708b5d"}, - {file = "rpds_py-0.29.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9efe71687d6427737a0a2de9ca1c0a216510e6cd08925c44162be23ed7bed2d5"}, - {file = "rpds_py-0.29.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:40f65470919dc189c833e86b2c4bd21bd355f98436a2cef9e0a9a92aebc8e57e"}, - {file = "rpds_py-0.29.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:def48ff59f181130f1a2cb7c517d16328efac3ec03951cca40c1dc2049747e83"}, - {file = "rpds_py-0.29.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad7bd570be92695d89285a4b373006930715b78d96449f686af422debb4d3949"}, - {file = "rpds_py-0.29.0-cp312-cp312-manylinux_2_31_riscv64.whl", hash = "sha256:5a572911cd053137bbff8e3a52d31c5d2dba51d3a67ad902629c70185f3f2181"}, - {file = "rpds_py-0.29.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d583d4403bcbf10cffc3ab5cee23d7643fcc960dff85973fd3c2d6c86e8dbb0c"}, - {file = "rpds_py-0.29.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:070befbb868f257d24c3bb350dbd6e2f645e83731f31264b19d7231dd5c396c7"}, - {file = "rpds_py-0.29.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:fc935f6b20b0c9f919a8ff024739174522abd331978f750a74bb68abd117bd19"}, - {file = "rpds_py-0.29.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:8c5a8ecaa44ce2d8d9d20a68a2483a74c07f05d72e94a4dff88906c8807e77b0"}, - {file = "rpds_py-0.29.0-cp312-cp312-win32.whl", hash = "sha256:ba5e1aeaf8dd6d8f6caba1f5539cddda87d511331714b7b5fc908b6cfc3636b7"}, - {file = "rpds_py-0.29.0-cp312-cp312-win_amd64.whl", hash = "sha256:b5f6134faf54b3cb83375db0f113506f8b7770785be1f95a631e7e2892101977"}, - {file = "rpds_py-0.29.0-cp312-cp312-win_arm64.whl", hash = "sha256:b016eddf00dca7944721bf0cd85b6af7f6c4efaf83ee0b37c4133bd39757a8c7"}, - {file = "rpds_py-0.29.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:1585648d0760b88292eecab5181f5651111a69d90eff35d6b78aa32998886a61"}, - {file = "rpds_py-0.29.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:521807963971a23996ddaf764c682b3e46459b3c58ccd79fefbe16718db43154"}, - {file = "rpds_py-0.29.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0a8896986efaa243ab713c69e6491a4138410f0fe36f2f4c71e18bd5501e8014"}, - {file = "rpds_py-0.29.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1d24564a700ef41480a984c5ebed62b74e6ce5860429b98b1fede76049e953e6"}, - {file = "rpds_py-0.29.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e6596b93c010d386ae46c9fba9bfc9fc5965fa8228edeac51576299182c2e31c"}, - {file = "rpds_py-0.29.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5cc58aac218826d054c7da7f95821eba94125d88be673ff44267bb89d12a5866"}, - {file = "rpds_py-0.29.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de73e40ebc04dd5d9556f50180395322193a78ec247e637e741c1b954810f295"}, - {file = "rpds_py-0.29.0-cp313-cp313-manylinux_2_31_riscv64.whl", hash = "sha256:295ce5ac7f0cf69a651ea75c8f76d02a31f98e5698e82a50a5f4d4982fbbae3b"}, - {file = "rpds_py-0.29.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1ea59b23ea931d494459c8338056fe7d93458c0bf3ecc061cd03916505369d55"}, - {file = "rpds_py-0.29.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:f49d41559cebd608042fdcf54ba597a4a7555b49ad5c1c0c03e0af82692661cd"}, - {file = "rpds_py-0.29.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:05a2bd42768ea988294ca328206efbcc66e220d2d9b7836ee5712c07ad6340ea"}, - {file = "rpds_py-0.29.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:33ca7bdfedd83339ca55da3a5e1527ee5870d4b8369456b5777b197756f3ca22"}, - {file = "rpds_py-0.29.0-cp313-cp313-win32.whl", hash = 
"sha256:20c51ae86a0bb9accc9ad4e6cdeec58d5ebb7f1b09dd4466331fc65e1766aae7"}, - {file = "rpds_py-0.29.0-cp313-cp313-win_amd64.whl", hash = "sha256:6410e66f02803600edb0b1889541f4b5cc298a5ccda0ad789cc50ef23b54813e"}, - {file = "rpds_py-0.29.0-cp313-cp313-win_arm64.whl", hash = "sha256:56838e1cd9174dc23c5691ee29f1d1be9eab357f27efef6bded1328b23e1ced2"}, - {file = "rpds_py-0.29.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:37d94eadf764d16b9a04307f2ab1d7af6dc28774bbe0535c9323101e14877b4c"}, - {file = "rpds_py-0.29.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:d472cf73efe5726a067dce63eebe8215b14beabea7c12606fd9994267b3cfe2b"}, - {file = "rpds_py-0.29.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:72fdfd5ff8992e4636621826371e3ac5f3e3b8323e9d0e48378e9c13c3dac9d0"}, - {file = "rpds_py-0.29.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2549d833abdf8275c901313b9e8ff8fba57e50f6a495035a2a4e30621a2f7cc4"}, - {file = "rpds_py-0.29.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4448dad428f28a6a767c3e3b80cde3446a22a0efbddaa2360f4bb4dc836d0688"}, - {file = "rpds_py-0.29.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:115f48170fd4296a33938d8c11f697f5f26e0472e43d28f35624764173a60e4d"}, - {file = "rpds_py-0.29.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e5bb73ffc029820f4348e9b66b3027493ae00bca6629129cd433fd7a76308ee"}, - {file = "rpds_py-0.29.0-cp313-cp313t-manylinux_2_31_riscv64.whl", hash = "sha256:b1581fcde18fcdf42ea2403a16a6b646f8eb1e58d7f90a0ce693da441f76942e"}, - {file = "rpds_py-0.29.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:16e9da2bda9eb17ea318b4c335ec9ac1818e88922cbe03a5743ea0da9ecf74fb"}, - {file = "rpds_py-0.29.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:28fd300326dd21198f311534bdb6d7e989dd09b3418b3a91d54a0f384c700967"}, - {file = "rpds_py-0.29.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:2aba991e041d031c7939e1358f583ae405a7bf04804ca806b97a5c0e0af1ea5e"}, - {file = "rpds_py-0.29.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:7f437026dbbc3f08c99cc41a5b2570c6e1a1ddbe48ab19a9b814254128d4ea7a"}, - {file = "rpds_py-0.29.0-cp313-cp313t-win32.whl", hash = "sha256:6e97846e9800a5d0fe7be4d008f0c93d0feeb2700da7b1f7528dabafb31dfadb"}, - {file = "rpds_py-0.29.0-cp313-cp313t-win_amd64.whl", hash = "sha256:f49196aec7c4b406495f60e6f947ad71f317a765f956d74bbd83996b9edc0352"}, - {file = "rpds_py-0.29.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:394d27e4453d3b4d82bb85665dc1fcf4b0badc30fc84282defed71643b50e1a1"}, - {file = "rpds_py-0.29.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:55d827b2ae95425d3be9bc9a5838b6c29d664924f98146557f7715e331d06df8"}, - {file = "rpds_py-0.29.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fc31a07ed352e5462d3ee1b22e89285f4ce97d5266f6d1169da1142e78045626"}, - {file = "rpds_py-0.29.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c4695dd224212f6105db7ea62197144230b808d6b2bba52238906a2762f1d1e7"}, - {file = "rpds_py-0.29.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fcae1770b401167f8b9e1e3f566562e6966ffa9ce63639916248a9e25fa8a244"}, - {file = "rpds_py-0.29.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:90f30d15f45048448b8da21c41703b31c61119c06c216a1bf8c245812a0f0c17"}, - {file = 
"rpds_py-0.29.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:44a91e0ab77bdc0004b43261a4b8cd6d6b451e8d443754cfda830002b5745b32"}, - {file = "rpds_py-0.29.0-cp314-cp314-manylinux_2_31_riscv64.whl", hash = "sha256:4aa195e5804d32c682e453b34474f411ca108e4291c6a0f824ebdc30a91c973c"}, - {file = "rpds_py-0.29.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7971bdb7bf4ee0f7e6f67fa4c7fbc6019d9850cc977d126904392d363f6f8318"}, - {file = "rpds_py-0.29.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:8ae33ad9ce580c7a47452c3b3f7d8a9095ef6208e0a0c7e4e2384f9fc5bf8212"}, - {file = "rpds_py-0.29.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:c661132ab2fb4eeede2ef69670fd60da5235209874d001a98f1542f31f2a8a94"}, - {file = "rpds_py-0.29.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:bb78b3a0d31ac1bde132c67015a809948db751cb4e92cdb3f0b242e430b6ed0d"}, - {file = "rpds_py-0.29.0-cp314-cp314-win32.whl", hash = "sha256:f475f103488312e9bd4000bc890a95955a07b2d0b6e8884aef4be56132adbbf1"}, - {file = "rpds_py-0.29.0-cp314-cp314-win_amd64.whl", hash = "sha256:b9cf2359a4fca87cfb6801fae83a76aedf66ee1254a7a151f1341632acf67f1b"}, - {file = "rpds_py-0.29.0-cp314-cp314-win_arm64.whl", hash = "sha256:9ba8028597e824854f0f1733d8b964e914ae3003b22a10c2c664cb6927e0feb9"}, - {file = "rpds_py-0.29.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:e71136fd0612556b35c575dc2726ae04a1669e6a6c378f2240312cf5d1a2ab10"}, - {file = "rpds_py-0.29.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:76fe96632d53f3bf0ea31ede2f53bbe3540cc2736d4aec3b3801b0458499ef3a"}, - {file = "rpds_py-0.29.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9459a33f077130dbb2c7c3cea72ee9932271fb3126404ba2a2661e4fe9eb7b79"}, - {file = "rpds_py-0.29.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5c9546cfdd5d45e562cc0444b6dddc191e625c62e866bf567a2c69487c7ad28a"}, - {file = "rpds_py-0.29.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:12597d11d97b8f7e376c88929a6e17acb980e234547c92992f9f7c058f1a7310"}, - {file = "rpds_py-0.29.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28de03cf48b8a9e6ec10318f2197b83946ed91e2891f651a109611be4106ac4b"}, - {file = "rpds_py-0.29.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd7951c964069039acc9d67a8ff1f0a7f34845ae180ca542b17dc1456b1f1808"}, - {file = "rpds_py-0.29.0-cp314-cp314t-manylinux_2_31_riscv64.whl", hash = "sha256:c07d107b7316088f1ac0177a7661ca0c6670d443f6fe72e836069025e6266761"}, - {file = "rpds_py-0.29.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:1de2345af363d25696969befc0c1688a6cb5e8b1d32b515ef84fc245c6cddba3"}, - {file = "rpds_py-0.29.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:00e56b12d2199ca96068057e1ae7f9998ab6e99cda82431afafd32f3ec98cca9"}, - {file = "rpds_py-0.29.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:3919a3bbecee589300ed25000b6944174e07cd20db70552159207b3f4bbb45b8"}, - {file = "rpds_py-0.29.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:e7fa2ccc312bbd91e43aa5e0869e46bc03278a3dddb8d58833150a18b0f0283a"}, - {file = "rpds_py-0.29.0-cp314-cp314t-win32.whl", hash = "sha256:97c817863ffc397f1e6a6e9d2d89fe5408c0a9922dac0329672fb0f35c867ea5"}, - {file = "rpds_py-0.29.0-cp314-cp314t-win_amd64.whl", hash = "sha256:2023473f444752f0f82a58dfcbee040d0a1b3d1b3c2ec40e884bd25db6d117d2"}, - {file = 
"rpds_py-0.29.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:acd82a9e39082dc5f4492d15a6b6c8599aa21db5c35aaf7d6889aea16502c07d"}, - {file = "rpds_py-0.29.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:715b67eac317bf1c7657508170a3e011a1ea6ccb1c9d5f296e20ba14196be6b3"}, - {file = "rpds_py-0.29.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f3b1b87a237cb2dba4db18bcfaaa44ba4cd5936b91121b62292ff21df577fc43"}, - {file = "rpds_py-0.29.0-pp311-pypy311_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:1c3c3e8101bb06e337c88eb0c0ede3187131f19d97d43ea0e1c5407ea74c0cbf"}, - {file = "rpds_py-0.29.0-pp311-pypy311_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2b8e54d6e61f3ecd3abe032065ce83ea63417a24f437e4a3d73d2f85ce7b7cfe"}, - {file = "rpds_py-0.29.0-pp311-pypy311_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3fbd4e9aebf110473a420dea85a238b254cf8a15acb04b22a5a6b5ce8925b760"}, - {file = "rpds_py-0.29.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:80fdf53d36e6c72819993e35d1ebeeb8e8fc688d0c6c2b391b55e335b3afba5a"}, - {file = "rpds_py-0.29.0-pp311-pypy311_pp73-manylinux_2_31_riscv64.whl", hash = "sha256:ea7173df5d86f625f8dde6d5929629ad811ed8decda3b60ae603903839ac9ac0"}, - {file = "rpds_py-0.29.0-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:76054d540061eda273274f3d13a21a4abdde90e13eaefdc205db37c05230efce"}, - {file = "rpds_py-0.29.0-pp311-pypy311_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:9f84c549746a5be3bc7415830747a3a0312573afc9f95785eb35228bb17742ec"}, - {file = "rpds_py-0.29.0-pp311-pypy311_pp73-musllinux_1_2_i686.whl", hash = "sha256:0ea962671af5cb9a260489e311fa22b2e97103e3f9f0caaea6f81390af96a9ed"}, - {file = "rpds_py-0.29.0-pp311-pypy311_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:f7728653900035fb7b8d06e1e5900545d8088efc9d5d4545782da7df03ec803f"}, - {file = "rpds_py-0.29.0.tar.gz", hash = "sha256:fe55fe686908f50154d1dc599232016e50c243b438c3b7432f24e2895b0e5359"}, + {file = "rpds_py-0.30.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:679ae98e00c0e8d68a7fda324e16b90fd5260945b45d3b824c892cec9eea3288"}, + {file = "rpds_py-0.30.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4cc2206b76b4f576934f0ed374b10d7ca5f457858b157ca52064bdfc26b9fc00"}, + {file = "rpds_py-0.30.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:389a2d49eded1896c3d48b0136ead37c48e221b391c052fba3f4055c367f60a6"}, + {file = "rpds_py-0.30.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:32c8528634e1bf7121f3de08fa85b138f4e0dc47657866630611b03967f041d7"}, + {file = "rpds_py-0.30.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f207f69853edd6f6700b86efb84999651baf3789e78a466431df1331608e5324"}, + {file = "rpds_py-0.30.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:67b02ec25ba7a9e8fa74c63b6ca44cf5707f2fbfadae3ee8e7494297d56aa9df"}, + {file = "rpds_py-0.30.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c0e95f6819a19965ff420f65578bacb0b00f251fefe2c8b23347c37174271f3"}, + {file = "rpds_py-0.30.0-cp310-cp310-manylinux_2_31_riscv64.whl", hash = "sha256:a452763cc5198f2f98898eb98f7569649fe5da666c2dc6b5ddb10fde5a574221"}, + {file = "rpds_py-0.30.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e0b65193a413ccc930671c55153a03ee57cecb49e6227204b04fae512eb657a7"}, + {file 
= "rpds_py-0.30.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:858738e9c32147f78b3ac24dc0edb6610000e56dc0f700fd5f651d0a0f0eb9ff"}, + {file = "rpds_py-0.30.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:da279aa314f00acbb803da1e76fa18666778e8a8f83484fba94526da5de2cba7"}, + {file = "rpds_py-0.30.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7c64d38fb49b6cdeda16ab49e35fe0da2e1e9b34bc38bd78386530f218b37139"}, + {file = "rpds_py-0.30.0-cp310-cp310-win32.whl", hash = "sha256:6de2a32a1665b93233cde140ff8b3467bdb9e2af2b91079f0333a0974d12d464"}, + {file = "rpds_py-0.30.0-cp310-cp310-win_amd64.whl", hash = "sha256:1726859cd0de969f88dc8673bdd954185b9104e05806be64bcd87badbe313169"}, + {file = "rpds_py-0.30.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a2bffea6a4ca9f01b3f8e548302470306689684e61602aa3d141e34da06cf425"}, + {file = "rpds_py-0.30.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dc4f992dfe1e2bc3ebc7444f6c7051b4bc13cd8e33e43511e8ffd13bf407010d"}, + {file = "rpds_py-0.30.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:422c3cb9856d80b09d30d2eb255d0754b23e090034e1deb4083f8004bd0761e4"}, + {file = "rpds_py-0.30.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:07ae8a593e1c3c6b82ca3292efbe73c30b61332fd612e05abee07c79359f292f"}, + {file = "rpds_py-0.30.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:12f90dd7557b6bd57f40abe7747e81e0c0b119bef015ea7726e69fe550e394a4"}, + {file = "rpds_py-0.30.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:99b47d6ad9a6da00bec6aabe5a6279ecd3c06a329d4aa4771034a21e335c3a97"}, + {file = "rpds_py-0.30.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33f559f3104504506a44bb666b93a33f5d33133765b0c216a5bf2f1e1503af89"}, + {file = "rpds_py-0.30.0-cp311-cp311-manylinux_2_31_riscv64.whl", hash = "sha256:946fe926af6e44f3697abbc305ea168c2c31d3e3ef1058cf68f379bf0335a78d"}, + {file = "rpds_py-0.30.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:495aeca4b93d465efde585977365187149e75383ad2684f81519f504f5c13038"}, + {file = "rpds_py-0.30.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d9a0ca5da0386dee0655b4ccdf46119df60e0f10da268d04fe7cc87886872ba7"}, + {file = "rpds_py-0.30.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8d6d1cc13664ec13c1b84241204ff3b12f9bb82464b8ad6e7a5d3486975c2eed"}, + {file = "rpds_py-0.30.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3896fa1be39912cf0757753826bc8bdc8ca331a28a7c4ae46b7a21280b06bb85"}, + {file = "rpds_py-0.30.0-cp311-cp311-win32.whl", hash = "sha256:55f66022632205940f1827effeff17c4fa7ae1953d2b74a8581baaefb7d16f8c"}, + {file = "rpds_py-0.30.0-cp311-cp311-win_amd64.whl", hash = "sha256:a51033ff701fca756439d641c0ad09a41d9242fa69121c7d8769604a0a629825"}, + {file = "rpds_py-0.30.0-cp311-cp311-win_arm64.whl", hash = "sha256:47b0ef6231c58f506ef0b74d44e330405caa8428e770fec25329ed2cb971a229"}, + {file = "rpds_py-0.30.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:a161f20d9a43006833cd7068375a94d035714d73a172b681d8881820600abfad"}, + {file = "rpds_py-0.30.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6abc8880d9d036ecaafe709079969f56e876fcf107f7a8e9920ba6d5a3878d05"}, + {file = "rpds_py-0.30.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca28829ae5f5d569bb62a79512c842a03a12576375d5ece7d2cadf8abe96ec28"}, + {file = 
"rpds_py-0.30.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a1010ed9524c73b94d15919ca4d41d8780980e1765babf85f9a2f90d247153dd"}, + {file = "rpds_py-0.30.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f8d1736cfb49381ba528cd5baa46f82fdc65c06e843dab24dd70b63d09121b3f"}, + {file = "rpds_py-0.30.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d948b135c4693daff7bc2dcfc4ec57237a29bd37e60c2fabf5aff2bbacf3e2f1"}, + {file = "rpds_py-0.30.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47f236970bccb2233267d89173d3ad2703cd36a0e2a6e92d0560d333871a3d23"}, + {file = "rpds_py-0.30.0-cp312-cp312-manylinux_2_31_riscv64.whl", hash = "sha256:2e6ecb5a5bcacf59c3f912155044479af1d0b6681280048b338b28e364aca1f6"}, + {file = "rpds_py-0.30.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a8fa71a2e078c527c3e9dc9fc5a98c9db40bcc8a92b4e8858e36d329f8684b51"}, + {file = "rpds_py-0.30.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:73c67f2db7bc334e518d097c6d1e6fed021bbc9b7d678d6cc433478365d1d5f5"}, + {file = "rpds_py-0.30.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:5ba103fb455be00f3b1c2076c9d4264bfcb037c976167a6047ed82f23153f02e"}, + {file = "rpds_py-0.30.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7cee9c752c0364588353e627da8a7e808a66873672bcb5f52890c33fd965b394"}, + {file = "rpds_py-0.30.0-cp312-cp312-win32.whl", hash = "sha256:1ab5b83dbcf55acc8b08fc62b796ef672c457b17dbd7820a11d6c52c06839bdf"}, + {file = "rpds_py-0.30.0-cp312-cp312-win_amd64.whl", hash = "sha256:a090322ca841abd453d43456ac34db46e8b05fd9b3b4ac0c78bcde8b089f959b"}, + {file = "rpds_py-0.30.0-cp312-cp312-win_arm64.whl", hash = "sha256:669b1805bd639dd2989b281be2cfd951c6121b65e729d9b843e9639ef1fd555e"}, + {file = "rpds_py-0.30.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:f83424d738204d9770830d35290ff3273fbb02b41f919870479fab14b9d303b2"}, + {file = "rpds_py-0.30.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e7536cd91353c5273434b4e003cbda89034d67e7710eab8761fd918ec6c69cf8"}, + {file = "rpds_py-0.30.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2771c6c15973347f50fece41fc447c054b7ac2ae0502388ce3b6738cd366e3d4"}, + {file = "rpds_py-0.30.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0a59119fc6e3f460315fe9d08149f8102aa322299deaa5cab5b40092345c2136"}, + {file = "rpds_py-0.30.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:76fec018282b4ead0364022e3c54b60bf368b9d926877957a8624b58419169b7"}, + {file = "rpds_py-0.30.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:692bef75a5525db97318e8cd061542b5a79812d711ea03dbc1f6f8dbb0c5f0d2"}, + {file = "rpds_py-0.30.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9027da1ce107104c50c81383cae773ef5c24d296dd11c99e2629dbd7967a20c6"}, + {file = "rpds_py-0.30.0-cp313-cp313-manylinux_2_31_riscv64.whl", hash = "sha256:9cf69cdda1f5968a30a359aba2f7f9aa648a9ce4b580d6826437f2b291cfc86e"}, + {file = "rpds_py-0.30.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a4796a717bf12b9da9d3ad002519a86063dcac8988b030e405704ef7d74d2d9d"}, + {file = "rpds_py-0.30.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5d4c2aa7c50ad4728a094ebd5eb46c452e9cb7edbfdb18f9e1221f597a73e1e7"}, + {file = "rpds_py-0.30.0-cp313-cp313-musllinux_1_2_i686.whl", hash = 
"sha256:ba81a9203d07805435eb06f536d95a266c21e5b2dfbf6517748ca40c98d19e31"}, + {file = "rpds_py-0.30.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:945dccface01af02675628334f7cf49c2af4c1c904748efc5cf7bbdf0b579f95"}, + {file = "rpds_py-0.30.0-cp313-cp313-win32.whl", hash = "sha256:b40fb160a2db369a194cb27943582b38f79fc4887291417685f3ad693c5a1d5d"}, + {file = "rpds_py-0.30.0-cp313-cp313-win_amd64.whl", hash = "sha256:806f36b1b605e2d6a72716f321f20036b9489d29c51c91f4dd29a3e3afb73b15"}, + {file = "rpds_py-0.30.0-cp313-cp313-win_arm64.whl", hash = "sha256:d96c2086587c7c30d44f31f42eae4eac89b60dabbac18c7669be3700f13c3ce1"}, + {file = "rpds_py-0.30.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:eb0b93f2e5c2189ee831ee43f156ed34e2a89a78a66b98cadad955972548be5a"}, + {file = "rpds_py-0.30.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:922e10f31f303c7c920da8981051ff6d8c1a56207dbdf330d9047f6d30b70e5e"}, + {file = "rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cdc62c8286ba9bf7f47befdcea13ea0e26bf294bda99758fd90535cbaf408000"}, + {file = "rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:47f9a91efc418b54fb8190a6b4aa7813a23fb79c51f4bb84e418f5476c38b8db"}, + {file = "rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1f3587eb9b17f3789ad50824084fa6f81921bbf9a795826570bda82cb3ed91f2"}, + {file = "rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:39c02563fc592411c2c61d26b6c5fe1e51eaa44a75aa2c8735ca88b0d9599daa"}, + {file = "rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51a1234d8febafdfd33a42d97da7a43f5dcb120c1060e352a3fbc0c6d36e2083"}, + {file = "rpds_py-0.30.0-cp313-cp313t-manylinux_2_31_riscv64.whl", hash = "sha256:eb2c4071ab598733724c08221091e8d80e89064cd472819285a9ab0f24bcedb9"}, + {file = "rpds_py-0.30.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6bdfdb946967d816e6adf9a3d8201bfad269c67efe6cefd7093ef959683c8de0"}, + {file = "rpds_py-0.30.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c77afbd5f5250bf27bf516c7c4a016813eb2d3e116139aed0096940c5982da94"}, + {file = "rpds_py-0.30.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:61046904275472a76c8c90c9ccee9013d70a6d0f73eecefd38c1ae7c39045a08"}, + {file = "rpds_py-0.30.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4c5f36a861bc4b7da6516dbdf302c55313afa09b81931e8280361a4f6c9a2d27"}, + {file = "rpds_py-0.30.0-cp313-cp313t-win32.whl", hash = "sha256:3d4a69de7a3e50ffc214ae16d79d8fbb0922972da0356dcf4d0fdca2878559c6"}, + {file = "rpds_py-0.30.0-cp313-cp313t-win_amd64.whl", hash = "sha256:f14fc5df50a716f7ece6a80b6c78bb35ea2ca47c499e422aa4463455dd96d56d"}, + {file = "rpds_py-0.30.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:68f19c879420aa08f61203801423f6cd5ac5f0ac4ac82a2368a9fcd6a9a075e0"}, + {file = "rpds_py-0.30.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:ec7c4490c672c1a0389d319b3a9cfcd098dcdc4783991553c332a15acf7249be"}, + {file = "rpds_py-0.30.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f251c812357a3fed308d684a5079ddfb9d933860fc6de89f2b7ab00da481e65f"}, + {file = "rpds_py-0.30.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac98b175585ecf4c0348fd7b29c3864bda53b805c773cbf7bfdaffc8070c976f"}, + {file = "rpds_py-0.30.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:3e62880792319dbeb7eb866547f2e35973289e7d5696c6e295476448f5b63c87"}, + {file = "rpds_py-0.30.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4e7fc54e0900ab35d041b0601431b0a0eb495f0851a0639b6ef90f7741b39a18"}, + {file = "rpds_py-0.30.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47e77dc9822d3ad616c3d5759ea5631a75e5809d5a28707744ef79d7a1bcfcad"}, + {file = "rpds_py-0.30.0-cp314-cp314-manylinux_2_31_riscv64.whl", hash = "sha256:b4dc1a6ff022ff85ecafef7979a2c6eb423430e05f1165d6688234e62ba99a07"}, + {file = "rpds_py-0.30.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4559c972db3a360808309e06a74628b95eaccbf961c335c8fe0d590cf587456f"}, + {file = "rpds_py-0.30.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:0ed177ed9bded28f8deb6ab40c183cd1192aa0de40c12f38be4d59cd33cb5c65"}, + {file = "rpds_py-0.30.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:ad1fa8db769b76ea911cb4e10f049d80bf518c104f15b3edb2371cc65375c46f"}, + {file = "rpds_py-0.30.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:46e83c697b1f1c72b50e5ee5adb4353eef7406fb3f2043d64c33f20ad1c2fc53"}, + {file = "rpds_py-0.30.0-cp314-cp314-win32.whl", hash = "sha256:ee454b2a007d57363c2dfd5b6ca4a5d7e2c518938f8ed3b706e37e5d470801ed"}, + {file = "rpds_py-0.30.0-cp314-cp314-win_amd64.whl", hash = "sha256:95f0802447ac2d10bcc69f6dc28fe95fdf17940367b21d34e34c737870758950"}, + {file = "rpds_py-0.30.0-cp314-cp314-win_arm64.whl", hash = "sha256:613aa4771c99f03346e54c3f038e4cc574ac09a3ddfb0e8878487335e96dead6"}, + {file = "rpds_py-0.30.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:7e6ecfcb62edfd632e56983964e6884851786443739dbfe3582947e87274f7cb"}, + {file = "rpds_py-0.30.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:a1d0bc22a7cdc173fedebb73ef81e07faef93692b8c1ad3733b67e31e1b6e1b8"}, + {file = "rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d08f00679177226c4cb8c5265012eea897c8ca3b93f429e546600c971bcbae7"}, + {file = "rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5965af57d5848192c13534f90f9dd16464f3c37aaf166cc1da1cae1fd5a34898"}, + {file = "rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a4e86e34e9ab6b667c27f3211ca48f73dba7cd3d90f8d5b11be56e5dbc3fb4e"}, + {file = "rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5d3e6b26f2c785d65cc25ef1e5267ccbe1b069c5c21b8cc724efee290554419"}, + {file = "rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:626a7433c34566535b6e56a1b39a7b17ba961e97ce3b80ec62e6f1312c025551"}, + {file = "rpds_py-0.30.0-cp314-cp314t-manylinux_2_31_riscv64.whl", hash = "sha256:acd7eb3f4471577b9b5a41baf02a978e8bdeb08b4b355273994f8b87032000a8"}, + {file = "rpds_py-0.30.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fe5fa731a1fa8a0a56b0977413f8cacac1768dad38d16b3a296712709476fbd5"}, + {file = "rpds_py-0.30.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:74a3243a411126362712ee1524dfc90c650a503502f135d54d1b352bd01f2404"}, + {file = "rpds_py-0.30.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:3e8eeb0544f2eb0d2581774be4c3410356eba189529a6b3e36bbbf9696175856"}, + {file = "rpds_py-0.30.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:dbd936cde57abfee19ab3213cf9c26be06d60750e60a8e4dd85d1ab12c8b1f40"}, + {file = "rpds_py-0.30.0-cp314-cp314t-win32.whl", hash = 
"sha256:dc824125c72246d924f7f796b4f63c1e9dc810c7d9e2355864b3c3a73d59ade0"}, + {file = "rpds_py-0.30.0-cp314-cp314t-win_amd64.whl", hash = "sha256:27f4b0e92de5bfbc6f86e43959e6edd1425c33b5e69aab0984a72047f2bcf1e3"}, + {file = "rpds_py-0.30.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c2262bdba0ad4fc6fb5545660673925c2d2a5d9e2e0fb603aad545427be0fc58"}, + {file = "rpds_py-0.30.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:ee6af14263f25eedc3bb918a3c04245106a42dfd4f5c2285ea6f997b1fc3f89a"}, + {file = "rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3adbb8179ce342d235c31ab8ec511e66c73faa27a47e076ccc92421add53e2bb"}, + {file = "rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:250fa00e9543ac9b97ac258bd37367ff5256666122c2d0f2bc97577c60a1818c"}, + {file = "rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9854cf4f488b3d57b9aaeb105f06d78e5529d3145b1e4a41750167e8c213c6d3"}, + {file = "rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:993914b8e560023bc0a8bf742c5f303551992dcb85e247b1e5c7f4a7d145bda5"}, + {file = "rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58edca431fb9b29950807e301826586e5bbf24163677732429770a697ffe6738"}, + {file = "rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_31_riscv64.whl", hash = "sha256:dea5b552272a944763b34394d04577cf0f9bd013207bc32323b5a89a53cf9c2f"}, + {file = "rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ba3af48635eb83d03f6c9735dfb21785303e73d22ad03d489e88adae6eab8877"}, + {file = "rpds_py-0.30.0-pp311-pypy311_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:dff13836529b921e22f15cb099751209a60009731a68519630a24d61f0b1b30a"}, + {file = "rpds_py-0.30.0-pp311-pypy311_pp73-musllinux_1_2_i686.whl", hash = "sha256:1b151685b23929ab7beec71080a8889d4d6d9fa9a983d213f07121205d48e2c4"}, + {file = "rpds_py-0.30.0-pp311-pypy311_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:ac37f9f516c51e5753f27dfdef11a88330f04de2d564be3991384b2f3535d02e"}, + {file = "rpds_py-0.30.0.tar.gz", hash = "sha256:dd8ff7cf90014af0c0f787eea34794ebf6415242ee1d6fa91eaba725cc441e84"}, ] [[package]] From afdf9af6b543a8e3b3ae4f90842a23b04d519659 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 1 Dec 2025 17:55:33 +0000 Subject: [PATCH 34/47] Bump types-jsonschema from 4.25.1.20250822 to 4.25.1.20251009 (#19252) Bumps [types-jsonschema](https://github.com/typeshed-internal/stub_uploader) from 4.25.1.20250822 to 4.25.1.20251009.
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- poetry.lock | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/poetry.lock b/poetry.lock index ed0374be32..a6de2a86f0 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3035,14 +3035,14 @@ files = [ [[package]] name = "types-jsonschema" -version = "4.25.1.20250822" +version = "4.25.1.20251009" description = "Typing stubs for jsonschema" optional = false python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "types_jsonschema-4.25.1.20250822-py3-none-any.whl", hash = "sha256:f82c2d7fa1ce1c0b84ba1de4ed6798469768188884db04e66421913a4e181294"}, - {file = "types_jsonschema-4.25.1.20250822.tar.gz", hash = "sha256:aac69ed4b23f49aaceb7fcb834141d61b9e4e6a7f6008cb2f0d3b831dfa8464a"}, + {file = "types_jsonschema-4.25.1.20251009-py3-none-any.whl", hash = "sha256:f30b329037b78e7a60146b1146feb0b6fb0b71628637584409bada83968dad3e"}, + {file = "types_jsonschema-4.25.1.20251009.tar.gz", hash = "sha256:75d0f5c5dd18dc23b664437a0c1a625743e8d2e665ceaf3aecb29841f3a5f97f"}, ] [package.dependencies] From 08e1b63b30d889ba16f94d286b453aa79b0fead4 Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Mon, 1 Dec 2025 18:26:43 +0000 Subject: [PATCH 35/47] Fix v12 rooms when using frozen dicts (#19235) Fix #19233 Synapse fails to handle events in v12 rooms when the server is run with the `{use_frozen_dicts: True}` config. This PR fixes the issue, and adds tests which cover room creation, joining, and joining over federation, with both frozen and not frozen config settings, by extending the existing `test_send_join` federation tests. This approach to testing was chosen as it is a simple way to get high level integration style test coverage, without going through all our existing tests and trying to retroactively add in coverage when using frozen dicts. This should provide an easy place for future room versions to extend the suite of tests and reduce the chance of introducing subtle bugs like this in the future. ### Pull Request Checklist * [x] Pull request is based on the develop branch * [x] Pull request includes a [changelog file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog). The entry should: - Be a short description of your change which makes sense to users. "Fixed a bug that prevented receiving messages from other servers." instead of "Moved X method from `EventStore` to `EventWorkerStore`.". - Use markdown where necessary, mostly for `code blocks`. - End with either a period (.) or an exclamation mark (!). - Start with a capital letter. - Feel free to credit yourself, by adding a sentence "Contributed by @github_username." or "Contributed by [Your Name]." to the end of the entry. * [x] [Code style](https://element-hq.github.io/synapse/latest/code_style.html) is correct (run the [linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters)) --- changelog.d/19235.bugfix | 1 + synapse/events/__init__.py | 2 +- tests/federation/test_federation_server.py | 56 ++++++++++++++++++---- 3 files changed, 48 insertions(+), 11 deletions(-) create mode 100644 changelog.d/19235.bugfix diff --git a/changelog.d/19235.bugfix b/changelog.d/19235.bugfix new file mode 100644 index 0000000000..1c312351a4 --- /dev/null +++ b/changelog.d/19235.bugfix @@ -0,0 +1 @@ +Fix v12 rooms when running with `use_frozen_dicts: True`. 
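For context on the one-line change to `auth_event_ids` below: with `use_frozen_dicts: True`, Synapse freezes event contents, which (roughly speaking) converts list-valued fields such as `auth_events` into tuples. The old code concatenated that value with a list via `+`, which raises `TypeError` when the left-hand side is a tuple; unpacking into a fresh list works for either type, and also avoids handing callers a reference into the intentionally immutable frozen structure. A minimal illustrative sketch of the failure mode, using made-up event IDs (not Synapse code):

```python
# With `use_frozen_dicts: True`, list-valued event fields arrive as tuples.
frozen_auth_events = ("$abc:example.com",)  # hypothetical frozen auth_events value
create_event_id = "$create:example.com"  # hypothetical implied create event ID

try:
    frozen_auth_events + [create_event_id]  # tuple + list: the old code path
except TypeError as e:
    print(e)  # can only concatenate tuple (not "list") to tuple

# Unpacking builds a fresh list whether the source is a list or a tuple:
assert [*frozen_auth_events, create_event_id] == [
    "$abc:example.com",
    "$create:example.com",
]
```

This is presumably also why only v12 rooms were affected: the appending branch only runs for room versions where the create event is implied rather than listed explicitly in `auth_events`.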
diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index 5f78603782..c7eaf7eda2 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -548,7 +548,7 @@ def auth_event_ids(self) -> StrCollection: assert create_event_id not in self._dict["auth_events"] if self.type == EventTypes.Create and self.get_state_key() == "": return self._dict["auth_events"] # should be [] - return self._dict["auth_events"] + [create_event_id] + return [*self._dict["auth_events"], create_event_id] def _event_type_from_format_version( diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py index 0d74791290..c4491d5b3c 100644 --- a/tests/federation/test_federation_server.py +++ b/tests/federation/test_federation_server.py @@ -30,6 +30,7 @@ from synapse.api.errors import FederationError from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions from synapse.config.server import DEFAULT_ROOM_VERSION +from synapse.crypto.event_signing import add_hashes_and_signatures from synapse.events import EventBase, make_event_from_dict from synapse.federation.federation_base import event_from_pdu_json from synapse.http.types import QueryParams @@ -356,19 +357,44 @@ def _make_join(self, user_id: str) -> JsonDict: self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) return channel.json_body - def test_send_join(self) -> None: + def _test_send_join_common(self, room_version: str) -> None: """happy-path test of send_join""" + creator_user_id = self.register_user(f"kermit_v{room_version}", "test") + tok = self.login(f"kermit_v{room_version}", "test") + room_id = self.helper.create_room_as( + room_creator=creator_user_id, tok=tok, room_version=room_version + ) + + # Second member joins + second_member_user_id = self.register_user(f"fozzie_v{room_version}", "bear") + tok2 = self.login(f"fozzie_v{room_version}", "bear") + self.helper.join(room_id, second_member_user_id, tok=tok2) + + # Make join for remote user joining_user = "@misspiggy:" + self.OTHER_SERVER_NAME - join_result = self._make_join(joining_user) + channel = self.make_signed_federation_request( + "GET", + f"/_matrix/federation/v1/make_join/{room_id}/{joining_user}?ver={room_version}", + ) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) + join_result = channel.json_body + # Sign and send the join join_event_dict = join_result["event"] self.add_hashes_and_signatures_from_other_server( join_event_dict, - KNOWN_ROOM_VERSIONS[DEFAULT_ROOM_VERSION], + KNOWN_ROOM_VERSIONS[room_version], ) + if room_version in ["1", "2"]: + add_hashes_and_signatures( + KNOWN_ROOM_VERSIONS[room_version], + join_event_dict, + signature_name=self.hs.hostname, + signing_key=self.hs.signing_key, + ) channel = self.make_signed_federation_request( "PUT", - f"/_matrix/federation/v2/send_join/{self._room_id}/x", + f"/_matrix/federation/v2/send_join/{room_id}/x", content=join_event_dict, ) self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) @@ -384,8 +410,8 @@ def test_send_join(self) -> None: ("m.room.power_levels", ""), ("m.room.join_rules", ""), ("m.room.history_visibility", ""), - ("m.room.member", "@kermit:test"), - ("m.room.member", "@fozzie:test"), + ("m.room.member", f"@kermit_v{room_version}:test"), + ("m.room.member", f"@fozzie_v{room_version}:test"), # nb: *not* the joining user ], ) @@ -398,18 +424,28 @@ def test_send_join(self) -> None: returned_auth_chain_events, [ ("m.room.create", ""), - ("m.room.member", "@kermit:test"), + ("m.room.member", 
f"@kermit_v{room_version}:test"), ("m.room.power_levels", ""), ("m.room.join_rules", ""), ], ) # the room should show that the new user is a member - r = self.get_success( - self._storage_controllers.state.get_current_state(self._room_id) - ) + r = self.get_success(self._storage_controllers.state.get_current_state(room_id)) self.assertEqual(r[("m.room.member", joining_user)].membership, "join") + @parameterized.expand([(k,) for k in KNOWN_ROOM_VERSIONS.keys()]) + @override_config({"use_frozen_dicts": True}) + def test_send_join_with_frozen_dicts(self, room_version: str) -> None: + """Test send_join with USE_FROZEN_DICTS=True""" + self._test_send_join_common(room_version) + + @parameterized.expand([(k,) for k in KNOWN_ROOM_VERSIONS.keys()]) + @override_config({"use_frozen_dicts": False}) + def test_send_join_without_frozen_dicts(self, room_version: str) -> None: + """Test send_join with USE_FROZEN_DICTS=False""" + self._test_send_join_common(room_version) + def test_send_join_partial_state(self) -> None: """/send_join should return partial state, if requested""" joining_user = "@misspiggy:" + self.OTHER_SERVER_NAME From 88310fe7eddde237f50b17b109d1feed4e38b299 Mon Sep 17 00:00:00 2001 From: Eric Eastwood Date: Mon, 1 Dec 2025 17:10:22 -0600 Subject: [PATCH 36/47] Add log to determine whether clients are using `/messages` as expected (#19226) Spawning from wanting some better homeserver logs to debug https://github.com/element-hq/synapse/issues/19153. We can check whether we are returning a `/messages` response with an `end` pagination token and then check to see whether the client is making another `/messages` request with that token. Although clients should also have similar logs and debugging capabilities to determine this info as well. This just makes it easier for us when someone creates an issue claiming backend issue and we can ask them for homeserver logs. --- changelog.d/19226.misc | 1 + synapse/handlers/pagination.py | 116 ++++++++++++++++++++++----------- synapse/rest/admin/rooms.py | 35 +++++++++- synapse/rest/client/room.py | 100 +++++++++++++++++++++++++++- 4 files changed, 210 insertions(+), 42 deletions(-) create mode 100644 changelog.d/19226.misc diff --git a/changelog.d/19226.misc b/changelog.d/19226.misc new file mode 100644 index 0000000000..c38d1d3ef6 --- /dev/null +++ b/changelog.d/19226.misc @@ -0,0 +1 @@ +Add log to determine whether clients are using `/messages` as expected. 
diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index f869a41c5e..63e5dfa70c 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -21,22 +21,25 @@ import logging from typing import TYPE_CHECKING, cast +import attr + from twisted.python.failure import Failure from synapse.api.constants import Direction, EventTypes, Membership from synapse.api.errors import SynapseError from synapse.api.filtering import Filter -from synapse.events.utils import SerializeEventConfig +from synapse.events import EventBase +from synapse.handlers.relations import BundledAggregations from synapse.handlers.worker_lock import NEW_EVENT_DURING_PURGE_LOCK_NAME from synapse.logging.opentracing import trace from synapse.rest.admin._base import assert_user_is_admin from synapse.streams.config import PaginationConfig from synapse.types import ( - JsonDict, JsonMapping, Requester, ScheduledTask, StreamKeyType, + StreamToken, TaskStatus, ) from synapse.types.handlers import ShutdownRoomParams, ShutdownRoomResponse @@ -70,6 +73,58 @@ SHUTDOWN_AND_PURGE_ROOM_ACTION_NAME = "shutdown_and_purge_room" +@attr.s(slots=True, frozen=True, auto_attribs=True) +class GetMessagesResult: + """ + Everything needed to serialize a `/messages` response. + """ + + messages_chunk: list[EventBase] + """ + A list of room events. + + - When the request is `Direction.FORWARDS`, events will be in the range: + `start_token` < x <= `end_token`, (ascending topological_order) + - When the request is `Direction.BACKWARDS`, events will be in the range: + `start_token` >= x > `end_token`, (descending topological_order) + + Note that an empty chunk does not necessarily imply that no more events are + available. Clients should continue to paginate until no `end_token` property is returned. + """ + + bundled_aggregations: dict[str, BundledAggregations] + """ + A map of event ID to the bundled aggregations for the events in the chunk. + + If an event doesn't have any bundled aggregations, it may not appear in the map. + """ + + state: list[EventBase] | None + """ + A list of state events relevant to showing the chunk. For example, if + lazy_load_members is enabled in the filter then this may contain the membership + events for the senders of events in the chunk. + + Omitted from the response when `None`. + """ + + start_token: StreamToken + """ + Token corresponding to the start of chunk. This will be the same as the value given + in `from` query parameter of the `/messages` request. + """ + + end_token: StreamToken | None + """ + A token corresponding to the end of chunk. This token can be passed back to this + endpoint to request further events. + + If no further events are available (either because we have reached the start of the + timeline, or because the user does not have permission to see any more events), this + property is omitted from the response. + """ + + class PaginationHandler: """Handles pagination and purge history requests. @@ -418,7 +473,7 @@ async def get_messages( as_client_event: bool = True, event_filter: Filter | None = None, use_admin_priviledge: bool = False, - ) -> JsonDict: + ) -> GetMessagesResult: """Get messages in a room. Args: @@ -617,10 +672,13 @@ async def get_messages( # In that case we do not return end, to tell the client # there is no need for further queries. 
if not events: - return { - "chunk": [], - "start": await from_token.to_string(self.store), - } + return GetMessagesResult( + messages_chunk=[], + bundled_aggregations={}, + state=None, + start_token=from_token, + end_token=None, + ) if event_filter: events = await event_filter.filter(events) @@ -636,11 +694,13 @@ async def get_messages( # if after the filter applied there are no more events # return immediately - but there might be more in next_token batch if not events: - return { - "chunk": [], - "start": await from_token.to_string(self.store), - "end": await next_token.to_string(self.store), - } + return GetMessagesResult( + messages_chunk=[], + bundled_aggregations={}, + state=None, + start_token=from_token, + end_token=next_token, + ) state = None if event_filter and event_filter.lazy_load_members and len(events) > 0: @@ -657,38 +717,20 @@ async def get_messages( if state_ids: state_dict = await self.store.get_events(list(state_ids.values())) - state = state_dict.values() + state = list(state_dict.values()) aggregations = await self._relations_handler.get_bundled_aggregations( events, user_id ) - time_now = self.clock.time_msec() - - serialize_options = SerializeEventConfig( - as_client_event=as_client_event, requester=requester + return GetMessagesResult( + messages_chunk=events, + bundled_aggregations=aggregations, + state=state, + start_token=from_token, + end_token=next_token, ) - chunk = { - "chunk": ( - await self._event_serializer.serialize_events( - events, - time_now, - config=serialize_options, - bundle_aggregations=aggregations, - ) - ), - "start": await from_token.to_string(self.store), - "end": await next_token.to_string(self.store), - } - - if state: - chunk["state"] = await self._event_serializer.serialize_events( - state, time_now, config=serialize_options - ) - - return chunk - async def _shutdown_and_purge_room( self, task: ScheduledTask, diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index cf24bc628a..a886859ffa 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -28,9 +28,13 @@ from synapse.api.constants import Direction, EventTypes, JoinRules, Membership from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError from synapse.api.filtering import Filter +from synapse.events.utils import ( + SerializeEventConfig, +) from synapse.handlers.pagination import ( PURGE_ROOM_ACTION_NAME, SHUTDOWN_AND_PURGE_ROOM_ACTION_NAME, + GetMessagesResult, ) from synapse.http.servlet import ( ResolveRoomIdMixin, @@ -44,11 +48,13 @@ parse_string, ) from synapse.http.site import SynapseRequest +from synapse.logging.opentracing import trace from synapse.rest.admin._base import ( admin_patterns, assert_requester_is_admin, assert_user_is_admin, ) +from synapse.rest.client.room import SerializeMessagesDeps, encode_messages_response from synapse.storage.databases.main.room import RoomSortOrder from synapse.streams.config import PaginationConfig from synapse.types import JsonDict, RoomID, ScheduledTask, UserID, create_requester @@ -976,6 +982,7 @@ def __init__(self, hs: "HomeServer"): self._pagination_handler = hs.get_pagination_handler() self._auth = hs.get_auth() self._store = hs.get_datastores().main + self._event_serializer = hs.get_event_client_serializer() async def on_GET( self, request: SynapseRequest, room_id: str @@ -999,7 +1006,11 @@ async def on_GET( ): as_client_event = False - msgs = await self._pagination_handler.get_messages( + serialize_options = SerializeEventConfig( + as_client_event=as_client_event, 
requester=requester + ) + + get_messages_result = await self._pagination_handler.get_messages( room_id=room_id, requester=requester, pagin_config=pagination_config, @@ -1008,7 +1019,27 @@ async def on_GET( use_admin_priviledge=True, ) - return HTTPStatus.OK, msgs + response_content = await self.encode_response( + get_messages_result, serialize_options + ) + + return HTTPStatus.OK, response_content + + @trace + async def encode_response( + self, + get_messages_result: GetMessagesResult, + serialize_options: SerializeEventConfig, + ) -> JsonDict: + return await encode_messages_response( + get_messages_result=get_messages_result, + serialize_options=serialize_options, + serialize_deps=SerializeMessagesDeps( + clock=self._clock, + event_serializer=self._event_serializer, + store=self._store, + ), + ) class RoomTimestampToEventRestServlet(RestServlet): diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index 81a6bd57fc..5e7dcb0191 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -28,6 +28,7 @@ from typing import TYPE_CHECKING, Awaitable from urllib import parse as urlparse +import attr from prometheus_client.core import Histogram from twisted.web.server import Request @@ -45,10 +46,12 @@ ) from synapse.api.filtering import Filter from synapse.events.utils import ( + EventClientSerializer, SerializeEventConfig, format_event_for_client_v2, serialize_event, ) +from synapse.handlers.pagination import GetMessagesResult from synapse.http.server import HttpServer from synapse.http.servlet import ( ResolveRoomIdMixin, @@ -64,15 +67,17 @@ ) from synapse.http.site import SynapseRequest from synapse.logging.context import make_deferred_yieldable, run_in_background -from synapse.logging.opentracing import set_tag +from synapse.logging.opentracing import set_tag, trace from synapse.metrics import SERVER_NAME_LABEL from synapse.rest.client._base import client_patterns from synapse.rest.client.transactions import HttpTransactionCache from synapse.state import CREATE_KEY, POWER_KEY +from synapse.storage.databases.main import DataStore from synapse.streams.config import PaginationConfig from synapse.types import JsonDict, Requester, StreamToken, ThirdPartyInstanceID, UserID from synapse.types.state import StateFilter from synapse.util.cancellation import cancellable +from synapse.util.clock import Clock from synapse.util.events import generate_fake_event_id from synapse.util.stringutils import parse_and_validate_server_name @@ -790,6 +795,56 @@ async def on_GET( return 200, {"joined": users_with_profile} +@attr.s(slots=True, frozen=True, auto_attribs=True) +class SerializeMessagesDeps: + clock: Clock + event_serializer: EventClientSerializer + store: DataStore + + +@trace +async def encode_messages_response( + *, + get_messages_result: GetMessagesResult, + serialize_options: SerializeEventConfig, + serialize_deps: SerializeMessagesDeps, +) -> JsonDict: + """ + Serialize a `GetMessagesResult` into the JSON response format for the `/messages` + endpoint. + + This logic is shared between the client API and Synapse admin API. 
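+
+    For illustration only (this shape is read off the serialization code just
+    below; the concrete token strings are placeholders):
+
+        {
+            "chunk": [...],   # serialized timeline events
+            "start": "...",   # the token the request was made from
+            "end": "...",     # omitted when no further events are available
+            "state": [...],   # omitted unless lazy-loaded state was fetched
+        }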
+ """ + + time_now = serialize_deps.clock.time_msec() + + serialized_result = { + "chunk": ( + await serialize_deps.event_serializer.serialize_events( + get_messages_result.messages_chunk, + time_now, + config=serialize_options, + bundle_aggregations=get_messages_result.bundled_aggregations, + ) + ), + "start": await get_messages_result.start_token.to_string(serialize_deps.store), + } + + if get_messages_result.end_token is not None: + serialized_result["end"] = await get_messages_result.end_token.to_string( + serialize_deps.store + ) + + if get_messages_result.state is not None: + serialized_result[ + "state" + ] = await serialize_deps.event_serializer.serialize_events( + get_messages_result.state, time_now, config=serialize_options + ) + + return serialized_result + + # TODO: Needs better unit testing class RoomMessageListRestServlet(RestServlet): PATTERNS = client_patterns("/rooms/(?P[^/]*)/messages$", v1=True) @@ -806,6 +861,7 @@ def __init__(self, hs: "HomeServer"): self.pagination_handler = hs.get_pagination_handler() self.auth = hs.get_auth() self.store = hs.get_datastores().main + self.event_serializer = hs.get_event_client_serializer() async def on_GET( self, request: SynapseRequest, room_id: str @@ -839,7 +895,11 @@ async def on_GET( ): as_client_event = False - msgs = await self.pagination_handler.get_messages( + serialize_options = SerializeEventConfig( + as_client_event=as_client_event, requester=requester + ) + + get_messages_result = await self.pagination_handler.get_messages( room_id=room_id, requester=requester, pagin_config=pagination_config, @@ -847,6 +907,24 @@ async def on_GET( event_filter=event_filter, ) + # Useful for debugging timeline/pagination issues. For example, if a client + # isn't seeing the full history, we can check the homeserver logs to see if the + # client just never made the next request with the given `end` token. 
+ logger.info( + "Responding to `/messages` request: {%s} %s %s -> %d messages with end_token=%s", + requester.user.to_string(), + request.get_method(), + request.get_redacted_uri(), + len(get_messages_result.messages_chunk), + (await get_messages_result.end_token.to_string(self.store)) + if get_messages_result.end_token + else None, + ) + + response_content = await self.encode_response( + get_messages_result, serialize_options + ) + processing_end_time = self.clock.time_msec() room_member_count = await make_deferred_yieldable(room_member_count_deferred) messsages_response_timer.labels( @@ -854,7 +932,23 @@ async def on_GET( **{SERVER_NAME_LABEL: self.server_name}, ).observe((processing_end_time - processing_start_time) / 1000) - return 200, msgs + return 200, response_content + + @trace + async def encode_response( + self, + get_messages_result: GetMessagesResult, + serialize_options: SerializeEventConfig, + ) -> JsonDict: + return await encode_messages_response( + get_messages_result=get_messages_result, + serialize_options=serialize_options, + serialize_deps=SerializeMessagesDeps( + clock=self.clock, + event_serializer=self.event_serializer, + store=self.store, + ), + ) # TODO: Needs unit testing From a8e5c319ab952db39b74cba87e9a7344bffdf204 Mon Sep 17 00:00:00 2001 From: Patrick Maier Date: Tue, 2 Dec 2025 12:09:18 +0100 Subject: [PATCH 37/47] Simplify README and add ESS Getting started section (#19228) - Add reference to ESS for getting started easily with Synapse/Matrix/Element - Remove details on standalone installations and link to the docs - Other updates like copyright --- README.rst | 202 +++++++++++------------------------------ changelog.d/19228.misc | 1 + 2 files changed, 55 insertions(+), 148 deletions(-) create mode 100644 changelog.d/19228.misc diff --git a/README.rst b/README.rst index d10b662d1a..9ea397cd0f 100644 --- a/README.rst +++ b/README.rst @@ -7,81 +7,61 @@ Synapse is an open source `Matrix `__ homeserver implementation, written and maintained by `Element `_. -`Matrix `__ is the open standard for -secure and interoperable real-time communications. You can directly run -and manage the source code in this repository, available under an AGPL -license (or alternatively under a commercial license from Element). -There is no support provided by Element unless you have a -subscription from Element. +`Matrix `__ is the open standard for secure and +interoperable real-time communications. You can directly run and manage the +source code in this repository, available under an AGPL license (or +alternatively under a commercial license from Element). -Subscription -============ +There is no support provided by Element unless you have a subscription from +Element. -For those that need an enterprise-ready solution, Element -Server Suite (ESS) is `available via subscription `_. -ESS builds on Synapse to offer a complete Matrix-based backend including the full -`Admin Console product `_, -giving admins the power to easily manage an organization-wide -deployment. It includes advanced identity management, auditing, -moderation and data retention options as well as Long-Term Support and -SLAs. ESS supports any Matrix-compatible client. +🚀 Getting started +================== -.. contents:: +This component is developed and maintained by `Element `_. +It gets shipped as part of the **Element Server Suite (ESS)** which provides the +official means of deployment. 
-🛠️ Installation and configuration -================================== +ESS is a Matrix distribution from Element with focus on quality and ease of use. +It ships a full Matrix stack tailored to the respective use case. -The Synapse documentation describes `how to install Synapse `_. We recommend using -`Docker images `_ or `Debian packages from Matrix.org -`_. +There are three editions of ESS: -.. _federation: +- `ESS Community `_ - the free Matrix + distribution from Element tailored to small-/mid-scale, non-commercial + community use cases +- `ESS Pro `_ - the commercial Matrix + distribution from Element for professional use +- `ESS TI-M `_ - a special version + of ESS Pro focused on the requirements of TI-Messenger Pro and ePA as + specified by the German National Digital Health Agency Gematik -Synapse has a variety of `config options -`_ -which can be used to customise its behaviour after installation. -There are additional details on how to `configure Synapse for federation here -`_. -.. _reverse-proxy: +🛠️ Standalone installation and configuration +============================================ -Using a reverse proxy with Synapse ----------------------------------- - -It is recommended to put a reverse proxy such as -`nginx `_, -`Apache `_, -`Caddy `_, -`HAProxy `_ or -`relayd `_ in front of Synapse. One advantage of -doing so is that it means that you can expose the default https port (443) to -Matrix clients without needing to run Synapse with root privileges. -For information on configuring one, see `the reverse proxy docs -`_. - -Upgrading an existing Synapse ------------------------------ - -The instructions for upgrading Synapse are in `the upgrade notes`_. -Please check these instructions as upgrading may require extra steps for some -versions of Synapse. - -.. _the upgrade notes: https://element-hq.github.io/synapse/develop/upgrade.html +The Synapse documentation describes `options for installing Synapse standalone +`_. See +below for more useful documenation links. +- `Synapse configuration options `_ +- `Synapse configuration for federation `_ +- `Using a reverse proxy with Synapse `_ +- `Upgrading Synapse `_ Platform dependencies --------------------- Synapse uses a number of platform dependencies such as Python and PostgreSQL, -and aims to follow supported upstream versions. See the -`deprecation policy `_ -for more details. +and aims to follow supported upstream versions. See the `deprecation policy +`_ for more +details. Security note ------------- -Matrix serves raw, user-supplied data in some APIs -- specifically the `content +Matrix serves raw, user-supplied data in some APIs — specifically the `content repository endpoints`_. .. _content repository endpoints: https://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid @@ -118,60 +98,6 @@ mentioned in MXIDs hosted on that server. Following this advice ensures that even if an XSS is found in Synapse, the impact to other applications will be minimal. - -🧪 Testing a new installation -============================= - -The easiest way to try out your new Synapse installation is by connecting to it -from a web client. - -Unless you are running a test instance of Synapse on your local machine, in -general, you will need to enable TLS support before you can successfully -connect from a client: see -`TLS certificates `_. - -An easy way to get started is to login or register via Element at -https://app.element.io/#/login or https://app.element.io/#/register respectively. 
-You will need to change the server you are logging into from ``matrix.org``
-and instead specify a homeserver URL of ``https://<server_name>:8448``
-(or just ``https://<server_name>`` if you are using a reverse proxy).
-If you prefer to use another client, refer to our
-`client breakdown `_.
-
-If all goes well you should at least be able to log in, create a room, and
-start sending messages.
-
-.. _`client-user-reg`:
-
-Registering a new user from a client
-------------------------------------
-
-By default, registration of new users via Matrix clients is disabled. To enable
-it:
-
-1. In the
-   `registration config section `_
-   set ``enable_registration: true`` in ``homeserver.yaml``.
-2. Then **either**:
-
-   a. set up a `CAPTCHA `_, or
-   b. set ``enable_registration_without_verification: true`` in ``homeserver.yaml``.
-
-We **strongly** recommend using a CAPTCHA, particularly if your homeserver is exposed to
-the public internet. Without it, anyone can freely register accounts on your homeserver.
-This can be exploited by attackers to create spambots targeting the rest of the Matrix
-federation.
-
-Your new Matrix ID will be formed partly from the ``server_name``, and partly
-from a localpart you specify when you create the account in the form of::
-
-   @localpart:my.domain.name
-
-(pronounced "at localpart on my dot domain dot name").
-
-As when logging in, you will need to specify a "Custom server". Specify your
-desired ``localpart`` in the 'Username' box.
-
 🎯 Troubleshooting and support
 ==============================
@@ -182,7 +108,7 @@
 Enterprise quality support for Synapse including SLAs is available as part of an
 `Element Server Suite (ESS) `_ subscription.
 If you are an existing ESS subscriber then you can raise a `support request `_
-and access the `knowledge base `_.
+and access the `Element product documentation `_.

 🤝 Community support
 --------------------
@@ -201,35 +127,6 @@
 issues for support requests, only for bug reports and feature requests.

 .. |docs| replace:: ``docs``
 .. _docs: docs

-🪪 Identity Servers
-===================
-
-Identity servers have the job of mapping email addresses and other 3rd Party
-IDs (3PIDs) to Matrix user IDs, as well as verifying the ownership of 3PIDs
-before creating that mapping.
-
-**Identity servers do not store accounts or credentials - these are stored and managed on homeservers.
-Identity Servers are just for mapping 3rd Party IDs to Matrix IDs.**
-
-This process is highly security-sensitive, as there is an obvious risk of spam if it
-is too easy to sign up for Matrix accounts or harvest 3PID data. In the longer
-term, we hope to create a decentralised system to manage it (`matrix-doc #712
-`_), but in the meantime,
-the role of managing trusted identity in the Matrix ecosystem is farmed out to
-a cluster of known trusted ecosystem partners, who run 'Matrix Identity
-Servers' such as `Sydent `_, whose role
-is purely to authenticate and track 3PID logins and publish end-user public
-keys.
-
-You can host your own copy of Sydent, but this will prevent you reaching other
-users in the Matrix ecosystem via their email address, and prevent them finding
-you. We therefore recommend that you use one of the centralised identity servers
-at ``https://matrix.org`` or ``https://vector.im`` for now.
-
-To reiterate: the Identity server will only be used if you choose to associate
-an email address with your account, or send an invite to another user via their
-email address.
- 🛠️ Development ============== @@ -252,20 +149,29 @@ Alongside all that, join our developer community on Matrix: Copyright and Licensing ======================= -| Copyright 2014-2017 OpenMarket Ltd -| Copyright 2017 Vector Creations Ltd -| Copyright 2017-2025 New Vector Ltd -| + | Copyright 2014–2017 OpenMarket Ltd + | Copyright 2017 Vector Creations Ltd + | Copyright 2017–2025 New Vector Ltd + | Copyright 2025 Element Creations Ltd -This software is dual-licensed by New Vector Ltd (Element). It can be used either: +This software is dual-licensed by Element Creations Ltd (Element). It can be +used either: -(1) for free under the terms of the GNU Affero General Public License (as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version); OR +(1) for free under the terms of the GNU Affero General Public License (as + published by the Free Software Foundation, either version 3 of the License, + or (at your option) any later version); OR -(2) under the terms of a paid-for Element Commercial License agreement between you and Element (the terms of which may vary depending on what you and Element have agreed to). +(2) under the terms of a paid-for Element Commercial License agreement between + you and Element (the terms of which may vary depending on what you and + Element have agreed to). -Unless required by applicable law or agreed to in writing, software distributed under the Licenses is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the Licenses for the specific language governing permissions and limitations under the Licenses. +Unless required by applicable law or agreed to in writing, software distributed +under the Licenses is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR +CONDITIONS OF ANY KIND, either express or implied. See the Licenses for the +specific language governing permissions and limitations under the Licenses. -Please contact `licensing@element.io `_ to purchase an Element commercial license for this software. +Please contact `licensing@element.io `_ to purchase +an Element commercial license for this software. .. |support| image:: https://img.shields.io/badge/matrix-community%20support-success diff --git a/changelog.d/19228.misc b/changelog.d/19228.misc new file mode 100644 index 0000000000..bee72bff9a --- /dev/null +++ b/changelog.d/19228.misc @@ -0,0 +1 @@ +Simplify README and add ESS Getting started section. From 022e56cce34e0b383dfe2ada426da36c67ff92ad Mon Sep 17 00:00:00 2001 From: Erik Johnston Date: Tue, 2 Dec 2025 14:25:12 +0000 Subject: [PATCH 38/47] Move security note from README into the docs (#19259) This is a) to simplify the README and b) so that we can easily link to the security page from e.g. the installation guide. Follows on from https://github.com/element-hq/synapse/pull/19228 --- README.rst | 50 +------------------------------------- changelog.d/19259.misc | 1 + docs/SUMMARY.md | 1 + docs/setup/installation.md | 7 ++++++ docs/setup/security.md | 41 +++++++++++++++++++++++++++++++ 5 files changed, 51 insertions(+), 49 deletions(-) create mode 100644 changelog.d/19259.misc create mode 100644 docs/setup/security.md diff --git a/README.rst b/README.rst index 9ea397cd0f..e95aa0f8aa 100644 --- a/README.rst +++ b/README.rst @@ -42,61 +42,13 @@ There are three editions of ESS: The Synapse documentation describes `options for installing Synapse standalone `_. See -below for more useful documenation links. 
+below for more useful documentation links. - `Synapse configuration options `_ - `Synapse configuration for federation `_ - `Using a reverse proxy with Synapse `_ - `Upgrading Synapse `_ -Platform dependencies ---------------------- - -Synapse uses a number of platform dependencies such as Python and PostgreSQL, -and aims to follow supported upstream versions. See the `deprecation policy -`_ for more -details. - - -Security note -------------- - -Matrix serves raw, user-supplied data in some APIs — specifically the `content -repository endpoints`_. - -.. _content repository endpoints: https://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid - -Whilst we make a reasonable effort to mitigate against XSS attacks (for -instance, by using `CSP`_), a Matrix homeserver should not be hosted on a -domain hosting other web applications. This especially applies to sharing -the domain with Matrix web clients and other sensitive applications like -webmail. See -https://developer.github.com/changes/2014-04-25-user-content-security for more -information. - -.. _CSP: https://github.com/matrix-org/synapse/pull/1021 - -Ideally, the homeserver should not simply be on a different subdomain, but on -a completely different `registered domain`_ (also known as top-level site or -eTLD+1). This is because `some attacks`_ are still possible as long as the two -applications share the same registered domain. - -.. _registered domain: https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-03#section-2.3 - -.. _some attacks: https://en.wikipedia.org/wiki/Session_fixation#Attacks_using_cross-subdomain_cookie - -To illustrate this with an example, if your Element Web or other sensitive web -application is hosted on ``A.example1.com``, you should ideally host Synapse on -``example2.com``. Some amount of protection is offered by hosting on -``B.example1.com`` instead, so this is also acceptable in some scenarios. -However, you should *not* host your Synapse on ``A.example1.com``. - -Note that all of the above refers exclusively to the domain used in Synapse's -``public_baseurl`` setting. In particular, it has no bearing on the domain -mentioned in MXIDs hosted on that server. - -Following this advice ensures that even if an XSS is found in Synapse, the -impact to other applications will be minimal. 🎯 Troubleshooting and support ============================== diff --git a/changelog.d/19259.misc b/changelog.d/19259.misc new file mode 100644 index 0000000000..bee72bff9a --- /dev/null +++ b/changelog.d/19259.misc @@ -0,0 +1 @@ +Simplify README and add ESS Getting started section. diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index 926a6eb848..980f51d078 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -5,6 +5,7 @@ # Setup - [Installation](setup/installation.md) + - [Security](setup/security.md) - [Using Postgres](postgres.md) - [Configuring a Reverse Proxy](reverse_proxy.md) - [Configuring a Forward/Outbound Proxy](setup/forward_proxy.md) diff --git a/docs/setup/installation.md b/docs/setup/installation.md index 786672c689..a48662362a 100644 --- a/docs/setup/installation.md +++ b/docs/setup/installation.md @@ -16,8 +16,15 @@ that your email address is probably `user@example.com` rather than `user@email.example.com`) - but doing so may require more advanced setup: see [Setting up Federation](../federate.md). +⚠️ Before setting up Synapse please consult the [security page](security.md) for +best practices. 
⚠️ + ## Installing Synapse +Note: Synapse uses a number of platform dependencies such as Python and PostgreSQL, +and aims to follow supported upstream versions. See the [deprecation +policy](../deprecation_policy.md) for more details. + ### Prebuilt packages Prebuilt packages are available for a number of platforms. These are recommended diff --git a/docs/setup/security.md b/docs/setup/security.md new file mode 100644 index 0000000000..2c21b494e5 --- /dev/null +++ b/docs/setup/security.md @@ -0,0 +1,41 @@ +# Security + +This page lays out security best-practices when running Synapse. + +If you believe you have encountered a security issue, see our [Security +Disclosure Policy](https://element.io/en/security/security-disclosure-policy). + +## Content repository + +Matrix serves raw, user-supplied data in some APIs — specifically the [content +repository endpoints](https://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid). + +Whilst we make a reasonable effort to mitigate against XSS attacks (for +instance, by using [CSP](https://github.com/matrix-org/synapse/pull/1021)), a +Matrix homeserver should not be hosted on a domain hosting other web +applications. This especially applies to sharing the domain with Matrix web +clients and other sensitive applications like webmail. See +https://developer.github.com/changes/2014-04-25-user-content-security for more +information. + +Ideally, the homeserver should not simply be on a different subdomain, but on a +completely different [registered +domain](https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-03#section-2.3) +(also known as top-level site or eTLD+1). This is because [some +attacks](https://en.wikipedia.org/wiki/Session_fixation#Attacks_using_cross-subdomain_cookie) +are still possible as long as the two applications share the same registered +domain. + + +To illustrate this with an example, if your Element Web or other sensitive web +application is hosted on `A.example1.com`, you should ideally host Synapse on +`example2.com`. Some amount of protection is offered by hosting on +`B.example1.com` instead, so this is also acceptable in some scenarios. +However, you should *not* host your Synapse on `A.example1.com`. + +Note that all of the above refers exclusively to the domain used in Synapse's +`public_baseurl` setting. In particular, it has no bearing on the domain +mentioned in MXIDs hosted on that server. + +Following this advice ensures that even if an XSS is found in Synapse, the +impact to other applications will be minimal. From 2862c77837cf806230965897d74ccb31e94c6a99 Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Tue, 2 Dec 2025 15:59:27 +0000 Subject: [PATCH 39/47] Remove macos wheels from CI (#19263) Follows #19225 and stops building macos wheels in CI. ### Pull Request Checklist * [X] Pull request is based on the develop branch * [X] Pull request includes a [changelog file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog). The entry should: - Be a short description of your change which makes sense to users. "Fixed a bug that prevented receiving messages from other servers." instead of "Moved X method from `EventStore` to `EventWorkerStore`.". - Use markdown where necessary, mostly for `code blocks`. - End with either a period (.) or an exclamation mark (!). - Start with a capital letter. - Feel free to credit yourself, by adding a sentence "Contributed by @github_username." or "Contributed by [Your Name]." to the end of the entry. 
* [X] [Code style](https://element-hq.github.io/synapse/latest/code_style.html) is correct (run the [linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters)) --- .github/workflows/release-artifacts.yml | 7 ------- changelog.d/19263.misc | 1 + pyproject.toml | 4 ---- 3 files changed, 1 insertion(+), 11 deletions(-) create mode 100644 changelog.d/19263.misc diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index 03c2b0a326..531680b989 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -114,19 +114,12 @@ jobs: os: - ubuntu-24.04 - ubuntu-24.04-arm - - macos-14 # This uses arm64 - - macos-15-intel # This uses x86-64 # is_pr is a flag used to exclude certain jobs from the matrix on PRs. # It is not read by the rest of the workflow. is_pr: - ${{ startsWith(github.ref, 'refs/pull/') }} exclude: - # Don't build macos wheels on PR CI. - - is_pr: true - os: "macos-15-intel" - - is_pr: true - os: "macos-14" # Don't build aarch64 wheels on PR CI. - is_pr: true os: "ubuntu-24.04-arm" diff --git a/changelog.d/19263.misc b/changelog.d/19263.misc new file mode 100644 index 0000000000..62b7594108 --- /dev/null +++ b/changelog.d/19263.misc @@ -0,0 +1 @@ +Stop building macos wheels in CI pipeline. diff --git a/pyproject.toml b/pyproject.toml index 5ee843365d..8cee32ecfa 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -424,7 +424,3 @@ test-command = "python -c 'from synapse.synapse_rust import sum_as_string; print [tool.cibuildwheel.linux] # Wrap the repair command to correctly rename the built cpython wheels as ABI3. repair-wheel-command = "./.ci/scripts/auditwheel_wrapper.py -w {dest_dir} {wheel}" - -[tool.cibuildwheel.macos] -# Wrap the repair command to correctly rename the built cpython wheels as ABI3. 
-repair-wheel-command = "./.ci/scripts/auditwheel_wrapper.py --require-archs {delocate_archs} -w {dest_dir} {wheel}" From 5fe4b7ed60d22f7d7b5c50c4c74f1785c65943df Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Tue, 2 Dec 2025 09:21:08 -0700 Subject: [PATCH 40/47] 1.144.0rc1 --- CHANGES.md | 60 +++++++++++++++++++++++++++++++ changelog.d/17782.misc | 1 - changelog.d/18960.bugfix | 1 - changelog.d/19203.feature | 1 - changelog.d/19207.feature | 1 - changelog.d/19208.misc | 1 - changelog.d/19209.misc | 1 - changelog.d/19211.misc | 1 - changelog.d/19219.misc | 1 - changelog.d/19220.misc | 1 - changelog.d/19221.misc | 1 - changelog.d/19223.misc | 1 - changelog.d/19224.misc | 1 - changelog.d/19225.removal | 1 - changelog.d/19226.misc | 1 - changelog.d/19228.misc | 1 - changelog.d/19229.misc | 1 - changelog.d/19230.misc | 1 - changelog.d/19235.bugfix | 1 - changelog.d/19239.misc | 1 - changelog.d/19240.bugfix | 1 - changelog.d/19243.doc | 1 - changelog.d/19251.misc | 1 - changelog.d/19259.misc | 1 - changelog.d/19263.misc | 1 - debian/changelog | 6 ++++ pyproject.toml | 2 +- schema/synapse-config.schema.yaml | 2 +- 28 files changed, 68 insertions(+), 26 deletions(-) delete mode 100644 changelog.d/17782.misc delete mode 100644 changelog.d/18960.bugfix delete mode 100644 changelog.d/19203.feature delete mode 100644 changelog.d/19207.feature delete mode 100644 changelog.d/19208.misc delete mode 100644 changelog.d/19209.misc delete mode 100644 changelog.d/19211.misc delete mode 100644 changelog.d/19219.misc delete mode 100644 changelog.d/19220.misc delete mode 100644 changelog.d/19221.misc delete mode 100644 changelog.d/19223.misc delete mode 100644 changelog.d/19224.misc delete mode 100644 changelog.d/19225.removal delete mode 100644 changelog.d/19226.misc delete mode 100644 changelog.d/19228.misc delete mode 100644 changelog.d/19229.misc delete mode 100644 changelog.d/19230.misc delete mode 100644 changelog.d/19235.bugfix delete mode 100644 changelog.d/19239.misc delete mode 100644 changelog.d/19240.bugfix delete mode 100644 changelog.d/19243.doc delete mode 100644 changelog.d/19251.misc delete mode 100644 changelog.d/19259.misc delete mode 100644 changelog.d/19263.misc diff --git a/CHANGES.md b/CHANGES.md index 53d0ae3674..a58f3cea21 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,63 @@ +# synapse 1.144.0rc1 (2025-12-02) + +Admins using the unstable [MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666) endpoint (`/_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms`), please check [the relevant section in the upgrade notes](https://github.com/element-hq/synapse/blob/develop/docs/upgrade.md#upgrading-to-v11440) as this release contains changes that disable that endpoint by default. + +## Features + +- Add experimentatal implememntation of [MSC4380](https://github.com/matrix-org/matrix-spec-proposals/pull/4380) (invite blocking). ([\#19203](https://github.com/element-hq/synapse/issues/19203)) +- Allow restarting delayed event timeouts on workers. ([\#19207](https://github.com/element-hq/synapse/issues/19207)) + +## Bugfixes + +- Fix a bug in the database function for fetching state deltas that could result in unnecessarily long query times. ([\#18960](https://github.com/element-hq/synapse/issues/18960)) +- Fix v12 rooms when running with `use_frozen_dicts: True`. ([\#19235](https://github.com/element-hq/synapse/issues/19235)) +- Fix bug where invalid `canonical_alias` content would return 500 instead of 400. 
([\#19240](https://github.com/element-hq/synapse/issues/19240)) + +## Improved Documentation + +- Document in the `--config-path` help how multiple files are merged - by merging them shallowly. ([\#19243](https://github.com/element-hq/synapse/issues/19243)) + +## Deprecations and Removals + +- Stop building release wheels for MacOS. ([\#19225](https://github.com/element-hq/synapse/issues/19225)) + +## Internal Changes + +- Improve event filtering for Simplified Sliding Sync. ([\#17782](https://github.com/element-hq/synapse/issues/17782)) +- Export `SYNAPSE_SUPPORTED_COMPLEMENT_TEST_PACKAGES` environment variable from `scripts-dev/complement.sh`. ([\#19208](https://github.com/element-hq/synapse/issues/19208)) +- Refactor `scripts-dev/complement.sh` logic to avoid `exit` to facilitate being able to source it from other scripts (composable). ([\#19209](https://github.com/element-hq/synapse/issues/19209)) +- Expire sliding sync connections that are too old or have too much pending data. ([\#19211](https://github.com/element-hq/synapse/issues/19211)) +- Require an experimental feature flag to be enabled in order for the unstable [MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666) endpoint (`/_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms`) to be available. ([\#19219](https://github.com/element-hq/synapse/issues/19219)) +- Prevent changelog check CI running on @dependabot's PRs even when a human has modified the branch. ([\#19220](https://github.com/element-hq/synapse/issues/19220)) +- Auto-fix trailing spaces in multi-line strings and comments when running the lint script. ([\#19221](https://github.com/element-hq/synapse/issues/19221)) +- Move towards using a dedicated `Duration` type. ([\#19223](https://github.com/element-hq/synapse/issues/19223), [\#19229](https://github.com/element-hq/synapse/issues/19229)) +- Improve robustness of the SQL schema linting in CI. ([\#19224](https://github.com/element-hq/synapse/issues/19224)) +- Add log to determine whether clients are using `/messages` as expected. ([\#19226](https://github.com/element-hq/synapse/issues/19226)) +- Simplify README and add ESS Getting started section. ([\#19228](https://github.com/element-hq/synapse/issues/19228), [\#19259](https://github.com/element-hq/synapse/issues/19259)) +- Add a unit test for ensuring associated refresh tokens are erased when a device is deleted. ([\#19230](https://github.com/element-hq/synapse/issues/19230)) +- Prompt user to consider adding future deprecations to the changelog in release script. ([\#19239](https://github.com/element-hq/synapse/issues/19239)) +- Fix check of the Rust compiled code being outdated when using source checkout and `.egg-info`. ([\#19251](https://github.com/element-hq/synapse/issues/19251)) +- Stop building macos wheels in CI pipeline. ([\#19263](https://github.com/element-hq/synapse/issues/19263)) + + + +### Updates to locked dependencies + +* Bump Swatinem/rust-cache from 2.8.1 to 2.8.2. ([\#19244](https://github.com/element-hq/synapse/issues/19244)) +* Bump actions/checkout from 5.0.0 to 6.0.0. ([\#19213](https://github.com/element-hq/synapse/issues/19213)) +* Bump actions/setup-go from 6.0.0 to 6.1.0. ([\#19214](https://github.com/element-hq/synapse/issues/19214)) +* Bump actions/setup-python from 6.0.0 to 6.1.0. ([\#19245](https://github.com/element-hq/synapse/issues/19245)) +* Bump attrs from 25.3.0 to 25.4.0. ([\#19215](https://github.com/element-hq/synapse/issues/19215)) +* Bump docker/metadata-action from 5.9.0 to 5.10.0. 
([\#19246](https://github.com/element-hq/synapse/issues/19246)) +* Bump http from 1.3.1 to 1.4.0. ([\#19249](https://github.com/element-hq/synapse/issues/19249)) +* Bump pydantic from 2.12.4 to 2.12.5. ([\#19250](https://github.com/element-hq/synapse/issues/19250)) +* Bump pyopenssl from 25.1.0 to 25.3.0. ([\#19248](https://github.com/element-hq/synapse/issues/19248)) +* Bump rpds-py from 0.28.0 to 0.29.0. ([\#19216](https://github.com/element-hq/synapse/issues/19216)) +* Bump rpds-py from 0.29.0 to 0.30.0. ([\#19247](https://github.com/element-hq/synapse/issues/19247)) +* Bump sentry-sdk from 2.44.0 to 2.46.0. ([\#19218](https://github.com/element-hq/synapse/issues/19218)) +* Bump types-bleach from 6.2.0.20250809 to 6.3.0.20251115. ([\#19217](https://github.com/element-hq/synapse/issues/19217)) +* Bump types-jsonschema from 4.25.1.20250822 to 4.25.1.20251009. ([\#19252](https://github.com/element-hq/synapse/issues/19252)) + # Synapse 1.143.0 (2025-11-25) ## Dropping support for PostgreSQL 13 diff --git a/changelog.d/17782.misc b/changelog.d/17782.misc deleted file mode 100644 index d7321470d0..0000000000 --- a/changelog.d/17782.misc +++ /dev/null @@ -1 +0,0 @@ -Improve event filtering for Simplified Sliding Sync. \ No newline at end of file diff --git a/changelog.d/18960.bugfix b/changelog.d/18960.bugfix deleted file mode 100644 index 909089f809..0000000000 --- a/changelog.d/18960.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix a bug in the database function for fetching state deltas that could result in unnecessarily long query times. \ No newline at end of file diff --git a/changelog.d/19203.feature b/changelog.d/19203.feature deleted file mode 100644 index d192781b20..0000000000 --- a/changelog.d/19203.feature +++ /dev/null @@ -1 +0,0 @@ -Add experimentatal implememntation of [MSC4380](https://github.com/matrix-org/matrix-spec-proposals/pull/4380) (invite blocking). diff --git a/changelog.d/19207.feature b/changelog.d/19207.feature deleted file mode 100644 index e64562c350..0000000000 --- a/changelog.d/19207.feature +++ /dev/null @@ -1 +0,0 @@ -Allow restarting delayed event timeouts on workers. diff --git a/changelog.d/19208.misc b/changelog.d/19208.misc deleted file mode 100644 index 1948be309b..0000000000 --- a/changelog.d/19208.misc +++ /dev/null @@ -1 +0,0 @@ -Export `SYNAPSE_SUPPORTED_COMPLEMENT_TEST_PACKAGES` environment variable from `scripts-dev/complement.sh`. diff --git a/changelog.d/19209.misc b/changelog.d/19209.misc deleted file mode 100644 index e64ca85d1d..0000000000 --- a/changelog.d/19209.misc +++ /dev/null @@ -1 +0,0 @@ -Refactor `scripts-dev/complement.sh` logic to avoid `exit` to facilitate being able to source it from other scripts (composable). diff --git a/changelog.d/19211.misc b/changelog.d/19211.misc deleted file mode 100644 index d8a4a44662..0000000000 --- a/changelog.d/19211.misc +++ /dev/null @@ -1 +0,0 @@ -Expire sliding sync connections that are too old or have too much pending data. diff --git a/changelog.d/19219.misc b/changelog.d/19219.misc deleted file mode 100644 index 8355729358..0000000000 --- a/changelog.d/19219.misc +++ /dev/null @@ -1 +0,0 @@ -Require an experimental feature flag to be enabled in order for the unstable [MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666) endpoint (`/_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms`) to be available. 
\ No newline at end of file diff --git a/changelog.d/19220.misc b/changelog.d/19220.misc deleted file mode 100644 index e98f5ade61..0000000000 --- a/changelog.d/19220.misc +++ /dev/null @@ -1 +0,0 @@ -Prevent changelog check CI running on @dependabot's PRs even when a human has modified the branch. \ No newline at end of file diff --git a/changelog.d/19221.misc b/changelog.d/19221.misc deleted file mode 100644 index d1faf9cb72..0000000000 --- a/changelog.d/19221.misc +++ /dev/null @@ -1 +0,0 @@ -Auto-fix trailing spaces in multi-line strings and comments when running the lint script. \ No newline at end of file diff --git a/changelog.d/19223.misc b/changelog.d/19223.misc deleted file mode 100644 index 8caebead72..0000000000 --- a/changelog.d/19223.misc +++ /dev/null @@ -1 +0,0 @@ -Move towards using a dedicated `Duration` type. diff --git a/changelog.d/19224.misc b/changelog.d/19224.misc deleted file mode 100644 index 3f8f630c5e..0000000000 --- a/changelog.d/19224.misc +++ /dev/null @@ -1 +0,0 @@ -Improve robustness of the SQL schema linting in CI. diff --git a/changelog.d/19225.removal b/changelog.d/19225.removal deleted file mode 100644 index bed5db07e8..0000000000 --- a/changelog.d/19225.removal +++ /dev/null @@ -1 +0,0 @@ -Stop building release wheels for MacOS. \ No newline at end of file diff --git a/changelog.d/19226.misc b/changelog.d/19226.misc deleted file mode 100644 index c38d1d3ef6..0000000000 --- a/changelog.d/19226.misc +++ /dev/null @@ -1 +0,0 @@ -Add log to determine whether clients are using `/messages` as expected. diff --git a/changelog.d/19228.misc b/changelog.d/19228.misc deleted file mode 100644 index bee72bff9a..0000000000 --- a/changelog.d/19228.misc +++ /dev/null @@ -1 +0,0 @@ -Simplify README and add ESS Getting started section. diff --git a/changelog.d/19229.misc b/changelog.d/19229.misc deleted file mode 100644 index 8caebead72..0000000000 --- a/changelog.d/19229.misc +++ /dev/null @@ -1 +0,0 @@ -Move towards using a dedicated `Duration` type. diff --git a/changelog.d/19230.misc b/changelog.d/19230.misc deleted file mode 100644 index 06704db25b..0000000000 --- a/changelog.d/19230.misc +++ /dev/null @@ -1 +0,0 @@ -Add a unit test for ensuring associated refresh tokens are erased when a device is delted. \ No newline at end of file diff --git a/changelog.d/19235.bugfix b/changelog.d/19235.bugfix deleted file mode 100644 index 1c312351a4..0000000000 --- a/changelog.d/19235.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix v12 rooms when running with `use_frozen_dicts: True`. diff --git a/changelog.d/19239.misc b/changelog.d/19239.misc deleted file mode 100644 index fd5757eb77..0000000000 --- a/changelog.d/19239.misc +++ /dev/null @@ -1 +0,0 @@ -Prompt user to consider adding future deprecations to the changelog in release script. diff --git a/changelog.d/19240.bugfix b/changelog.d/19240.bugfix deleted file mode 100644 index d8490bcc1f..0000000000 --- a/changelog.d/19240.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where invalid `canonical_alias` content would return 500 instead of 400. diff --git a/changelog.d/19243.doc b/changelog.d/19243.doc deleted file mode 100644 index 3a396c88d5..0000000000 --- a/changelog.d/19243.doc +++ /dev/null @@ -1 +0,0 @@ -Document in the `--config-path` help how multiple files are merged - by merging them shallowly. 
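An aside on the `--config-path` entry above: "merged shallowly" means that when several config files are passed, a later file replaces a top-level key wholesale instead of deep-merging its nested contents. A minimal sketch of that semantics (illustrative only, not Synapse's actual config loader):

```python
# Shallow merge: top-level keys from the later file win outright;
# nested structures are replaced as a unit, never combined key-by-key.
base = {"server_name": "example.com", "listeners": [{"port": 8008}]}
override = {"listeners": [{"port": 8080}]}

merged = {**base, **override}
assert merged["server_name"] == "example.com"   # untouched top-level key kept
assert merged["listeners"] == [{"port": 8080}]  # replaced wholesale, not merged
```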
diff --git a/changelog.d/19251.misc b/changelog.d/19251.misc deleted file mode 100644 index 9d0501c3d4..0000000000 --- a/changelog.d/19251.misc +++ /dev/null @@ -1 +0,0 @@ -Fix check of the Rust compiled code being outdated when using source checkout and `.egg-info`. diff --git a/changelog.d/19259.misc b/changelog.d/19259.misc deleted file mode 100644 index bee72bff9a..0000000000 --- a/changelog.d/19259.misc +++ /dev/null @@ -1 +0,0 @@ -Simplify README and add ESS Getting started section. diff --git a/changelog.d/19263.misc b/changelog.d/19263.misc deleted file mode 100644 index 62b7594108..0000000000 --- a/changelog.d/19263.misc +++ /dev/null @@ -1 +0,0 @@ -Stop building macos wheels in CI pipeline. diff --git a/debian/changelog b/debian/changelog index f8cf0c86f9..901c210d60 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.144.0~rc1) stable; urgency=medium + + * New Synapse release 1.144.0rc1. + + -- Synapse Packaging team Tue, 02 Dec 2025 09:11:19 -0700 + matrix-synapse-py3 (1.143.0) stable; urgency=medium * New Synapse release 1.143.0. diff --git a/pyproject.toml b/pyproject.toml index 8cee32ecfa..ce2ecf6363 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "matrix-synapse" -version = "1.143.0" +version = "1.144.0rc1" description = "Homeserver for the Matrix decentralised comms protocol" readme = "README.rst" authors = [ diff --git a/schema/synapse-config.schema.yaml b/schema/synapse-config.schema.yaml index f2d51fdb95..bf9346995d 100644 --- a/schema/synapse-config.schema.yaml +++ b/schema/synapse-config.schema.yaml @@ -1,5 +1,5 @@ $schema: https://element-hq.github.io/synapse/latest/schema/v1/meta.schema.json -$id: https://element-hq.github.io/synapse/schema/synapse/v1.143/synapse-config.schema.json +$id: https://element-hq.github.io/synapse/schema/synapse/v1.144/synapse-config.schema.json type: object properties: modules: From 4cd05baaecb4eb93c64f41785f5224160f388ef1 Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Tue, 2 Dec 2025 20:08:32 +0000 Subject: [PATCH 41/47] Fix bug where `Duration` was logged incorrectly (#19267) ### Pull Request Checklist * [X] Pull request is based on the develop branch * [X] Pull request includes a [changelog file](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#changelog). The entry should: - Be a short description of your change which makes sense to users. "Fixed a bug that prevented receiving messages from other servers." instead of "Moved X method from `EventStore` to `EventWorkerStore`.". - Use markdown where necessary, mostly for `code blocks`. - End with either a period (.) or an exclamation mark (!). - Start with a capital letter. - Feel free to credit yourself, by adding a sentence "Contributed by @github_username." or "Contributed by [Your Name]." to the end of the entry. * [X] [Code style](https://element-hq.github.io/synapse/latest/code_style.html) is correct (run the [linters](https://element-hq.github.io/synapse/latest/development/contributing_guide.html#run-the-linters)) --- changelog.d/19267.bugfix | 1 + synapse/appservice/scheduler.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) create mode 100644 changelog.d/19267.bugfix diff --git a/changelog.d/19267.bugfix b/changelog.d/19267.bugfix new file mode 100644 index 0000000000..6c7ed750ec --- /dev/null +++ b/changelog.d/19267.bugfix @@ -0,0 +1 @@ +Fix bug where `Duration` was logged incorrectly. 
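To make the bug concrete before the one-line fix below: `%f` requires a real number, so handing the logger the `Duration` object itself fails at formatting time and the retry delay is never logged as intended. A minimal, runnable sketch (the `Duration` stand-in only mimics the `Duration(seconds=...)` and `.as_secs()` surface visible in this patch series; the real class lives in `synapse.util.duration`):

```python
import logging

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class Duration:
    """Stand-in for synapse.util.duration.Duration (interface assumed)."""

    def __init__(self, seconds: float = 0, minutes: float = 0) -> None:
        self._seconds = seconds + minutes * 60

    def as_secs(self) -> float:
        return self._seconds


delay = Duration(seconds=2**3)

# Buggy form: "%f" cannot format a Duration object, so the formatting step
# raises and logging reports an error instead of the intended message:
#     logger.info("Scheduling retries on %s in %fs", "service-id", delay)
# Fixed form, matching the scheduler.py change below:
logger.info("Scheduling retries on %s in %fs", "service-id", delay.as_secs())
```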
diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index befb4ae44b..c3a83d140c 100644 --- a/synapse/appservice/scheduler.py +++ b/synapse/appservice/scheduler.py @@ -506,7 +506,7 @@ def __init__( def recover(self) -> None: delay = Duration(seconds=2**self.backoff_counter) - logger.info("Scheduling retries on %s in %fs", self.service.id, delay) + logger.info("Scheduling retries on %s in %fs", self.service.id, delay.as_secs()) self.scheduled_recovery = self.clock.call_later( delay, self.hs.run_as_background_process, From 989c4d2585003a7245dff1f5d3131fbbfaad2488 Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Tue, 2 Dec 2025 13:11:50 -0700 Subject: [PATCH 42/47] Update changelog --- CHANGES.md | 3 ++- changelog.d/19267.bugfix | 1 - 2 files changed, 2 insertions(+), 2 deletions(-) delete mode 100644 changelog.d/19267.bugfix diff --git a/CHANGES.md b/CHANGES.md index a58f3cea21..37c1edcba8 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,4 +1,4 @@ -# synapse 1.144.0rc1 (2025-12-02) +# Synapse 1.144.0rc1 (2025-12-02) Admins using the unstable [MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666) endpoint (`/_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms`), please check [the relevant section in the upgrade notes](https://github.com/element-hq/synapse/blob/develop/docs/upgrade.md#upgrading-to-v11440) as this release contains changes that disable that endpoint by default. @@ -12,6 +12,7 @@ Admins using the unstable [MSC2666](https://github.com/matrix-org/matrix-spec-pr - Fix a bug in the database function for fetching state deltas that could result in unnecessarily long query times. ([\#18960](https://github.com/element-hq/synapse/issues/18960)) - Fix v12 rooms when running with `use_frozen_dicts: True`. ([\#19235](https://github.com/element-hq/synapse/issues/19235)) - Fix bug where invalid `canonical_alias` content would return 500 instead of 400. ([\#19240](https://github.com/element-hq/synapse/issues/19240)) +- Fix bug where `Duration` was logged incorrectly. ([\#19267](https://github.com/element-hq/synapse/issues/19267)) ## Improved Documentation diff --git a/changelog.d/19267.bugfix b/changelog.d/19267.bugfix deleted file mode 100644 index 6c7ed750ec..0000000000 --- a/changelog.d/19267.bugfix +++ /dev/null @@ -1 +0,0 @@ -Fix bug where `Duration` was logged incorrectly. From 1aeb34a1e14e4161d450b31cb6aeaba3adea9ed1 Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Tue, 9 Dec 2025 08:32:23 -0700 Subject: [PATCH 43/47] 1.144.0 --- CHANGES.md | 7 +++++++ debian/changelog | 6 ++++++ pyproject.toml | 2 +- 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/CHANGES.md b/CHANGES.md index 37c1edcba8..ef0b41bd85 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,10 @@ +# Synapse 1.144.0 (2025-12-09) + +No significant changes since 1.144.0rc1. + + + + # Synapse 1.144.0rc1 (2025-12-02) Admins using the unstable [MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666) endpoint (`/_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms`), please check [the relevant section in the upgrade notes](https://github.com/element-hq/synapse/blob/develop/docs/upgrade.md#upgrading-to-v11440) as this release contains changes that disable that endpoint by default. diff --git a/debian/changelog b/debian/changelog index 901c210d60..15ff7cbd9d 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +matrix-synapse-py3 (1.144.0) stable; urgency=medium + + * New Synapse release 1.144.0. 
+ + -- Synapse Packaging team Tue, 09 Dec 2025 08:30:40 -0700 + matrix-synapse-py3 (1.144.0~rc1) stable; urgency=medium * New Synapse release 1.144.0rc1. diff --git a/pyproject.toml b/pyproject.toml index ce2ecf6363..38f5990cc7 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "matrix-synapse" -version = "1.144.0rc1" +version = "1.144.0" description = "Homeserver for the Matrix decentralised comms protocol" readme = "README.rst" authors = [ From 1bfcc9acf102fb902daa546b810786f707f4aa96 Mon Sep 17 00:00:00 2001 From: Devon Hudson Date: Tue, 9 Dec 2025 08:36:59 -0700 Subject: [PATCH 44/47] Lift important notes to top of changelog --- CHANGES.md | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/CHANGES.md b/CHANGES.md index ef0b41bd85..83ab1849e6 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,5 +1,19 @@ # Synapse 1.144.0 (2025-12-09) +## Deprecation of MacOS Python wheels + +The team has decided to deprecate and stop publishing python wheels for MacOS. +Synapse docker images will continue to work on MacOS, as will building Synapse +from source (though note this requires a Rust compiler). + +## Unstable mutual rooms endpoint is now behind an experimental feature flag + +Admins using the unstable [MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666) endpoint (`/_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms`), +please check [the relevant section in the upgrade notes](https://github.com/element-hq/synapse/blob/develop/docs/upgrade.md#upgrading-to-v11440) as this release contains changes +that disable that endpoint by default. + + + No significant changes since 1.144.0rc1. From a1aeb7938c0fdea44915c508d582f5f1f804d4d7 Mon Sep 17 00:00:00 2001 From: Jason Little Date: Tue, 13 Jan 2026 11:15:24 -0600 Subject: [PATCH 45/47] chore: Adjust famedly metrics to use new Duration class for timedeltas --- tests/metrics/test_common_usage_metrics.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/metrics/test_common_usage_metrics.py b/tests/metrics/test_common_usage_metrics.py index b0a75768a6..8666754eb5 100644 --- a/tests/metrics/test_common_usage_metrics.py +++ b/tests/metrics/test_common_usage_metrics.py @@ -4,6 +4,7 @@ from synapse.server import HomeServer from synapse.types import create_requester from synapse.util.clock import Clock +from synapse.util.duration import Duration from tests.unittest import FederatingHomeserverTestCase @@ -159,7 +160,7 @@ def test_retained_users_gauge_update(self) -> None: # start the user_daily_visits table update loop self.clock.looping_call( self.hs.get_datastores().main.generate_user_daily_visits, - 5 * 60 * 1000, + Duration(minutes=5), ) metrics = self.get_success(self.manager.get_metrics()) From 3abefb55d90a7ac09bf932992c2a20c37248b2b8 Mon Sep 17 00:00:00 2001 From: FrenchGithubUser Date: Wed, 14 Jan 2026 12:24:24 +0100 Subject: [PATCH 46/47] ci: generate requirements.txt file from synapse's poetry.lock for invite-checker and token-authenticator tests (#227) --- .github/workflows/famedly-tests.yml | 72 +++++++++++++++++++++-------- 1 file changed, 52 insertions(+), 20 deletions(-) diff --git a/.github/workflows/famedly-tests.yml b/.github/workflows/famedly-tests.yml index b95df7d952..671b9c09af 100644 --- a/.github/workflows/famedly-tests.yml +++ b/.github/workflows/famedly-tests.yml @@ -386,8 +386,15 @@ jobs: with: python-version: "3.13" - - name: Install hatch - run: pip install hatch + - name: Install hatch and poetry + run: pip install hatch poetry 
poetry-plugin-export + + - name: Generate requirements from Synapse lockfile + # hatch can't read synapse's lock file, we export it in a format hatch can use, + # this allows us to install synapse dependencies on the version they are locked at, + # this avoids errors due to new dependency versions with breaking changes. + run: | + poetry export --without-hashes --format requirements.txt --output "${{ github.workspace }}/synapse-invite-checker/synapse-requirements.txt" - name: Determine synapse git ref id: synapse-ref @@ -402,17 +409,30 @@ jobs: - name: Update dependency to the current branch working-directory: synapse-invite-checker + # the synapse dependency of the invite-checker is already pointing to synapse's master branch + # we skip the branch update when the CI runs on master run: | - sed -i 's|"matrix-synapse[^"]*"|"matrix-synapse @ git+https://github.com/${{ steps.synapse-ref.outputs.repo }}.git@${{ steps.synapse-ref.outputs.ref }}"|' pyproject.toml - # Check if the file was actually modified - if git diff --exit-code pyproject.toml > /dev/null; then - echo "::error::The sed command did not modify pyproject.toml. Check if the 'matrix-synapse' dependency exists in the file." - exit 1 + if [ "${{ steps.synapse-ref.outputs.ref }}" != "master" ]; then + sed -i 's|"matrix-synapse[^"]*"|"matrix-synapse @ git+https://github.com/${{ steps.synapse-ref.outputs.repo }}.git@${{ steps.synapse-ref.outputs.ref }}"|' pyproject.toml + # Check if the file was actually modified + if git diff --exit-code pyproject.toml > /dev/null; then + echo "::error::The sed command did not modify pyproject.toml. Check if the 'matrix-synapse' dependency exists in the file." + exit 1 + fi fi - name: Run invite-checker tests working-directory: synapse-invite-checker - run: hatch run cov + run: hatch run pip install -r synapse-requirements.txt && hatch run cov + + - name: Display Hatch Environment Info + if: always() + working-directory: synapse-invite-checker + run: | + echo "### Hatch Environment Details" + hatch env show + echo "### Installed Packages" + hatch run pip freeze token-authenticator: if: ${{ !failure() && !cancelled() }} @@ -435,8 +455,15 @@ jobs: with: python-version: "3.13" - - name: Install hatch - run: pip install hatch + - name: Install hatch and poetry + run: pip install hatch poetry poetry-plugin-export + + - name: Generate requirements from Synapse lockfile + # hatch can't read synapse's lock file, we export it in a format hatch can use + # this allows us to install synapse dependencies on the version they are locked at, + # this avoids errors due to new dependency versions with breaking changes + run: | + poetry export --without-hashes --format requirements.txt --output "${{ github.workspace }}/synapse-token-authenticator/synapse-requirements.txt" - name: Determine synapse git ref id: synapse-ref @@ -451,21 +478,26 @@ jobs: - name: Update dependency to the current branch working-directory: synapse-token-authenticator - # the synapse dependency of the token-authenticator is already pointing to synapse's master branch - # we skip the branch update when the CI runs on master run: | - if [ "${{ steps.synapse-ref.outputs.ref }}" != "master" ]; then - sed -i 's|"matrix-synapse[^"]*"|"matrix-synapse @ git+https://github.com/${{ steps.synapse-ref.outputs.repo }}.git@${{ steps.synapse-ref.outputs.ref }}"|' pyproject.toml - # Check if the file was actually modified - if git diff --exit-code pyproject.toml > /dev/null; then - echo "::error::The sed command did not modify pyproject.toml. 
Check if the 'matrix-synapse' dependency exists in the file."
            exit 1
          fi

      - name: Run token-authenticator tests
        working-directory: synapse-token-authenticator
        run: hatch run pip install -r synapse-requirements.txt && hatch run cov
+
+      - name: Display Hatch Environment Info
+        if: always()
+        working-directory: synapse-token-authenticator
+        run: |
+          echo "### Hatch Environment Details"
+          hatch env show
+          echo "### Installed Packages"
+          hatch run pip freeze

   otlp:
     if: ${{ !failure() && !cancelled() }}

From f911acc83cdd5dc4a57f85685c6cec70fbb56123 Mon Sep 17 00:00:00 2001
From: Jason Little
Date: Tue, 13 Jan 2026 11:44:16 -0600
Subject: [PATCH 47/47] Update changelog

---
 CHANGES.md | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/CHANGES.md b/CHANGES.md
index c5459b649a..328518c77a 100644
--- a/CHANGES.md
+++ b/CHANGES.md
@@ -17,6 +17,10 @@
 that disable that endpoint by default.

 No significant changes since 1.144.0rc1.

+### Famedly additions for v1.143.0_1
+- ci: generate requirements.txt file from synapse's poetry.lock for invite-checker and token-authenticator tests ([\#227](https://github.com/famedly/synapse/pull/227)) (FrenchGithubUser)
+- ci: fix tests failing as the token-authenticator's synapse dependency is already pointing to the master branch ([\#226](https://github.com/famedly/synapse/pull/226)) (FrenchGithubUser)
+- chore: Remove unused make_release.sh script and update README.rst ([\#224](https://github.com/famedly/synapse/pull/224)) (Jason Little)

 # Synapse 1.144.0rc1 (2025-12-02)