diff --git a/.github/workflows/docker.yml b/.github/workflows/docker.yml index 52a0762efc..aaf1e22d3c 100644 --- a/.github/workflows/docker.yml +++ b/.github/workflows/docker.yml @@ -31,7 +31,7 @@ jobs: uses: docker/setup-buildx-action@e468171a9de216ec08956ac3ada2f0791b6bd435 # v3.11.1 - name: Checkout repository - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Extract version from pyproject.toml # Note: explicitly requesting bash will mean bash is invoked with `-eo pipefail`, see @@ -123,7 +123,7 @@ jobs: uses: sigstore/cosign-installer@faadad0cce49287aee09b3a48701e75088a2c6ad # v4.0.0 - name: Calculate docker image tag - uses: docker/metadata-action@318604b99e75e41977312d83839a89be02ca4893 # v5.9.0 + uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0 with: images: ${{ matrix.repository }} flavor: | diff --git a/.github/workflows/docs-pr.yaml b/.github/workflows/docs-pr.yaml index 6a61dd5fb1..4d28533a27 100644 --- a/.github/workflows/docs-pr.yaml +++ b/.github/workflows/docs-pr.yaml @@ -13,7 +13,7 @@ jobs: name: GitHub Pages runs-on: ubuntu-latest steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: # Fetch all history so that the schema_versions script works. 
fetch-depth: 0 @@ -24,7 +24,7 @@ jobs: mdbook-version: '0.4.17' - name: Setup python - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: "3.x" @@ -50,7 +50,7 @@ jobs: name: Check links in documentation runs-on: ubuntu-latest steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Setup mdbook uses: peaceiris/actions-mdbook@ee69d230fe19748b7abf22df32acaa93833fad08 # v2.0.0 diff --git a/.github/workflows/docs.yaml b/.github/workflows/docs.yaml index a342a94add..e8cfd02e47 100644 --- a/.github/workflows/docs.yaml +++ b/.github/workflows/docs.yaml @@ -54,7 +54,7 @@ jobs: - pre steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: # Fetch all history so that the schema_versions script works. 
fetch-depth: 0 @@ -81,7 +81,7 @@ jobs: run: echo 'window.SYNAPSE_VERSION = "${{ needs.pre.outputs.branch-version }}";' > ./docs/website_files/version.js - name: Setup python - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: "3.x" diff --git a/.github/workflows/famedly-tests.yml b/.github/workflows/famedly-tests.yml index b95df7d952..671b9c09af 100644 --- a/.github/workflows/famedly-tests.yml +++ b/.github/workflows/famedly-tests.yml @@ -386,8 +386,15 @@ jobs: with: python-version: "3.13" - - name: Install hatch - run: pip install hatch + - name: Install hatch and poetry + run: pip install hatch poetry poetry-plugin-export + + - name: Generate requirements from Synapse lockfile + # hatch can't read synapse's lock file, we export it in a format hatch can use, + # this allows us to install synapse dependencies on the version they are locked at, + # this avoids errors due to new dependency versions with breaking changes. + run: | + poetry export --without-hashes --format requirements.txt --output "${{ github.workspace }}/synapse-invite-checker/synapse-requirements.txt" - name: Determine synapse git ref id: synapse-ref @@ -402,17 +409,30 @@ jobs: - name: Update dependency to the current branch working-directory: synapse-invite-checker + # the synapse dependency of the invite-checker is already pointing to synapse's master branch + # we skip the branch update when the CI runs on master run: | - sed -i 's|"matrix-synapse[^"]*"|"matrix-synapse @ git+https://github.com/${{ steps.synapse-ref.outputs.repo }}.git@${{ steps.synapse-ref.outputs.ref }}"|' pyproject.toml - # Check if the file was actually modified - if git diff --exit-code pyproject.toml > /dev/null; then - echo "::error::The sed command did not modify pyproject.toml. Check if the 'matrix-synapse' dependency exists in the file." 
- exit 1 + if [ "${{ steps.synapse-ref.outputs.ref }}" != "master" ]; then + sed -i 's|"matrix-synapse[^"]*"|"matrix-synapse @ git+https://github.com/${{ steps.synapse-ref.outputs.repo }}.git@${{ steps.synapse-ref.outputs.ref }}"|' pyproject.toml + # Check if the file was actually modified + if git diff --exit-code pyproject.toml > /dev/null; then + echo "::error::The sed command did not modify pyproject.toml. Check if the 'matrix-synapse' dependency exists in the file." + exit 1 + fi fi - name: Run invite-checker tests working-directory: synapse-invite-checker - run: hatch run cov + run: hatch run pip install -r synapse-requirements.txt && hatch run cov + + - name: Display Hatch Environment Info + if: always() + working-directory: synapse-invite-checker + run: | + echo "### Hatch Environment Details" + hatch env show + echo "### Installed Packages" + hatch run pip freeze token-authenticator: if: ${{ !failure() && !cancelled() }} @@ -435,8 +455,15 @@ jobs: with: python-version: "3.13" - - name: Install hatch - run: pip install hatch + - name: Install hatch and poetry + run: pip install hatch poetry poetry-plugin-export + + - name: Generate requirements from Synapse lockfile + # hatch can't read synapse's lock file, we export it in a format hatch can use + # this allows us to install synapse dependencies on the version they are locked at, + # this avoids errors due to new dependency versions with breaking changes + run: | + poetry export --without-hashes --format requirements.txt --output "${{ github.workspace }}/synapse-token-authenticator/synapse-requirements.txt" - name: Determine synapse git ref id: synapse-ref @@ -451,21 +478,26 @@ jobs: - name: Update dependency to the current branch working-directory: synapse-token-authenticator - # the synapse dependency of the token-authenticator is already pointing to synapse's master branch - # we skip the branch update when the CI runs on master run: | - if [ "${{ steps.synapse-ref.outputs.ref }}" != "master" ]; then - 
sed -i 's|"matrix-synapse[^"]*"|"matrix-synapse @ git+https://github.com/${{ steps.synapse-ref.outputs.repo }}.git@${{ steps.synapse-ref.outputs.ref }}"|' pyproject.toml - # Check if the file was actually modified - if git diff --exit-code pyproject.toml > /dev/null; then - echo "::error::The sed command did not modify pyproject.toml. Check if the 'matrix-synapse' dependency exists in the file." - exit 1 - fi + sed -i 's|"matrix-synapse[^"]*"|"matrix-synapse @ git+https://github.com/${{ steps.synapse-ref.outputs.repo }}.git@${{ steps.synapse-ref.outputs.ref }}"|' pyproject.toml + # Check if the file was actually modified + if git diff --exit-code pyproject.toml > /dev/null; then + echo "::error::The sed command did not modify pyproject.toml. Check if the 'matrix-synapse' dependency exists in the file." + exit 1 fi - name: Run token-authenticator tests working-directory: synapse-token-authenticator - run: hatch run cov + run: hatch run pip install -r synapse-requirements.txt && hatch run cov + + - name: Display Hatch Environment Info + if: always() + working-directory: synapse-token-authenticator + run: | + echo "### Hatch Environment Details" + hatch env show + echo "### Installed Packages" + hatch run pip freeze otlp: if: ${{ !failure() && !cancelled() }} diff --git a/.github/workflows/fix_lint.yaml b/.github/workflows/fix_lint.yaml index c33481a51e..9daea3f378 100644 --- a/.github/workflows/fix_lint.yaml +++ b/.github/workflows/fix_lint.yaml @@ -18,14 +18,14 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Install Rust uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master with: toolchain: ${{ env.RUST_VERSION }} components: clippy, rustfmt - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 
# v2.8.2 - name: Setup Poetry uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0 diff --git a/.github/workflows/latest_deps.yml b/.github/workflows/latest_deps.yml index 2076a1c1e1..c356ee8e3d 100644 --- a/.github/workflows/latest_deps.yml +++ b/.github/workflows/latest_deps.yml @@ -42,12 +42,12 @@ jobs: if: needs.check_repo.outputs.should_run_workflow == 'true' runs-on: ubuntu-latest steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Install Rust uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master with: toolchain: ${{ env.RUST_VERSION }} - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 # The dev dependencies aren't exposed in the wheel metadata (at least with current # poetry-core versions), so we install with poetry. 
@@ -77,13 +77,13 @@ jobs: postgres-version: "14" steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Install Rust uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master with: toolchain: ${{ env.RUST_VERSION }} - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 - run: sudo apt-get -qq install xmlsec1 - name: Set up PostgreSQL ${{ matrix.postgres-version }} @@ -93,7 +93,7 @@ jobs: -e POSTGRES_PASSWORD=postgres \ -e POSTGRES_INITDB_ARGS="--lc-collate C --lc-ctype C --encoding UTF8" \ postgres:${{ matrix.postgres-version }} - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: "3.x" - run: pip install .[all,test] @@ -152,13 +152,13 @@ jobs: BLACKLIST: ${{ matrix.workers && 'synapse-blacklist-with-workers' }} steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Install Rust uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master with: toolchain: ${{ env.RUST_VERSION }} - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 - name: Ensure sytest runs `pip install` # Delete the lockfile so sytest will `pip install` rather than `poetry install` @@ -202,14 +202,14 @@ jobs: steps: - name: Check out synapse codebase - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: path: synapse - name: Prepare Complement's Prerequisites run: 
synapse/.ci/scripts/setup_complement_prerequisites.sh - - uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0 + - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: cache-dependency-path: complement/go.sum go-version-file: complement/go.mod @@ -234,7 +234,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - uses: JasonEtco/create-an-issue@1b14a70e4d8dc185e5cc76d3bec9eab20257b2c5 # v2.9.2 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/poetry_lockfile.yaml b/.github/workflows/poetry_lockfile.yaml index 19468c2d92..5c139bf574 100644 --- a/.github/workflows/poetry_lockfile.yaml +++ b/.github/workflows/poetry_lockfile.yaml @@ -16,8 +16,8 @@ jobs: name: "Check locked dependencies have sdists" runs-on: ubuntu-latest steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.x' - run: pip install tomli diff --git a/.github/workflows/push_complement_image.yml b/.github/workflows/push_complement_image.yml index e08775e588..ed82482505 100644 --- a/.github/workflows/push_complement_image.yml +++ b/.github/workflows/push_complement_image.yml @@ -33,17 +33,17 @@ jobs: packages: write steps: - name: Checkout specific branch (debug build) - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 if: github.event_name == 'workflow_dispatch' with: ref: ${{ inputs.branch }} - name: Checkout clean copy of develop (scheduled build) - uses: 
actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 if: github.event_name == 'schedule' with: ref: develop - name: Checkout clean copy of master (on-push) - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 if: github.event_name == 'push' with: ref: master @@ -55,7 +55,7 @@ jobs: password: ${{ secrets.GITHUB_TOKEN }} - name: Work out labels for complement image id: meta - uses: docker/metadata-action@318604b99e75e41977312d83839a89be02ca4893 # v5.9.0 + uses: docker/metadata-action@c299e40c65443455700f0fdfc63efafe5b349051 # v5.10.0 with: images: ghcr.io/${{ github.repository }}/complement-synapse tags: | diff --git a/.github/workflows/release-artifacts.yml b/.github/workflows/release-artifacts.yml index c88546c3bf..531680b989 100644 --- a/.github/workflows/release-artifacts.yml +++ b/.github/workflows/release-artifacts.yml @@ -27,8 +27,8 @@ jobs: name: "Calculate list of debian distros" runs-on: ubuntu-latest steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: "3.x" - id: set-distros @@ -55,7 +55,7 @@ jobs: steps: - name: Checkout - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: path: src @@ -74,7 +74,7 @@ jobs: ${{ runner.os }}-buildx- - name: Set up python - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: "3.x" @@ -114,27 +114,20 @@ jobs: os: - ubuntu-24.04 - 
ubuntu-24.04-arm - - macos-14 # This uses arm64 - - macos-15-intel # This uses x86-64 # is_pr is a flag used to exclude certain jobs from the matrix on PRs. # It is not read by the rest of the workflow. is_pr: - ${{ startsWith(github.ref, 'refs/pull/') }} exclude: - # Don't build macos wheels on PR CI. - - is_pr: true - os: "macos-15-intel" - - is_pr: true - os: "macos-14" # Don't build aarch64 wheels on PR CI. - is_pr: true os: "ubuntu-24.04-arm" steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: # setup-python@v4 doesn't impose a default python version. Need to use 3.x # here, because `python` on osx points to Python 2.7. @@ -170,8 +163,8 @@ jobs: if: ${{ !startsWith(github.ref, 'refs/pull/') }} steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: "3.10" diff --git a/.github/workflows/schema.yaml b/.github/workflows/schema.yaml index 6c416e762d..0755a5f023 100644 --- a/.github/workflows/schema.yaml +++ b/.github/workflows/schema.yaml @@ -14,8 +14,8 @@ jobs: name: Ensure Synapse config schema is valid runs-on: ubuntu-latest steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: "3.x" - name: Install check-jsonschema @@ 
-40,8 +40,8 @@ jobs: name: Ensure generated documentation is up-to-date runs-on: ubuntu-latest steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: "3.x" - name: Install PyYAML diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index f320e89069..aff2832b94 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -86,12 +86,12 @@ jobs: if: ${{ needs.changes.outputs.linting == 'true' }} steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Install Rust uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master with: toolchain: ${{ env.RUST_VERSION }} - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0 with: python-version: "3.x" @@ -106,18 +106,18 @@ jobs: if: ${{ needs.changes.outputs.linting == 'true' }} steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: "3.x" - - run: "pip install 'click==8.1.1' 'GitPython>=3.1.20'" + - run: "pip install 'click==8.1.1' 'GitPython>=3.1.20' 'sqlglot>=28.0.0'" - run: scripts-dev/check_schema_delta.py --force-colors check-lockfile: runs-on: ubuntu-latest steps: - - uses: 
actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: "3.x" - run: .ci/scripts/check_lockfile.py @@ -129,7 +129,7 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Setup Poetry uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0 @@ -151,13 +151,13 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Install Rust uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master with: toolchain: ${{ env.RUST_VERSION }} - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 - name: Setup Poetry uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0 @@ -187,19 +187,20 @@ jobs: lint-crlf: runs-on: ubuntu-latest steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Check line endings run: scripts-dev/check_line_terminators.sh lint-newsfile: - if: ${{ (github.base_ref == 'develop' || contains(github.base_ref, 'release-')) && github.actor != 'dependabot[bot]' }} + # Only run on pull_request events, targeting develop/release branches, and skip when the PR author is dependabot[bot]. 
+ if: ${{ github.event_name == 'pull_request' && (github.base_ref == 'develop' || contains(github.base_ref, 'release-')) && github.event.pull_request.user.login != 'dependabot[bot]' }} runs-on: ubuntu-latest steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: ref: ${{ github.event.pull_request.head.sha }} fetch-depth: 0 - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: "3.x" - run: "pip install 'towncrier>=18.6.0rc1'" @@ -213,14 +214,14 @@ jobs: if: ${{ needs.changes.outputs.rust == 'true' }} steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Install Rust uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master with: components: clippy toolchain: ${{ env.RUST_VERSION }} - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 - run: cargo clippy -- -D warnings @@ -232,14 +233,14 @@ jobs: if: ${{ needs.changes.outputs.rust == 'true' }} steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Install Rust uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master with: toolchain: nightly-2025-04-23 components: clippy - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 - run: cargo clippy --all-features -- -D warnings @@ -250,13 +251,13 @@ jobs: steps: - name: Checkout repository - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # 
v5.0.0 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Install Rust uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master with: toolchain: ${{ env.RUST_VERSION }} - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 - name: Setup Poetry uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0 @@ -286,7 +287,7 @@ jobs: if: ${{ needs.changes.outputs.rust == 'true' }} steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Install Rust uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master @@ -295,7 +296,7 @@ jobs: # `.rustfmt.toml`. toolchain: nightly-2025-04-23 components: rustfmt - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 - run: cargo fmt --check @@ -306,8 +307,8 @@ jobs: needs: changes if: ${{ needs.changes.outputs.linting_readme == 'true' }} steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: "3.x" - run: "pip install rstcheck" @@ -354,8 +355,8 @@ jobs: needs: linting-done runs-on: ubuntu-latest steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: 
"3.x" - id: get-matrix @@ -375,7 +376,7 @@ jobs: job: ${{ fromJson(needs.calculate-test-jobs.outputs.trial_test_matrix) }} steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - run: sudo apt-get -qq install xmlsec1 - name: Set up PostgreSQL ${{ matrix.job.postgres-version }} if: ${{ matrix.job.postgres-version }} @@ -393,7 +394,7 @@ jobs: uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master with: toolchain: ${{ env.RUST_VERSION }} - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0 with: @@ -431,13 +432,13 @@ jobs: - changes runs-on: ubuntu-22.04 steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Install Rust uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master with: toolchain: ${{ env.RUST_VERSION }} - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 # There aren't wheels for some of the older deps, so we need to install # their build dependencies @@ -446,7 +447,7 @@ jobs: sudo apt-get -qq install build-essential libffi-dev python3-dev \ libxml2-dev libxslt-dev xmlsec1 zlib1g-dev libjpeg-dev libwebp-dev - - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0 + - uses: actions/setup-python@83679a892e2d95755f2dac6acb0bfd1e9ac5d548 # v6.1.0 with: python-version: '3.10' @@ -496,7 +497,7 @@ jobs: extras: ["all"] steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 
# Install libs necessary for PyPy to build binary wheels for dependencies - run: sudo apt-get -qq install xmlsec1 libxml2-dev libxslt-dev - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0 @@ -546,7 +547,7 @@ jobs: job: ${{ fromJson(needs.calculate-test-jobs.outputs.sytest_test_matrix) }} steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Prepare test blacklist run: cat sytest-blacklist .ci/worker-blacklist > synapse-blacklist-with-workers @@ -554,7 +555,7 @@ jobs: uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master with: toolchain: ${{ env.RUST_VERSION }} - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 - name: Run SyTest run: /bootstrap.sh synapse @@ -593,7 +594,7 @@ jobs: --health-retries 5 steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - run: sudo apt-get -qq install xmlsec1 postgresql-client - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0 with: @@ -637,7 +638,7 @@ jobs: --health-retries 5 steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Add PostgreSQL apt repository # We need a version of pg_dump that can handle the version of # PostgreSQL being tested against. 
The Ubuntu package repository lags @@ -692,7 +693,7 @@ jobs: steps: - name: Checkout synapse codebase - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: path: synapse @@ -700,12 +701,12 @@ jobs: uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master with: toolchain: ${{ env.RUST_VERSION }} - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 - name: Prepare Complement's Prerequisites run: synapse/.ci/scripts/setup_complement_prerequisites.sh - - uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0 + - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: cache-dependency-path: complement/go.sum go-version-file: complement/go.mod @@ -728,13 +729,13 @@ jobs: - changes steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Install Rust uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master with: toolchain: ${{ env.RUST_VERSION }} - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 - run: cargo test @@ -748,13 +749,13 @@ jobs: - changes steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Install Rust uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master with: toolchain: nightly-2022-12-01 - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 - run: cargo bench --no-run diff --git 
a/.github/workflows/triage_labelled.yml b/.github/workflows/triage_labelled.yml index d291eea3a1..34222b7d1b 100644 --- a/.github/workflows/triage_labelled.yml +++ b/.github/workflows/triage_labelled.yml @@ -22,7 +22,7 @@ jobs: # This field is case-sensitive. TARGET_STATUS: Needs info steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: # Only clone the script file we care about, instead of the whole repo. sparse-checkout: .ci/scripts/triage_labelled_issue.sh diff --git a/.github/workflows/twisted_trunk.yml b/.github/workflows/twisted_trunk.yml index 11b7bfe143..325902f131 100644 --- a/.github/workflows/twisted_trunk.yml +++ b/.github/workflows/twisted_trunk.yml @@ -43,13 +43,13 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Install Rust uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master with: toolchain: ${{ env.RUST_VERSION }} - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 - uses: matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0 with: @@ -70,14 +70,14 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - run: sudo apt-get -qq install xmlsec1 - name: Install Rust uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master with: toolchain: ${{ env.RUST_VERSION }} - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 - uses: 
matrix-org/setup-python-poetry@5bbf6603c5c930615ec8a29f1b5d7d258d905aa4 # v2.0.0 with: @@ -117,13 +117,13 @@ jobs: - ${{ github.workspace }}:/src steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - name: Install Rust uses: dtolnay/rust-toolchain@e97e2d8cc328f1b50210efc529dca0028893a2d9 # master with: toolchain: ${{ env.RUST_VERSION }} - - uses: Swatinem/rust-cache@f13886b937689c021905a6b90929199931d60db1 # v2.8.1 + - uses: Swatinem/rust-cache@779680da715d629ac1d338a641029a2f4372abb5 # v2.8.2 - name: Patch dependencies # Note: The poetry commands want to create a virtualenv in /src/.venv/, @@ -175,14 +175,14 @@ jobs: steps: - name: Run actions/checkout@v4 for synapse - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 with: path: synapse - name: Prepare Complement's Prerequisites run: synapse/.ci/scripts/setup_complement_prerequisites.sh - - uses: actions/setup-go@44694675825211faa026b3c33043df3e48a5fa00 # v6.0.0 + - uses: actions/setup-go@4dc6199c7b1a012772edbd06daecab0f50c9053c # v6.1.0 with: cache-dependency-path: complement/go.sum go-version-file: complement/go.mod @@ -217,7 +217,7 @@ jobs: runs-on: ubuntu-latest steps: - - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0 + - uses: actions/checkout@1af3b93b6815bc44a9784bd300feb67ff0d1eeb3 # v6.0.0 - uses: JasonEtco/create-an-issue@1b14a70e4d8dc185e5cc76d3bec9eab20257b2c5 # v2.9.2 env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/CHANGES.md b/CHANGES.md index 2d9ac8b065..328518c77a 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,89 @@ +# Synapse 1.144.0 (2025-12-09) + +## Deprecation of MacOS Python wheels + +The team has decided to deprecate and stop publishing python wheels for MacOS. 
+Synapse docker images will continue to work on MacOS, as will building Synapse +from source (though note this requires a Rust compiler). + +## Unstable mutual rooms endpoint is now behind an experimental feature flag + +Admins using the unstable [MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666) endpoint (`/_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms`), +please check [the relevant section in the upgrade notes](https://github.com/element-hq/synapse/blob/develop/docs/upgrade.md#upgrading-to-v11440) as this release contains changes +that disable that endpoint by default. + + + +No significant changes since 1.144.0rc1. + + +### Famedly additions for v1.143.0_1 +- ci: generate requirements.txt file from synapse's poetry.lock for invite-checker and token-authenticator tests ([\#227](https://github.com/famedly/synapse/pull/227)) (FrenchGithubUser) +- ci: fix tests failing as the token-authenticator's synapse dependency is already pointing to the master branch ([\#226](https://github.com/famedly/synapse/pull/226)) (FrenchGithubUser) +- chore: Remove unused make_release.sh script and update README.rst ([\#224](https://github.com/famedly/synapse/pull/224)) (Jason Little) + + +# Synapse 1.144.0rc1 (2025-12-02) + +Admins using the unstable [MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666) endpoint (`/_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms`), please check [the relevant section in the upgrade notes](https://github.com/element-hq/synapse/blob/develop/docs/upgrade.md#upgrading-to-v11440) as this release contains changes that disable that endpoint by default. + +## Features + +- Add experimental implementation of [MSC4380](https://github.com/matrix-org/matrix-spec-proposals/pull/4380) (invite blocking). ([\#19203](https://github.com/element-hq/synapse/issues/19203)) +- Allow restarting delayed event timeouts on workers. 
([\#19207](https://github.com/element-hq/synapse/issues/19207)) + +## Bugfixes + +- Fix a bug in the database function for fetching state deltas that could result in unnecessarily long query times. ([\#18960](https://github.com/element-hq/synapse/issues/18960)) +- Fix v12 rooms when running with `use_frozen_dicts: True`. ([\#19235](https://github.com/element-hq/synapse/issues/19235)) +- Fix bug where invalid `canonical_alias` content would return 500 instead of 400. ([\#19240](https://github.com/element-hq/synapse/issues/19240)) +- Fix bug where `Duration` was logged incorrectly. ([\#19267](https://github.com/element-hq/synapse/issues/19267)) + +## Improved Documentation + +- Document in the `--config-path` help how multiple files are merged - by merging them shallowly. ([\#19243](https://github.com/element-hq/synapse/issues/19243)) + +## Deprecations and Removals + +- Stop building release wheels for MacOS. ([\#19225](https://github.com/element-hq/synapse/issues/19225)) + +## Internal Changes + +- Improve event filtering for Simplified Sliding Sync. ([\#17782](https://github.com/element-hq/synapse/issues/17782)) +- Export `SYNAPSE_SUPPORTED_COMPLEMENT_TEST_PACKAGES` environment variable from `scripts-dev/complement.sh`. ([\#19208](https://github.com/element-hq/synapse/issues/19208)) +- Refactor `scripts-dev/complement.sh` logic to avoid `exit` to facilitate being able to source it from other scripts (composable). ([\#19209](https://github.com/element-hq/synapse/issues/19209)) +- Expire sliding sync connections that are too old or have too much pending data. ([\#19211](https://github.com/element-hq/synapse/issues/19211)) +- Require an experimental feature flag to be enabled in order for the unstable [MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666) endpoint (`/_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms`) to be available. 
([\#19219](https://github.com/element-hq/synapse/issues/19219)) +- Prevent changelog check CI running on @dependabot's PRs even when a human has modified the branch. ([\#19220](https://github.com/element-hq/synapse/issues/19220)) +- Auto-fix trailing spaces in multi-line strings and comments when running the lint script. ([\#19221](https://github.com/element-hq/synapse/issues/19221)) +- Move towards using a dedicated `Duration` type. ([\#19223](https://github.com/element-hq/synapse/issues/19223), [\#19229](https://github.com/element-hq/synapse/issues/19229)) +- Improve robustness of the SQL schema linting in CI. ([\#19224](https://github.com/element-hq/synapse/issues/19224)) +- Add log to determine whether clients are using `/messages` as expected. ([\#19226](https://github.com/element-hq/synapse/issues/19226)) +- Simplify README and add ESS Getting started section. ([\#19228](https://github.com/element-hq/synapse/issues/19228), [\#19259](https://github.com/element-hq/synapse/issues/19259)) +- Add a unit test for ensuring associated refresh tokens are erased when a device is deleted. ([\#19230](https://github.com/element-hq/synapse/issues/19230)) +- Prompt user to consider adding future deprecations to the changelog in release script. ([\#19239](https://github.com/element-hq/synapse/issues/19239)) +- Fix check of the Rust compiled code being outdated when using source checkout and `.egg-info`. ([\#19251](https://github.com/element-hq/synapse/issues/19251)) +- Stop building macos wheels in CI pipeline. ([\#19263](https://github.com/element-hq/synapse/issues/19263)) + + + +### Updates to locked dependencies + +* Bump Swatinem/rust-cache from 2.8.1 to 2.8.2. ([\#19244](https://github.com/element-hq/synapse/issues/19244)) +* Bump actions/checkout from 5.0.0 to 6.0.0. ([\#19213](https://github.com/element-hq/synapse/issues/19213)) +* Bump actions/setup-go from 6.0.0 to 6.1.0. 
([\#19214](https://github.com/element-hq/synapse/issues/19214)) +* Bump actions/setup-python from 6.0.0 to 6.1.0. ([\#19245](https://github.com/element-hq/synapse/issues/19245)) +* Bump attrs from 25.3.0 to 25.4.0. ([\#19215](https://github.com/element-hq/synapse/issues/19215)) +* Bump docker/metadata-action from 5.9.0 to 5.10.0. ([\#19246](https://github.com/element-hq/synapse/issues/19246)) +* Bump http from 1.3.1 to 1.4.0. ([\#19249](https://github.com/element-hq/synapse/issues/19249)) +* Bump pydantic from 2.12.4 to 2.12.5. ([\#19250](https://github.com/element-hq/synapse/issues/19250)) +* Bump pyopenssl from 25.1.0 to 25.3.0. ([\#19248](https://github.com/element-hq/synapse/issues/19248)) +* Bump rpds-py from 0.28.0 to 0.29.0. ([\#19216](https://github.com/element-hq/synapse/issues/19216)) +* Bump rpds-py from 0.29.0 to 0.30.0. ([\#19247](https://github.com/element-hq/synapse/issues/19247)) +* Bump sentry-sdk from 2.44.0 to 2.46.0. ([\#19218](https://github.com/element-hq/synapse/issues/19218)) +* Bump types-bleach from 6.2.0.20250809 to 6.3.0.20251115. ([\#19217](https://github.com/element-hq/synapse/issues/19217)) +* Bump types-jsonschema from 4.25.1.20250822 to 4.25.1.20251009. ([\#19252](https://github.com/element-hq/synapse/issues/19252)) + # Synapse 1.143.0 (2025-11-25) ## Dropping support for PostgreSQL 13 @@ -23,7 +109,7 @@ No significant changes since 1.143.0rc2. 
- bump: Synapse Invite Checker to version v0.4.13[(b601d6f)](https://github.com/famedly/synapse/pull/220/commits/b601d6fd4bd25d9909bcfba0a595538133340f88) (Jason Little) - tests: run the invite-checker and token-authenticator tests in the CI([\#222](https://github.com/famedly/synapse/pull/222/commits/3dd03b161807d868edbe8c26d46c80264ccca0c9)) (FrenchGithubUser) -# synapse 1.143.0rc2 (2025-11-18) +# Synapse 1.143.0rc2 (2025-11-18) ## Dropping support for PostgreSQL 13 diff --git a/Cargo.lock b/Cargo.lock index c89d0829ba..007428a380 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -374,12 +374,11 @@ checksum = "7f24254aa9a54b5c858eaee2f5bccdb46aaf0e486a595ed5fd8f86ba55232a70" [[package]] name = "http" -version = "1.3.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" +checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a" dependencies = [ "bytes", - "fnv", "itoa", ] diff --git a/README.rst b/README.rst index 8abe848bf9..7a98306eaa 100644 --- a/README.rst +++ b/README.rst @@ -15,7 +15,6 @@ The original Synapse is written and maintained by `Element ` You can directly run and manage the source code in this repository, available under an AGPL license. -.. contents:: Release process for this fork ============================= @@ -23,149 +22,18 @@ Release process for this fork There is more information for Famedly employees in `Notion `__ -🛠️ Installation and configuration -================================== +🛠️ Standalone installation and configuration +============================================ -The Synapse documentation describes `how to install Synapse `_. We recommend using -`Docker images `_ or `Debian packages from Matrix.org -`_. +The Synapse documentation describes `options for installing Synapse standalone +`_. See +below for more useful documentation links. -.. 
_federation: +- `Synapse configuration options `_ +- `Synapse configuration for federation `_ +- `Using a reverse proxy with Synapse `_ +- `Upgrading Synapse `_ -Synapse has a variety of `config options -`_ -which can be used to customise its behaviour after installation. -There are additional details on how to `configure Synapse for federation here -`_. - -.. _reverse-proxy: - -Using a reverse proxy with Synapse ----------------------------------- - -It is recommended to put a reverse proxy such as -`nginx `_, -`Apache `_, -`Caddy `_, -`HAProxy `_ or -`relayd `_ in front of Synapse. One advantage of -doing so is that it means that you can expose the default https port (443) to -Matrix clients without needing to run Synapse with root privileges. -For information on configuring one, see `the reverse proxy docs -`_. - -Upgrading an existing Synapse ------------------------------ - -The instructions for upgrading Synapse are in `the upgrade notes`_. -Please check these instructions as upgrading may require extra steps for some -versions of Synapse. - -.. _the upgrade notes: https://famedly.github.io/synapse/latest/upgrade.html - - -Platform dependencies ---------------------- - -Synapse uses a number of platform dependencies such as Python and PostgreSQL, -and aims to follow supported upstream versions. See the -`deprecation policy `_ -for more details. - - -Security note -------------- - -Matrix serves raw, user-supplied data in some APIs -- specifically the `content -repository endpoints`_. - -.. _content repository endpoints: https://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid - -Whilst we make a reasonable effort to mitigate against XSS attacks (for -instance, by using `CSP`_), a Matrix homeserver should not be hosted on a -domain hosting other web applications. This especially applies to sharing -the domain with Matrix web clients and other sensitive applications like -webmail. 
See -https://developer.github.com/changes/2014-04-25-user-content-security for more -information. - -.. _CSP: https://github.com/matrix-org/synapse/pull/1021 - -Ideally, the homeserver should not simply be on a different subdomain, but on -a completely different `registered domain`_ (also known as top-level site or -eTLD+1). This is because `some attacks`_ are still possible as long as the two -applications share the same registered domain. - -.. _registered domain: https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-03#section-2.3 - -.. _some attacks: https://en.wikipedia.org/wiki/Session_fixation#Attacks_using_cross-subdomain_cookie - -To illustrate this with an example, if your Element Web or other sensitive web -application is hosted on ``A.example1.com``, you should ideally host Synapse on -``example2.com``. Some amount of protection is offered by hosting on -``B.example1.com`` instead, so this is also acceptable in some scenarios. -However, you should *not* host your Synapse on ``A.example1.com``. - -Note that all of the above refers exclusively to the domain used in Synapse's -``public_baseurl`` setting. In particular, it has no bearing on the domain -mentioned in MXIDs hosted on that server. - -Following this advice ensures that even if an XSS is found in Synapse, the -impact to other applications will be minimal. - - -🧪 Testing a new installation -============================= - -The easiest way to try out your new Synapse installation is by connecting to it -from a web client. - -Unless you are running a test instance of Synapse on your local machine, in -general, you will need to enable TLS support before you can successfully -connect from a client: see -`TLS certificates `_. - -An easy way to get started is to login or register via Element at -https://app.element.io/#/login or https://app.element.io/#/register respectively. 
-You will need to change the server you are logging into from ``matrix.org`` -and instead specify a homeserver URL of ``https://:8448`` -(or just ``https://`` if you are using a reverse proxy). -If you prefer to use another client, refer to our -`client breakdown `_. - -If all goes well you should at least be able to log in, create a room, and -start sending messages. - -.. _`client-user-reg`: - -Registering a new user from a client ------------------------------------- - -By default, registration of new users via Matrix clients is disabled. To enable -it: - -1. In the - `registration config section `_ - set ``enable_registration: true`` in ``homeserver.yaml``. -2. Then **either**: - - a. set up a `CAPTCHA `_, or - b. set ``enable_registration_without_verification: true`` in ``homeserver.yaml``. - -We **strongly** recommend using a CAPTCHA, particularly if your homeserver is exposed to -the public internet. Without it, anyone can freely register accounts on your homeserver. -This can be exploited by attackers to create spambots targeting the rest of the Matrix -federation. - -Your new Matrix ID will be formed partly from the ``server_name``, and partly -from a localpart you specify when you create the account in the form of:: - - @localpart:my.domain.name - -(pronounced "at localpart on my dot domain dot name"). - -As when logging in, you will need to specify a "Custom server". Specify your -desired ``localpart`` in the 'Username' box. 🎯 Troubleshooting and support ============================== @@ -192,35 +60,6 @@ issues for support requests, only for bug reports and feature requests. .. |docs| replace:: ``docs`` .. _docs: docs -🪪 Identity Servers -=================== - -Identity servers have the job of mapping email addresses and other 3rd Party -IDs (3PIDs) to Matrix user IDs, as well as verifying the ownership of 3PIDs -before creating that mapping. - -**Identity servers do not store accounts or credentials - these are stored and managed on homeservers. 
-Identity Servers are just for mapping 3rd Party IDs to Matrix IDs.** - -This process is highly security-sensitive, as there is an obvious risk of spam if it -is too easy to sign up for Matrix accounts or harvest 3PID data. In the longer -term, we hope to create a decentralised system to manage it (`matrix-doc #712 -`_), but in the meantime, -the role of managing trusted identity in the Matrix ecosystem is farmed out to -a cluster of known trusted ecosystem partners, who run 'Matrix Identity -Servers' such as `Sydent `_, whose role -is purely to authenticate and track 3PID logins and publish end-user public -keys. - -You can host your own copy of Sydent, but this will prevent you reaching other -users in the Matrix ecosystem via their email address, and prevent them finding -you. We therefore recommend that you use one of the centralised identity servers -at ``https://matrix.org`` or ``https://vector.im`` for now. - -To reiterate: the Identity server will only be used if you choose to associate -an email address with your account, or send an invite to another user via their -email address. - 🛠️ Development ============== @@ -243,11 +82,11 @@ Alongside all that, join our developer community on Matrix: Copyright and Licensing ======================= -| Copyright 2014-2017 OpenMarket Ltd -| Copyright 2017 Vector Creations Ltd -| Copyright 2017-2025 New Vector Ltd -| Copyright 2025 Famedly -| + | Copyright 2014–2017 OpenMarket Ltd + | Copyright 2017 Vector Creations Ltd + | Copyright 2017–2025 New Vector Ltd + | Copyright 2025 Element Creations Ltd + | Copyright 2025 Famedly Licensed under the AGPL. diff --git a/debian/changelog b/debian/changelog index f8cf0c86f9..15ff7cbd9d 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,15 @@ +matrix-synapse-py3 (1.144.0) stable; urgency=medium + + * New Synapse release 1.144.0. 
+ + -- Synapse Packaging team Tue, 09 Dec 2025 08:30:40 -0700 + +matrix-synapse-py3 (1.144.0~rc1) stable; urgency=medium + + * New Synapse release 1.144.0rc1. + + -- Synapse Packaging team Tue, 02 Dec 2025 09:11:19 -0700 + matrix-synapse-py3 (1.143.0) stable; urgency=medium * New Synapse release 1.143.0. diff --git a/docker/configure_workers_and_start.py b/docker/configure_workers_and_start.py index e19b0a0039..e7cbd701b8 100755 --- a/docker/configure_workers_and_start.py +++ b/docker/configure_workers_and_start.py @@ -196,6 +196,7 @@ "^/_matrix/client/(api/v1|r0|v3|unstable)/keys/upload", "^/_matrix/client/(api/v1|r0|v3|unstable)/keys/device_signing/upload$", "^/_matrix/client/(api/v1|r0|v3|unstable)/keys/signatures/upload$", + "^/_matrix/client/unstable/org.matrix.msc4140/delayed_events(/.*/restart)?$", ], "shared_extra_conf": {}, "worker_extra_conf": "", diff --git a/docs/SUMMARY.md b/docs/SUMMARY.md index c6b1ffced3..2bb3a2259e 100644 --- a/docs/SUMMARY.md +++ b/docs/SUMMARY.md @@ -5,6 +5,7 @@ # Setup - [Installation](setup/installation.md) + - [Security](setup/security.md) - [Using Postgres](postgres.md) - [Configuring a Reverse Proxy](reverse_proxy.md) - [Configuring a Forward/Outbound Proxy](setup/forward_proxy.md) diff --git a/docs/setup/installation.md b/docs/setup/installation.md index bd2177021d..54a326ddbb 100644 --- a/docs/setup/installation.md +++ b/docs/setup/installation.md @@ -16,8 +16,15 @@ that your email address is probably `user@example.com` rather than `user@email.example.com`) - but doing so may require more advanced setup: see [Setting up Federation](../federate.md). +⚠️ Before setting up Synapse please consult the [security page](security.md) for +best practices. ⚠️ + ## Installing Synapse +Note: Synapse uses a number of platform dependencies such as Python and PostgreSQL, +and aims to follow supported upstream versions. See the [deprecation +policy](../deprecation_policy.md) for more details. 
+ ### Prebuilt packages Prebuilt packages are available for a number of platforms. These are recommended diff --git a/docs/setup/security.md b/docs/setup/security.md new file mode 100644 index 0000000000..2c21b494e5 --- /dev/null +++ b/docs/setup/security.md @@ -0,0 +1,41 @@ +# Security + +This page lays out security best-practices when running Synapse. + +If you believe you have encountered a security issue, see our [Security +Disclosure Policy](https://element.io/en/security/security-disclosure-policy). + +## Content repository + +Matrix serves raw, user-supplied data in some APIs — specifically the [content +repository endpoints](https://matrix.org/docs/spec/client_server/latest.html#get-matrix-media-r0-download-servername-mediaid). + +Whilst we make a reasonable effort to mitigate against XSS attacks (for +instance, by using [CSP](https://github.com/matrix-org/synapse/pull/1021)), a +Matrix homeserver should not be hosted on a domain hosting other web +applications. This especially applies to sharing the domain with Matrix web +clients and other sensitive applications like webmail. See +https://developer.github.com/changes/2014-04-25-user-content-security for more +information. + +Ideally, the homeserver should not simply be on a different subdomain, but on a +completely different [registered +domain](https://tools.ietf.org/html/draft-ietf-httpbis-rfc6265bis-03#section-2.3) +(also known as top-level site or eTLD+1). This is because [some +attacks](https://en.wikipedia.org/wiki/Session_fixation#Attacks_using_cross-subdomain_cookie) +are still possible as long as the two applications share the same registered +domain. + + +To illustrate this with an example, if your Element Web or other sensitive web +application is hosted on `A.example1.com`, you should ideally host Synapse on +`example2.com`. Some amount of protection is offered by hosting on +`B.example1.com` instead, so this is also acceptable in some scenarios. 
+However, you should *not* host your Synapse on `A.example1.com`. + +Note that all of the above refers exclusively to the domain used in Synapse's +`public_baseurl` setting. In particular, it has no bearing on the domain +mentioned in MXIDs hosted on that server. + +Following this advice ensures that even if an XSS is found in Synapse, the +impact to other applications will be minimal. diff --git a/docs/upgrade.md b/docs/upgrade.md index 5c6a39e6c3..10ac083fd7 100644 --- a/docs/upgrade.md +++ b/docs/upgrade.md @@ -117,6 +117,25 @@ each upgrade are complete before moving on to the next upgrade, to avoid stacking them up. You can monitor the currently running background updates with [the Admin API](usage/administration/admin_api/background_updates.html#status). +# Upgrading to v1.144.0 + +## Worker support for unstable MSC4140 `/restart` endpoint + +The following unstable endpoint pattern may now be routed to worker processes: + +``` +^/_matrix/client/unstable/org.matrix.msc4140/delayed_events/.*/restart$ +``` + +## Unstable mutual rooms endpoint is now behind an experimental feature flag + +The unstable mutual rooms endpoint from +[MSC2666](https://github.com/matrix-org/matrix-spec-proposals/pull/2666) +(`/_matrix/client/unstable/uk.half-shot.msc2666/user/mutual_rooms`) is now +disabled by default. If you rely on this unstable endpoint, you must now set +`experimental_features.msc2666_enabled: true` in your configuration to keep +using it. + # Upgrading to v1.143.0 ## Dropping support for PostgreSQL 13 diff --git a/docs/workers.md b/docs/workers.md index f766b40251..2bc8afa74f 100644 --- a/docs/workers.md +++ b/docs/workers.md @@ -285,10 +285,13 @@ information. 
# User directory search requests ^/_matrix/client/(r0|v3|unstable)/user_directory/search$ + # Unstable MSC4140 support + ^/_matrix/client/unstable/org.matrix.msc4140/delayed_events(/.*/restart)?$ + Additionally, the following REST endpoints can be handled for GET requests: + # Push rules requests ^/_matrix/client/(api/v1|r0|v3|unstable)/pushrules/ - ^/_matrix/client/unstable/org.matrix.msc4140/delayed_events # Account data requests ^/_matrix/client/(r0|v3|unstable)/.*/tags diff --git a/poetry.lock b/poetry.lock index a12a6e6031..cd3b2ccffb 100644 --- a/poetry.lock +++ b/poetry.lock @@ -14,24 +14,16 @@ files = [ [[package]] name = "attrs" -version = "25.3.0" +version = "25.4.0" description = "Classes Without Boilerplate" optional = false -python-versions = ">=3.8" +python-versions = ">=3.9" groups = ["main", "dev"] files = [ - {file = "attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3"}, - {file = "attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b"}, + {file = "attrs-25.4.0-py3-none-any.whl", hash = "sha256:adcf7e2a1fb3b36ac48d97835bb6d8ade15b8dcce26aba8bf1d14847b57a3373"}, + {file = "attrs-25.4.0.tar.gz", hash = "sha256:16d5969b87f0859ef33a48b35d55ac1be6e42ae49d5e853b597db70c35c57e11"}, ] -[package.extras] -benchmark = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] -cov = ["cloudpickle ; platform_python_implementation == \"CPython\"", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation 
== \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] -dev = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] -docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier"] -tests = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] -tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\""] - [[package]] name = "authlib" version = "1.6.5" @@ -2184,14 +2176,14 @@ files = [ [[package]] name = "pydantic" -version = "2.12.4" +version = "2.12.5" description = "Data validation using Python type hints" optional = false python-versions = ">=3.9" groups = ["main"] files = [ - {file = "pydantic-2.12.4-py3-none-any.whl", hash = "sha256:92d3d202a745d46f9be6df459ac5a064fdaa3c1c4cd8adcfa332ccf3c05f871e"}, - {file = "pydantic-2.12.4.tar.gz", hash = "sha256:0f8cb9555000a4b5b617f66bfd2566264c4984b27589d3b845685983e8ea85ac"}, + {file = "pydantic-2.12.5-py3-none-any.whl", hash = "sha256:e561593fccf61e8a20fc46dfc2dfe075b8be7d0188df33f221ad1f0139180f9d"}, + {file = "pydantic-2.12.5.tar.gz", hash = "sha256:4d351024c75c0f085a9febbb665ce8c0c6ec5d30e903bdb6394b7ede26aebb49"}, ] [package.dependencies] @@ -2451,18 +2443,18 @@ tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"] 
[[package]] name = "pyopenssl" -version = "25.1.0" +version = "25.3.0" description = "Python wrapper module around the OpenSSL library" optional = false python-versions = ">=3.7" groups = ["main"] files = [ - {file = "pyopenssl-25.1.0-py3-none-any.whl", hash = "sha256:2b11f239acc47ac2e5aca04fd7fa829800aeee22a2eb30d744572a157bd8a1ab"}, - {file = "pyopenssl-25.1.0.tar.gz", hash = "sha256:8d031884482e0c67ee92bf9a4d8cceb08d92aba7136432ffb0703c5280fc205b"}, + {file = "pyopenssl-25.3.0-py3-none-any.whl", hash = "sha256:1fda6fc034d5e3d179d39e59c1895c9faeaf40a79de5fc4cbbfbe0d36f4a77b6"}, + {file = "pyopenssl-25.3.0.tar.gz", hash = "sha256:c981cb0a3fd84e8602d7afc209522773b94c1c2446a3c710a75b06fe1beae329"}, ] [package.dependencies] -cryptography = ">=41.0.5,<46" +cryptography = ">=45.0.7,<47" typing-extensions = {version = ">=4.9", markers = "python_version < \"3.13\" and python_version >= \"3.8\""} [package.extras] @@ -2741,127 +2733,127 @@ jupyter = ["ipywidgets (>=7.5.1,<9)"] [[package]] name = "rpds-py" -version = "0.28.0" +version = "0.30.0" description = "Python bindings to Rust's persistent data structures (rpds)" optional = false python-versions = ">=3.10" groups = ["main", "dev"] files = [ - {file = "rpds_py-0.28.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:7b6013db815417eeb56b2d9d7324e64fcd4fa289caeee6e7a78b2e11fc9b438a"}, - {file = "rpds_py-0.28.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1a4c6b05c685c0c03f80dabaeb73e74218c49deea965ca63f76a752807397207"}, - {file = "rpds_py-0.28.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4794c6c3fbe8f9ac87699b131a1f26e7b4abcf6d828da46a3a52648c7930eba"}, - {file = "rpds_py-0.28.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:2e8456b6ee5527112ff2354dd9087b030e3429e43a74f480d4a5ca79d269fd85"}, - {file = "rpds_py-0.28.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:beb880a9ca0a117415f241f66d56025c02037f7c4efc6fe59b5b8454f1eaa50d"}, - {file = "rpds_py-0.28.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6897bebb118c44b38c9cb62a178e09f1593c949391b9a1a6fe777ccab5934ee7"}, - {file = "rpds_py-0.28.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1b553dd06e875249fd43efd727785efb57a53180e0fde321468222eabbeaafa"}, - {file = "rpds_py-0.28.0-cp310-cp310-manylinux_2_31_riscv64.whl", hash = "sha256:f0b2044fdddeea5b05df832e50d2a06fe61023acb44d76978e1b060206a8a476"}, - {file = "rpds_py-0.28.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:05cf1e74900e8da73fa08cc76c74a03345e5a3e37691d07cfe2092d7d8e27b04"}, - {file = "rpds_py-0.28.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:efd489fec7c311dae25e94fe7eeda4b3d06be71c68f2cf2e8ef990ffcd2cd7e8"}, - {file = "rpds_py-0.28.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:ada7754a10faacd4f26067e62de52d6af93b6d9542f0df73c57b9771eb3ba9c4"}, - {file = "rpds_py-0.28.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:c2a34fd26588949e1e7977cfcbb17a9a42c948c100cab890c6d8d823f0586457"}, - {file = "rpds_py-0.28.0-cp310-cp310-win32.whl", hash = "sha256:f9174471d6920cbc5e82a7822de8dfd4dcea86eb828b04fc8c6519a77b0ee51e"}, - {file = "rpds_py-0.28.0-cp310-cp310-win_amd64.whl", hash = "sha256:6e32dd207e2c4f8475257a3540ab8a93eff997abfa0a3fdb287cae0d6cd874b8"}, - {file = "rpds_py-0.28.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:03065002fd2e287725d95fbc69688e0c6daf6c6314ba38bdbaa3895418e09296"}, - {file = "rpds_py-0.28.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:28ea02215f262b6d078daec0b45344c89e161eab9526b0d898221d96fdda5f27"}, - {file = "rpds_py-0.28.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25dbade8fbf30bcc551cb352376c0ad64b067e4fc56f90e22ba70c3ce205988c"}, - {file = "rpds_py-0.28.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:3c03002f54cc855860bfdc3442928ffdca9081e73b5b382ed0b9e8efe6e5e205"}, - {file = "rpds_py-0.28.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b9699fa7990368b22032baf2b2dce1f634388e4ffc03dfefaaac79f4695edc95"}, - {file = "rpds_py-0.28.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b9b06fe1a75e05e0713f06ea0c89ecb6452210fd60e2f1b6ddc1067b990e08d9"}, - {file = "rpds_py-0.28.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ac9f83e7b326a3f9ec3ef84cda98fb0a74c7159f33e692032233046e7fd15da2"}, - {file = "rpds_py-0.28.0-cp311-cp311-manylinux_2_31_riscv64.whl", hash = "sha256:0d3259ea9ad8743a75a43eb7819324cdab393263c91be86e2d1901ee65c314e0"}, - {file = "rpds_py-0.28.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:9a7548b345f66f6695943b4ef6afe33ccd3f1b638bd9afd0f730dd255c249c9e"}, - {file = "rpds_py-0.28.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c9a40040aa388b037eb39416710fbcce9443498d2eaab0b9b45ae988b53f5c67"}, - {file = "rpds_py-0.28.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8f60c7ea34e78c199acd0d3cda37a99be2c861dd2b8cf67399784f70c9f8e57d"}, - {file = "rpds_py-0.28.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:1571ae4292649100d743b26d5f9c63503bb1fedf538a8f29a98dce2d5ba6b4e6"}, - {file = "rpds_py-0.28.0-cp311-cp311-win32.whl", hash = "sha256:5cfa9af45e7c1140af7321fa0bef25b386ee9faa8928c80dc3a5360971a29e8c"}, - {file = "rpds_py-0.28.0-cp311-cp311-win_amd64.whl", hash = "sha256:dd8d86b5d29d1b74100982424ba53e56033dc47720a6de9ba0259cf81d7cecaa"}, - {file = "rpds_py-0.28.0-cp311-cp311-win_arm64.whl", hash = "sha256:4e27d3a5709cc2b3e013bf93679a849213c79ae0573f9b894b284b55e729e120"}, - {file = "rpds_py-0.28.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:6b4f28583a4f247ff60cd7bdda83db8c3f5b05a7a82ff20dd4b078571747708f"}, - {file = "rpds_py-0.28.0-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:d678e91b610c29c4b3d52a2c148b641df2b4676ffe47c59f6388d58b99cdc424"}, - {file = "rpds_py-0.28.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e819e0e37a44a78e1383bf1970076e2ccc4dc8c2bbaa2f9bd1dc987e9afff628"}, - {file = "rpds_py-0.28.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5ee514e0f0523db5d3fb171f397c54875dbbd69760a414dccf9d4d7ad628b5bd"}, - {file = "rpds_py-0.28.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5f3fa06d27fdcee47f07a39e02862da0100cb4982508f5ead53ec533cd5fe55e"}, - {file = "rpds_py-0.28.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:46959ef2e64f9e4a41fc89aa20dbca2b85531f9a72c21099a3360f35d10b0d5a"}, - {file = "rpds_py-0.28.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8455933b4bcd6e83fde3fefc987a023389c4b13f9a58c8d23e4b3f6d13f78c84"}, - {file = "rpds_py-0.28.0-cp312-cp312-manylinux_2_31_riscv64.whl", hash = "sha256:ad50614a02c8c2962feebe6012b52f9802deec4263946cddea37aaf28dd25a66"}, - {file = "rpds_py-0.28.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e5deca01b271492553fdb6c7fd974659dce736a15bae5dad7ab8b93555bceb28"}, - {file = "rpds_py-0.28.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:735f8495a13159ce6a0d533f01e8674cec0c57038c920495f87dcb20b3ddb48a"}, - {file = "rpds_py-0.28.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:961ca621ff10d198bbe6ba4957decca61aa2a0c56695384c1d6b79bf61436df5"}, - {file = "rpds_py-0.28.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2374e16cc9131022e7d9a8f8d65d261d9ba55048c78f3b6e017971a4f5e6353c"}, - {file = "rpds_py-0.28.0-cp312-cp312-win32.whl", hash = "sha256:d15431e334fba488b081d47f30f091e5d03c18527c325386091f31718952fe08"}, - {file = "rpds_py-0.28.0-cp312-cp312-win_amd64.whl", hash = "sha256:a410542d61fc54710f750d3764380b53bf09e8c4edbf2f9141a82aa774a04f7c"}, - {file = 
"rpds_py-0.28.0-cp312-cp312-win_arm64.whl", hash = "sha256:1f0cfd1c69e2d14f8c892b893997fa9a60d890a0c8a603e88dca4955f26d1edd"}, - {file = "rpds_py-0.28.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:e9e184408a0297086f880556b6168fa927d677716f83d3472ea333b42171ee3b"}, - {file = "rpds_py-0.28.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:edd267266a9b0448f33dc465a97cfc5d467594b600fe28e7fa2f36450e03053a"}, - {file = "rpds_py-0.28.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:85beb8b3f45e4e32f6802fb6cd6b17f615ef6c6a52f265371fb916fae02814aa"}, - {file = "rpds_py-0.28.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:d2412be8d00a1b895f8ad827cc2116455196e20ed994bb704bf138fe91a42724"}, - {file = "rpds_py-0.28.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cf128350d384b777da0e68796afdcebc2e9f63f0e9f242217754e647f6d32491"}, - {file = "rpds_py-0.28.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a2036d09b363aa36695d1cc1a97b36865597f4478470b0697b5ee9403f4fe399"}, - {file = "rpds_py-0.28.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8e1e9be4fa6305a16be628959188e4fd5cd6f1b0e724d63c6d8b2a8adf74ea6"}, - {file = "rpds_py-0.28.0-cp313-cp313-manylinux_2_31_riscv64.whl", hash = "sha256:0a403460c9dd91a7f23fc3188de6d8977f1d9603a351d5db6cf20aaea95b538d"}, - {file = "rpds_py-0.28.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d7366b6553cdc805abcc512b849a519167db8f5e5c3472010cd1228b224265cb"}, - {file = "rpds_py-0.28.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5b43c6a3726efd50f18d8120ec0551241c38785b68952d240c45ea553912ac41"}, - {file = "rpds_py-0.28.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:0cb7203c7bc69d7c1585ebb33a2e6074492d2fc21ad28a7b9d40457ac2a51ab7"}, - {file = "rpds_py-0.28.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:7a52a5169c664dfb495882adc75c304ae1d50df552fbd68e100fdc719dee4ff9"}, - {file = "rpds_py-0.28.0-cp313-cp313-win32.whl", hash = "sha256:2e42456917b6687215b3e606ab46aa6bca040c77af7df9a08a6dcfe8a4d10ca5"}, - {file = "rpds_py-0.28.0-cp313-cp313-win_amd64.whl", hash = "sha256:e0a0311caedc8069d68fc2bf4c9019b58a2d5ce3cd7cb656c845f1615b577e1e"}, - {file = "rpds_py-0.28.0-cp313-cp313-win_arm64.whl", hash = "sha256:04c1b207ab8b581108801528d59ad80aa83bb170b35b0ddffb29c20e411acdc1"}, - {file = "rpds_py-0.28.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:f296ea3054e11fc58ad42e850e8b75c62d9a93a9f981ad04b2e5ae7d2186ff9c"}, - {file = "rpds_py-0.28.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:5a7306c19b19005ad98468fcefeb7100b19c79fc23a5f24a12e06d91181193fa"}, - {file = "rpds_py-0.28.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5d9b86aa501fed9862a443c5c3116f6ead8bc9296185f369277c42542bd646b"}, - {file = "rpds_py-0.28.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e5bbc701eff140ba0e872691d573b3d5d30059ea26e5785acba9132d10c8c31d"}, - {file = "rpds_py-0.28.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9a5690671cd672a45aa8616d7374fdf334a1b9c04a0cac3c854b1136e92374fe"}, - {file = "rpds_py-0.28.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9f1d92ecea4fa12f978a367c32a5375a1982834649cdb96539dcdc12e609ab1a"}, - {file = "rpds_py-0.28.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8d252db6b1a78d0a3928b6190156042d54c93660ce4d98290d7b16b5296fb7cc"}, - {file = "rpds_py-0.28.0-cp313-cp313t-manylinux_2_31_riscv64.whl", hash = "sha256:d61b355c3275acb825f8777d6c4505f42b5007e357af500939d4a35b19177259"}, - {file = "rpds_py-0.28.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:acbe5e8b1026c0c580d0321c8aae4b0a1e1676861d48d6e8c6586625055b606a"}, - {file = 
"rpds_py-0.28.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:8aa23b6f0fc59b85b4c7d89ba2965af274346f738e8d9fc2455763602e62fd5f"}, - {file = "rpds_py-0.28.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:7b14b0c680286958817c22d76fcbca4800ddacef6f678f3a7c79a1fe7067fe37"}, - {file = "rpds_py-0.28.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:bcf1d210dfee61a6c86551d67ee1031899c0fdbae88b2d44a569995d43797712"}, - {file = "rpds_py-0.28.0-cp313-cp313t-win32.whl", hash = "sha256:3aa4dc0fdab4a7029ac63959a3ccf4ed605fee048ba67ce89ca3168da34a1342"}, - {file = "rpds_py-0.28.0-cp313-cp313t-win_amd64.whl", hash = "sha256:7b7d9d83c942855e4fdcfa75d4f96f6b9e272d42fffcb72cd4bb2577db2e2907"}, - {file = "rpds_py-0.28.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:dcdcb890b3ada98a03f9f2bb108489cdc7580176cb73b4f2d789e9a1dac1d472"}, - {file = "rpds_py-0.28.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:f274f56a926ba2dc02976ca5b11c32855cbd5925534e57cfe1fda64e04d1add2"}, - {file = "rpds_py-0.28.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4fe0438ac4a29a520ea94c8c7f1754cdd8feb1bc490dfda1bfd990072363d527"}, - {file = "rpds_py-0.28.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:8a358a32dd3ae50e933347889b6af9a1bdf207ba5d1a3f34e1a38cd3540e6733"}, - {file = "rpds_py-0.28.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e80848a71c78aa328fefaba9c244d588a342c8e03bda518447b624ea64d1ff56"}, - {file = "rpds_py-0.28.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f586db2e209d54fe177e58e0bc4946bea5fb0102f150b1b2f13de03e1f0976f8"}, - {file = "rpds_py-0.28.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ae8ee156d6b586e4292491e885d41483136ab994e719a13458055bec14cf370"}, - {file = "rpds_py-0.28.0-cp314-cp314-manylinux_2_31_riscv64.whl", hash = 
"sha256:a805e9b3973f7e27f7cab63a6b4f61d90f2e5557cff73b6e97cd5b8540276d3d"}, - {file = "rpds_py-0.28.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5d3fd16b6dc89c73a4da0b4ac8b12a7ecc75b2864b95c9e5afed8003cb50a728"}, - {file = "rpds_py-0.28.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:6796079e5d24fdaba6d49bda28e2c47347e89834678f2bc2c1b4fc1489c0fb01"}, - {file = "rpds_py-0.28.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:76500820c2af232435cbe215e3324c75b950a027134e044423f59f5b9a1ba515"}, - {file = "rpds_py-0.28.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:bbdc5640900a7dbf9dd707fe6388972f5bbd883633eb68b76591044cfe346f7e"}, - {file = "rpds_py-0.28.0-cp314-cp314-win32.whl", hash = "sha256:adc8aa88486857d2b35d75f0640b949759f79dc105f50aa2c27816b2e0dd749f"}, - {file = "rpds_py-0.28.0-cp314-cp314-win_amd64.whl", hash = "sha256:66e6fa8e075b58946e76a78e69e1a124a21d9a48a5b4766d15ba5b06869d1fa1"}, - {file = "rpds_py-0.28.0-cp314-cp314-win_arm64.whl", hash = "sha256:a6fe887c2c5c59413353b7c0caff25d0e566623501ccfff88957fa438a69377d"}, - {file = "rpds_py-0.28.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:7a69df082db13c7070f7b8b1f155fa9e687f1d6aefb7b0e3f7231653b79a067b"}, - {file = "rpds_py-0.28.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:b1cde22f2c30ebb049a9e74c5374994157b9b70a16147d332f89c99c5960737a"}, - {file = "rpds_py-0.28.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5338742f6ba7a51012ea470bd4dc600a8c713c0c72adaa0977a1b1f4327d6592"}, - {file = "rpds_py-0.28.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e1460ebde1bcf6d496d80b191d854adedcc619f84ff17dc1c6d550f58c9efbba"}, - {file = "rpds_py-0.28.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e3eb248f2feba84c692579257a043a7699e28a77d86c77b032c1d9fbb3f0219c"}, - {file = "rpds_py-0.28.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = 
"sha256:bd3bbba5def70b16cd1c1d7255666aad3b290fbf8d0fe7f9f91abafb73611a91"}, - {file = "rpds_py-0.28.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3114f4db69ac5a1f32e7e4d1cbbe7c8f9cf8217f78e6e002cedf2d54c2a548ed"}, - {file = "rpds_py-0.28.0-cp314-cp314t-manylinux_2_31_riscv64.whl", hash = "sha256:4b0cb8a906b1a0196b863d460c0222fb8ad0f34041568da5620f9799b83ccf0b"}, - {file = "rpds_py-0.28.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:cf681ac76a60b667106141e11a92a3330890257e6f559ca995fbb5265160b56e"}, - {file = "rpds_py-0.28.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:1e8ee6413cfc677ce8898d9cde18cc3a60fc2ba756b0dec5b71eb6eb21c49fa1"}, - {file = "rpds_py-0.28.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:b3072b16904d0b5572a15eb9d31c1954e0d3227a585fc1351aa9878729099d6c"}, - {file = "rpds_py-0.28.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:b670c30fd87a6aec281c3c9896d3bae4b205fd75d79d06dc87c2503717e46092"}, - {file = "rpds_py-0.28.0-cp314-cp314t-win32.whl", hash = "sha256:8014045a15b4d2b3476f0a287fcc93d4f823472d7d1308d47884ecac9e612be3"}, - {file = "rpds_py-0.28.0-cp314-cp314t-win_amd64.whl", hash = "sha256:7a4e59c90d9c27c561eb3160323634a9ff50b04e4f7820600a2beb0ac90db578"}, - {file = "rpds_py-0.28.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:f5e7101145427087e493b9c9b959da68d357c28c562792300dd21a095118ed16"}, - {file = "rpds_py-0.28.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:31eb671150b9c62409a888850aaa8e6533635704fe2b78335f9aaf7ff81eec4d"}, - {file = "rpds_py-0.28.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48b55c1f64482f7d8bd39942f376bfdf2f6aec637ee8c805b5041e14eeb771db"}, - {file = "rpds_py-0.28.0-pp311-pypy311_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:24743a7b372e9a76171f6b69c01aedf927e8ac3e16c474d9fe20d552a8cb45c7"}, - {file = 
"rpds_py-0.28.0-pp311-pypy311_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:389c29045ee8bbb1627ea190b4976a310a295559eaf9f1464a1a6f2bf84dde78"}, - {file = "rpds_py-0.28.0-pp311-pypy311_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:23690b5827e643150cf7b49569679ec13fe9a610a15949ed48b85eb7f98f34ec"}, - {file = "rpds_py-0.28.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6f0c9266c26580e7243ad0d72fc3e01d6b33866cfab5084a6da7576bcf1c4f72"}, - {file = "rpds_py-0.28.0-pp311-pypy311_pp73-manylinux_2_31_riscv64.whl", hash = "sha256:4c6c4db5d73d179746951486df97fd25e92396be07fc29ee8ff9a8f5afbdfb27"}, - {file = "rpds_py-0.28.0-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a3b695a8fa799dd2cfdb4804b37096c5f6dba1ac7f48a7fbf6d0485bcd060316"}, - {file = "rpds_py-0.28.0-pp311-pypy311_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:6aa1bfce3f83baf00d9c5fcdbba93a3ab79958b4c7d7d1f55e7fe68c20e63912"}, - {file = "rpds_py-0.28.0-pp311-pypy311_pp73-musllinux_1_2_i686.whl", hash = "sha256:7b0f9dceb221792b3ee6acb5438eb1f02b0cb2c247796a72b016dcc92c6de829"}, - {file = "rpds_py-0.28.0-pp311-pypy311_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:5d0145edba8abd3db0ab22b5300c99dc152f5c9021fab861be0f0544dc3cbc5f"}, - {file = "rpds_py-0.28.0.tar.gz", hash = "sha256:abd4df20485a0983e2ca334a216249b6186d6e3c1627e106651943dbdb791aea"}, + {file = "rpds_py-0.30.0-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:679ae98e00c0e8d68a7fda324e16b90fd5260945b45d3b824c892cec9eea3288"}, + {file = "rpds_py-0.30.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4cc2206b76b4f576934f0ed374b10d7ca5f457858b157ca52064bdfc26b9fc00"}, + {file = "rpds_py-0.30.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:389a2d49eded1896c3d48b0136ead37c48e221b391c052fba3f4055c367f60a6"}, + {file = "rpds_py-0.30.0-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = 
"sha256:32c8528634e1bf7121f3de08fa85b138f4e0dc47657866630611b03967f041d7"}, + {file = "rpds_py-0.30.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f207f69853edd6f6700b86efb84999651baf3789e78a466431df1331608e5324"}, + {file = "rpds_py-0.30.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:67b02ec25ba7a9e8fa74c63b6ca44cf5707f2fbfadae3ee8e7494297d56aa9df"}, + {file = "rpds_py-0.30.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0c0e95f6819a19965ff420f65578bacb0b00f251fefe2c8b23347c37174271f3"}, + {file = "rpds_py-0.30.0-cp310-cp310-manylinux_2_31_riscv64.whl", hash = "sha256:a452763cc5198f2f98898eb98f7569649fe5da666c2dc6b5ddb10fde5a574221"}, + {file = "rpds_py-0.30.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e0b65193a413ccc930671c55153a03ee57cecb49e6227204b04fae512eb657a7"}, + {file = "rpds_py-0.30.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:858738e9c32147f78b3ac24dc0edb6610000e56dc0f700fd5f651d0a0f0eb9ff"}, + {file = "rpds_py-0.30.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:da279aa314f00acbb803da1e76fa18666778e8a8f83484fba94526da5de2cba7"}, + {file = "rpds_py-0.30.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:7c64d38fb49b6cdeda16ab49e35fe0da2e1e9b34bc38bd78386530f218b37139"}, + {file = "rpds_py-0.30.0-cp310-cp310-win32.whl", hash = "sha256:6de2a32a1665b93233cde140ff8b3467bdb9e2af2b91079f0333a0974d12d464"}, + {file = "rpds_py-0.30.0-cp310-cp310-win_amd64.whl", hash = "sha256:1726859cd0de969f88dc8673bdd954185b9104e05806be64bcd87badbe313169"}, + {file = "rpds_py-0.30.0-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:a2bffea6a4ca9f01b3f8e548302470306689684e61602aa3d141e34da06cf425"}, + {file = "rpds_py-0.30.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:dc4f992dfe1e2bc3ebc7444f6c7051b4bc13cd8e33e43511e8ffd13bf407010d"}, + {file = "rpds_py-0.30.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:422c3cb9856d80b09d30d2eb255d0754b23e090034e1deb4083f8004bd0761e4"}, + {file = "rpds_py-0.30.0-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:07ae8a593e1c3c6b82ca3292efbe73c30b61332fd612e05abee07c79359f292f"}, + {file = "rpds_py-0.30.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:12f90dd7557b6bd57f40abe7747e81e0c0b119bef015ea7726e69fe550e394a4"}, + {file = "rpds_py-0.30.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:99b47d6ad9a6da00bec6aabe5a6279ecd3c06a329d4aa4771034a21e335c3a97"}, + {file = "rpds_py-0.30.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33f559f3104504506a44bb666b93a33f5d33133765b0c216a5bf2f1e1503af89"}, + {file = "rpds_py-0.30.0-cp311-cp311-manylinux_2_31_riscv64.whl", hash = "sha256:946fe926af6e44f3697abbc305ea168c2c31d3e3ef1058cf68f379bf0335a78d"}, + {file = "rpds_py-0.30.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:495aeca4b93d465efde585977365187149e75383ad2684f81519f504f5c13038"}, + {file = "rpds_py-0.30.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:d9a0ca5da0386dee0655b4ccdf46119df60e0f10da268d04fe7cc87886872ba7"}, + {file = "rpds_py-0.30.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8d6d1cc13664ec13c1b84241204ff3b12f9bb82464b8ad6e7a5d3486975c2eed"}, + {file = "rpds_py-0.30.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:3896fa1be39912cf0757753826bc8bdc8ca331a28a7c4ae46b7a21280b06bb85"}, + {file = "rpds_py-0.30.0-cp311-cp311-win32.whl", hash = "sha256:55f66022632205940f1827effeff17c4fa7ae1953d2b74a8581baaefb7d16f8c"}, + {file = "rpds_py-0.30.0-cp311-cp311-win_amd64.whl", hash = "sha256:a51033ff701fca756439d641c0ad09a41d9242fa69121c7d8769604a0a629825"}, + {file = "rpds_py-0.30.0-cp311-cp311-win_arm64.whl", hash = "sha256:47b0ef6231c58f506ef0b74d44e330405caa8428e770fec25329ed2cb971a229"}, + {file = "rpds_py-0.30.0-cp312-cp312-macosx_10_12_x86_64.whl", hash = 
"sha256:a161f20d9a43006833cd7068375a94d035714d73a172b681d8881820600abfad"}, + {file = "rpds_py-0.30.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:6abc8880d9d036ecaafe709079969f56e876fcf107f7a8e9920ba6d5a3878d05"}, + {file = "rpds_py-0.30.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ca28829ae5f5d569bb62a79512c842a03a12576375d5ece7d2cadf8abe96ec28"}, + {file = "rpds_py-0.30.0-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a1010ed9524c73b94d15919ca4d41d8780980e1765babf85f9a2f90d247153dd"}, + {file = "rpds_py-0.30.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f8d1736cfb49381ba528cd5baa46f82fdc65c06e843dab24dd70b63d09121b3f"}, + {file = "rpds_py-0.30.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d948b135c4693daff7bc2dcfc4ec57237a29bd37e60c2fabf5aff2bbacf3e2f1"}, + {file = "rpds_py-0.30.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:47f236970bccb2233267d89173d3ad2703cd36a0e2a6e92d0560d333871a3d23"}, + {file = "rpds_py-0.30.0-cp312-cp312-manylinux_2_31_riscv64.whl", hash = "sha256:2e6ecb5a5bcacf59c3f912155044479af1d0b6681280048b338b28e364aca1f6"}, + {file = "rpds_py-0.30.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a8fa71a2e078c527c3e9dc9fc5a98c9db40bcc8a92b4e8858e36d329f8684b51"}, + {file = "rpds_py-0.30.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:73c67f2db7bc334e518d097c6d1e6fed021bbc9b7d678d6cc433478365d1d5f5"}, + {file = "rpds_py-0.30.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:5ba103fb455be00f3b1c2076c9d4264bfcb037c976167a6047ed82f23153f02e"}, + {file = "rpds_py-0.30.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:7cee9c752c0364588353e627da8a7e808a66873672bcb5f52890c33fd965b394"}, + {file = "rpds_py-0.30.0-cp312-cp312-win32.whl", hash = "sha256:1ab5b83dbcf55acc8b08fc62b796ef672c457b17dbd7820a11d6c52c06839bdf"}, + {file = 
"rpds_py-0.30.0-cp312-cp312-win_amd64.whl", hash = "sha256:a090322ca841abd453d43456ac34db46e8b05fd9b3b4ac0c78bcde8b089f959b"}, + {file = "rpds_py-0.30.0-cp312-cp312-win_arm64.whl", hash = "sha256:669b1805bd639dd2989b281be2cfd951c6121b65e729d9b843e9639ef1fd555e"}, + {file = "rpds_py-0.30.0-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:f83424d738204d9770830d35290ff3273fbb02b41f919870479fab14b9d303b2"}, + {file = "rpds_py-0.30.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:e7536cd91353c5273434b4e003cbda89034d67e7710eab8761fd918ec6c69cf8"}, + {file = "rpds_py-0.30.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2771c6c15973347f50fece41fc447c054b7ac2ae0502388ce3b6738cd366e3d4"}, + {file = "rpds_py-0.30.0-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:0a59119fc6e3f460315fe9d08149f8102aa322299deaa5cab5b40092345c2136"}, + {file = "rpds_py-0.30.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:76fec018282b4ead0364022e3c54b60bf368b9d926877957a8624b58419169b7"}, + {file = "rpds_py-0.30.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:692bef75a5525db97318e8cd061542b5a79812d711ea03dbc1f6f8dbb0c5f0d2"}, + {file = "rpds_py-0.30.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9027da1ce107104c50c81383cae773ef5c24d296dd11c99e2629dbd7967a20c6"}, + {file = "rpds_py-0.30.0-cp313-cp313-manylinux_2_31_riscv64.whl", hash = "sha256:9cf69cdda1f5968a30a359aba2f7f9aa648a9ce4b580d6826437f2b291cfc86e"}, + {file = "rpds_py-0.30.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:a4796a717bf12b9da9d3ad002519a86063dcac8988b030e405704ef7d74d2d9d"}, + {file = "rpds_py-0.30.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:5d4c2aa7c50ad4728a094ebd5eb46c452e9cb7edbfdb18f9e1221f597a73e1e7"}, + {file = "rpds_py-0.30.0-cp313-cp313-musllinux_1_2_i686.whl", hash = 
"sha256:ba81a9203d07805435eb06f536d95a266c21e5b2dfbf6517748ca40c98d19e31"}, + {file = "rpds_py-0.30.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:945dccface01af02675628334f7cf49c2af4c1c904748efc5cf7bbdf0b579f95"}, + {file = "rpds_py-0.30.0-cp313-cp313-win32.whl", hash = "sha256:b40fb160a2db369a194cb27943582b38f79fc4887291417685f3ad693c5a1d5d"}, + {file = "rpds_py-0.30.0-cp313-cp313-win_amd64.whl", hash = "sha256:806f36b1b605e2d6a72716f321f20036b9489d29c51c91f4dd29a3e3afb73b15"}, + {file = "rpds_py-0.30.0-cp313-cp313-win_arm64.whl", hash = "sha256:d96c2086587c7c30d44f31f42eae4eac89b60dabbac18c7669be3700f13c3ce1"}, + {file = "rpds_py-0.30.0-cp313-cp313t-macosx_10_12_x86_64.whl", hash = "sha256:eb0b93f2e5c2189ee831ee43f156ed34e2a89a78a66b98cadad955972548be5a"}, + {file = "rpds_py-0.30.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:922e10f31f303c7c920da8981051ff6d8c1a56207dbdf330d9047f6d30b70e5e"}, + {file = "rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cdc62c8286ba9bf7f47befdcea13ea0e26bf294bda99758fd90535cbaf408000"}, + {file = "rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:47f9a91efc418b54fb8190a6b4aa7813a23fb79c51f4bb84e418f5476c38b8db"}, + {file = "rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1f3587eb9b17f3789ad50824084fa6f81921bbf9a795826570bda82cb3ed91f2"}, + {file = "rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:39c02563fc592411c2c61d26b6c5fe1e51eaa44a75aa2c8735ca88b0d9599daa"}, + {file = "rpds_py-0.30.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:51a1234d8febafdfd33a42d97da7a43f5dcb120c1060e352a3fbc0c6d36e2083"}, + {file = "rpds_py-0.30.0-cp313-cp313t-manylinux_2_31_riscv64.whl", hash = "sha256:eb2c4071ab598733724c08221091e8d80e89064cd472819285a9ab0f24bcedb9"}, + {file = 
"rpds_py-0.30.0-cp313-cp313t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:6bdfdb946967d816e6adf9a3d8201bfad269c67efe6cefd7093ef959683c8de0"}, + {file = "rpds_py-0.30.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:c77afbd5f5250bf27bf516c7c4a016813eb2d3e116139aed0096940c5982da94"}, + {file = "rpds_py-0.30.0-cp313-cp313t-musllinux_1_2_i686.whl", hash = "sha256:61046904275472a76c8c90c9ccee9013d70a6d0f73eecefd38c1ae7c39045a08"}, + {file = "rpds_py-0.30.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:4c5f36a861bc4b7da6516dbdf302c55313afa09b81931e8280361a4f6c9a2d27"}, + {file = "rpds_py-0.30.0-cp313-cp313t-win32.whl", hash = "sha256:3d4a69de7a3e50ffc214ae16d79d8fbb0922972da0356dcf4d0fdca2878559c6"}, + {file = "rpds_py-0.30.0-cp313-cp313t-win_amd64.whl", hash = "sha256:f14fc5df50a716f7ece6a80b6c78bb35ea2ca47c499e422aa4463455dd96d56d"}, + {file = "rpds_py-0.30.0-cp314-cp314-macosx_10_12_x86_64.whl", hash = "sha256:68f19c879420aa08f61203801423f6cd5ac5f0ac4ac82a2368a9fcd6a9a075e0"}, + {file = "rpds_py-0.30.0-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:ec7c4490c672c1a0389d319b3a9cfcd098dcdc4783991553c332a15acf7249be"}, + {file = "rpds_py-0.30.0-cp314-cp314-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f251c812357a3fed308d684a5079ddfb9d933860fc6de89f2b7ab00da481e65f"}, + {file = "rpds_py-0.30.0-cp314-cp314-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:ac98b175585ecf4c0348fd7b29c3864bda53b805c773cbf7bfdaffc8070c976f"}, + {file = "rpds_py-0.30.0-cp314-cp314-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3e62880792319dbeb7eb866547f2e35973289e7d5696c6e295476448f5b63c87"}, + {file = "rpds_py-0.30.0-cp314-cp314-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4e7fc54e0900ab35d041b0601431b0a0eb495f0851a0639b6ef90f7741b39a18"}, + {file = "rpds_py-0.30.0-cp314-cp314-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:47e77dc9822d3ad616c3d5759ea5631a75e5809d5a28707744ef79d7a1bcfcad"}, + {file = "rpds_py-0.30.0-cp314-cp314-manylinux_2_31_riscv64.whl", hash = "sha256:b4dc1a6ff022ff85ecafef7979a2c6eb423430e05f1165d6688234e62ba99a07"}, + {file = "rpds_py-0.30.0-cp314-cp314-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:4559c972db3a360808309e06a74628b95eaccbf961c335c8fe0d590cf587456f"}, + {file = "rpds_py-0.30.0-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:0ed177ed9bded28f8deb6ab40c183cd1192aa0de40c12f38be4d59cd33cb5c65"}, + {file = "rpds_py-0.30.0-cp314-cp314-musllinux_1_2_i686.whl", hash = "sha256:ad1fa8db769b76ea911cb4e10f049d80bf518c104f15b3edb2371cc65375c46f"}, + {file = "rpds_py-0.30.0-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:46e83c697b1f1c72b50e5ee5adb4353eef7406fb3f2043d64c33f20ad1c2fc53"}, + {file = "rpds_py-0.30.0-cp314-cp314-win32.whl", hash = "sha256:ee454b2a007d57363c2dfd5b6ca4a5d7e2c518938f8ed3b706e37e5d470801ed"}, + {file = "rpds_py-0.30.0-cp314-cp314-win_amd64.whl", hash = "sha256:95f0802447ac2d10bcc69f6dc28fe95fdf17940367b21d34e34c737870758950"}, + {file = "rpds_py-0.30.0-cp314-cp314-win_arm64.whl", hash = "sha256:613aa4771c99f03346e54c3f038e4cc574ac09a3ddfb0e8878487335e96dead6"}, + {file = "rpds_py-0.30.0-cp314-cp314t-macosx_10_12_x86_64.whl", hash = "sha256:7e6ecfcb62edfd632e56983964e6884851786443739dbfe3582947e87274f7cb"}, + {file = "rpds_py-0.30.0-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:a1d0bc22a7cdc173fedebb73ef81e07faef93692b8c1ad3733b67e31e1b6e1b8"}, + {file = "rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d08f00679177226c4cb8c5265012eea897c8ca3b93f429e546600c971bcbae7"}, + {file = "rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:5965af57d5848192c13534f90f9dd16464f3c37aaf166cc1da1cae1fd5a34898"}, + {file = "rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:9a4e86e34e9ab6b667c27f3211ca48f73dba7cd3d90f8d5b11be56e5dbc3fb4e"}, + {file = "rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5d3e6b26f2c785d65cc25ef1e5267ccbe1b069c5c21b8cc724efee290554419"}, + {file = "rpds_py-0.30.0-cp314-cp314t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:626a7433c34566535b6e56a1b39a7b17ba961e97ce3b80ec62e6f1312c025551"}, + {file = "rpds_py-0.30.0-cp314-cp314t-manylinux_2_31_riscv64.whl", hash = "sha256:acd7eb3f4471577b9b5a41baf02a978e8bdeb08b4b355273994f8b87032000a8"}, + {file = "rpds_py-0.30.0-cp314-cp314t-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fe5fa731a1fa8a0a56b0977413f8cacac1768dad38d16b3a296712709476fbd5"}, + {file = "rpds_py-0.30.0-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:74a3243a411126362712ee1524dfc90c650a503502f135d54d1b352bd01f2404"}, + {file = "rpds_py-0.30.0-cp314-cp314t-musllinux_1_2_i686.whl", hash = "sha256:3e8eeb0544f2eb0d2581774be4c3410356eba189529a6b3e36bbbf9696175856"}, + {file = "rpds_py-0.30.0-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:dbd936cde57abfee19ab3213cf9c26be06d60750e60a8e4dd85d1ab12c8b1f40"}, + {file = "rpds_py-0.30.0-cp314-cp314t-win32.whl", hash = "sha256:dc824125c72246d924f7f796b4f63c1e9dc810c7d9e2355864b3c3a73d59ade0"}, + {file = "rpds_py-0.30.0-cp314-cp314t-win_amd64.whl", hash = "sha256:27f4b0e92de5bfbc6f86e43959e6edd1425c33b5e69aab0984a72047f2bcf1e3"}, + {file = "rpds_py-0.30.0-pp311-pypy311_pp73-macosx_10_12_x86_64.whl", hash = "sha256:c2262bdba0ad4fc6fb5545660673925c2d2a5d9e2e0fb603aad545427be0fc58"}, + {file = "rpds_py-0.30.0-pp311-pypy311_pp73-macosx_11_0_arm64.whl", hash = "sha256:ee6af14263f25eedc3bb918a3c04245106a42dfd4f5c2285ea6f997b1fc3f89a"}, + {file = "rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3adbb8179ce342d235c31ab8ec511e66c73faa27a47e076ccc92421add53e2bb"}, + {file = 
"rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:250fa00e9543ac9b97ac258bd37367ff5256666122c2d0f2bc97577c60a1818c"}, + {file = "rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9854cf4f488b3d57b9aaeb105f06d78e5529d3145b1e4a41750167e8c213c6d3"}, + {file = "rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:993914b8e560023bc0a8bf742c5f303551992dcb85e247b1e5c7f4a7d145bda5"}, + {file = "rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58edca431fb9b29950807e301826586e5bbf24163677732429770a697ffe6738"}, + {file = "rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_31_riscv64.whl", hash = "sha256:dea5b552272a944763b34394d04577cf0f9bd013207bc32323b5a89a53cf9c2f"}, + {file = "rpds_py-0.30.0-pp311-pypy311_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:ba3af48635eb83d03f6c9735dfb21785303e73d22ad03d489e88adae6eab8877"}, + {file = "rpds_py-0.30.0-pp311-pypy311_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:dff13836529b921e22f15cb099751209a60009731a68519630a24d61f0b1b30a"}, + {file = "rpds_py-0.30.0-pp311-pypy311_pp73-musllinux_1_2_i686.whl", hash = "sha256:1b151685b23929ab7beec71080a8889d4d6d9fa9a983d213f07121205d48e2c4"}, + {file = "rpds_py-0.30.0-pp311-pypy311_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:ac37f9f516c51e5753f27dfdef11a88330f04de2d564be3991384b2f3535d02e"}, + {file = "rpds_py-0.30.0.tar.gz", hash = "sha256:dd8ff7cf90014af0c0f787eea34794ebf6415242ee1d6fa91eaba725cc441e84"}, ] [[package]] @@ -2928,15 +2920,15 @@ doc = ["Sphinx", "sphinx-rtd-theme"] [[package]] name = "sentry-sdk" -version = "2.44.0" +version = "2.46.0" description = "Python client for Sentry (https://sentry.io)" optional = true python-versions = ">=3.6" groups = ["main"] markers = "extra == \"sentry\" or extra == \"all\"" files = [ - {file = "sentry_sdk-2.44.0-py2.py3-none-any.whl", hash = 
"sha256:9e36a0372b881e8f92fdbff4564764ce6cec4b7f25424d0a3a8d609c9e4651a7"}, - {file = "sentry_sdk-2.44.0.tar.gz", hash = "sha256:5b1fe54dfafa332e900b07dd8f4dfe35753b64e78e7d9b1655a28fd3065e2493"}, + {file = "sentry_sdk-2.46.0-py2.py3-none-any.whl", hash = "sha256:4eeeb60198074dff8d066ea153fa6f241fef1668c10900ea53a4200abc8da9b1"}, + {file = "sentry_sdk-2.46.0.tar.gz", hash = "sha256:91821a23460725734b7741523021601593f35731808afc0bb2ba46c27b8acd91"}, ] [package.dependencies] @@ -2975,6 +2967,7 @@ openai = ["openai (>=1.0.0)", "tiktoken (>=0.3.0)"] openfeature = ["openfeature-sdk (>=0.7.1)"] opentelemetry = ["opentelemetry-distro (>=0.35b0)"] opentelemetry-experimental = ["opentelemetry-distro"] +opentelemetry-otlp = ["opentelemetry-distro[otlp] (>=0.35b0)"] pure-eval = ["asttokens", "executing", "pure_eval"] pydantic-ai = ["pydantic-ai (>=1.0.0)"] pymongo = ["pymongo (>=3.1)"] @@ -3107,6 +3100,22 @@ files = [ {file = "sortedcontainers-2.4.0.tar.gz", hash = "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88"}, ] +[[package]] +name = "sqlglot" +version = "28.0.0" +description = "An easily customizable SQL parser and transpiler" +optional = false +python-versions = ">=3.9" +groups = ["dev"] +files = [ + {file = "sqlglot-28.0.0-py3-none-any.whl", hash = "sha256:ac1778e7fa4812f4f7e5881b260632fc167b00ca4c1226868891fb15467122e4"}, + {file = "sqlglot-28.0.0.tar.gz", hash = "sha256:cc9a651ef4182e61dac58aa955e5fb21845a5865c6a4d7d7b5a7857450285ad4"}, +] + +[package.extras] +dev = ["duckdb (>=0.6)", "maturin (>=1.4,<2.0)", "mypy", "pandas", "pandas-stubs", "pdoc", "pre-commit", "pyperf", "python-dateutil", "pytz", "ruff (==0.7.2)", "types-python-dateutil", "types-pytz", "typing_extensions"] +rs = ["sqlglotrs (==0.7.3)"] + [[package]] name = "systemd-python" version = "235" @@ -3361,14 +3370,14 @@ twisted = "*" [[package]] name = "types-bleach" -version = "6.2.0.20250809" +version = "6.3.0.20251115" description = "Typing stubs for bleach" optional = false 
python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "types_bleach-6.2.0.20250809-py3-none-any.whl", hash = "sha256:0b372a75117947d9ac8a31ae733fd0f8d92ec75c4772e7b37093ba3fa5b48fb9"}, - {file = "types_bleach-6.2.0.20250809.tar.gz", hash = "sha256:188d7a1119f6c953140b513ed57ba4213755695815472c19d0c22ac09c79b90b"}, + {file = "types_bleach-6.3.0.20251115-py3-none-any.whl", hash = "sha256:f81e7cf4ebac3f3d60b66b3fd5236c324e65037d1b28d22c94d5b457f0b98f42"}, + {file = "types_bleach-6.3.0.20251115.tar.gz", hash = "sha256:96911b20f169a18524d03b61fa7e98a08c411292f7cdb5dc191057f55dad9ae3"}, ] [package.dependencies] @@ -3403,14 +3412,14 @@ files = [ [[package]] name = "types-jsonschema" -version = "4.25.1.20250822" +version = "4.25.1.20251009" description = "Typing stubs for jsonschema" optional = false python-versions = ">=3.9" groups = ["dev"] files = [ - {file = "types_jsonschema-4.25.1.20250822-py3-none-any.whl", hash = "sha256:f82c2d7fa1ce1c0b84ba1de4ed6798469768188884db04e66421913a4e181294"}, - {file = "types_jsonschema-4.25.1.20250822.tar.gz", hash = "sha256:aac69ed4b23f49aaceb7fcb834141d61b9e4e6a7f6008cb2f0d3b831dfa8464a"}, + {file = "types_jsonschema-4.25.1.20251009-py3-none-any.whl", hash = "sha256:f30b329037b78e7a60146b1146feb0b6fb0b71628637584409bada83968dad3e"}, + {file = "types_jsonschema-4.25.1.20251009.tar.gz", hash = "sha256:75d0f5c5dd18dc23b664437a0c1a625743e8d2e665ceaf3aecb29841f3a5f97f"}, ] [package.dependencies] @@ -3732,4 +3741,4 @@ url-preview = ["lxml"] [metadata] lock-version = "2.1" python-versions = ">=3.10.0,<4.0.0" -content-hash = "17d5a54a056493688beb3265a3dfedf2214ebea8457b3e0ff626b7e640eddfba" +content-hash = "8063b3f9a676e166ea92f0b88cf48267ba66b332c677f63c8b73ec552fe53132" diff --git a/pyproject.toml b/pyproject.toml index f796baaddd..98b900349f 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [project] name = "matrix-synapse" -version = "1.143.0" +version = "1.144.0" description = "Homeserver for the Matrix 
decentralised comms protocol" readme = "README.rst" authors = [ @@ -287,6 +287,8 @@ extend-safe-fixes = [ "UP007", # pyupgrade rules compatible with Python >= 3.10 "UP045", + # Allow ruff to automatically fix trailing spaces within a multi-line string/comment. + "W293" ] [tool.ruff.lint.isort] @@ -395,6 +397,9 @@ towncrier = ">=18.6.0rc1" # Used for checking the Poetry lockfile tomli = ">=1.2.3" +# Used for checking the schema delta files +sqlglot = ">=28.0.0" + [build-system] # The upper bounds here are defensive, intended to prevent situations like @@ -420,7 +425,8 @@ build-backend = "poetry.core.masonry.api" # We skip: # - free-threaded cpython builds: these are not currently supported. # - i686: We don't support 32-bit platforms. -skip = "cp3??t-* *i686*" +# - *macosx*: we don't support building wheels for MacOS. +skip = "cp3??t-* *i686* *macosx*" # Enable non-default builds. See the list of available options: # https://cibuildwheel.pypa.io/en/stable/options#enable # @@ -445,7 +451,3 @@ test-command = "python -c 'from synapse.synapse_rust import sum_as_string; print [tool.cibuildwheel.linux] # Wrap the repair command to correctly rename the built cpython wheels as ABI3. repair-wheel-command = "./.ci/scripts/auditwheel_wrapper.py -w {dest_dir} {wheel}" - -[tool.cibuildwheel.macos] -# Wrap the repair command to correctly rename the built cpython wheels as ABI3. -repair-wheel-command = "./.ci/scripts/auditwheel_wrapper.py --require-archs {delocate_archs} -w {dest_dir} {wheel}" diff --git a/rust/src/duration.rs b/rust/src/duration.rs new file mode 100644 index 0000000000..a3dbe919b2 --- /dev/null +++ b/rust/src/duration.rs @@ -0,0 +1,56 @@ +/* + * This file is licensed under the Affero General Public License (AGPL) version 3. 
+ * + * Copyright (C) 2025 Element Creations, Ltd + * + * This program is free software: you can redistribute it and/or modify + * it under the terms of the GNU Affero General Public License as + * published by the Free Software Foundation, either version 3 of the + * License, or (at your option) any later version. + * + * See the GNU Affero General Public License for more details: + * . + */ + +use once_cell::sync::OnceCell; +use pyo3::{ + types::{IntoPyDict, PyAnyMethods}, + Bound, BoundObject, IntoPyObject, Py, PyAny, PyErr, PyResult, Python, +}; + +/// A reference to the `synapse.util.duration` module. +static DURATION: OnceCell> = OnceCell::new(); + +/// Access to the `synapse.util.duration` module. +fn duration_module(py: Python<'_>) -> PyResult<&Bound<'_, PyAny>> { + Ok(DURATION + .get_or_try_init(|| py.import("synapse.util.duration").map(Into::into))? + .bind(py)) +} + +/// Mirrors the `synapse.util.duration.Duration` Python class. +pub struct SynapseDuration { + microseconds: u64, +} + +impl SynapseDuration { + /// For now we only need to create durations from milliseconds. 
+ pub fn from_milliseconds(milliseconds: u64) -> Self { + Self { + microseconds: milliseconds * 1_000, + } + } +} + +impl<'py> IntoPyObject<'py> for &SynapseDuration { + type Target = PyAny; + type Output = Bound<'py, Self::Target>; + type Error = PyErr; + + fn into_pyobject(self, py: Python<'py>) -> Result { + let duration_module = duration_module(py)?; + let kwargs = [("microseconds", self.microseconds)].into_py_dict(py)?; + let duration_instance = duration_module.call_method("Duration", (), Some(&kwargs))?; + Ok(duration_instance.into_bound()) + } +} diff --git a/rust/src/lib.rs b/rust/src/lib.rs index 6522148fa1..fe880af2ea 100644 --- a/rust/src/lib.rs +++ b/rust/src/lib.rs @@ -5,6 +5,7 @@ use pyo3::prelude::*; use pyo3_log::ResetHandle; pub mod acl; +pub mod duration; pub mod errors; pub mod events; pub mod http; diff --git a/rust/src/rendezvous/mod.rs b/rust/src/rendezvous/mod.rs index 848b5035bb..9a6da9fcc3 100644 --- a/rust/src/rendezvous/mod.rs +++ b/rust/src/rendezvous/mod.rs @@ -35,6 +35,7 @@ use ulid::Ulid; use self::session::Session; use crate::{ + duration::SynapseDuration, errors::{NotFoundError, SynapseError}, http::{http_request_from_twisted, http_response_to_twisted, HeaderMapPyExt}, UnwrapInfallible, @@ -132,6 +133,8 @@ impl RendezvousHandler { .unwrap_infallible() .unbind(); + let eviction_duration = SynapseDuration::from_milliseconds(eviction_interval); + // Construct a Python object so that we can get a reference to the // evict method and schedule it to run. 
let self_ = Py::new( @@ -149,7 +152,7 @@ impl RendezvousHandler { let evict = self_.getattr(py, "_evict")?; homeserver.call_method0("get_clock")?.call_method( "looping_call", - (evict, eviction_interval), + (evict, &eviction_duration), None, )?; diff --git a/schema/synapse-config.schema.yaml b/schema/synapse-config.schema.yaml index 24fcd62b79..4e391e0326 100644 --- a/schema/synapse-config.schema.yaml +++ b/schema/synapse-config.schema.yaml @@ -1,5 +1,5 @@ $schema: https://famedly.github.io/synapse/latest/schema/v1/meta.schema.json -$id: https://famedly.github.io/synapse/schema/synapse/v1.143/synapse-config.schema.json +$id: https://famedly.github.io/synapse/schema/synapse/v1.144/synapse-config.schema.json type: object properties: famedly_maximum_refresh_token_lifetime: diff --git a/scripts-dev/check_schema_delta.py b/scripts-dev/check_schema_delta.py index dd96c904bb..d344083148 100755 --- a/scripts-dev/check_schema_delta.py +++ b/scripts-dev/check_schema_delta.py @@ -9,15 +9,11 @@ import click import git +import sqlglot +import sqlglot.expressions SCHEMA_FILE_REGEX = re.compile(r"^synapse/storage/schema/(.*)/delta/(.*)/(.*)$") -INDEX_CREATION_REGEX = re.compile( - r"CREATE .*INDEX .*ON ([a-z_0-9]+)", flags=re.IGNORECASE -) -INDEX_DELETION_REGEX = re.compile(r"DROP .*INDEX ([a-z_0-9]+)", flags=re.IGNORECASE) -TABLE_CREATION_REGEX = re.compile( - r"CREATE .*TABLE.* ([a-z_0-9]+)\s*\(", flags=re.IGNORECASE -) + # The base branch we want to check against. We use the main development branch # on the assumption that is what we are developing against. @@ -141,6 +137,9 @@ def main(force_colors: bool) -> None: color=force_colors, ) + # Mark this run as not successful, but continue so that we report *all* + # errors. + return_code = 1 else: click.secho( f"All deltas are in the correct folder: {current_schema_version}!", @@ -153,60 +152,90 @@ def main(force_colors: bool) -> None: # and delta files are also numbered in order. 
changed_delta_files.sort() - # Now check that we're not trying to create or drop indices. If we want to - # do that they should be in background updates. The exception is when we - # create indices on tables we've just created. - created_tables = set() - for delta_file in changed_delta_files: - with open(delta_file) as fd: - delta_lines = fd.readlines() - - for line in delta_lines: - # Strip SQL comments - line = line.split("--", maxsplit=1)[0] - - # Check and track any tables we create - match = TABLE_CREATION_REGEX.search(line) - if match: - table_name = match.group(1) - created_tables.add(table_name) - - # Check for dropping indices, these are always banned - match = INDEX_DELETION_REGEX.search(line) - if match: - clause = match.group() - - click.secho( - f"Found delta with index deletion: '{clause}' in {delta_file}", - fg="red", - bold=True, - color=force_colors, - ) - click.secho( - " ↪ These should be in background updates.", - ) - return_code = 1 - - # Check for index creation, which is only allowed for tables we've - # created. - match = INDEX_CREATION_REGEX.search(line) - if match: - clause = match.group() - table_name = match.group(1) - if table_name not in created_tables: - click.secho( - f"Found delta with index creation for existing table: '{clause}' in {delta_file}", - fg="red", - bold=True, - color=force_colors, - ) - click.secho( - " ↪ These should be in background updates (or the table should be created in the same delta).", - ) - return_code = 1 + success = check_schema_delta(changed_delta_files, force_colors) + if not success: + return_code = 1 click.get_current_context().exit(return_code) +def check_schema_delta(delta_files: list[str], force_colors: bool) -> bool: + """Check that the given schema delta files do not create or drop indices + inappropriately. + + Index creation is only allowed on tables created in the same set of deltas. + + Index deletion is never allowed and should be done in background updates. 
+ + Returns: + True if all checks succeeded, False if at least one failed. + """ + + # The tables created in this delta + created_tables = set[str]() + + # The indices created/dropped in this delta, each a tuple of (table_name, sql) + created_indices = list[tuple[str, str]]() + + # The indices dropped in this delta, just the sql + dropped_indices = list[str]() + + for delta_file in delta_files: + with open(delta_file) as fd: + delta_contents = fd.read() + + # Assume the SQL dialect from the file extension, defaulting to Postgres. + sql_lang = "postgres" + if delta_file.endswith(".sqlite"): + sql_lang = "sqlite" + + statements = sqlglot.parse(delta_contents, read=sql_lang) + + for statement in statements: + if isinstance(statement, sqlglot.expressions.Create): + if statement.kind == "TABLE": + assert isinstance(statement.this, sqlglot.expressions.Schema) + assert isinstance(statement.this.this, sqlglot.expressions.Table) + + table_name = statement.this.this.name + created_tables.add(table_name) + elif statement.kind == "INDEX": + assert isinstance(statement.this, sqlglot.expressions.Index) + + table_name = statement.this.args["table"].name + created_indices.append((table_name, statement.sql())) + elif isinstance(statement, sqlglot.expressions.Drop): + if statement.kind == "INDEX": + dropped_indices.append(statement.sql()) + + success = True + for table_name, clause in created_indices: + if table_name not in created_tables: + click.secho( + f"Found delta with index creation for existing table: '{clause}'", + fg="red", + bold=True, + color=force_colors, + ) + click.secho( + " ↪ These should be in background updates (or the table should be created in the same delta).", + ) + success = False + + for clause in dropped_indices: + click.secho( + f"Found delta with index deletion: '{clause}'", + fg="red", + bold=True, + color=force_colors, + ) + click.secho( + " ↪ These should be in background updates.", + ) + success = False + + return success + + if __name__ == 
"__main__": main() diff --git a/scripts-dev/complement.sh b/scripts-dev/complement.sh index c4d678b142..2447e0dc7b 100755 --- a/scripts-dev/complement.sh +++ b/scripts-dev/complement.sh @@ -72,153 +72,151 @@ For help on arguments to 'go test', run 'go help testflag'. EOF } -# parse our arguments -skip_docker_build="" -skip_complement_run="" -while [ $# -ge 1 ]; do +# We use a function to wrap the script logic so that we can use `return` to exit early +# if needed. This is particularly useful so that this script can be sourced by other +# scripts without exiting the calling subshell (composable). This allows us to share +# variables like `SYNAPSE_SUPPORTED_COMPLEMENT_TEST_PACKAGES` with other scripts. +# +# Returns an exit code of 0 on success, or 1 on failure. +main() { + # parse our arguments + skip_docker_build="" + skip_complement_run="" + while [ $# -ge 1 ]; do arg=$1 case "$arg" in - "-h") - usage - exit 1 - ;; - "-f"|"--fast") - skip_docker_build=1 - ;; - "--build-only") - skip_complement_run=1 - ;; - "-e"|"--editable") - use_editable_synapse=1 - ;; - "--rebuild-editable") - rebuild_editable_synapse=1 - ;; - *) - # unknown arg: presumably an argument to gotest. break the loop. - break + "-h") + usage + return 1 + ;; + "-f"|"--fast") + skip_docker_build=1 + ;; + "--build-only") + skip_complement_run=1 + ;; + "-e"|"--editable") + use_editable_synapse=1 + ;; + "--rebuild-editable") + rebuild_editable_synapse=1 + ;; + *) + # unknown arg: presumably an argument to gotest. break the loop. + break esac shift -done - -# enable buildkit for the docker builds -export DOCKER_BUILDKIT=1 - -# Determine whether to use the docker or podman container runtime. 
-if [ -n "$PODMAN" ]; then - export CONTAINER_RUNTIME=podman - export DOCKER_HOST=unix://$XDG_RUNTIME_DIR/podman/podman.sock - export BUILDAH_FORMAT=docker - export COMPLEMENT_HOSTNAME_RUNNING_COMPLEMENT=host.containers.internal -else - export CONTAINER_RUNTIME=docker -fi + done -# Change to the repository root -cd "$(dirname $0)/.." - -# Check for a user-specified Complement checkout -if [[ -z "$COMPLEMENT_DIR" ]]; then - COMPLEMENT_REF=${COMPLEMENT_REF:-main} - echo "COMPLEMENT_DIR not set. Fetching Complement checkout from ${COMPLEMENT_REF}..." - wget -Nq https://github.com/matrix-org/complement/archive/${COMPLEMENT_REF}.tar.gz - tar -xzf ${COMPLEMENT_REF}.tar.gz - COMPLEMENT_DIR=complement-${COMPLEMENT_REF} - echo "Checkout available at 'complement-${COMPLEMENT_REF}'" -fi + # enable buildkit for the docker builds + export DOCKER_BUILDKIT=1 + + # Determine whether to use the docker or podman container runtime. + if [ -n "$PODMAN" ]; then + export CONTAINER_RUNTIME=podman + export DOCKER_HOST=unix://$XDG_RUNTIME_DIR/podman/podman.sock + export BUILDAH_FORMAT=docker + export COMPLEMENT_HOSTNAME_RUNNING_COMPLEMENT=host.containers.internal + else + export CONTAINER_RUNTIME=docker + fi -if [ -n "$use_editable_synapse" ]; then + # Change to the repository root + cd "$(dirname $0)/.." + + # Check for a user-specified Complement checkout + if [[ -z "$COMPLEMENT_DIR" ]]; then + COMPLEMENT_REF=${COMPLEMENT_REF:-main} + echo "COMPLEMENT_DIR not set. Fetching Complement checkout from ${COMPLEMENT_REF}..." 
+ wget -Nq https://github.com/matrix-org/complement/archive/${COMPLEMENT_REF}.tar.gz + tar -xzf ${COMPLEMENT_REF}.tar.gz + COMPLEMENT_DIR=complement-${COMPLEMENT_REF} + echo "Checkout available at 'complement-${COMPLEMENT_REF}'" + fi + + if [ -n "$use_editable_synapse" ]; then if [[ -e synapse/synapse_rust.abi3.so ]]; then - # In an editable install, back up the host's compiled Rust module to prevent - # inconvenience; the container will overwrite the module with its own copy. - mv -n synapse/synapse_rust.abi3.so synapse/synapse_rust.abi3.so~host - # And restore it on exit: - synapse_pkg=`realpath synapse` - trap "mv -f '$synapse_pkg/synapse_rust.abi3.so~host' '$synapse_pkg/synapse_rust.abi3.so'" EXIT + # In an editable install, back up the host's compiled Rust module to prevent + # inconvenience; the container will overwrite the module with its own copy. + mv -n synapse/synapse_rust.abi3.so synapse/synapse_rust.abi3.so~host + # And restore it on exit: + synapse_pkg=`realpath synapse` + trap "mv -f '$synapse_pkg/synapse_rust.abi3.so~host' '$synapse_pkg/synapse_rust.abi3.so'" EXIT fi editable_mount="$(realpath .):/editable-src:z" if [ -n "$rebuild_editable_synapse" ]; then - unset skip_docker_build + unset skip_docker_build elif $CONTAINER_RUNTIME inspect complement-synapse-editable &>/dev/null; then - # complement-synapse-editable already exists: see if we can still use it: - # - The Rust module must still be importable; it will fail to import if the Rust source has changed. - # - The Poetry lock file must be the same (otherwise we assume dependencies have changed) - - # First set up the module in the right place for an editable installation. 
- $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so - - if ($CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'python' complement-synapse-editable -c 'import synapse.synapse_rust' \ - && $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'diff' complement-synapse-editable --brief /editable-src/poetry.lock /poetry.lock.bak); then - skip_docker_build=1 - else - echo "Editable Synapse image is stale. Will rebuild." - unset skip_docker_build - fi + # complement-synapse-editable already exists: see if we can still use it: + # - The Rust module must still be importable; it will fail to import if the Rust source has changed. + # - The Poetry lock file must be the same (otherwise we assume dependencies have changed) + + # First set up the module in the right place for an editable installation. + $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so + + if ($CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'python' complement-synapse-editable -c 'import synapse.synapse_rust' \ + && $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'diff' complement-synapse-editable --brief /editable-src/poetry.lock /poetry.lock.bak); then + skip_docker_build=1 + else + echo "Editable Synapse image is stale. Will rebuild." + unset skip_docker_build + fi fi -fi + fi -if [ -z "$skip_docker_build" ]; then + if [ -z "$skip_docker_build" ]; then if [ -n "$use_editable_synapse" ]; then - # Build a special image designed for use in development with editable - # installs. - $CONTAINER_RUNTIME build -t synapse-editable \ - -f "docker/editable.Dockerfile" . + # Build a special image designed for use in development with editable + # installs. + $CONTAINER_RUNTIME build -t synapse-editable \ + -f "docker/editable.Dockerfile" . 
- $CONTAINER_RUNTIME build -t synapse-workers-editable \ - --build-arg FROM=synapse-editable \ - -f "docker/Dockerfile-workers" . + $CONTAINER_RUNTIME build -t synapse-workers-editable \ + --build-arg FROM=synapse-editable \ + -f "docker/Dockerfile-workers" . - $CONTAINER_RUNTIME build -t complement-synapse-editable \ - --build-arg FROM=synapse-workers-editable \ - -f "docker/complement/Dockerfile" "docker/complement" + $CONTAINER_RUNTIME build -t complement-synapse-editable \ + --build-arg FROM=synapse-workers-editable \ + -f "docker/complement/Dockerfile" "docker/complement" - # Prepare the Rust module - $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so + # Prepare the Rust module + $CONTAINER_RUNTIME run --rm -v $editable_mount --entrypoint 'cp' complement-synapse-editable -- /synapse_rust.abi3.so.bak /editable-src/synapse/synapse_rust.abi3.so else - # Build the base Synapse image from the local checkout - echo_if_github "::group::Build Docker image: matrixdotorg/synapse" - $CONTAINER_RUNTIME build -t matrixdotorg/synapse \ - --build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION \ - --build-arg TEST_ONLY_IGNORE_POETRY_LOCKFILE \ - -f "docker/Dockerfile" . - echo_if_github "::endgroup::" - - # Build the workers docker image (from the base Synapse image we just built). - echo_if_github "::group::Build Docker image: matrixdotorg/synapse-workers" - $CONTAINER_RUNTIME build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" . - echo_if_github "::endgroup::" - - # Build the unified Complement image (from the worker Synapse image we just built). 
- echo_if_github "::group::Build Docker image: complement/Dockerfile" - $CONTAINER_RUNTIME build -t complement-synapse \ - `# This is the tag we end up pushing to the registry (see` \ - `# .github/workflows/push_complement_image.yml) so let's just label it now` \ - `# so people can reference it by the same name locally.` \ - -t ghcr.io/element-hq/synapse/complement-synapse \ - -f "docker/complement/Dockerfile" "docker/complement" - echo_if_github "::endgroup::" + # Build the base Synapse image from the local checkout + echo_if_github "::group::Build Docker image: matrixdotorg/synapse" + $CONTAINER_RUNTIME build -t matrixdotorg/synapse \ + --build-arg TEST_ONLY_SKIP_DEP_HASH_VERIFICATION \ + --build-arg TEST_ONLY_IGNORE_POETRY_LOCKFILE \ + -f "docker/Dockerfile" . + echo_if_github "::endgroup::" + + # Build the workers docker image (from the base Synapse image we just built). + echo_if_github "::group::Build Docker image: matrixdotorg/synapse-workers" + $CONTAINER_RUNTIME build -t matrixdotorg/synapse-workers -f "docker/Dockerfile-workers" . + echo_if_github "::endgroup::" + + # Build the unified Complement image (from the worker Synapse image we just built). + echo_if_github "::group::Build Docker image: complement/Dockerfile" + $CONTAINER_RUNTIME build -t complement-synapse \ + `# This is the tag we end up pushing to the registry (see` \ + `# .github/workflows/push_complement_image.yml) so let's just label it now` \ + `# so people can reference it by the same name locally.` \ + -t ghcr.io/element-hq/synapse/complement-synapse \ + -f "docker/complement/Dockerfile" "docker/complement" + echo_if_github "::endgroup::" fi -fi - -if [ -n "$skip_complement_run" ]; then - echo "Skipping Complement run as requested." 
- exit -fi - -export COMPLEMENT_BASE_IMAGE=complement-synapse -if [ -n "$use_editable_synapse" ]; then - export COMPLEMENT_BASE_IMAGE=complement-synapse-editable - export COMPLEMENT_HOST_MOUNTS="$editable_mount" -fi - -extra_test_args=() + + echo "Docker images built." + else + echo "Skipping Docker image build as requested." + fi -test_packages=( + test_packages=( ./tests/csapi ./tests ./tests/msc3874 @@ -231,71 +229,104 @@ test_packages=( ./tests/msc4140 ./tests/msc4155 ./tests/msc4306 -) + ) + + # Export the list of test packages as a space-separated environment variable, so other + # scripts can use it. + export SYNAPSE_SUPPORTED_COMPLEMENT_TEST_PACKAGES="${test_packages[@]}" -# Enable dirty runs, so tests will reuse the same container where possible. -# This significantly speeds up tests, but increases the possibility of test pollution. -export COMPLEMENT_ENABLE_DIRTY_RUNS=1 + export COMPLEMENT_BASE_IMAGE=complement-synapse + if [ -n "$use_editable_synapse" ]; then + export COMPLEMENT_BASE_IMAGE=complement-synapse-editable + export COMPLEMENT_HOST_MOUNTS="$editable_mount" + fi -# All environment variables starting with PASS_ will be shared. -# (The prefix is stripped off before reaching the container.) -export COMPLEMENT_SHARE_ENV_PREFIX=PASS_ + # Enable dirty runs, so tests will reuse the same container where possible. + # This significantly speeds up tests, but increases the possibility of test pollution. + export COMPLEMENT_ENABLE_DIRTY_RUNS=1 -# It takes longer than 10m to run the whole suite. -extra_test_args+=("-timeout=60m") + # All environment variables starting with PASS_ will be shared. + # (The prefix is stripped off before reaching the container.) + export COMPLEMENT_SHARE_ENV_PREFIX=PASS_ -if [[ -n "$WORKERS" ]]; then - # Use workers. - export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS=true + # * -count=1: Only run tests once, and disable caching for tests. + # * -v: Output test logs, even if those tests pass. 
+ # * -tags=synapse_blacklist: Enable the `synapse_blacklist` build tag, which is + # necessary for `runtime.Synapse` checks/skips to work in the tests + test_args=( + -v + -tags="synapse_blacklist" + -count=1 + ) - # Pass through the workers defined. If none, it will be an empty string - export PASS_SYNAPSE_WORKER_TYPES="$WORKER_TYPES" + # It takes longer than 10m to run the whole suite. + test_args+=("-timeout=60m") - # Workers can only use Postgres as a database. - export PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres + if [[ -n "$WORKERS" ]]; then + # Use workers. + export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS=true - # And provide some more configuration to complement. + # Pass through the workers defined. If none, it will be an empty string + export PASS_SYNAPSE_WORKER_TYPES="$WORKER_TYPES" - # It can take quite a while to spin up a worker-mode Synapse for the first - # time (the main problem is that we start 14 python processes for each test, - # and complement likes to do two of them in parallel). - export COMPLEMENT_SPAWN_HS_TIMEOUT_SECS=120 -else - export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS= - if [[ -n "$POSTGRES" ]]; then + # Workers can only use Postgres as a database. export PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres + + # And provide some more configuration to complement. + + # It can take quite a while to spin up a worker-mode Synapse for the first + # time (the main problem is that we start 14 python processes for each test, + # and complement likes to do two of them in parallel). 
+ export COMPLEMENT_SPAWN_HS_TIMEOUT_SECS=120 else - export PASS_SYNAPSE_COMPLEMENT_DATABASE=sqlite + export PASS_SYNAPSE_COMPLEMENT_USE_WORKERS= + if [[ -n "$POSTGRES" ]]; then + export PASS_SYNAPSE_COMPLEMENT_DATABASE=postgres + else + export PASS_SYNAPSE_COMPLEMENT_DATABASE=sqlite + fi fi -fi -if [[ -n "$ASYNCIO_REACTOR" ]]; then - # Enable the Twisted asyncio reactor - export PASS_SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR=true -fi + if [[ -n "$ASYNCIO_REACTOR" ]]; then + # Enable the Twisted asyncio reactor + export PASS_SYNAPSE_COMPLEMENT_USE_ASYNCIO_REACTOR=true + fi -if [[ -n "$UNIX_SOCKETS" ]]; then - # Enable full on Unix socket mode for Synapse, Redis and Postgresql - export PASS_SYNAPSE_USE_UNIX_SOCKET=1 -fi + if [[ -n "$UNIX_SOCKETS" ]]; then + # Enable full on Unix socket mode for Synapse, Redis and Postgresql + export PASS_SYNAPSE_USE_UNIX_SOCKET=1 + fi -if [[ -n "$SYNAPSE_TEST_LOG_LEVEL" ]]; then - # Set the log level to what is desired - export PASS_SYNAPSE_LOG_LEVEL="$SYNAPSE_TEST_LOG_LEVEL" + if [[ -n "$SYNAPSE_TEST_LOG_LEVEL" ]]; then + # Set the log level to what is desired + export PASS_SYNAPSE_LOG_LEVEL="$SYNAPSE_TEST_LOG_LEVEL" - # Allow logging sensitive things (currently SQL queries & parameters). - # (This won't have any effect if we're not logging at DEBUG level overall.) - # Since this is just a test suite, this is fine and won't reveal anyone's - # personal information - export PASS_SYNAPSE_LOG_SENSITIVE=1 -fi + # Allow logging sensitive things (currently SQL queries & parameters). + # (This won't have any effect if we're not logging at DEBUG level overall.) + # Since this is just a test suite, this is fine and won't reveal anyone's + # personal information + export PASS_SYNAPSE_LOG_SENSITIVE=1 + fi -# Log a few more useful things for a developer attempting to debug something -# particularly tricky. -export PASS_SYNAPSE_LOG_TESTING=1 + # Log a few more useful things for a developer attempting to debug something + # particularly tricky. 
+ export PASS_SYNAPSE_LOG_TESTING=1 -# Run the tests! -echo "Images built; running complement with ${extra_test_args[@]} $@ ${test_packages[@]}" -cd "$COMPLEMENT_DIR" + if [ -n "$skip_complement_run" ]; then + echo "Skipping Complement run as requested." + return 0 + fi + + # Run the tests! + echo "Running Complement with ${test_args[@]} $@ ${test_packages[@]}" + cd "$COMPLEMENT_DIR" + go test "${test_args[@]}" "$@" "${test_packages[@]}" +} -go test -v -tags "synapse_blacklist" -count=1 "${extra_test_args[@]}" "$@" "${test_packages[@]}" +main "$@" +# For any non-zero exit code (indicating some sort of error happened), we want to exit +# with that code. +exit_code=$? +if [ $exit_code -ne 0 ]; then + exit $exit_code +fi diff --git a/scripts-dev/release.py b/scripts-dev/release.py index ba95a19382..17eadbf6c3 100755 --- a/scripts-dev/release.py +++ b/scripts-dev/release.py @@ -291,6 +291,12 @@ def _prepare() -> None: synapse_repo.git.add("-u") subprocess.run("git diff --cached", shell=True) + print( + "Consider any upcoming platform deprecations that should be mentioned in the changelog. (e.g. upcoming Python, PostgreSQL or SQLite deprecations)" + ) + print( + "Platform deprecations should be mentioned at least 1 release prior to being unsupported." + ) if click.confirm("Edit changelog?", default=False): click.edit(filename="CHANGES.md") diff --git a/synapse/api/constants.py b/synapse/api/constants.py index 7a8f546d6b..d41e44b154 100644 --- a/synapse/api/constants.py +++ b/synapse/api/constants.py @@ -307,6 +307,10 @@ class AccountDataTypes: MSC4155_INVITE_PERMISSION_CONFIG: Final = ( "org.matrix.msc4155.invite_permission_config" ) + # MSC4380: Invite blocking + MSC4380_INVITE_PERMISSION_CONFIG: Final = ( + "org.matrix.msc4380.invite_permission_config" + ) # Synapse-specific behaviour. See "Client-Server API Extensions" documentation # in Admin API for more information. 
SYNAPSE_ADMIN_CLIENT_CONFIG: Final = "io.element.synapse.admin_client_config" diff --git a/synapse/api/errors.py b/synapse/api/errors.py index c4339ebef8..37b909a1a7 100644 --- a/synapse/api/errors.py +++ b/synapse/api/errors.py @@ -137,7 +137,7 @@ class Codes(str, Enum): PROFILE_TOO_LARGE = "M_PROFILE_TOO_LARGE" KEY_TOO_LARGE = "M_KEY_TOO_LARGE" - # Part of MSC4155 + # Part of MSC4155/MSC4380 INVITE_BLOCKED = "ORG.MATRIX.MSC4155.M_INVITE_BLOCKED" # Part of MSC4190 diff --git a/synapse/api/ratelimiting.py b/synapse/api/ratelimiting.py index df884d47d7..d6cc3d26b5 100644 --- a/synapse/api/ratelimiting.py +++ b/synapse/api/ratelimiting.py @@ -27,6 +27,7 @@ from synapse.storage.databases.main import DataStore from synapse.types import Requester from synapse.util.clock import Clock +from synapse.util.duration import Duration from synapse.util.wheel_timer import WheelTimer if TYPE_CHECKING: @@ -100,7 +101,7 @@ def __init__( # and doesn't affect correctness. self._timer: WheelTimer[Hashable] = WheelTimer() - self.clock.looping_call(self._prune_message_counts, 15 * 1000) + self.clock.looping_call(self._prune_message_counts, Duration(seconds=15)) def _get_key(self, requester: Requester | None, key: Hashable | None) -> Hashable: """Use the requester's MXID as a fallback key if no key is provided.""" diff --git a/synapse/app/phone_stats_home.py b/synapse/app/phone_stats_home.py index 13a0e3db7c..7b4bf25c28 100644 --- a/synapse/app/phone_stats_home.py +++ b/synapse/app/phone_stats_home.py @@ -30,24 +30,20 @@ from synapse.metrics import SERVER_NAME_LABEL from synapse.types import JsonDict -from synapse.util.constants import ( - MILLISECONDS_PER_SECOND, - ONE_HOUR_SECONDS, - ONE_MINUTE_SECONDS, -) +from synapse.util.duration import Duration if TYPE_CHECKING: from synapse.server import HomeServer logger = logging.getLogger("synapse.app.homeserver") -INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME_SECONDS = 5 * ONE_MINUTE_SECONDS +INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME = Duration(minutes=5) 
""" We wait 5 minutes to send the first set of stats as the server can be quite busy the first few minutes """ -PHONE_HOME_INTERVAL_SECONDS = 3 * ONE_HOUR_SECONDS +PHONE_HOME_INTERVAL = Duration(hours=3) """ Phone home stats are sent every 3 hours """ @@ -222,13 +218,13 @@ def performance_stats_init() -> None: # table will decrease clock.looping_call( hs.get_datastores().main.generate_user_daily_visits, - 5 * ONE_MINUTE_SECONDS * MILLISECONDS_PER_SECOND, + Duration(minutes=5), ) # monthly active user limiting functionality clock.looping_call( hs.get_datastores().main.reap_monthly_active_users, - ONE_HOUR_SECONDS * MILLISECONDS_PER_SECOND, + Duration(hours=1), ) hs.get_datastores().main.reap_monthly_active_users() @@ -267,14 +263,14 @@ async def _generate_monthly_active_users() -> None: if hs.config.server.limit_usage_by_mau or hs.config.server.mau_stats_only: generate_monthly_active_users() - clock.looping_call(generate_monthly_active_users, 5 * 60 * 1000) + clock.looping_call(generate_monthly_active_users, Duration(minutes=5)) # End of monthly active user settings if hs.config.metrics.report_stats: logger.info("Scheduling stats reporting for 3 hour intervals") clock.looping_call( phone_stats_home, - PHONE_HOME_INTERVAL_SECONDS * MILLISECONDS_PER_SECOND, + PHONE_HOME_INTERVAL, hs, stats, ) @@ -282,14 +278,14 @@ async def _generate_monthly_active_users() -> None: # We need to defer this init for the cases that we daemonize # otherwise the process ID we get is that of the non-daemon process clock.call_later( - 0, + Duration(seconds=0), performance_stats_init, ) # We wait 5 minutes to send the first set of stats as the server can # be quite busy the first few minutes clock.call_later( - INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME_SECONDS, + INITIAL_DELAY_BEFORE_FIRST_PHONE_HOME, phone_stats_home, hs, stats, diff --git a/synapse/appservice/scheduler.py b/synapse/appservice/scheduler.py index 250f84d644..c3a83d140c 100644 --- a/synapse/appservice/scheduler.py +++ 
b/synapse/appservice/scheduler.py @@ -77,6 +77,7 @@ from synapse.storage.databases.main import DataStore from synapse.types import DeviceListUpdates, JsonMapping from synapse.util.clock import Clock, DelayedCallWrapper +from synapse.util.duration import Duration if TYPE_CHECKING: from synapse.server import HomeServer @@ -504,8 +505,8 @@ def __init__( self.scheduled_recovery: DelayedCallWrapper | None = None def recover(self) -> None: - delay = 2**self.backoff_counter - logger.info("Scheduling retries on %s in %fs", self.service.id, delay) + delay = Duration(seconds=2**self.backoff_counter) + logger.info("Scheduling retries on %s in %fs", self.service.id, delay.as_secs()) self.scheduled_recovery = self.clock.call_later( delay, self.hs.run_as_background_process, diff --git a/synapse/config/_base.py b/synapse/config/_base.py index 040b9df54d..32ab1064d7 100644 --- a/synapse/config/_base.py +++ b/synapse/config/_base.py @@ -672,7 +672,8 @@ def load_or_generate_config( action="append", metavar="CONFIG_FILE", help="Specify config file. Can be given multiple times and" - " may specify directories containing *.yaml files.", + " may specify directories containing *.yaml files." + " Top-level keys in later files overwrite ones in earlier files.", ) parser.add_argument( "--no-secrets-in-config", diff --git a/synapse/config/experimental.py b/synapse/config/experimental.py index 6e6cada4d6..596e5c6918 100644 --- a/synapse/config/experimental.py +++ b/synapse/config/experimental.py @@ -441,6 +441,9 @@ def read_config( # previously calculated push actions. self.msc2654_enabled: bool = experimental.get("msc2654_enabled", False) + # MSC2666: Query mutual rooms between two users. 
+ self.msc2666_enabled: bool = experimental.get("msc2666_enabled", False) + # MSC2815 (allow room moderators to view redacted event content) self.msc2815_enabled: bool = experimental.get("msc2815_enabled", False) @@ -596,3 +599,6 @@ def read_config( # MSC4306: Thread Subscriptions # (and MSC4308: Thread Subscriptions extension to Sliding Sync) self.msc4306_enabled: bool = experimental.get("msc4306_enabled", False) + + # MSC4380: Invite blocking + self.msc4380_enabled: bool = experimental.get("msc4380_enabled", False) diff --git a/synapse/events/__init__.py b/synapse/events/__init__.py index 4080170dca..1bac19ba3b 100644 --- a/synapse/events/__init__.py +++ b/synapse/events/__init__.py @@ -554,7 +554,7 @@ def auth_event_ids(self) -> StrCollection: assert create_event_id not in self._dict["auth_events"] if self.type == EventTypes.Create and self.get_state_key() == "": return self._dict["auth_events"] # should be [] - return self._dict["auth_events"] + [create_event_id] + return [*self._dict["auth_events"], create_event_id] def _event_type_from_format_version( diff --git a/synapse/federation/federation_client.py b/synapse/federation/federation_client.py index 4110a90ed6..ba738ad65e 100644 --- a/synapse/federation/federation_client.py +++ b/synapse/federation/federation_client.py @@ -75,6 +75,7 @@ from synapse.types.handlers.policy_server import RECOMMENDATION_OK, RECOMMENDATION_SPAM from synapse.util.async_helpers import concurrently_execute from synapse.util.caches.expiringcache import ExpiringCache +from synapse.util.duration import Duration from synapse.util.retryutils import NotRetryingDestination if TYPE_CHECKING: @@ -132,7 +133,7 @@ def __init__(self, hs: "HomeServer"): super().__init__(hs) self.pdu_destination_tried: dict[str, dict[str, int]] = {} - self._clock.looping_call(self._clear_tried_cache, 60 * 1000) + self._clock.looping_call(self._clear_tried_cache, Duration(minutes=1)) self.state = hs.get_state_handler() self.transport_layer = 
hs.get_federation_transport_client() diff --git a/synapse/federation/federation_server.py b/synapse/federation/federation_server.py index 35bfbea287..8bbc5b5d11 100644 --- a/synapse/federation/federation_server.py +++ b/synapse/federation/federation_server.py @@ -89,6 +89,7 @@ from synapse.util import unwrapFirstError from synapse.util.async_helpers import Linearizer, concurrently_execute, gather_results from synapse.util.caches.response_cache import ResponseCache +from synapse.util.duration import Duration from synapse.util.stringutils import parse_server_name if TYPE_CHECKING: @@ -226,7 +227,7 @@ async def _handle_old_staged_events(self) -> None: ) # We pause a bit so that we don't start handling all rooms at once. - await self._clock.sleep(random.uniform(0, 0.1)) + await self._clock.sleep(Duration(seconds=random.uniform(0, 0.1))) async def on_backfill_request( self, origin: str, room_id: str, versions: list[str], limit: int @@ -301,7 +302,9 @@ async def on_incoming_transaction( # Start a periodic check for old staged events. This is to handle # the case where locks time out, e.g. if another process gets killed # without dropping its locks. - self._clock.looping_call(self._handle_old_staged_events, 60 * 1000) + self._clock.looping_call( + self._handle_old_staged_events, Duration(minutes=1) + ) # keep this as early as possible to make the calculated origin ts as # accurate as possible. 
diff --git a/synapse/federation/send_queue.py b/synapse/federation/send_queue.py index cf70e10a58..4a6d155217 100644 --- a/synapse/federation/send_queue.py +++ b/synapse/federation/send_queue.py @@ -53,6 +53,7 @@ from synapse.metrics import SERVER_NAME_LABEL, LaterGauge from synapse.replication.tcp.streams.federation import FederationStream from synapse.types import JsonDict, ReadReceipt, RoomStreamToken, StrCollection +from synapse.util.duration import Duration from synapse.util.metrics import Measure from .units import Edu @@ -137,7 +138,7 @@ def register(queue_name: QueueNames, queue: Sized) -> None: assert isinstance(queue, Sized) register(queue_name, queue=queue) - self.clock.looping_call(self._clear_queue, 30 * 1000) + self.clock.looping_call(self._clear_queue, Duration(seconds=30)) def shutdown(self) -> None: """Stops this federation sender instance from sending further transactions.""" diff --git a/synapse/federation/sender/__init__.py b/synapse/federation/sender/__init__.py index 0bd97c25df..f7240c2f7f 100644 --- a/synapse/federation/sender/__init__.py +++ b/synapse/federation/sender/__init__.py @@ -174,6 +174,7 @@ get_domain_from_id, ) from synapse.util.clock import Clock +from synapse.util.duration import Duration from synapse.util.metrics import Measure from synapse.util.retryutils import filter_destinations_by_retry_limiter @@ -218,12 +219,12 @@ # Please note that rate limiting still applies, so while the loop is # executed every X seconds the destinations may not be woken up because # they are being rate limited following previous attempt failures. -WAKEUP_RETRY_PERIOD_SEC = 60 +WAKEUP_RETRY_PERIOD = Duration(minutes=1) -# Time (in s) to wait in between waking up each destination, i.e. one destination +# Time to wait in between waking up each destination, i.e. one destination # will be woken up every seconds until we have woken every destination # has outstanding catch-up. 
-WAKEUP_INTERVAL_BETWEEN_DESTINATIONS_SEC = 5 +WAKEUP_INTERVAL_BETWEEN_DESTINATIONS = Duration(seconds=5) class AbstractFederationSender(metaclass=abc.ABCMeta): @@ -379,7 +380,7 @@ async def _handle(self) -> None: queue.attempt_new_transaction() - await self.clock.sleep(current_sleep_seconds) + await self.clock.sleep(Duration(seconds=current_sleep_seconds)) if not self.queue: break @@ -468,7 +469,7 @@ def __init__(self, hs: "HomeServer"): # Regularly wake up destinations that have outstanding PDUs to be caught up self.clock.looping_call_now( self.hs.run_as_background_process, - WAKEUP_RETRY_PERIOD_SEC * 1000.0, + WAKEUP_RETRY_PERIOD, "wake_destinations_needing_catchup", self._wake_destinations_needing_catchup, ) @@ -1161,4 +1162,4 @@ async def _wake_destinations_needing_catchup(self) -> None: last_processed, ) self.wake_destination(destination) - await self.clock.sleep(WAKEUP_INTERVAL_BETWEEN_DESTINATIONS_SEC) + await self.clock.sleep(WAKEUP_INTERVAL_BETWEEN_DESTINATIONS) diff --git a/synapse/handlers/account_validity.py b/synapse/handlers/account_validity.py index bc50efa1a7..ba40d5763e 100644 --- a/synapse/handlers/account_validity.py +++ b/synapse/handlers/account_validity.py @@ -28,6 +28,7 @@ from synapse.types import UserID from synapse.util import stringutils from synapse.util.async_helpers import delay_cancellation +from synapse.util.duration import Duration if TYPE_CHECKING: from synapse.server import HomeServer @@ -73,7 +74,7 @@ def __init__(self, hs: "HomeServer"): # Check the renewal emails to send and send them every 30min. if hs.config.worker.run_background_tasks: - self.clock.looping_call(self._send_renewal_emails, 30 * 60 * 1000) + self.clock.looping_call(self._send_renewal_emails, Duration(minutes=30)) async def is_user_expired(self, user_id: str) -> bool: """Checks if a user has expired against third-party modules. 
diff --git a/synapse/handlers/auth.py b/synapse/handlers/auth.py index a96d33cb16..d439380197 100644 --- a/synapse/handlers/auth.py +++ b/synapse/handlers/auth.py @@ -74,6 +74,7 @@ from synapse.types import JsonDict, Requester, StrCollection, UserID from synapse.util import stringutils as stringutils from synapse.util.async_helpers import delay_cancellation, maybe_awaitable +from synapse.util.duration import Duration from synapse.util.msisdn import phone_number_to_msisdn from synapse.util.stringutils import base62_encode from synapse.util.threepids import canonicalise_email @@ -242,7 +243,7 @@ def __init__(self, hs: "HomeServer"): if hs.config.worker.run_background_tasks: self._clock.looping_call( run_as_background_process, - 5 * 60 * 1000, + Duration(minutes=5), "expire_old_sessions", self.server_name, self._expire_old_sessions, diff --git a/synapse/handlers/delayed_events.py b/synapse/handlers/delayed_events.py index de21e3abbb..cb0a4dd6b2 100644 --- a/synapse/handlers/delayed_events.py +++ b/synapse/handlers/delayed_events.py @@ -42,6 +42,7 @@ UserID, create_requester, ) +from synapse.util.duration import Duration from synapse.util.events import generate_fake_event_id from synapse.util.metrics import Measure from synapse.util.sentinel import Sentinel @@ -92,20 +93,22 @@ async def _schedule_db_events() -> None: # Kick off again (without blocking) to catch any missed notifications # that may have fired before the callback was added. self._clock.call_later( - 0, + Duration(seconds=0), self.notify_new_event, ) - # Delayed events that are already marked as processed on startup might not have been - # sent properly on the last run of the server, so unmark them to send them again. + # Now process any delayed events that are due to be sent. + # + # We set `reprocess_events` to True in case any events had been + # marked as processed, but had not yet actually been sent, + # before the homeserver stopped. 
+ # # Caveat: this will double-send delayed events that successfully persisted, but failed # to be removed from the DB table of delayed events. # TODO: To avoid double-sending, scan the timeline to find which of these events were # already sent. To do so, must store delay_ids in sent events to retrieve them later. - await self._store.unprocess_delayed_events() - events, next_send_ts = await self._store.process_timeout_delayed_events( - self._get_current_ts() + self._get_current_ts(), reprocess_events=True ) if next_send_ts: @@ -423,18 +426,23 @@ async def restart(self, request: SynapseRequest, delay_id: str) -> None: Raises: NotFoundError: if no matching delayed event could be found. """ - assert self._is_master await self._delayed_event_mgmt_ratelimiter.ratelimit( None, request.getClientAddress().host ) - await make_deferred_yieldable(self._initialized_from_db) + + # Note: We don't need to wait on `self._initialized_from_db` here as the + # events that deals with are already marked as processed. + # + # `restart_delayed_events` will skip over such events entirely. next_send_ts = await self._store.restart_delayed_event( delay_id, self._get_current_ts() ) - if self._next_send_ts_changed(next_send_ts): - self._schedule_next_at(next_send_ts) + # Only the main process handles sending delayed events. 
+ if self._is_master: + if self._next_send_ts_changed(next_send_ts): + self._schedule_next_at(next_send_ts) async def send(self, request: SynapseRequest, delay_id: str) -> None: """ @@ -501,17 +509,17 @@ def _schedule_next_at_or_none(self, next_send_ts: Timestamp | None) -> None: def _schedule_next_at(self, next_send_ts: Timestamp) -> None: delay = next_send_ts - self._get_current_ts() - delay_sec = delay / 1000 if delay > 0 else 0 + delay_duration = Duration(milliseconds=max(delay, 0)) if self._next_delayed_event_call is None: self._next_delayed_event_call = self._clock.call_later( - delay_sec, + delay_duration, self.hs.run_as_background_process, "_send_on_timeout", self._send_on_timeout, ) else: - self._next_delayed_event_call.reset(delay_sec) + self._next_delayed_event_call.reset(delay_duration.as_secs()) async def get_all_for_user(self, requester: Requester) -> list[JsonDict]: """Return all pending delayed events requested by the given user.""" diff --git a/synapse/handlers/device.py b/synapse/handlers/device.py index 3f1a5fe6d6..1b7de57ab9 100644 --- a/synapse/handlers/device.py +++ b/synapse/handlers/device.py @@ -71,6 +71,7 @@ from synapse.util.async_helpers import Linearizer from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.cancellation import cancellable +from synapse.util.duration import Duration from synapse.util.metrics import measure_func from synapse.util.retryutils import ( NotRetryingDestination, @@ -85,7 +86,7 @@ DELETE_DEVICE_MSGS_TASK_NAME = "delete_device_messages" MAX_DEVICE_DISPLAY_NAME_LEN = 100 -DELETE_STALE_DEVICES_INTERVAL_MS = 24 * 60 * 60 * 1000 +DELETE_STALE_DEVICES_INTERVAL = Duration(days=1) def _check_device_name_length(name: str | None) -> None: @@ -186,7 +187,7 @@ def __init__(self, hs: "HomeServer"): ): self.clock.looping_call( self.hs.run_as_background_process, - DELETE_STALE_DEVICES_INTERVAL_MS, + DELETE_STALE_DEVICES_INTERVAL, desc="delete_stale_devices", func=self._delete_stale_devices, ) @@ -915,7 
+916,7 @@ async def handle_new_device_update(self) -> None: ) DEVICE_MSGS_DELETE_BATCH_LIMIT = 1000 - DEVICE_MSGS_DELETE_SLEEP_MS = 100 + DEVICE_MSGS_DELETE_SLEEP = Duration(milliseconds=100) async def _delete_device_messages( self, @@ -941,9 +942,7 @@ async def _delete_device_messages( if from_stream_id is None: return TaskStatus.COMPLETE, None, None - await self.clock.sleep( - DeviceWriterHandler.DEVICE_MSGS_DELETE_SLEEP_MS / 1000.0 - ) + await self.clock.sleep(DeviceWriterHandler.DEVICE_MSGS_DELETE_SLEEP) class DeviceWriterHandler(DeviceHandler): @@ -1469,7 +1468,7 @@ def __init__(self, hs: "HomeServer", device_handler: DeviceWriterHandler): self._resync_retry_lock = Lock() self.clock.looping_call( self.hs.run_as_background_process, - 30 * 1000, + Duration(seconds=30), func=self._maybe_retry_device_resync, desc="_maybe_retry_device_resync", ) diff --git a/synapse/handlers/e2e_keys.py b/synapse/handlers/e2e_keys.py index 41d27d47da..64f705a3da 100644 --- a/synapse/handlers/e2e_keys.py +++ b/synapse/handlers/e2e_keys.py @@ -46,6 +46,7 @@ ) from synapse.util.async_helpers import Linearizer, concurrently_execute from synapse.util.cancellation import cancellable +from synapse.util.duration import Duration from synapse.util.json import json_decoder from synapse.util.retryutils import ( NotRetryingDestination, @@ -1634,7 +1635,7 @@ async def _delete_old_one_time_keys_task( # matrix.org has about 15M users in the e2e_one_time_keys_json table # (comprising 20M devices). We want this to take about a week, so we need # to do about one batch of 100 users every 4 seconds. 
- await self.clock.sleep(4) + await self.clock.sleep(Duration(seconds=4)) def _check_cross_signing_key( diff --git a/synapse/handlers/federation.py b/synapse/handlers/federation.py index 1fcdb0edc7..611f307d31 100644 --- a/synapse/handlers/federation.py +++ b/synapse/handlers/federation.py @@ -72,6 +72,7 @@ from synapse.types import JsonDict, StrCollection, get_domain_from_id from synapse.types.state import StateFilter from synapse.util.async_helpers import Linearizer +from synapse.util.duration import Duration from synapse.util.retryutils import NotRetryingDestination from synapse.visibility import filter_events_for_server @@ -1966,7 +1967,9 @@ async def _sync_partial_state_room( logger.warning( "%s; waiting for %d ms...", e, e.retry_after_ms ) - await self.clock.sleep(e.retry_after_ms / 1000) + await self.clock.sleep( + Duration(milliseconds=e.retry_after_ms) + ) # Success, no need to try the rest of the destinations. break diff --git a/synapse/handlers/federation_event.py b/synapse/handlers/federation_event.py index 01e98f60ad..e314180e12 100644 --- a/synapse/handlers/federation_event.py +++ b/synapse/handlers/federation_event.py @@ -91,6 +91,7 @@ ) from synapse.types.state import StateFilter from synapse.util.async_helpers import Linearizer, concurrently_execute +from synapse.util.duration import Duration from synapse.util.iterutils import batch_iter, partition, sorted_topologically from synapse.util.retryutils import NotRetryingDestination from synapse.util.stringutils import shortstr @@ -1802,7 +1803,7 @@ async def prep(event: EventBase) -> None: # the reactor. For large rooms let's yield to the reactor # occasionally to ensure we don't block other work. if (i + 1) % 1000 == 0: - await self._clock.sleep(0) + await self._clock.sleep(Duration(seconds=0)) # Also persist the new event in batches for similar reasons as above. 
for batch in batch_iter(events_and_contexts_to_persist, 1000): diff --git a/synapse/handlers/message.py b/synapse/handlers/message.py index e885f9c61a..8ae14b2a5c 100644 --- a/synapse/handlers/message.py +++ b/synapse/handlers/message.py @@ -83,6 +83,7 @@ from synapse.util import log_failure, unwrapFirstError from synapse.util.async_helpers import Linearizer, gather_results from synapse.util.caches.expiringcache import ExpiringCache +from synapse.util.duration import Duration from synapse.util.json import json_decoder, json_encoder from synapse.util.metrics import measure_func from synapse.visibility import get_effective_room_visibility_from_state @@ -433,14 +434,11 @@ def _schedule_expiry_for_event(self, event_id: str, expiry_ts: int) -> None: # Figure out how many seconds we need to wait before expiring the event. now_ms = self.clock.time_msec() - delay = (expiry_ts - now_ms) / 1000 + delay = Duration(milliseconds=max(expiry_ts - now_ms, 0)) - # callLater doesn't support negative delays, so trim the delay to 0 if we're - # in that case. - if delay < 0: - delay = 0 - - logger.info("Scheduling expiry for event %s in %.3fs", event_id, delay) + logger.info( + "Scheduling expiry for event %s in %.3fs", event_id, delay.as_secs() + ) self._scheduled_expiry = self.clock.call_later( delay, @@ -551,7 +549,7 @@ def __init__(self, hs: "HomeServer"): "send_dummy_events_to_fill_extremities", self._send_dummy_events_to_fill_extremities, ), - 5 * 60 * 1000, + Duration(minutes=5), ) self._message_handler = hs.get_message_handler() @@ -1012,7 +1010,7 @@ async def create_and_send_nonmember_event( if not ignore_shadow_ban and requester.shadow_banned: # We randomly sleep a bit just to annoy the requester. 
- await self.clock.sleep(random.randint(1, 10)) + await self.clock.sleep(Duration(seconds=random.randint(1, 10))) raise ShadowBanError() room_version = None @@ -1515,7 +1513,7 @@ async def handle_new_client_event( and requester.shadow_banned ): # We randomly sleep a bit just to annoy the requester. - await self.clock.sleep(random.randint(1, 10)) + await self.clock.sleep(Duration(seconds=random.randint(1, 10))) raise ShadowBanError() if event.is_state(): @@ -1957,6 +1955,12 @@ async def persist_and_notify_client_events( room_alias_str = event.content.get("alias", None) directory_handler = self.hs.get_directory_handler() if room_alias_str and room_alias_str != original_alias: + if not isinstance(room_alias_str, str): + raise SynapseError( + 400, + "The alias must be of type string.", + Codes.INVALID_PARAM, + ) await self._validate_canonical_alias( directory_handler, room_alias_str, event.room_id ) @@ -1980,6 +1984,12 @@ async def persist_and_notify_client_events( new_alt_aliases = set(alt_aliases) - set(original_alt_aliases) if new_alt_aliases: for alias_str in new_alt_aliases: + if not isinstance(alias_str, str): + raise SynapseError( + 400, + "Each alt_alias must be of type string.", + Codes.INVALID_PARAM, + ) await self._validate_canonical_alias( directory_handler, alias_str, event.room_id ) diff --git a/synapse/handlers/pagination.py b/synapse/handlers/pagination.py index 85648a4288..13aa2c97f4 100644 --- a/synapse/handlers/pagination.py +++ b/synapse/handlers/pagination.py @@ -21,27 +21,31 @@ import logging from typing import TYPE_CHECKING, cast +import attr + from twisted.python.failure import Failure from synapse.api.constants import Direction, EventTypes, Membership from synapse.api.errors import SynapseError from synapse.api.filtering import Filter -from synapse.events.utils import SerializeEventConfig +from synapse.events import EventBase +from synapse.handlers.relations import BundledAggregations from synapse.handlers.worker_lock import 
NEW_EVENT_DURING_PURGE_LOCK_NAME from synapse.logging.opentracing import trace from synapse.rest.admin._base import assert_user_is_admin from synapse.streams.config import PaginationConfig from synapse.types import ( - JsonDict, JsonMapping, Requester, ScheduledTask, StreamKeyType, + StreamToken, TaskStatus, ) from synapse.types.handlers import ShutdownRoomParams, ShutdownRoomResponse from synapse.types.state import StateFilter from synapse.util.async_helpers import ReadWriteLock +from synapse.util.duration import Duration from synapse.visibility import filter_events_for_client if TYPE_CHECKING: @@ -69,6 +73,58 @@ SHUTDOWN_AND_PURGE_ROOM_ACTION_NAME = "shutdown_and_purge_room" +@attr.s(slots=True, frozen=True, auto_attribs=True) +class GetMessagesResult: + """ + Everything needed to serialize a `/messages` response. + """ + + messages_chunk: list[EventBase] + """ + A list of room events. + + - When the request is `Direction.FORWARDS`, events will be in the range: + `start_token` < x <= `end_token`, (ascending topological_order) + - When the request is `Direction.BACKWARDS`, events will be in the range: + `start_token` >= x > `end_token`, (descending topological_order) + + Note that an empty chunk does not necessarily imply that no more events are + available. Clients should continue to paginate until no `end_token` property is returned. + """ + + bundled_aggregations: dict[str, BundledAggregations] + """ + A map of event ID to the bundled aggregations for the events in the chunk. + + If an event doesn't have any bundled aggregations, it may not appear in the map. + """ + + state: list[EventBase] | None + """ + A list of state events relevant to showing the chunk. For example, if + lazy_load_members is enabled in the filter then this may contain the membership + events for the senders of events in the chunk. + + Omitted from the response when `None`. + """ + + start_token: StreamToken + """ + Token corresponding to the start of chunk. 
This will be the same as the value given + in `from` query parameter of the `/messages` request. + """ + + end_token: StreamToken | None + """ + A token corresponding to the end of chunk. This token can be passed back to this + endpoint to request further events. + + If no further events are available (either because we have reached the start of the + timeline, or because the user does not have permission to see any more events), this + property is omitted from the response. + """ + + class PaginationHandler: """Handles pagination and purge history requests. @@ -116,7 +172,7 @@ def __init__(self, hs: "HomeServer"): self.clock.looping_call( self.hs.run_as_background_process, - job.interval, + Duration(milliseconds=job.interval), "purge_history_for_rooms_in_range", self.purge_history_for_rooms_in_range, job.shortest_max_lifetime, @@ -417,7 +473,7 @@ async def get_messages( as_client_event: bool = True, event_filter: Filter | None = None, use_admin_priviledge: bool = False, - ) -> JsonDict: + ) -> GetMessagesResult: """Get messages in a room. Args: @@ -620,10 +676,13 @@ async def get_messages( # In that case we do not return end, to tell the client # there is no need for further queries. 
if not events: - return { - "chunk": [], - "start": await from_token.to_string(self.store), - } + return GetMessagesResult( + messages_chunk=[], + bundled_aggregations={}, + state=None, + start_token=from_token, + end_token=None, + ) if event_filter: events = await event_filter.filter(events) @@ -639,11 +698,13 @@ async def get_messages( # if after the filter applied there are no more events # return immediately - but there might be more in next_token batch if not events: - return { - "chunk": [], - "start": await from_token.to_string(self.store), - "end": await next_token.to_string(self.store), - } + return GetMessagesResult( + messages_chunk=[], + bundled_aggregations={}, + state=None, + start_token=from_token, + end_token=next_token, + ) state = None if event_filter and event_filter.lazy_load_members and len(events) > 0: @@ -660,38 +721,20 @@ async def get_messages( if state_ids: state_dict = await self.store.get_events(list(state_ids.values())) - state = state_dict.values() + state = list(state_dict.values()) aggregations = await self._relations_handler.get_bundled_aggregations( events, user_id ) - time_now = self.clock.time_msec() - - serialize_options = SerializeEventConfig( - as_client_event=as_client_event, requester=requester + return GetMessagesResult( + messages_chunk=events, + bundled_aggregations=aggregations, + state=state, + start_token=from_token, + end_token=next_token, ) - chunk = { - "chunk": ( - await self._event_serializer.serialize_events( - events, - time_now, - config=serialize_options, - bundle_aggregations=aggregations, - ) - ), - "start": await from_token.to_string(self.store), - "end": await next_token.to_string(self.store), - } - - if state: - chunk["state"] = await self._event_serializer.serialize_events( - state, time_now, config=serialize_options - ) - - return chunk - async def _shutdown_and_purge_room( self, task: ScheduledTask, diff --git a/synapse/handlers/presence.py b/synapse/handlers/presence.py index ca5002cab3..4c3adca46e 
100644 --- a/synapse/handlers/presence.py +++ b/synapse/handlers/presence.py @@ -121,6 +121,7 @@ get_domain_from_id, ) from synapse.util.async_helpers import Linearizer +from synapse.util.duration import Duration from synapse.util.metrics import Measure from synapse.util.wheel_timer import WheelTimer @@ -203,7 +204,7 @@ # Delay before a worker tells the presence handler that a user has stopped # syncing. -UPDATE_SYNCING_USERS_MS = 10 * 1000 +UPDATE_SYNCING_USERS = Duration(seconds=10) assert LAST_ACTIVE_GRANULARITY < IDLE_TIMER @@ -528,7 +529,7 @@ def __init__(self, hs: "HomeServer"): self._bump_active_client = ReplicationBumpPresenceActiveTime.make_client(hs) self._set_state_client = ReplicationPresenceSetState.make_client(hs) - self.clock.looping_call(self.send_stop_syncing, UPDATE_SYNCING_USERS_MS) + self.clock.looping_call(self.send_stop_syncing, UPDATE_SYNCING_USERS) hs.register_async_shutdown_handler( phase="before", @@ -581,7 +582,7 @@ def send_stop_syncing(self) -> None: for (user_id, device_id), last_sync_ms in list( self._user_devices_going_offline.items() ): - if now - last_sync_ms > UPDATE_SYNCING_USERS_MS: + if now - last_sync_ms > UPDATE_SYNCING_USERS.as_millis(): self._user_devices_going_offline.pop((user_id, device_id), None) self.send_user_sync(user_id, device_id, False, last_sync_ms) @@ -861,20 +862,20 @@ def __init__(self, hs: "HomeServer"): # The initial delay is to allow disconnected clients a chance to # reconnect before we treat them as offline. self.clock.call_later( - 30, + Duration(seconds=30), self.clock.looping_call, self._handle_timeouts, - 5000, + Duration(seconds=5), ) # Presence information is persisted, whether or not it is being tracked # internally. 
if self._presence_enabled: self.clock.call_later( - 60, + Duration(minutes=1), self.clock.looping_call, self._persist_unpersisted_changes, - 60 * 1000, + Duration(minutes=1), ) presence_wheel_timer_size_gauge.register_hook( @@ -2430,7 +2431,7 @@ class PresenceFederationQueue: _KEEP_ITEMS_IN_QUEUE_FOR_MS = 5 * 60 * 1000 # How often to check if we can expire entries from the queue. - _CLEAR_ITEMS_EVERY_MS = 60 * 1000 + _CLEAR_ITEMS_EVERY_MS = Duration(minutes=1) def __init__(self, hs: "HomeServer", presence_handler: BasePresenceHandler): self._clock = hs.get_clock() diff --git a/synapse/handlers/profile.py b/synapse/handlers/profile.py index 61fcd0641c..6ecf04487e 100644 --- a/synapse/handlers/profile.py +++ b/synapse/handlers/profile.py @@ -34,6 +34,7 @@ from synapse.storage.databases.main.media_repository import LocalMedia, RemoteMedia from synapse.types import JsonDict, JsonValue, Requester, UserID, create_requester from synapse.util.caches.descriptors import cached +from synapse.util.duration import Duration from synapse.util.stringutils import parse_and_validate_mxc_uri if TYPE_CHECKING: @@ -587,7 +588,7 @@ async def _update_join_states( # Do not actually update the room state for shadow-banned users. if requester.shadow_banned: # We randomly sleep a bit just to annoy the requester. 
- await self.clock.sleep(random.randint(1, 10)) + await self.clock.sleep(Duration(seconds=random.randint(1, 10))) return room_ids = await self.store.get_rooms_for_user(target_user.to_string()) diff --git a/synapse/handlers/room.py b/synapse/handlers/room.py index 925ac39722..d6d35f68ed 100644 --- a/synapse/handlers/room.py +++ b/synapse/handlers/room.py @@ -92,6 +92,7 @@ from synapse.util import stringutils from synapse.util.async_helpers import concurrently_execute from synapse.util.caches.response_cache import ResponseCache +from synapse.util.duration import Duration from synapse.util.iterutils import batch_iter from synapse.util.stringutils import parse_and_validate_server_name from synapse.visibility import filter_events_for_client @@ -1192,7 +1193,7 @@ async def create_room( if (invite_list or invite_3pid_list) and requester.shadow_banned: # We randomly sleep a bit just to annoy the requester. - await self.clock.sleep(random.randint(1, 10)) + await self.clock.sleep(Duration(seconds=random.randint(1, 10))) # Allow the request to go through, but remove any associated invites. invite_3pid_list = [] diff --git a/synapse/handlers/room_member.py b/synapse/handlers/room_member.py index d5f72c1732..6f8481de9a 100644 --- a/synapse/handlers/room_member.py +++ b/synapse/handlers/room_member.py @@ -66,6 +66,7 @@ from synapse.types.state import StateFilter from synapse.util.async_helpers import Linearizer from synapse.util.distributor import user_left_room +from synapse.util.duration import Duration if TYPE_CHECKING: from synapse.server import HomeServer @@ -642,7 +643,7 @@ async def update_membership( if action == Membership.INVITE and requester.shadow_banned: # We randomly sleep a bit just to annoy the requester. 
- await self.clock.sleep(random.randint(1, 10)) + await self.clock.sleep(Duration(seconds=random.randint(1, 10))) raise ShadowBanError() key = (room_id,) @@ -1647,7 +1648,7 @@ async def do_3pid_invite( if requester.shadow_banned: # We randomly sleep a bit just to annoy the requester. - await self.clock.sleep(random.randint(1, 10)) + await self.clock.sleep(Duration(seconds=random.randint(1, 10))) raise ShadowBanError() # We need to rate limit *before* we send out any 3PID invites, so we @@ -2190,7 +2191,7 @@ def __init__(self, hs: "HomeServer"): # We kick this off to pick up outstanding work from before the last restart. self._clock.call_later( - 0, + Duration(seconds=0), self.notify_new_event, ) @@ -2232,7 +2233,7 @@ async def _unsafe_process(self) -> None: # # We wait for a short time so that we don't "tight" loop just # keeping the table up to date. - await self._clock.sleep(0.5) + await self._clock.sleep(Duration(milliseconds=500)) self.pos = self._store.get_room_max_stream_ordering() await self._store.update_room_forgetter_stream_pos(self.pos) diff --git a/synapse/handlers/sliding_sync/__init__.py b/synapse/handlers/sliding_sync/__init__.py index 6a5d5c7b3c..68135e9cd3 100644 --- a/synapse/handlers/sliding_sync/__init__.py +++ b/synapse/handlers/sliding_sync/__init__.py @@ -761,8 +761,6 @@ async def get_room_sync_data( != Membership.JOIN, filter_send_to_client=True, ) - # TODO: Filter out `EventTypes.CallInvite` in public rooms, - # see https://github.com/element-hq/synapse/issues/17359 # TODO: Handle timeline gaps (`get_timeline_gaps()`) diff --git a/synapse/handlers/sliding_sync/room_lists.py b/synapse/handlers/sliding_sync/room_lists.py index 3d11902236..8969d91583 100644 --- a/synapse/handlers/sliding_sync/room_lists.py +++ b/synapse/handlers/sliding_sync/room_lists.py @@ -34,10 +34,12 @@ EventTypes, Membership, ) +from synapse.api.errors import SlidingSyncUnknownPosition from synapse.api.room_versions import KNOWN_ROOM_VERSIONS from synapse.events import 
StrippedStateEvent from synapse.events.utils import parse_stripped_state_event from synapse.logging.opentracing import start_active_span, trace +from synapse.storage.databases.main.sliding_sync import UPDATE_INTERVAL_LAST_USED_TS from synapse.storage.databases.main.state import ( ROOM_UNKNOWN_SENTINEL, Sentinel as StateSentinel, @@ -68,6 +70,7 @@ ) from synapse.types.state import StateFilter from synapse.util import MutableOverlayMapping +from synapse.util.duration import Duration from synapse.util.sentinel import Sentinel if TYPE_CHECKING: @@ -77,6 +80,27 @@ logger = logging.getLogger(__name__) +# Minimum time since the last sync before we consider expiring +# the connection due to too many rooms to send. This stops us from getting into +# tight loops with clients that request lots of data at once. +# +# c.f. `NUM_ROOMS_THRESHOLD`. These values are somewhat arbitrarily picked. +MINIMUM_NOT_USED_AGE_EXPIRY = Duration(hours=1) + +# How many rooms with updates we allow before we consider the connection expired +# due to too many rooms to send. +# +# c.f. `MINIMUM_NOT_USED_AGE_EXPIRY`. These values are somewhat arbitrarily +# picked. +NUM_ROOMS_THRESHOLD = 100 + +# Sanity check that our minimum age is sensible compared to the update interval, +# i.e. if `MINIMUM_NOT_USED_AGE_EXPIRY` is too small then we might expire the +# connection even if it is actively being used (and we're just not updating the +# DB frequently enough). We arbitrarily double the update interval to give some +# wiggle room. +assert 2 * UPDATE_INTERVAL_LAST_USED_TS < MINIMUM_NOT_USED_AGE_EXPIRY + # Helper definition for the types that we might return. We do this to avoid # copying data between types (which can be expensive for many rooms).
RoomsForUserType = RoomsForUserStateReset | RoomsForUser | RoomsForUserSlidingSync @@ -176,6 +200,7 @@ def __init__(self, hs: "HomeServer"): self.storage_controllers = hs.get_storage_controllers() self.rooms_to_exclude_globally = hs.config.server.rooms_to_exclude_from_sync self.is_mine_id = hs.is_mine_id + self._clock = hs.get_clock() async def compute_interested_rooms( self, @@ -857,11 +882,41 @@ async def _filter_relevant_rooms_to_send( # We only need to check for new events since any state changes # will also come down as new events. - rooms_that_have_updates = ( - self.store.get_rooms_that_might_have_updates( + + rooms_that_have_updates = await ( + self.store.get_rooms_that_have_updates_since_sliding_sync_table( relevant_room_map.keys(), from_token.room_key ) ) + + # Check if we have lots of updates to send, if so then it's + # better for us to tell the client to do a full resync + # instead (to try and avoid long SSS response times when + # there is new data). + # + # Due to the construction of the SSS API, the client is in + # charge of setting the range of rooms to request updates + # for. Generally, it will start with a small range and then + # expand (and occasionally it may contract the range again + # if it's been offline for a while). If we know there are a + # lot of updates, it's better to reset the connection and + # wait for the client to start again (with a much smaller + # range) than to try and send down a large number of updates + # (which can take a long time). + # + # We only do this if the last sync was over + # `MINIMUM_NOT_USED_AGE_EXPIRY` to ensure we don't get + # into tight loops with clients that keep requesting large + # sliding sync windows.
+ if len(rooms_that_have_updates) > NUM_ROOMS_THRESHOLD: + last_sync_ts = previous_connection_state.last_used_ts + if ( + last_sync_ts is not None + and (self._clock.time_msec() - last_sync_ts) + > MINIMUM_NOT_USED_AGE_EXPIRY.as_millis() + ): + raise SlidingSyncUnknownPosition() + rooms_should_send.update(rooms_that_have_updates) relevant_rooms_to_send_map = { room_id: room_sync_config diff --git a/synapse/handlers/sliding_sync/store.py b/synapse/handlers/sliding_sync/store.py index 7bcd5f27ea..d01fab271f 100644 --- a/synapse/handlers/sliding_sync/store.py +++ b/synapse/handlers/sliding_sync/store.py @@ -75,7 +75,7 @@ async def get_and_clear_connection_positions( """ # If this is our first request, there is no previous connection state to fetch out of the database if from_token is None or from_token.connection_position == 0: - return PerConnectionState() + return PerConnectionState(last_used_ts=None) conn_id = sync_config.conn_id or "" diff --git a/synapse/handlers/stats.py b/synapse/handlers/stats.py index 1e5d996317..94f4da0430 100644 --- a/synapse/handlers/stats.py +++ b/synapse/handlers/stats.py @@ -37,6 +37,7 @@ ) from synapse.storage.databases.main.state_deltas import StateDelta from synapse.types import JsonDict +from synapse.util.duration import Duration from synapse.util.events import get_plain_text_topic_from_event_content if TYPE_CHECKING: @@ -77,7 +78,7 @@ def __init__(self, hs: "HomeServer"): # We kick this off so that we don't have to wait for a change before # we start populating stats self.clock.call_later( - 0, + Duration(seconds=0), self.notify_new_event, ) diff --git a/synapse/handlers/sync.py b/synapse/handlers/sync.py index b534e24698..60d8827425 100644 --- a/synapse/handlers/sync.py +++ b/synapse/handlers/sync.py @@ -36,7 +36,6 @@ Direction, EventContentFields, EventTypes, - JoinRules, Membership, ) from synapse.api.filtering import FilterCollection @@ -790,22 +789,13 @@ async def _load_filtered_recents( ) ) - filtered_recents = await 
filter_events_for_client( + loaded_recents = await filter_events_for_client( self._storage_controllers, sync_config.user.to_string(), loaded_recents, always_include_ids=current_state_ids, ) - loaded_recents = [] - for event in filtered_recents: - if event.type == EventTypes.CallInvite: - room_info = await self.store.get_room_with_stats(event.room_id) - assert room_info is not None - if room_info.join_rules == JoinRules.PUBLIC: - continue - loaded_recents.append(event) - log_kv({"loaded_recents_after_client_filtering": len(loaded_recents)}) loaded_recents.extend(recents) diff --git a/synapse/handlers/typing.py b/synapse/handlers/typing.py index 8b577d5d58..e66396fecc 100644 --- a/synapse/handlers/typing.py +++ b/synapse/handlers/typing.py @@ -41,6 +41,7 @@ UserID, ) from synapse.util.caches.stream_change_cache import StreamChangeCache +from synapse.util.duration import Duration from synapse.util.metrics import Measure from synapse.util.retryutils import filter_destinations_by_retry_limiter from synapse.util.wheel_timer import WheelTimer @@ -60,15 +61,15 @@ class RoomMember: # How often we expect remote servers to resend us presence. -FEDERATION_TIMEOUT = 60 * 1000 +FEDERATION_TIMEOUT = Duration(minutes=1) # How often to resend typing across federation. -FEDERATION_PING_INTERVAL = 40 * 1000 +FEDERATION_PING_INTERVAL = Duration(seconds=40) # How long to remember a typing notification happened in a room before # forgetting about it. -FORGET_TIMEOUT = 10 * 60 * 1000 +FORGET_TIMEOUT = Duration(minutes=10) class FollowerTypingHandler: @@ -106,7 +107,7 @@ def __init__(self, hs: "HomeServer"): self._rooms_updated: set[str] = set() - self.clock.looping_call(self._handle_timeouts, 5000) + self.clock.looping_call(self._handle_timeouts, Duration(seconds=5)) self.clock.looping_call(self._prune_old_typing, FORGET_TIMEOUT) def _reset(self) -> None: @@ -141,7 +142,10 @@ def _handle_timeout_for_member(self, now: int, member: RoomMember) -> None: # user. 
if self.federation and self.is_mine_id(member.user_id): last_fed_poke = self._member_last_federation_poke.get(member, None) - if not last_fed_poke or last_fed_poke + FEDERATION_PING_INTERVAL <= now: + if ( + not last_fed_poke + or last_fed_poke + FEDERATION_PING_INTERVAL.as_millis() <= now + ): self.hs.run_as_background_process( "typing._push_remote", self._push_remote, @@ -165,7 +169,7 @@ async def _push_remote(self, member: RoomMember, typing: bool) -> None: now = self.clock.time_msec() self.wheel_timer.insert( - now=now, obj=member, then=now + FEDERATION_PING_INTERVAL + now=now, obj=member, then=now + FEDERATION_PING_INTERVAL.as_millis() ) hosts: StrCollection = ( @@ -315,7 +319,7 @@ async def started_typing( if requester.shadow_banned: # We randomly sleep a bit just to annoy the requester. - await self.clock.sleep(random.randint(1, 10)) + await self.clock.sleep(Duration(seconds=random.randint(1, 10))) raise ShadowBanError() await self.auth.check_user_in_room(room_id, requester) @@ -350,7 +354,7 @@ async def stopped_typing( if requester.shadow_banned: # We randomly sleep a bit just to annoy the requester. 
- await self.clock.sleep(random.randint(1, 10)) + await self.clock.sleep(Duration(seconds=random.randint(1, 10))) raise ShadowBanError() await self.auth.check_user_in_room(room_id, requester) @@ -428,8 +432,10 @@ async def _recv_edu(self, origin: str, content: JsonDict) -> None: if user.domain in domains: logger.info("Got typing update from %s: %r", user_id, content) now = self.clock.time_msec() - self._member_typing_until[member] = now + FEDERATION_TIMEOUT - self.wheel_timer.insert(now=now, obj=member, then=now + FEDERATION_TIMEOUT) + self._member_typing_until[member] = now + FEDERATION_TIMEOUT.as_millis() + self.wheel_timer.insert( + now=now, obj=member, then=now + FEDERATION_TIMEOUT.as_millis() + ) self._push_update_local(member=member, typing=content["typing"]) def _push_update_local(self, member: RoomMember, typing: bool) -> None: diff --git a/synapse/handlers/user_directory.py b/synapse/handlers/user_directory.py index e5210a3e97..36b037e8e1 100644 --- a/synapse/handlers/user_directory.py +++ b/synapse/handlers/user_directory.py @@ -40,6 +40,7 @@ from synapse.storage.databases.main.user_directory import SearchResult from synapse.storage.roommember import ProfileInfo from synapse.types import UserID +from synapse.util.duration import Duration from synapse.util.metrics import Measure from synapse.util.retryutils import NotRetryingDestination from synapse.util.stringutils import non_null_str_or_none @@ -52,7 +53,7 @@ # Don't refresh a stale user directory entry, using a Federation /profile request, # for 60 seconds. This gives time for other state events to arrive (which will # then be coalesced such that only one /profile request is made). -USER_DIRECTORY_STALE_REFRESH_TIME_MS = 60 * 1000 +USER_DIRECTORY_STALE_REFRESH_TIME = Duration(minutes=1) # Maximum number of remote servers that we will attempt to refresh profiles for # in one go. @@ -60,7 +61,7 @@ # As long as we have servers to refresh (without backoff), keep adding more # every 15 seconds. 
-INTERVAL_TO_ADD_MORE_SERVERS_TO_REFRESH_PROFILES = 15 +INTERVAL_TO_ADD_MORE_SERVERS_TO_REFRESH_PROFILES = Duration(seconds=15) def calculate_time_of_next_retry(now_ts: int, retry_count: int) -> int: @@ -137,13 +138,13 @@ def __init__(self, hs: "HomeServer"): # We kick this off so that we don't have to wait for a change before # we start populating the user directory self.clock.call_later( - 0, + Duration(seconds=0), self.notify_new_event, ) # Kick off the profile refresh process on startup self._refresh_remote_profiles_call_later = self.clock.call_later( - 10, + Duration(seconds=10), self.kick_off_remote_profile_refresh_process, ) @@ -550,7 +551,7 @@ async def _handle_possible_remote_profile_change( now_ts = self.clock.time_msec() await self.store.set_remote_user_profile_in_user_dir_stale( user_id, - next_try_at_ms=now_ts + USER_DIRECTORY_STALE_REFRESH_TIME_MS, + next_try_at_ms=now_ts + USER_DIRECTORY_STALE_REFRESH_TIME.as_millis(), retry_counter=0, ) # Schedule a wake-up to refresh the user directory for this server. @@ -558,13 +559,13 @@ async def _handle_possible_remote_profile_change( # other servers ahead of it in the queue to get in the way of updating # the profile if the server only just sent us an event. self.clock.call_later( - USER_DIRECTORY_STALE_REFRESH_TIME_MS // 1000 + 1, + USER_DIRECTORY_STALE_REFRESH_TIME + Duration(seconds=1), self.kick_off_remote_profile_refresh_process_for_remote_server, UserID.from_string(user_id).domain, ) # Schedule a wake-up to handle any backoffs that may occur in the future. 
self.clock.call_later( - 2 * USER_DIRECTORY_STALE_REFRESH_TIME_MS // 1000 + 1, + USER_DIRECTORY_STALE_REFRESH_TIME * 2 + Duration(seconds=1), self.kick_off_remote_profile_refresh_process, ) return @@ -656,7 +657,9 @@ async def _unsafe_refresh_remote_profiles(self) -> None: if not users: return _, _, next_try_at_ts = users[0] - delay = ((next_try_at_ts - self.clock.time_msec()) // 1000) + 2 + delay = Duration( + milliseconds=next_try_at_ts - self.clock.time_msec() + ) + Duration(seconds=2) self._refresh_remote_profiles_call_later = self.clock.call_later( delay, self.kick_off_remote_profile_refresh_process, diff --git a/synapse/handlers/worker_lock.py b/synapse/handlers/worker_lock.py index 0e3fab292f..1537a18cc0 100644 --- a/synapse/handlers/worker_lock.py +++ b/synapse/handlers/worker_lock.py @@ -39,7 +39,7 @@ from synapse.storage.databases.main.lock import Lock, LockStore from synapse.util.async_helpers import timeout_deferred from synapse.util.clock import Clock -from synapse.util.constants import ONE_MINUTE_SECONDS +from synapse.util.duration import Duration if TYPE_CHECKING: from synapse.logging.opentracing import opentracing @@ -72,7 +72,7 @@ def __init__(self, hs: "HomeServer") -> None: # that lock. self._locks: dict[tuple[str, str], WeakSet[WaitingLock | WaitingMultiLock]] = {} - self._clock.looping_call(self._cleanup_locks, 30_000) + self._clock.looping_call(self._cleanup_locks, Duration(seconds=30)) self._notifier.add_lock_released_callback(self._on_lock_released) @@ -187,7 +187,7 @@ def _wake_all_locks( lock.release_lock() self._clock.call_later( - 0, + Duration(seconds=0), _wake_all_locks, locks, ) @@ -276,7 +276,7 @@ async def __aexit__( def _get_next_retry_interval(self) -> float: next = self._retry_interval self._retry_interval = max(5, next * 2) - if self._retry_interval > 10 * ONE_MINUTE_SECONDS: # >7 iterations + if self._retry_interval > Duration(minutes=10).as_secs(): # >7 iterations logger.warning( "Lock timeout is getting excessive: %ss. 
There may be a deadlock.", self._retry_interval, @@ -363,7 +363,7 @@ async def __aexit__( def _get_next_retry_interval(self) -> float: next = self._retry_interval self._retry_interval = max(5, next * 2) - if self._retry_interval > 10 * ONE_MINUTE_SECONDS: # >7 iterations + if self._retry_interval > Duration(minutes=10).as_secs(): # >7 iterations logger.warning( "Lock timeout is getting excessive: %ss. There may be a deadlock.", self._retry_interval, diff --git a/synapse/http/client.py b/synapse/http/client.py index cb9b8cd683..f0b9201086 100644 --- a/synapse/http/client.py +++ b/synapse/http/client.py @@ -87,6 +87,7 @@ from synapse.types import ISynapseReactor, StrSequence from synapse.util.async_helpers import timeout_deferred from synapse.util.clock import Clock +from synapse.util.duration import Duration from synapse.util.json import json_decoder if TYPE_CHECKING: @@ -161,7 +162,9 @@ def _is_ip_blocked( return False -_EPSILON = 0.00000001 +# The delay used by the scheduler to schedule tasks "as soon as possible", while +# still allowing other tasks to run between runs. +_EPSILON = Duration(microseconds=1) def _make_scheduler(clock: Clock) -> Callable[[Callable[[], object]], IDelayedCall]: diff --git a/synapse/http/federation/well_known_resolver.py b/synapse/http/federation/well_known_resolver.py index ec72e178c9..303b3856a2 100644 --- a/synapse/http/federation/well_known_resolver.py +++ b/synapse/http/federation/well_known_resolver.py @@ -37,6 +37,7 @@ from synapse.types import ISynapseThreadlessReactor from synapse.util.caches.ttlcache import TTLCache from synapse.util.clock import Clock +from synapse.util.duration import Duration from synapse.util.json import json_decoder from synapse.util.metrics import Measure @@ -315,7 +316,7 @@ async def _make_well_known_request( logger.info("Error fetching %s: %s. 
Retrying", uri_str, e) # Sleep briefly in the hopes that they come back up - await self._clock.sleep(0.5) + await self._clock.sleep(Duration(milliseconds=500)) def _cache_period_from_headers( diff --git a/synapse/http/server.py b/synapse/http/server.py index 5f4e7484fd..226cb00831 100644 --- a/synapse/http/server.py +++ b/synapse/http/server.py @@ -76,6 +76,7 @@ from synapse.util.caches import intern_dict from synapse.util.cancellation import is_function_cancellable from synapse.util.clock import Clock +from synapse.util.duration import Duration from synapse.util.iterutils import chunk_seq from synapse.util.json import json_encoder @@ -334,7 +335,7 @@ async def _async_render_wrapper(self, request: "SynapseRequest") -> None: callback_return = await self._async_render(request) except LimitExceededError as e: if e.pause: - await self._clock.sleep(e.pause) + await self._clock.sleep(Duration(seconds=e.pause)) raise if callback_return is not None: diff --git a/synapse/media/media_repository.py b/synapse/media/media_repository.py index 7b4408b2bc..29c5e66ec4 100644 --- a/synapse/media/media_repository.py +++ b/synapse/media/media_repository.py @@ -70,6 +70,7 @@ from synapse.storage.databases.main.media_repository import LocalMedia, RemoteMedia from synapse.types import UserID from synapse.util.async_helpers import Linearizer +from synapse.util.duration import Duration from synapse.util.retryutils import NotRetryingDestination from synapse.util.stringutils import random_string @@ -80,10 +81,10 @@ # How often to run the background job to update the "recently accessed" # attribute of local and remote media. -UPDATE_RECENTLY_ACCESSED_TS = 60 * 1000 # 1 minute +UPDATE_RECENTLY_ACCESSED_TS = Duration(minutes=1) # How often to run the background job to check for local and remote media # that should be purged according to the configured media retention settings. 
-MEDIA_RETENTION_CHECK_PERIOD_MS = 60 * 60 * 1000 # 1 hour +MEDIA_RETENTION_CHECK_PERIOD = Duration(hours=1) class MediaRepository: @@ -166,7 +167,7 @@ def __init__(self, hs: "HomeServer"): # with the duration between runs dictated by the homeserver config. self.clock.looping_call( self._start_apply_media_retention_rules, - MEDIA_RETENTION_CHECK_PERIOD_MS, + MEDIA_RETENTION_CHECK_PERIOD, ) if hs.config.media.url_preview_enabled: @@ -485,7 +486,7 @@ async def get_local_media_info( if now >= wait_until: break - await self.clock.sleep(0.5) + await self.clock.sleep(Duration(milliseconds=500)) logger.info("Media %s has not yet been uploaded", media_id) self.respond_not_yet_uploaded(request) diff --git a/synapse/media/media_storage.py b/synapse/media/media_storage.py index bc12212c46..e83869bf4d 100644 --- a/synapse/media/media_storage.py +++ b/synapse/media/media_storage.py @@ -51,6 +51,7 @@ from synapse.logging.opentracing import start_active_span, trace, trace_with_opname from synapse.media._base import ThreadedFileSender from synapse.util.clock import Clock +from synapse.util.duration import Duration from synapse.util.file_consumer import BackgroundFileConsumer from ..types import JsonDict @@ -457,7 +458,7 @@ async def write_chunks_to(self, callback: Callable[[bytes], object]) -> None: callback(chunk) # We yield to the reactor by sleeping for 0 seconds. 
- await self.clock.sleep(0) + await self.clock.sleep(Duration(seconds=0)) @implementer(interfaces.IConsumer) @@ -652,7 +653,7 @@ async def _resumeProducingRepeatedly(self) -> None: self.paused = False while not self.paused: producer.resumeProducing() - await self.clock.sleep(0) + await self.clock.sleep(Duration(seconds=0)) class Header: diff --git a/synapse/media/url_previewer.py b/synapse/media/url_previewer.py index bbd8017b13..2c5e518918 100644 --- a/synapse/media/url_previewer.py +++ b/synapse/media/url_previewer.py @@ -47,6 +47,7 @@ from synapse.types import JsonDict, UserID from synapse.util.async_helpers import ObservableDeferred from synapse.util.caches.expiringcache import ExpiringCache +from synapse.util.duration import Duration from synapse.util.json import json_encoder from synapse.util.stringutils import random_string @@ -208,7 +209,9 @@ def __init__( ) if self._worker_run_media_background_jobs: - self.clock.looping_call(self._start_expire_url_cache_data, 10 * 1000) + self.clock.looping_call( + self._start_expire_url_cache_data, Duration(seconds=10) + ) async def preview(self, url: str, user: UserID, ts: int) -> bytes: # the in-memory cache: diff --git a/synapse/metrics/common_usage_metrics.py b/synapse/metrics/common_usage_metrics.py index 0c3f380177..ea2cdecf51 100644 --- a/synapse/metrics/common_usage_metrics.py +++ b/synapse/metrics/common_usage_metrics.py @@ -24,6 +24,7 @@ import attr from synapse.metrics import SERVER_NAME_LABEL +from synapse.util.duration import Duration if TYPE_CHECKING: from synapse.server import HomeServer @@ -112,7 +113,7 @@ def setup(self) -> None: ) self._clock.looping_call( self._hs.run_as_background_process, - 5 * 60 * 1000, + Duration(minutes=5), desc="common_usage_metrics_update_gauges", func=self._update_gauges, ) diff --git a/synapse/module_api/__init__.py b/synapse/module_api/__init__.py index 9b98c853d1..5f07849bd8 100644 --- a/synapse/module_api/__init__.py +++ b/synapse/module_api/__init__.py @@ -159,6 +159,7 @@ 
from synapse.util.async_helpers import maybe_awaitable from synapse.util.caches.descriptors import CachedFunction, cached as _cached from synapse.util.clock import Clock +from synapse.util.duration import Duration from synapse.util.frozenutils import freeze if TYPE_CHECKING: @@ -1392,7 +1393,7 @@ def looping_background_call( if self._hs.config.worker.run_background_tasks or run_on_all_instances: self._clock.looping_call( self._hs.run_as_background_process, - msec, + Duration(milliseconds=msec), desc, lambda: maybe_awaitable(f(*args, **kwargs)), ) @@ -1447,8 +1448,7 @@ def delayed_background_call( desc = f.__name__ return self._clock.call_later( - # convert ms to seconds as needed by call_later. - msec * 0.001, + Duration(milliseconds=msec), self._hs.run_as_background_process, desc, lambda: maybe_awaitable(f(*args, **kwargs)), @@ -1460,7 +1460,7 @@ async def sleep(self, seconds: float) -> None: Added in Synapse v1.49.0. """ - await self._clock.sleep(seconds) + await self._clock.sleep(Duration(seconds=seconds)) async def send_http_push_notification( self, diff --git a/synapse/notifier.py b/synapse/notifier.py index 260a2c0d87..d8d2db17f1 100644 --- a/synapse/notifier.py +++ b/synapse/notifier.py @@ -61,6 +61,7 @@ from synapse.util.async_helpers import ( timeout_deferred, ) +from synapse.util.duration import Duration from synapse.util.stringutils import shortstr from synapse.visibility import filter_events_for_client @@ -235,7 +236,7 @@ class Notifier: Primarily used from the /events stream. 
""" - UNUSED_STREAM_EXPIRY_MS = 10 * 60 * 1000 + UNUSED_STREAM_EXPIRY = Duration(minutes=10) def __init__(self, hs: "HomeServer"): self.user_to_user_stream: dict[str, _NotifierUserStream] = {} @@ -269,9 +270,7 @@ def __init__(self, hs: "HomeServer"): self.state_handler = hs.get_state_handler() - self.clock.looping_call( - self.remove_expired_streams, self.UNUSED_STREAM_EXPIRY_MS - ) + self.clock.looping_call(self.remove_expired_streams, self.UNUSED_STREAM_EXPIRY) # This is not a very cheap test to perform, but it's only executed # when rendering the metrics page, which is likely once per minute at @@ -861,7 +860,7 @@ async def wait_for_stream_token(self, stream_token: StreamToken) -> bool: logged = True # TODO: be better - await self.clock.sleep(0.5) + await self.clock.sleep(Duration(milliseconds=500)) async def _get_room_ids( self, user: UserID, explicit_room_id: str | None @@ -889,7 +888,7 @@ async def _is_world_readable(self, room_id: str) -> bool: def remove_expired_streams(self) -> None: time_now_ms = self.clock.time_msec() expired_streams = [] - expire_before_ts = time_now_ms - self.UNUSED_STREAM_EXPIRY_MS + expire_before_ts = time_now_ms - self.UNUSED_STREAM_EXPIRY.as_millis() for stream in self.user_to_user_stream.values(): if stream.count_listeners(): continue diff --git a/synapse/push/emailpusher.py b/synapse/push/emailpusher.py index 36dc9bf6fc..ce4a2102e4 100644 --- a/synapse/push/emailpusher.py +++ b/synapse/push/emailpusher.py @@ -29,6 +29,7 @@ from synapse.push.mailer import Mailer from synapse.push.push_types import EmailReason from synapse.storage.databases.main.event_push_actions import EmailPushAction +from synapse.util.duration import Duration from synapse.util.threepids import validate_email if TYPE_CHECKING: @@ -229,7 +230,7 @@ async def _unsafe_process(self) -> None: if soonest_due_at is not None: delay = self.seconds_until(soonest_due_at) self.timed_call = self.hs.get_clock().call_later( - delay, + Duration(seconds=delay), self.on_timer, ) 
diff --git a/synapse/push/httppusher.py b/synapse/push/httppusher.py index edcabf0c29..1e7e742ddd 100644 --- a/synapse/push/httppusher.py +++ b/synapse/push/httppusher.py @@ -40,6 +40,7 @@ if TYPE_CHECKING: from synapse.server import HomeServer +from synapse.util.duration import Duration logger = logging.getLogger(__name__) @@ -336,7 +337,7 @@ async def _unsafe_process(self) -> None: else: logger.info("Push failed: delaying for %ds", self.backoff_delay) self.timed_call = self.hs.get_clock().call_later( - self.backoff_delay, + Duration(seconds=self.backoff_delay), self.on_timer, ) self.backoff_delay = min( @@ -371,7 +372,7 @@ async def _process_one(self, push_action: HttpPushAction) -> bool: delay_ms = random.randint(1, self.push_jitter_delay_ms) diff_ms = event.origin_server_ts + delay_ms - self.clock.time_msec() if diff_ms > 0: - await self.clock.sleep(diff_ms / 1000) + await self.clock.sleep(Duration(milliseconds=diff_ms)) rejected = await self.dispatch_push_event(event, tweaks, badge) if rejected is False: diff --git a/synapse/replication/http/_base.py b/synapse/replication/http/_base.py index d76b40cf39..2bab9c2d71 100644 --- a/synapse/replication/http/_base.py +++ b/synapse/replication/http/_base.py @@ -42,6 +42,7 @@ from synapse.types import JsonDict from synapse.util.caches.response_cache import ResponseCache from synapse.util.cancellation import is_function_cancellable +from synapse.util.duration import Duration from synapse.util.stringutils import random_string if TYPE_CHECKING: @@ -317,7 +318,7 @@ async def send_request( # If we timed out we probably don't need to worry about backing # off too much, but lets just wait a little anyway. 
- await clock.sleep(1) + await clock.sleep(Duration(seconds=1)) except (ConnectError, DNSLookupError) as e: if not cls.RETRY_ON_CONNECT_ERROR: raise @@ -332,7 +333,7 @@ async def send_request( e, ) - await clock.sleep(delay) + await clock.sleep(Duration(seconds=delay)) attempts += 1 except HttpResponseException as e: # We convert to SynapseError as we know that it was a SynapseError diff --git a/synapse/replication/tcp/client.py b/synapse/replication/tcp/client.py index 297feb0049..fdda932ead 100644 --- a/synapse/replication/tcp/client.py +++ b/synapse/replication/tcp/client.py @@ -55,6 +55,7 @@ ) from synapse.types import PersistedEventPosition, ReadReceipt, StreamKeyType, UserID from synapse.util.async_helpers import Linearizer, timeout_deferred +from synapse.util.duration import Duration from synapse.util.iterutils import batch_iter from synapse.util.metrics import Measure @@ -173,7 +174,7 @@ async def on_rdata( ) # Yield to reactor so that we don't block. - await self._clock.sleep(0) + await self._clock.sleep(Duration(seconds=0)) elif stream_name == PushersStream.NAME: for row in rows: if row.deleted: diff --git a/synapse/replication/tcp/protocol.py b/synapse/replication/tcp/protocol.py index 3068e60af0..489a2c76a6 100644 --- a/synapse/replication/tcp/protocol.py +++ b/synapse/replication/tcp/protocol.py @@ -55,6 +55,7 @@ parse_command_from_line, ) from synapse.util.clock import Clock +from synapse.util.duration import Duration from synapse.util.stringutils import random_string if TYPE_CHECKING: @@ -193,7 +194,9 @@ def connectionMade(self) -> None: self._send_pending_commands() # Starts sending pings - self._send_ping_loop = self.clock.looping_call(self.send_ping, 5000) + self._send_ping_loop = self.clock.looping_call( + self.send_ping, Duration(seconds=5) + ) # Always send the initial PING so that the other side knows that they # can time us out. 
diff --git a/synapse/replication/tcp/redis.py b/synapse/replication/tcp/redis.py index 27d43e6fba..93ba48b406 100644 --- a/synapse/replication/tcp/redis.py +++ b/synapse/replication/tcp/redis.py @@ -53,6 +53,7 @@ tcp_inbound_commands_counter, tcp_outbound_commands_counter, ) +from synapse.util.duration import Duration if TYPE_CHECKING: from synapse.replication.tcp.handler import ReplicationCommandHandler @@ -317,7 +318,7 @@ def __init__( self.hs = hs # nb must be called this for @wrap_as_background_process self.server_name = hs.hostname - hs.get_clock().looping_call(self._send_ping, 30 * 1000) + hs.get_clock().looping_call(self._send_ping, Duration(seconds=30)) @wrap_as_background_process("redis_ping") async def _send_ping(self) -> None: diff --git a/synapse/replication/tcp/resource.py b/synapse/replication/tcp/resource.py index 134d8d921f..36dd39ed67 100644 --- a/synapse/replication/tcp/resource.py +++ b/synapse/replication/tcp/resource.py @@ -34,6 +34,7 @@ from synapse.replication.tcp.protocol import ServerReplicationStreamProtocol from synapse.replication.tcp.streams import EventsStream from synapse.replication.tcp.streams._base import CachesStream, StreamRow, Token +from synapse.util.duration import Duration from synapse.util.metrics import Measure if TYPE_CHECKING: @@ -116,7 +117,7 @@ def __init__(self, hs: "HomeServer"): # # Note that if the position hasn't advanced then we won't send anything. 
if any(EventsStream.NAME == s.NAME for s in self.streams): - self.clock.looping_call(self.on_notifier_poke, 1000) + self.clock.looping_call(self.on_notifier_poke, Duration(seconds=1)) def on_notifier_poke(self) -> None: """Checks if there is actually any new data and sends it to the diff --git a/synapse/rest/admin/rooms.py b/synapse/rest/admin/rooms.py index cf24bc628a..a886859ffa 100644 --- a/synapse/rest/admin/rooms.py +++ b/synapse/rest/admin/rooms.py @@ -28,9 +28,13 @@ from synapse.api.constants import Direction, EventTypes, JoinRules, Membership from synapse.api.errors import AuthError, Codes, NotFoundError, SynapseError from synapse.api.filtering import Filter +from synapse.events.utils import ( + SerializeEventConfig, +) from synapse.handlers.pagination import ( PURGE_ROOM_ACTION_NAME, SHUTDOWN_AND_PURGE_ROOM_ACTION_NAME, + GetMessagesResult, ) from synapse.http.servlet import ( ResolveRoomIdMixin, @@ -44,11 +48,13 @@ parse_string, ) from synapse.http.site import SynapseRequest +from synapse.logging.opentracing import trace from synapse.rest.admin._base import ( admin_patterns, assert_requester_is_admin, assert_user_is_admin, ) +from synapse.rest.client.room import SerializeMessagesDeps, encode_messages_response from synapse.storage.databases.main.room import RoomSortOrder from synapse.streams.config import PaginationConfig from synapse.types import JsonDict, RoomID, ScheduledTask, UserID, create_requester @@ -976,6 +982,7 @@ def __init__(self, hs: "HomeServer"): self._pagination_handler = hs.get_pagination_handler() self._auth = hs.get_auth() self._store = hs.get_datastores().main + self._event_serializer = hs.get_event_client_serializer() async def on_GET( self, request: SynapseRequest, room_id: str @@ -999,7 +1006,11 @@ async def on_GET( ): as_client_event = False - msgs = await self._pagination_handler.get_messages( + serialize_options = SerializeEventConfig( + as_client_event=as_client_event, requester=requester + ) + + get_messages_result = await 
self._pagination_handler.get_messages( room_id=room_id, requester=requester, pagin_config=pagination_config, @@ -1008,7 +1019,27 @@ async def on_GET( use_admin_priviledge=True, ) - return HTTPStatus.OK, msgs + response_content = await self.encode_response( + get_messages_result, serialize_options + ) + + return HTTPStatus.OK, response_content + + @trace + async def encode_response( + self, + get_messages_result: GetMessagesResult, + serialize_options: SerializeEventConfig, + ) -> JsonDict: + return await encode_messages_response( + get_messages_result=get_messages_result, + serialize_options=serialize_options, + serialize_deps=SerializeMessagesDeps( + clock=self._clock, + event_serializer=self._event_serializer, + store=self._store, + ), + ) class RoomTimestampToEventRestServlet(RestServlet): diff --git a/synapse/rest/client/account.py b/synapse/rest/client/account.py index b052052be0..3cb1e09f44 100644 --- a/synapse/rest/client/account.py +++ b/synapse/rest/client/account.py @@ -58,6 +58,7 @@ EmailRequestTokenBody, MsisdnRequestTokenBody, ) +from synapse.util.duration import Duration from synapse.util.msisdn import phone_number_to_msisdn from synapse.util.stringutils import assert_valid_client_secret, random_string from synapse.util.threepids import check_3pid_allowed, validate_email @@ -125,7 +126,9 @@ async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: # comments for request_token_inhibit_3pid_errors. # Also wait for some random amount of time between 100ms and 1s to make it # look like we did something. - await self.hs.get_clock().sleep(random.randint(1, 10) / 10) + await self.hs.get_clock().sleep( + Duration(milliseconds=random.randint(100, 1000)) + ) return 200, {"sid": random_string(16)} raise SynapseError(400, "Email not found", Codes.THREEPID_NOT_FOUND) @@ -383,7 +386,9 @@ async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: # comments for request_token_inhibit_3pid_errors. 
# Also wait for some random amount of time between 100ms and 1s to make it # look like we did something. - await self.hs.get_clock().sleep(random.randint(1, 10) / 10) + await self.hs.get_clock().sleep( + Duration(milliseconds=random.randint(100, 1000)) + ) return 200, {"sid": random_string(16)} raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) @@ -449,7 +454,9 @@ async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: # comments for request_token_inhibit_3pid_errors. # Also wait for some random amount of time between 100ms and 1s to make it # look like we did something. - await self.hs.get_clock().sleep(random.randint(1, 10) / 10) + await self.hs.get_clock().sleep( + Duration(milliseconds=random.randint(100, 1000)) + ) return 200, {"sid": random_string(16)} logger.info("MSISDN %s is already in use by %s", msisdn, existing_user_id) diff --git a/synapse/rest/client/delayed_events.py b/synapse/rest/client/delayed_events.py index 69d1013e72..7afecffe2d 100644 --- a/synapse/rest/client/delayed_events.py +++ b/synapse/rest/client/delayed_events.py @@ -156,10 +156,10 @@ async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: - # The following can't currently be instantiated on workers. + # Most of the following can't currently be instantiated on workers. 
if hs.config.worker.worker_app is None: UpdateDelayedEventServlet(hs).register(http_server) CancelDelayedEventServlet(hs).register(http_server) - RestartDelayedEventServlet(hs).register(http_server) SendDelayedEventServlet(hs).register(http_server) + RestartDelayedEventServlet(hs).register(http_server) DelayedEventsServlet(hs).register(http_server) diff --git a/synapse/rest/client/mutual_rooms.py b/synapse/rest/client/mutual_rooms.py index bda6ed1f70..3e5316c4b7 100644 --- a/synapse/rest/client/mutual_rooms.py +++ b/synapse/rest/client/mutual_rooms.py @@ -90,4 +90,5 @@ async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: def register_servlets(hs: "HomeServer", http_server: HttpServer) -> None: - UserMutualRoomsServlet(hs).register(http_server) + if hs.config.experimental.msc2666_enabled: + UserMutualRoomsServlet(hs).register(http_server) diff --git a/synapse/rest/client/register.py b/synapse/rest/client/register.py index 9503446b92..fdd2f1985a 100644 --- a/synapse/rest/client/register.py +++ b/synapse/rest/client/register.py @@ -59,6 +59,7 @@ from synapse.metrics import SERVER_NAME_LABEL, threepid_send_requests from synapse.push.mailer import Mailer from synapse.types import JsonDict +from synapse.util.duration import Duration from synapse.util.msisdn import phone_number_to_msisdn from synapse.util.ratelimitutils import FederationRateLimiter from synapse.util.stringutils import assert_valid_client_secret, random_string @@ -150,7 +151,9 @@ async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: # Also wait for some random amount of time between 100ms and 1s to make it # look like we did something. 
await self.already_in_use_mailer.send_already_in_use_mail(email) - await self.hs.get_clock().sleep(random.randint(1, 10) / 10) + await self.hs.get_clock().sleep( + Duration(milliseconds=random.randint(100, 1000)) + ) return 200, {"sid": random_string(16)} raise SynapseError(400, "Email is already in use", Codes.THREEPID_IN_USE) @@ -219,7 +222,9 @@ async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: # comments for request_token_inhibit_3pid_errors. # Also wait for some random amount of time between 100ms and 1s to make it # look like we did something. - await self.hs.get_clock().sleep(random.randint(1, 10) / 10) + await self.hs.get_clock().sleep( + Duration(milliseconds=random.randint(100, 1000)) + ) return 200, {"sid": random_string(16)} raise SynapseError( diff --git a/synapse/rest/client/room.py b/synapse/rest/client/room.py index 81a6bd57fc..5e7dcb0191 100644 --- a/synapse/rest/client/room.py +++ b/synapse/rest/client/room.py @@ -28,6 +28,7 @@ from typing import TYPE_CHECKING, Awaitable from urllib import parse as urlparse +import attr from prometheus_client.core import Histogram from twisted.web.server import Request @@ -45,10 +46,12 @@ ) from synapse.api.filtering import Filter from synapse.events.utils import ( + EventClientSerializer, SerializeEventConfig, format_event_for_client_v2, serialize_event, ) +from synapse.handlers.pagination import GetMessagesResult from synapse.http.server import HttpServer from synapse.http.servlet import ( ResolveRoomIdMixin, @@ -64,15 +67,17 @@ ) from synapse.http.site import SynapseRequest from synapse.logging.context import make_deferred_yieldable, run_in_background -from synapse.logging.opentracing import set_tag +from synapse.logging.opentracing import set_tag, trace from synapse.metrics import SERVER_NAME_LABEL from synapse.rest.client._base import client_patterns from synapse.rest.client.transactions import HttpTransactionCache from synapse.state import CREATE_KEY, POWER_KEY +from 
synapse.storage.databases.main import DataStore from synapse.streams.config import PaginationConfig from synapse.types import JsonDict, Requester, StreamToken, ThirdPartyInstanceID, UserID from synapse.types.state import StateFilter from synapse.util.cancellation import cancellable +from synapse.util.clock import Clock from synapse.util.events import generate_fake_event_id from synapse.util.stringutils import parse_and_validate_server_name @@ -790,6 +795,56 @@ async def on_GET( return 200, {"joined": users_with_profile} +@attr.s(slots=True, frozen=True, auto_attribs=True) +class SerializeMessagesDeps: + clock: Clock + event_serializer: EventClientSerializer + store: DataStore + + +@trace +async def encode_messages_response( + *, + get_messages_result: GetMessagesResult, + serialize_options: SerializeEventConfig, + serialize_deps: SerializeMessagesDeps, +) -> JsonDict: + """ + Serialize a `GetMessagesResult` into the JSON response format for the `/messages` + endpoint. + + This logic is shared between the client API and Synapse admin API. 
+ """ + + time_now = serialize_deps.clock.time_msec() + + serialized_result = { + "chunk": ( + await serialize_deps.event_serializer.serialize_events( + get_messages_result.messages_chunk, + time_now, + config=serialize_options, + bundle_aggregations=get_messages_result.bundled_aggregations, + ) + ), + "start": await get_messages_result.start_token.to_string(serialize_deps.store), + } + + if get_messages_result.end_token is not None: + serialized_result["end"] = await get_messages_result.end_token.to_string( + serialize_deps.store + ) + + if get_messages_result.state is not None: + serialized_result[ + "state" + ] = await serialize_deps.event_serializer.serialize_events( + get_messages_result.state, time_now, config=serialize_options + ) + + return serialized_result + + # TODO: Needs better unit testing class RoomMessageListRestServlet(RestServlet): PATTERNS = client_patterns("/rooms/(?P[^/]*)/messages$", v1=True) @@ -806,6 +861,7 @@ def __init__(self, hs: "HomeServer"): self.pagination_handler = hs.get_pagination_handler() self.auth = hs.get_auth() self.store = hs.get_datastores().main + self.event_serializer = hs.get_event_client_serializer() async def on_GET( self, request: SynapseRequest, room_id: str @@ -839,7 +895,11 @@ async def on_GET( ): as_client_event = False - msgs = await self.pagination_handler.get_messages( + serialize_options = SerializeEventConfig( + as_client_event=as_client_event, requester=requester + ) + + get_messages_result = await self.pagination_handler.get_messages( room_id=room_id, requester=requester, pagin_config=pagination_config, @@ -847,6 +907,24 @@ async def on_GET( event_filter=event_filter, ) + # Useful for debugging timeline/pagination issues. For example, if a client + # isn't seeing the full history, we can check the homeserver logs to see if the + # client just never made the next request with the given `end` token. 
+ logger.info( + "Responding to `/messages` request: {%s} %s %s -> %d messages with end_token=%s", + requester.user.to_string(), + request.get_method(), + request.get_redacted_uri(), + len(get_messages_result.messages_chunk), + (await get_messages_result.end_token.to_string(self.store)) + if get_messages_result.end_token + else None, + ) + + response_content = await self.encode_response( + get_messages_result, serialize_options + ) + processing_end_time = self.clock.time_msec() room_member_count = await make_deferred_yieldable(room_member_count_deferred) messsages_response_timer.labels( @@ -854,7 +932,23 @@ async def on_GET( **{SERVER_NAME_LABEL: self.server_name}, ).observe((processing_end_time - processing_start_time) / 1000) - return 200, msgs + return 200, response_content + + @trace + async def encode_response( + self, + get_messages_result: GetMessagesResult, + serialize_options: SerializeEventConfig, + ) -> JsonDict: + return await encode_messages_response( + get_messages_result=get_messages_result, + serialize_options=serialize_options, + serialize_deps=SerializeMessagesDeps( + clock=self.clock, + event_serializer=self.event_serializer, + store=self.store, + ), + ) # TODO: Needs unit testing diff --git a/synapse/rest/client/transactions.py b/synapse/rest/client/transactions.py index 4b3656a597..43c7b6f993 100644 --- a/synapse/rest/client/transactions.py +++ b/synapse/rest/client/transactions.py @@ -34,13 +34,14 @@ from synapse.logging.context import make_deferred_yieldable, run_in_background from synapse.types import JsonDict, Requester from synapse.util.async_helpers import ObservableDeferred +from synapse.util.duration import Duration if TYPE_CHECKING: from synapse.server import HomeServer logger = logging.getLogger(__name__) -CLEANUP_PERIOD_MS = 1000 * 60 * 30 # 30 mins +CLEANUP_PERIOD = Duration(minutes=30) P = ParamSpec("P") @@ -56,7 +57,7 @@ def __init__(self, hs: "HomeServer"): ] = {} # Try to clean entries every 30 mins. 
This means entries will exist # for at *LEAST* 30 mins, and at *MOST* 60 mins. - self.clock.looping_call(self._cleanup, CLEANUP_PERIOD_MS) + self.clock.looping_call(self._cleanup, CLEANUP_PERIOD) def _get_transaction_key(self, request: IRequest, requester: Requester) -> Hashable: """A helper function which returns a transaction key that can be used @@ -145,5 +146,5 @@ def _cleanup(self) -> None: now = self.clock.time_msec() for key in list(self.transactions): ts = self.transactions[key][1] - if now > (ts + CLEANUP_PERIOD_MS): # after cleanup period + if now > (ts + CLEANUP_PERIOD.as_millis()): # after cleanup period del self.transactions[key] diff --git a/synapse/rest/client/versions.py b/synapse/rest/client/versions.py index dee2cdb637..75f27c98de 100644 --- a/synapse/rest/client/versions.py +++ b/synapse/rest/client/versions.py @@ -124,7 +124,7 @@ async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: # Implements additional endpoints as described in MSC2432 "org.matrix.msc2432": True, # Implements additional endpoints as described in MSC2666 - "uk.half-shot.msc2666.query_mutual_rooms": True, + "uk.half-shot.msc2666.query_mutual_rooms": self.config.experimental.msc2666_enabled, # Whether new rooms will be set to encrypted or not (based on presets). 
"io.element.e2ee_forced.public": self.e2ee_forced_public, "io.element.e2ee_forced.private": self.e2ee_forced_private, @@ -182,6 +182,8 @@ async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: "org.matrix.msc4306": self.config.experimental.msc4306_enabled, # MSC4169: Backwards-compatible redaction sending using `/send` "com.beeper.msc4169": self.config.experimental.msc4169_enabled, + # MSC4380: Invite blocking + "org.matrix.msc4380": self.config.experimental.msc4380_enabled, }, }, ) diff --git a/synapse/state/__init__.py b/synapse/state/__init__.py index 9fc49be4b1..a92233c863 100644 --- a/synapse/state/__init__.py +++ b/synapse/state/__init__.py @@ -54,6 +54,7 @@ from synapse.types.state import StateFilter from synapse.util.async_helpers import Linearizer from synapse.util.caches.expiringcache import ExpiringCache +from synapse.util.duration import Duration from synapse.util.metrics import Measure, measure_func from synapse.util.stringutils import shortstr @@ -663,7 +664,7 @@ def __init__(self, hs: "HomeServer"): _StateResMetrics ) - self.clock.looping_call(self._report_metrics, 120 * 1000) + self.clock.looping_call(self._report_metrics, Duration(minutes=2)) async def resolve_state_groups( self, diff --git a/synapse/state/v2.py b/synapse/state/v2.py index c410c3a7ec..1241a4d66e 100644 --- a/synapse/state/v2.py +++ b/synapse/state/v2.py @@ -40,6 +40,7 @@ from synapse.events import EventBase, is_creator from synapse.storage.databases.main.event_federation import StateDifference from synapse.types import MutableStateMap, StateMap, StrCollection +from synapse.util.duration import Duration logger = logging.getLogger(__name__) @@ -48,7 +49,7 @@ class Clock(Protocol): # This is usually synapse.util.Clock, but it's replaced with a FakeClock in tests. # We only ever sleep(0) though, so that other async functions can make forward # progress without waiting for stateres to complete. - async def sleep(self, duration_ms: float) -> None: ... 
+ async def sleep(self, duration: Duration) -> None: ... class StateResolutionStore(Protocol): @@ -639,7 +640,7 @@ async def _reverse_topological_power_sort( # We await occasionally when we're working with large data sets to # ensure that we don't block the reactor loop for too long. if idx % _AWAIT_AFTER_ITERATIONS == 0: - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) event_to_pl = {} for idx, event_id in enumerate(graph, start=1): @@ -651,7 +652,7 @@ async def _reverse_topological_power_sort( # We await occasionally when we're working with large data sets to # ensure that we don't block the reactor loop for too long. if idx % _AWAIT_AFTER_ITERATIONS == 0: - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) def _get_power_order(event_id: str) -> tuple[int, int, str]: ev = event_map[event_id] @@ -745,7 +746,7 @@ async def _iterative_auth_checks( # We await occasionally when we're working with large data sets to # ensure that we don't block the reactor loop for too long. if idx % _AWAIT_AFTER_ITERATIONS == 0: - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) return resolved_state @@ -796,7 +797,7 @@ async def _mainline_sort( # We await occasionally when we're working with large data sets to # ensure that we don't block the reactor loop for too long. if idx != 0 and idx % _AWAIT_AFTER_ITERATIONS == 0: - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) idx += 1 @@ -814,7 +815,7 @@ async def _mainline_sort( # We await occasionally when we're working with large data sets to # ensure that we don't block the reactor loop for too long. 
if idx % _AWAIT_AFTER_ITERATIONS == 0: - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) event_ids.sort(key=lambda ev_id: order_map[ev_id]) @@ -865,7 +866,7 @@ async def _get_mainline_depth_for_event( idx += 1 if idx % _AWAIT_AFTER_ITERATIONS == 0: - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) # Didn't find a power level auth event, so we just return 0 return 0 diff --git a/synapse/storage/background_updates.py b/synapse/storage/background_updates.py index c71bcdb7fb..311534c5e7 100644 --- a/synapse/storage/background_updates.py +++ b/synapse/storage/background_updates.py @@ -40,6 +40,7 @@ from synapse.storage.types import Connection, Cursor from synapse.types import JsonDict, StrCollection from synapse.util.clock import Clock +from synapse.util.duration import Duration from synapse.util.json import json_encoder from . import engines @@ -162,7 +163,7 @@ def __init__( async def __aenter__(self) -> int: if self._sleep: - await self._clock.sleep(self._sleep_duration_ms / 1000) + await self._clock.sleep(Duration(milliseconds=self._sleep_duration_ms)) return self._update_duration_ms diff --git a/synapse/storage/controllers/purge_events.py b/synapse/storage/controllers/purge_events.py index 4ca3f8f4e1..8a2053d25a 100644 --- a/synapse/storage/controllers/purge_events.py +++ b/synapse/storage/controllers/purge_events.py @@ -32,6 +32,7 @@ from synapse.storage.database import LoggingTransaction from synapse.storage.databases import Databases from synapse.types.storage import _BackgroundUpdates +from synapse.util.duration import Duration from synapse.util.stringutils import shortstr if TYPE_CHECKING: @@ -50,7 +51,7 @@ def __init__(self, hs: "HomeServer", stores: Databases): if hs.config.worker.run_background_tasks: self._delete_state_loop_call = hs.get_clock().looping_call( - self._delete_state_groups_loop, 60 * 1000 + self._delete_state_groups_loop, Duration(minutes=1) ) self.stores.state.db_pool.updates.register_background_update_handler( 
diff --git a/synapse/storage/controllers/state.py b/synapse/storage/controllers/state.py index 9c5e837ab0..4885268305 100644 --- a/synapse/storage/controllers/state.py +++ b/synapse/storage/controllers/state.py @@ -683,7 +683,7 @@ async def get_current_state_deltas( # https://github.com/matrix-org/synapse/issues/13008 return await self.stores.main.get_partial_current_state_deltas( - prev_stream_id, max_stream_id + prev_stream_id, max_stream_id, limit=100 ) @trace diff --git a/synapse/storage/database.py b/synapse/storage/database.py index 18f0eac585..2d5e1d3c48 100644 --- a/synapse/storage/database.py +++ b/synapse/storage/database.py @@ -62,6 +62,7 @@ from synapse.storage.types import Connection, Cursor, SQLQueryParameters from synapse.types import StrCollection from synapse.util.async_helpers import delay_cancellation +from synapse.util.duration import Duration from synapse.util.iterutils import batch_iter if TYPE_CHECKING: @@ -631,7 +632,7 @@ def __init__( # Check ASAP (and then later, every 1s) to see if we have finished # background updates of tables that aren't safe to update. self._clock.call_later( - 0.0, + Duration(seconds=0), self.hs.run_as_background_process, "upsert_safety_check", self._check_safe_to_upsert, @@ -679,7 +680,7 @@ async def _check_safe_to_upsert(self) -> None: # If there's any updates still running, reschedule to run. 
if background_update_names: self._clock.call_later( - 15.0, + Duration(seconds=15), self.hs.run_as_background_process, "upsert_safety_check", self._check_safe_to_upsert, @@ -706,7 +707,7 @@ def loop() -> None: "Total database time: %.3f%% {%s}", ratio * 100, top_three_counters ) - self._clock.looping_call(loop, 10000) + self._clock.looping_call(loop, Duration(seconds=10)) def new_transaction( self, diff --git a/synapse/storage/databases/main/account_data.py b/synapse/storage/databases/main/account_data.py index 15728cf618..71182cdab2 100644 --- a/synapse/storage/databases/main/account_data.py +++ b/synapse/storage/databases/main/account_data.py @@ -40,7 +40,12 @@ ) from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore from synapse.storage.databases.main.push_rule import PushRulesWorkerStore -from synapse.storage.invite_rule import InviteRulesConfig +from synapse.storage.invite_rule import ( + AllowAllInviteRulesConfig, + InviteRulesConfig, + MSC4155InviteRulesConfig, + MSC4380InviteRulesConfig, +) from synapse.storage.util.id_generators import MultiWriterIdGenerator from synapse.types import JsonDict, JsonMapping from synapse.util.caches.descriptors import cached @@ -104,6 +109,7 @@ def __init__( ) self._msc4155_enabled = hs.config.experimental.msc4155_enabled + self._msc4380_enabled = hs.config.experimental.msc4380_enabled def get_max_account_data_stream_id(self) -> int: """Get the current max stream ID for account data stream @@ -562,20 +568,28 @@ async def ignored_users(self, user_id: str) -> frozenset[str]: async def get_invite_config_for_user(self, user_id: str) -> InviteRulesConfig: """ - Get the invite configuration for the current user. + Get the invite configuration for the given user. Args: - user_id: + user_id: The user whose invite configuration should be returned. 
""" + if self._msc4380_enabled: + data = await self.get_global_account_data_by_type_for_user( + user_id, AccountDataTypes.MSC4380_INVITE_PERMISSION_CONFIG + ) + # If the user has an MSC4380-style config setting, prioritise that + # above an MSC4155 one + if data is not None: + return MSC4380InviteRulesConfig.from_account_data(data) + + if self._msc4155_enabled: + data = await self.get_global_account_data_by_type_for_user( + user_id, AccountDataTypes.MSC4155_INVITE_PERMISSION_CONFIG + ) + if data is not None: + return MSC4155InviteRulesConfig(data) - if not self._msc4155_enabled: - # This equates to allowing all invites, as if the setting was off. - return InviteRulesConfig(None) - - data = await self.get_global_account_data_by_type_for_user( - user_id, AccountDataTypes.MSC4155_INVITE_PERMISSION_CONFIG - ) - return InviteRulesConfig(data) + return AllowAllInviteRulesConfig() async def get_admin_client_config_for_user(self, user_id: str) -> AdminClientConfig: """ diff --git a/synapse/storage/databases/main/cache.py b/synapse/storage/databases/main/cache.py index b7b9b42461..a4530796f2 100644 --- a/synapse/storage/databases/main/cache.py +++ b/synapse/storage/databases/main/cache.py @@ -45,6 +45,7 @@ from synapse.storage.engines import PostgresEngine from synapse.storage.util.id_generators import MultiWriterIdGenerator from synapse.util.caches.descriptors import CachedFunction +from synapse.util.duration import Duration from synapse.util.iterutils import batch_iter if TYPE_CHECKING: @@ -71,11 +72,11 @@ # How long between cache invalidation table cleanups, once we have caught up # with the backlog. -REGULAR_CLEANUP_INTERVAL_MS = Config.parse_duration("1h") +REGULAR_CLEANUP_INTERVAL = Duration(hours=1) # How long between cache invalidation table cleanups, before we have caught # up with the backlog. -CATCH_UP_CLEANUP_INTERVAL_MS = Config.parse_duration("1m") +CATCH_UP_CLEANUP_INTERVAL = Duration(minutes=1) # Maximum number of cache invalidation rows to delete at once. 
CLEAN_UP_MAX_BATCH_SIZE = 20_000 @@ -139,7 +140,7 @@ def __init__( self.database_engine, PostgresEngine ): self.hs.get_clock().call_later( - CATCH_UP_CLEANUP_INTERVAL_MS / 1000, + CATCH_UP_CLEANUP_INTERVAL, self._clean_up_cache_invalidation_wrapper, ) @@ -825,12 +826,12 @@ async def _clean_up_cache_invalidation_wrapper(self) -> None: # Vary how long we wait before calling again depending on whether we # are still sifting through backlog or we have caught up. if in_backlog: - next_interval = CATCH_UP_CLEANUP_INTERVAL_MS + next_interval = CATCH_UP_CLEANUP_INTERVAL else: - next_interval = REGULAR_CLEANUP_INTERVAL_MS + next_interval = REGULAR_CLEANUP_INTERVAL self.hs.get_clock().call_later( - next_interval / 1000, + next_interval, self._clean_up_cache_invalidation_wrapper, ) diff --git a/synapse/storage/databases/main/censor_events.py b/synapse/storage/databases/main/censor_events.py index 5d667a5345..a5ae4bf506 100644 --- a/synapse/storage/databases/main/censor_events.py +++ b/synapse/storage/databases/main/censor_events.py @@ -32,6 +32,7 @@ ) from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore from synapse.storage.databases.main.events_worker import EventsWorkerStore +from synapse.util.duration import Duration from synapse.util.json import json_encoder if TYPE_CHECKING: @@ -54,7 +55,7 @@ def __init__( hs.config.worker.run_background_tasks and self.hs.config.server.redaction_retention_period is not None ): - hs.get_clock().looping_call(self._censor_redactions, 5 * 60 * 1000) + hs.get_clock().looping_call(self._censor_redactions, Duration(minutes=5)) @wrap_as_background_process("_censor_redactions") async def _censor_redactions(self) -> None: diff --git a/synapse/storage/databases/main/client_ips.py b/synapse/storage/databases/main/client_ips.py index 4948d0c286..7cd3667a2b 100644 --- a/synapse/storage/databases/main/client_ips.py +++ b/synapse/storage/databases/main/client_ips.py @@ -42,6 +42,7 @@ ) from synapse.types import JsonDict, UserID 
from synapse.util.caches.lrucache import LruCache +from synapse.util.duration import Duration if TYPE_CHECKING: from synapse.server import HomeServer @@ -437,7 +438,7 @@ def __init__( ) if hs.config.worker.run_background_tasks and self.user_ips_max_age: - self.clock.looping_call(self._prune_old_user_ips, 5 * 1000) + self.clock.looping_call(self._prune_old_user_ips, Duration(seconds=5)) if self._update_on_this_worker: # This is the designated worker that can write to the client IP @@ -448,7 +449,7 @@ def __init__( tuple[str, str, str], tuple[str, str | None, int] ] = {} - self.clock.looping_call(self._update_client_ips_batch, 5 * 1000) + self.clock.looping_call(self._update_client_ips_batch, Duration(seconds=5)) hs.register_async_shutdown_handler( phase="before", eventType="shutdown", diff --git a/synapse/storage/databases/main/delayed_events.py b/synapse/storage/databases/main/delayed_events.py index 7f72be46f5..5547150515 100644 --- a/synapse/storage/databases/main/delayed_events.py +++ b/synapse/storage/databases/main/delayed_events.py @@ -259,7 +259,7 @@ async def get_all_delayed_events_for_user( ] async def process_timeout_delayed_events( - self, current_ts: Timestamp + self, current_ts: Timestamp, reprocess_events: bool = False ) -> tuple[ list[DelayedEventDetails], Timestamp | None, @@ -268,6 +268,16 @@ async def process_timeout_delayed_events( Marks for processing all delayed events that should have been sent prior to the provided time that haven't already been marked as such. + Args: + current_ts: The current timestamp. + reprocess_events: Whether to reprocess already-processed delayed + events. If set to True, events which are marked as processed + will have their `send_ts` re-checked. + + This is mainly useful for recovering from a server restart; + which could have occurred between an event being marked as + processed and the event actually being sent. 
+ Returns: The details of all newly-processed delayed events, and the send time of the next delayed event to be sent, if any. """ @@ -292,7 +302,12 @@ def process_timeout_delayed_events_txn( ) ) sql_update = "UPDATE delayed_events SET is_processed = TRUE" - sql_where = "WHERE send_ts <= ? AND NOT is_processed" + sql_where = "WHERE send_ts <= ?" + + if not reprocess_events: + # Skip already-processed events. + sql_where += " AND NOT is_processed" + sql_args = (current_ts,) sql_order = "ORDER BY send_ts" if isinstance(self.database_engine, PostgresEngine): diff --git a/synapse/storage/databases/main/deviceinbox.py b/synapse/storage/databases/main/deviceinbox.py index a12411d723..fc61f46c1c 100644 --- a/synapse/storage/databases/main/deviceinbox.py +++ b/synapse/storage/databases/main/deviceinbox.py @@ -48,9 +48,9 @@ ) from synapse.storage.util.id_generators import MultiWriterIdGenerator from synapse.types import JsonDict, StrCollection -from synapse.util import Duration from synapse.util.caches.expiringcache import ExpiringCache from synapse.util.caches.stream_change_cache import StreamChangeCache +from synapse.util.duration import Duration from synapse.util.iterutils import batch_iter from synapse.util.json import json_encoder from synapse.util.stringutils import parse_and_validate_server_name @@ -62,10 +62,10 @@ # How long to keep messages in the device federation inbox before deleting them. -DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS = 7 * Duration.DAY_MS +DEVICE_FEDERATION_INBOX_CLEANUP_DELAY = Duration(days=7) # How often to run the task to clean up old device_federation_inbox rows. -DEVICE_FEDERATION_INBOX_CLEANUP_INTERVAL_MS = 5 * Duration.MINUTE_MS +DEVICE_FEDERATION_INBOX_CLEANUP_INTERVAL = Duration(minutes=5) # Update name for the device federation inbox received timestamp index. 
DEVICE_FEDERATION_INBOX_RECEIVED_INDEX_UPDATE = ( @@ -152,7 +152,7 @@ def __init__( if hs.config.worker.run_background_tasks: self.clock.looping_call( run_as_background_process, - DEVICE_FEDERATION_INBOX_CLEANUP_INTERVAL_MS, + DEVICE_FEDERATION_INBOX_CLEANUP_INTERVAL, "_delete_old_federation_inbox_rows", self.server_name, self._delete_old_federation_inbox_rows, @@ -996,9 +996,10 @@ async def _delete_old_federation_inbox_rows(self, batch_size: int = 1000) -> Non def _delete_old_federation_inbox_rows_txn(txn: LoggingTransaction) -> bool: # We delete at most 100 rows that are older than - # DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS + # DEVICE_FEDERATION_INBOX_CLEANUP_DELAY delete_before_ts = ( - self.clock.time_msec() - DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS + self.clock.time_msec() + - DEVICE_FEDERATION_INBOX_CLEANUP_DELAY.as_millis() ) sql = """ WITH to_delete AS ( @@ -1028,7 +1029,7 @@ def _delete_old_federation_inbox_rows_txn(txn: LoggingTransaction) -> bool: # We sleep a bit so that we don't hammer the database in a tight # loop first time we run this. 
- await self.clock.sleep(1) + await self.clock.sleep(Duration(seconds=1)) async def get_devices_with_messages( self, user_id: str, device_ids: StrCollection diff --git a/synapse/storage/databases/main/devices.py b/synapse/storage/databases/main/devices.py index caae2a0648..cbad40faf7 100644 --- a/synapse/storage/databases/main/devices.py +++ b/synapse/storage/databases/main/devices.py @@ -62,6 +62,7 @@ from synapse.util.caches.descriptors import cached, cachedList from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.util.cancellation import cancellable +from synapse.util.duration import Duration from synapse.util.iterutils import batch_iter from synapse.util.json import json_decoder, json_encoder from synapse.util.stringutils import shortstr @@ -191,7 +192,7 @@ def __init__( if hs.config.worker.run_background_tasks: self.clock.looping_call( - self._prune_old_outbound_device_pokes, 60 * 60 * 1000 + self._prune_old_outbound_device_pokes, Duration(hours=1) ) def process_replication_rows( diff --git a/synapse/storage/databases/main/event_federation.py b/synapse/storage/databases/main/event_federation.py index b2f0aeaf58..cc7083b605 100644 --- a/synapse/storage/databases/main/event_federation.py +++ b/synapse/storage/databases/main/event_federation.py @@ -56,6 +56,7 @@ from synapse.util.caches.descriptors import cached from synapse.util.caches.lrucache import LruCache from synapse.util.cancellation import cancellable +from synapse.util.duration import Duration from synapse.util.iterutils import batch_iter from synapse.util.json import json_encoder @@ -155,7 +156,7 @@ def __init__( if hs.config.worker.run_background_tasks: hs.get_clock().looping_call( - self._delete_old_forward_extrem_cache, 60 * 60 * 1000 + self._delete_old_forward_extrem_cache, Duration(hours=1) ) # Cache of event ID to list of auth event IDs and their depths. @@ -171,7 +172,9 @@ def __init__( # index. 
self.tests_allow_no_chain_cover_index = True - self.clock.looping_call(self._get_stats_for_federation_staging, 30 * 1000) + self.clock.looping_call( + self._get_stats_for_federation_staging, Duration(seconds=30) + ) if isinstance(self.database_engine, PostgresEngine): self.db_pool.updates.register_background_validate_constraint_and_delete_rows( diff --git a/synapse/storage/databases/main/event_push_actions.py b/synapse/storage/databases/main/event_push_actions.py index 2e99d7314e..a66caa672c 100644 --- a/synapse/storage/databases/main/event_push_actions.py +++ b/synapse/storage/databases/main/event_push_actions.py @@ -105,6 +105,7 @@ from synapse.storage.databases.main.stream import StreamWorkerStore from synapse.types import JsonDict, StrCollection from synapse.util.caches.descriptors import cached +from synapse.util.duration import Duration from synapse.util.json import json_encoder if TYPE_CHECKING: @@ -270,15 +271,17 @@ def __init__( self._find_stream_orderings_for_times_txn(cur) cur.close() - self.clock.looping_call(self._find_stream_orderings_for_times, 10 * 60 * 1000) + self.clock.looping_call( + self._find_stream_orderings_for_times, Duration(minutes=10) + ) self._rotate_count = 10000 self._doing_notif_rotation = False if hs.config.worker.run_background_tasks: - self.clock.looping_call(self._rotate_notifs, 30 * 1000) + self.clock.looping_call(self._rotate_notifs, Duration(seconds=30)) self.clock.looping_call( - self._clear_old_push_actions_staging, 30 * 60 * 1000 + self._clear_old_push_actions_staging, Duration(minutes=30) ) self.db_pool.updates.register_background_index_update( @@ -1817,7 +1820,7 @@ def _clear_old_push_actions_staging_txn(txn: LoggingTransaction) -> bool: return # We sleep to ensure that we don't overwhelm the DB. 
- await self.clock.sleep(1.0) + await self.clock.sleep(Duration(seconds=1)) async def get_push_actions_for_user( self, diff --git a/synapse/storage/databases/main/events_worker.py b/synapse/storage/databases/main/events_worker.py index 4cf708442d..ae6ee50dc2 100644 --- a/synapse/storage/databases/main/events_worker.py +++ b/synapse/storage/databases/main/events_worker.py @@ -92,6 +92,7 @@ from synapse.util.caches.lrucache import AsyncLruCache from synapse.util.caches.stream_change_cache import StreamChangeCache from synapse.util.cancellation import cancellable +from synapse.util.duration import Duration from synapse.util.iterutils import batch_iter from synapse.util.metrics import Measure @@ -278,7 +279,7 @@ def __init__( # We periodically clean out old transaction ID mappings self.clock.looping_call( self._cleanup_old_transaction_ids, - 5 * 60 * 1000, + Duration(minutes=5), ) self._get_event_cache: AsyncLruCache[tuple[str], EventCacheEntry] = ( diff --git a/synapse/storage/databases/main/lock.py b/synapse/storage/databases/main/lock.py index 51f04acbcb..dd49f98366 100644 --- a/synapse/storage/databases/main/lock.py +++ b/synapse/storage/databases/main/lock.py @@ -38,6 +38,7 @@ ) from synapse.types import ISynapseReactor from synapse.util.clock import Clock +from synapse.util.duration import Duration from synapse.util.stringutils import random_string if TYPE_CHECKING: @@ -49,11 +50,13 @@ # How often to renew an acquired lock by updating the `last_renewed_ts` time in # the lock table. -_RENEWAL_INTERVAL_MS = 30 * 1000 +_RENEWAL_INTERVAL = Duration(seconds=30) # How long before an acquired lock times out. _LOCK_TIMEOUT_MS = 2 * 60 * 1000 +_LOCK_REAP_INTERVAL = Duration(milliseconds=_LOCK_TIMEOUT_MS / 10.0) + class LockStore(SQLBaseStore): """Provides a best effort distributed lock between worker instances. 
@@ -106,9 +109,7 @@ def __init__( self._acquiring_locks: set[tuple[str, str]] = set() - self.clock.looping_call( - self._reap_stale_read_write_locks, _LOCK_TIMEOUT_MS / 10.0 - ) + self.clock.looping_call(self._reap_stale_read_write_locks, _LOCK_REAP_INTERVAL) @wrap_as_background_process("LockStore._on_shutdown") async def _on_shutdown(self) -> None: @@ -410,7 +411,7 @@ def __init__( def _setup_looping_call(self) -> None: self._looping_call = self._clock.looping_call( self._renew, - _RENEWAL_INTERVAL_MS, + _RENEWAL_INTERVAL, self._server_name, self._store, self._hs, diff --git a/synapse/storage/databases/main/metrics.py b/synapse/storage/databases/main/metrics.py index 9b61769090..895c79f5da 100644 --- a/synapse/storage/databases/main/metrics.py +++ b/synapse/storage/databases/main/metrics.py @@ -35,6 +35,7 @@ from synapse.storage.databases.main.event_push_actions import ( EventPushActionsWorkerStore, ) +from synapse.util.duration import Duration if TYPE_CHECKING: from synapse.server import HomeServer @@ -79,7 +80,7 @@ def __init__( # Read the extrems every 60 minutes if hs.config.worker.run_background_tasks: - self.clock.looping_call(self._read_forward_extremities, 60 * 60 * 1000) + self.clock.looping_call(self._read_forward_extremities, Duration(hours=1)) # Used in _generate_user_daily_visits to keep track of progress self._last_user_visit_update = self._get_start_of_day() diff --git a/synapse/storage/databases/main/registration.py b/synapse/storage/databases/main/registration.py index 23991f28c9..c99faa29ca 100644 --- a/synapse/storage/databases/main/registration.py +++ b/synapse/storage/databases/main/registration.py @@ -49,13 +49,12 @@ from synapse.storage.util.sequence import build_sequence_generator from synapse.types import JsonDict, StrCollection, UserID, UserInfo from synapse.util.caches.descriptors import cached +from synapse.util.duration import Duration from synapse.util.iterutils import batch_iter if TYPE_CHECKING: from synapse.server import HomeServer 
-THIRTY_MINUTES_IN_MS = 30 * 60 * 1000 - logger = logging.getLogger(__name__) @@ -213,7 +212,7 @@ def __init__( if hs.config.worker.run_background_tasks: self.clock.call_later( - 0.0, + Duration(seconds=0), self._set_expiration_date_when_missing, ) @@ -227,7 +226,7 @@ def __init__( # Create a background job for culling expired 3PID validity tokens if hs.config.worker.run_background_tasks: self.clock.looping_call( - self.cull_expired_threepid_validation_tokens, THIRTY_MINUTES_IN_MS + self.cull_expired_threepid_validation_tokens, Duration(minutes=30) ) async def register_user( @@ -2782,7 +2781,7 @@ def __init__( # Create a background job for removing expired login tokens if hs.config.worker.run_background_tasks: self.clock.looping_call( - self._delete_expired_login_tokens, THIRTY_MINUTES_IN_MS + self._delete_expired_login_tokens, Duration(minutes=30) ) async def add_access_token_to_user( diff --git a/synapse/storage/databases/main/roommember.py b/synapse/storage/databases/main/roommember.py index 4fb7779d38..9b06ab69fe 100644 --- a/synapse/storage/databases/main/roommember.py +++ b/synapse/storage/databases/main/roommember.py @@ -63,6 +63,7 @@ get_domain_from_id, ) from synapse.util.caches.descriptors import _CacheContext, cached, cachedList +from synapse.util.duration import Duration from synapse.util.iterutils import batch_iter from synapse.util.metrics import Measure @@ -110,10 +111,10 @@ def __init__( self._known_servers_count = 1 self.hs.get_clock().looping_call( self._count_known_servers, - 60 * 1000, + Duration(minutes=1), ) self.hs.get_clock().call_later( - 1, + Duration(seconds=1), self._count_known_servers, ) federation_known_servers_gauge.register_hook( diff --git a/synapse/storage/databases/main/session.py b/synapse/storage/databases/main/session.py index 1154bb2d59..f088a8d88c 100644 --- a/synapse/storage/databases/main/session.py +++ b/synapse/storage/databases/main/session.py @@ -30,6 +30,7 @@ LoggingTransaction, ) from synapse.types import JsonDict 
+from synapse.util.duration import Duration from synapse.util.json import json_encoder if TYPE_CHECKING: @@ -55,7 +56,7 @@ def __init__( # Create a background job for culling expired sessions. if hs.config.worker.run_background_tasks: - self.clock.looping_call(self._delete_expired_sessions, 30 * 60 * 1000) + self.clock.looping_call(self._delete_expired_sessions, Duration(minutes=30)) async def create_session( self, session_type: str, value: JsonDict, expiry_ms: int diff --git a/synapse/storage/databases/main/sliding_sync.py b/synapse/storage/databases/main/sliding_sync.py index 2b67e75ac4..828eed3a73 100644 --- a/synapse/storage/databases/main/sliding_sync.py +++ b/synapse/storage/databases/main/sliding_sync.py @@ -20,6 +20,7 @@ from synapse.api.errors import SlidingSyncUnknownPosition from synapse.logging.opentracing import log_kv +from synapse.metrics.background_process_metrics import wrap_as_background_process from synapse.storage._base import SQLBaseStore, db_to_json from synapse.storage.database import ( DatabasePool, @@ -36,6 +37,7 @@ RoomSyncConfig, ) from synapse.util.caches.descriptors import cached +from synapse.util.duration import Duration from synapse.util.json import json_encoder if TYPE_CHECKING: @@ -45,6 +47,21 @@ logger = logging.getLogger(__name__) +# How often to update the `last_used_ts` column on +# `sliding_sync_connection_positions` when the client uses a connection +# position. We don't want to update it on every use to avoid excessive +# writes, but we want it to be reasonably up-to-date to help with +# cleaning up old connection positions. +UPDATE_INTERVAL_LAST_USED_TS = Duration(minutes=5) + +# Time in milliseconds the connection hasn't been used before we consider it +# expired and delete it. +CONNECTION_EXPIRY = Duration(days=7) + +# How often we run the background process to delete old sliding sync connections. 
+CONNECTION_EXPIRY_FREQUENCY = Duration(hours=1) + + class SlidingSyncStore(SQLBaseStore): def __init__( self, @@ -76,6 +93,12 @@ def __init__( replaces_index="sliding_sync_membership_snapshots_user_id", ) + if self.hs.config.worker.run_background_tasks: + self.clock.looping_call( + self.delete_old_sliding_sync_connections, + CONNECTION_EXPIRY_FREQUENCY, + ) + async def get_latest_bump_stamp_for_room( self, room_id: str, @@ -202,6 +225,7 @@ def persist_per_connection_state_txn( "effective_device_id": device_id, "conn_id": conn_id, "created_ts": self.clock.time_msec(), + "last_used_ts": self.clock.time_msec(), }, returning=("connection_key",), ) @@ -384,7 +408,7 @@ def _get_and_clear_connection_positions_txn( # The `previous_connection_position` is a user-supplied value, so we # need to make sure that the one they supplied is actually theirs. sql = """ - SELECT connection_key + SELECT connection_key, last_used_ts FROM sliding_sync_connection_positions INNER JOIN sliding_sync_connections USING (connection_key) WHERE @@ -396,7 +420,23 @@ def _get_and_clear_connection_positions_txn( if row is None: raise SlidingSyncUnknownPosition() - (connection_key,) = row + (connection_key, last_used_ts) = row + + # Update the `last_used_ts` if it's due to be updated. We don't update + # every time to avoid excessive writes. + now = self.clock.time_msec() + if ( + last_used_ts is None + or now - last_used_ts > UPDATE_INTERVAL_LAST_USED_TS.as_millis() + ): + self.db_pool.simple_update_txn( + txn, + table="sliding_sync_connections", + keyvalues={ + "connection_key": connection_key, + }, + updatevalues={"last_used_ts": now}, + ) # Now that we have seen the client has received and used the connection # position, we can delete all the other connection positions. 
@@ -480,12 +520,30 @@ def _get_and_clear_connection_positions_txn( logger.warning("Unrecognized sliding sync stream in DB %r", stream) return PerConnectionStateDB( + last_used_ts=last_used_ts, rooms=RoomStatusMap(rooms), receipts=RoomStatusMap(receipts), account_data=RoomStatusMap(account_data), room_configs=room_configs, ) + @wrap_as_background_process("delete_old_sliding_sync_connections") + async def delete_old_sliding_sync_connections(self) -> None: + """Delete sliding sync connections that have not been used for a long time.""" + cutoff_ts = self.clock.time_msec() - CONNECTION_EXPIRY.as_millis() + + def delete_old_sliding_sync_connections_txn(txn: LoggingTransaction) -> None: + sql = """ + DELETE FROM sliding_sync_connections + WHERE last_used_ts IS NOT NULL AND last_used_ts < ? + """ + txn.execute(sql, (cutoff_ts,)) + + await self.db_pool.runInteraction( + "delete_old_sliding_sync_connections", + delete_old_sliding_sync_connections_txn, + ) + @attr.s(auto_attribs=True, frozen=True) class PerConnectionStateDB: @@ -498,6 +556,8 @@ class PerConnectionStateDB: When persisting this *only* contains updates to the state. 
""" + last_used_ts: int | None + rooms: "RoomStatusMap[str]" receipts: "RoomStatusMap[str]" account_data: "RoomStatusMap[str]" @@ -553,6 +613,7 @@ async def from_state( ) return PerConnectionStateDB( + last_used_ts=per_connection_state.last_used_ts, rooms=RoomStatusMap(rooms), receipts=RoomStatusMap(receipts), account_data=RoomStatusMap(account_data), @@ -596,6 +657,7 @@ async def to_state(self, store: "DataStore") -> "PerConnectionState": } return PerConnectionState( + last_used_ts=self.last_used_ts, rooms=RoomStatusMap(rooms), receipts=RoomStatusMap(receipts), account_data=RoomStatusMap(account_data), diff --git a/synapse/storage/databases/main/state_deltas.py b/synapse/storage/databases/main/state_deltas.py index cd8f286d08..a5d5407327 100644 --- a/synapse/storage/databases/main/state_deltas.py +++ b/synapse/storage/databases/main/state_deltas.py @@ -78,27 +78,41 @@ def __init__( ) async def get_partial_current_state_deltas( - self, prev_stream_id: int, max_stream_id: int + self, prev_stream_id: int, max_stream_id: int, limit: int = 100 ) -> tuple[int, list[StateDelta]]: - """Fetch a list of room state changes since the given stream id + """Fetch a list of room state changes since the given stream id. This may be the partial state if we're lazy joining the room. + This method takes care to handle state deltas that share the same + `stream_id`. That can happen when persisting state in a batch, + potentially as the result of state resolution (both adding new state and + undo'ing previous state). + + State deltas are grouped by `stream_id`. When hitting the given `limit` + would return only part of a "group" of state deltas, that entire group + is omitted. Thus, this function may return *up to* `limit` state deltas, + or slightly more when a single group itself exceeds `limit`. + Args: prev_stream_id: point to get changes since (exclusive) max_stream_id: the point that we know has been correctly persisted - ie, an upper limit to return changes from. 
+ limit: the maximum number of rows to return. Returns: A tuple consisting of: - the stream id which these results go up to - list of current_state_delta_stream rows. If it is empty, we are up to date. - - A maximum of 100 rows will be returned. """ prev_stream_id = int(prev_stream_id) + if limit <= 0: + raise ValueError( + "Invalid `limit` passed to `get_partial_current_state_deltas" + ) + # check we're not going backwards assert prev_stream_id <= max_stream_id, ( f"New stream id {max_stream_id} is smaller than prev stream id {prev_stream_id}" @@ -115,45 +129,62 @@ async def get_partial_current_state_deltas( def get_current_state_deltas_txn( txn: LoggingTransaction, ) -> tuple[int, list[StateDelta]]: - # First we calculate the max stream id that will give us less than - # N results. - # We arbitrarily limit to 100 stream_id entries to ensure we don't - # select toooo many. - sql = """ - SELECT stream_id, count(*) + # First we group state deltas by `stream_id` and calculate which + # groups can be returned without exceeding the provided `limit`. + sql_grouped = """ + SELECT stream_id, COUNT(*) AS c FROM current_state_delta_stream WHERE stream_id > ? AND stream_id <= ? GROUP BY stream_id - ORDER BY stream_id ASC - LIMIT 100 + ORDER BY stream_id + LIMIT ? """ - txn.execute(sql, (prev_stream_id, max_stream_id)) - - total = 0 - - for stream_id, count in txn: - total += count - if total > 100: - # We arbitrarily limit to 100 entries to ensure we don't - # select toooo many. - logger.debug( - "Clipping current_state_delta_stream rows to stream_id %i", - stream_id, - ) - clipped_stream_id = stream_id + group_limit = limit + 1 + txn.execute(sql_grouped, (prev_stream_id, max_stream_id, group_limit)) + grouped_rows = txn.fetchall() + + if not grouped_rows: + # Nothing to return in the range; we are up to date through max_stream_id. + return max_stream_id, [] + + # Always retrieve the first group, at the bare minimum. 
This ensures the + # caller always makes progress, even if a single group exceeds `limit`. + fetch_upto_stream_id, included_rows = grouped_rows[0] + + # Determine which other groups we can retrieve at the same time, + # without blowing the budget. + included_all_groups = True + for stream_id, count in grouped_rows[1:]: + if included_rows + count > limit: + included_all_groups = False break - else: - # if there's no problem, we may as well go right up to the max_stream_id - clipped_stream_id = max_stream_id + included_rows += count + fetch_upto_stream_id = stream_id + + # If we retrieved fewer groups than the limit *and* we didn't hit the + # `LIMIT ?` cap on the grouping query, we know we've caught up with + # the stream. + caught_up_with_stream = ( + included_all_groups and len(grouped_rows) < group_limit + ) + + # At this point we should have advanced, or bailed out early above. + assert fetch_upto_stream_id != prev_stream_id - # Now actually get the deltas - sql = """ + # 2) Fetch the actual rows for only the included stream_id groups. + sql_rows = """ SELECT stream_id, room_id, type, state_key, event_id, prev_event_id FROM current_state_delta_stream WHERE ? < stream_id AND stream_id <= ? 
ORDER BY stream_id ASC """ - txn.execute(sql, (prev_stream_id, clipped_stream_id)) + txn.execute(sql_rows, (prev_stream_id, fetch_upto_stream_id)) + rows = txn.fetchall() + + clipped_stream_id = ( + max_stream_id if caught_up_with_stream else fetch_upto_stream_id + ) + return clipped_stream_id, [ StateDelta( stream_id=row[0], @@ -163,7 +194,7 @@ def get_current_state_deltas_txn( event_id=row[4], prev_event_id=row[5], ) - for row in txn.fetchall() + for row in rows ] return await self.db_pool.runInteraction( diff --git a/synapse/storage/databases/main/stream.py b/synapse/storage/databases/main/stream.py index 8644ff412e..8fa1e2e5a9 100644 --- a/synapse/storage/databases/main/stream.py +++ b/synapse/storage/databases/main/stream.py @@ -740,7 +740,14 @@ async def get_rooms_that_have_updates_since_sliding_sync_table( from_key: RoomStreamToken, ) -> StrCollection: """Return the rooms that probably have had updates since the given - token (changes that are > `from_key`).""" + token (changes that are > `from_key`). + + May return false positives, but must not return false negatives. + + If `have_finished_sliding_sync_background_jobs` is False, then we return + all the room IDs, as we can't be sure that the sliding sync table is + fully populated. + """ # If the stream change cache is valid for the stream token, we can just # use the result of that. if from_key.stream >= self._events_stream_cache.get_earliest_known_position(): @@ -748,6 +755,11 @@ async def get_rooms_that_have_updates_since_sliding_sync_table( room_ids, from_key.stream ) + if not self.have_finished_sliding_sync_background_jobs(): + # If the table hasn't been populated yet, we have to assume all rooms + # have updates. 
+ return room_ids + def get_rooms_that_have_updates_since_sliding_sync_table_txn( txn: LoggingTransaction, ) -> StrCollection: diff --git a/synapse/storage/databases/main/transactions.py b/synapse/storage/databases/main/transactions.py index 70c5b928fd..2fdd27d3da 100644 --- a/synapse/storage/databases/main/transactions.py +++ b/synapse/storage/databases/main/transactions.py @@ -37,6 +37,7 @@ from synapse.storage.databases.main.cache import CacheInvalidationWorkerStore from synapse.types import JsonDict, StrCollection from synapse.util.caches.descriptors import cached, cachedList +from synapse.util.duration import Duration if TYPE_CHECKING: from synapse.server import HomeServer @@ -81,7 +82,7 @@ def __init__( super().__init__(database, db_conn, hs) if hs.config.worker.run_background_tasks: - self.clock.looping_call(self._cleanup_transactions, 30 * 60 * 1000) + self.clock.looping_call(self._cleanup_transactions, Duration(minutes=30)) @wrap_as_background_process("cleanup_transactions") async def _cleanup_transactions(self) -> None: diff --git a/synapse/storage/invite_rule.py b/synapse/storage/invite_rule.py index 3de77e8c21..489533a9f4 100644 --- a/synapse/storage/invite_rule.py +++ b/synapse/storage/invite_rule.py @@ -1,7 +1,9 @@ import logging +from abc import abstractmethod from enum import Enum from typing import Pattern +import attr from matrix_common.regex import glob_to_regex from synapse.types import JsonMapping, UserID @@ -18,9 +20,29 @@ class InviteRule(Enum): class InviteRulesConfig: - """Class to determine if a given user permits an invite from another user, and the action to take.""" + """An object encapsulating a given user's choices about whether to accept invites.""" - def __init__(self, account_data: JsonMapping | None): + @abstractmethod + def get_invite_rule(self, inviter_user_id: str) -> InviteRule: + """Get the invite rule that matches this user. 
Will return InviteRule.ALLOW if no rules match + + Args: + inviter_user_id: The user ID of the inviting user. + """ + + +@attr.s(slots=True) +class AllowAllInviteRulesConfig(InviteRulesConfig): + """An `InviteRulesConfig` implementation which will accept all invites.""" + + def get_invite_rule(self, inviter_user_id: str) -> InviteRule: + return InviteRule.ALLOW + + +class MSC4155InviteRulesConfig(InviteRulesConfig): + """An object encapsulating [MSC4155](https://github.com/matrix-org/matrix-spec-proposals/pull/4155) invite rules.""" + + def __init__(self, account_data: JsonMapping): self.allowed_users: list[Pattern[str]] = [] self.ignored_users: list[Pattern[str]] = [] self.blocked_users: list[Pattern[str]] = [] @@ -110,3 +132,21 @@ def get_invite_rule(self, user_id: str) -> InviteRule: return rule return InviteRule.ALLOW + + +@attr.s(slots=True, auto_attribs=True) +class MSC4380InviteRulesConfig(InviteRulesConfig): + default_invite_rule: InviteRule + """The invite rule to apply to all invites.""" + + @classmethod + def from_account_data(cls, data: JsonMapping) -> "MSC4380InviteRulesConfig": + default = data.get("default_action") + + default_invite_rule = ( + InviteRule.BLOCK if default == "block" else InviteRule.ALLOW + ) + return cls(default_invite_rule=default_invite_rule) + + def get_invite_rule(self, inviter_user_id: str) -> InviteRule: + return self.default_invite_rule diff --git a/synapse/storage/schema/main/delta/93/03_sss_pos_last_used.sql b/synapse/storage/schema/main/delta/93/03_sss_pos_last_used.sql new file mode 100644 index 0000000000..747ba7a144 --- /dev/null +++ b/synapse/storage/schema/main/delta/93/03_sss_pos_last_used.sql @@ -0,0 +1,27 @@ +-- +-- This file is licensed under the Affero General Public License (AGPL) version 3. 
+-- +-- Copyright (C) 2025 Element Creations, Ltd +-- +-- This program is free software: you can redistribute it and/or modify +-- it under the terms of the GNU Affero General Public License as +-- published by the Free Software Foundation, either version 3 of the +-- License, or (at your option) any later version. +-- +-- See the GNU Affero General Public License for more details: +-- . + +-- Add a timestamp for when the sliding sync connection position was last used, +-- only updated with a small granularity. +-- +-- This should be NOT NULL, but we need to consider existing rows. In future we +-- may want to either backfill this or delete all rows with a NULL value (and +-- then make it NOT NULL). +ALTER TABLE sliding_sync_connections ADD COLUMN last_used_ts BIGINT; + +-- Note: We don't add an index on this column to allow HOT updates on PostgreSQL +-- to reduce the cost of the updates to the column. c.f. +-- https://www.postgresql.org/docs/current/storage-hot.html +-- +-- We do query this column directly to find expired connections, but we expect +-- that to be an infrequent operation and a sequential scan should be fine. diff --git a/synapse/types/handlers/sliding_sync.py b/synapse/types/handlers/sliding_sync.py index 494e3570d0..03b3bcb3ca 100644 --- a/synapse/types/handlers/sliding_sync.py +++ b/synapse/types/handlers/sliding_sync.py @@ -850,12 +850,16 @@ class PerConnectionState: since the last time you made a sync request. Attributes: + last_used_ts: The time this connection was last used, in milliseconds. + This is only accurate to `UPDATE_CONNECTION_STATE_EVERY_MS`. rooms: The status of each room for the events stream. receipts: The status of each room for the receipts stream. room_configs: Map from room_id to the `RoomSyncConfig` of all rooms that we have previously sent down. 
""" + last_used_ts: int | None = None + rooms: RoomStatusMap[RoomStreamToken] = attr.Factory(RoomStatusMap) receipts: RoomStatusMap[MultiWriterStreamToken] = attr.Factory(RoomStatusMap) account_data: RoomStatusMap[int] = attr.Factory(RoomStatusMap) @@ -867,6 +871,7 @@ def get_mutable(self) -> "MutablePerConnectionState": room_configs = cast(MutableMapping[str, RoomSyncConfig], self.room_configs) return MutablePerConnectionState( + last_used_ts=self.last_used_ts, rooms=self.rooms.get_mutable(), receipts=self.receipts.get_mutable(), account_data=self.account_data.get_mutable(), @@ -875,6 +880,7 @@ def get_mutable(self) -> "MutablePerConnectionState": def copy(self) -> "PerConnectionState": return PerConnectionState( + last_used_ts=self.last_used_ts, rooms=self.rooms.copy(), receipts=self.receipts.copy(), account_data=self.account_data.copy(), @@ -889,6 +895,8 @@ def __len__(self) -> int: class MutablePerConnectionState(PerConnectionState): """A mutable version of `PerConnectionState`""" + last_used_ts: int | None + rooms: MutableRoomStatusMap[RoomStreamToken] receipts: MutableRoomStatusMap[MultiWriterStreamToken] account_data: MutableRoomStatusMap[int] diff --git a/synapse/util/__init__.py b/synapse/util/__init__.py index f937080f9e..fbd01914d5 100644 --- a/synapse/util/__init__.py +++ b/synapse/util/__init__.py @@ -41,15 +41,6 @@ logger = logging.getLogger(__name__) -class Duration: - """Helper class that holds constants for common time durations in - milliseconds.""" - - MINUTE_MS = 60 * 1000 - HOUR_MS = 60 * MINUTE_MS - DAY_MS = 24 * HOUR_MS - - def unwrapFirstError(failure: Failure) -> Failure: # Deprecated: you probably just want to catch defer.FirstError and reraise # the subFailure's value, which will do a better job of preserving stacktraces. 
diff --git a/synapse/util/async_helpers.py b/synapse/util/async_helpers.py index 6f9bbcac67..818f8b1a69 100644 --- a/synapse/util/async_helpers.py +++ b/synapse/util/async_helpers.py @@ -58,6 +58,7 @@ run_in_background, ) from synapse.util.clock import Clock +from synapse.util.duration import Duration logger = logging.getLogger(__name__) @@ -640,7 +641,7 @@ async def _acquire_lock(self, key: Hashable) -> _LinearizerEntry: # This needs to happen while we hold the lock. We could put it on the # exit path, but that would slow down the uncontended case. try: - await self._clock.sleep(0) + await self._clock.sleep(Duration(seconds=0)) except CancelledError: self._release_lock(key, entry) raise @@ -818,7 +819,9 @@ def time_it_out() -> None: # We don't track these calls since they are short. delayed_call = clock.call_later( - timeout, time_it_out, call_later_cancel_on_shutdown=cancel_on_shutdown + Duration(seconds=timeout), + time_it_out, + call_later_cancel_on_shutdown=cancel_on_shutdown, ) def convert_cancelled(value: Failure) -> Failure: diff --git a/synapse/util/background_queue.py b/synapse/util/background_queue.py index 93ffd9f271..dfea7247f4 100644 --- a/synapse/util/background_queue.py +++ b/synapse/util/background_queue.py @@ -27,7 +27,7 @@ from twisted.internet import defer from synapse.util.async_helpers import DeferredEvent -from synapse.util.constants import MILLISECONDS_PER_SECOND +from synapse.util.duration import Duration if TYPE_CHECKING: from synapse.server import HomeServer @@ -67,7 +67,7 @@ def __init__( self._hs = hs self._name = name self._callback = callback - self._timeout_ms = timeout_ms + self._timeout_ms = Duration(milliseconds=timeout_ms) # The queue of items to process. self._queue: collections.deque[T] = collections.deque() @@ -125,7 +125,7 @@ async def _process_queue(self) -> None: # just loop round, clear the event, recheck the queue, and then # wait here again. 
new_data = await self._wakeup_event.wait( - timeout_seconds=self._timeout_ms / MILLISECONDS_PER_SECOND + timeout_seconds=self._timeout_ms.as_secs() ) if not new_data: # Timed out waiting for new data, so exit the loop diff --git a/synapse/util/batching_queue.py b/synapse/util/batching_queue.py index 514abcbec1..43eefcb7f1 100644 --- a/synapse/util/batching_queue.py +++ b/synapse/util/batching_queue.py @@ -36,6 +36,7 @@ from synapse.logging.context import PreserveLoggingContext, make_deferred_yieldable from synapse.metrics import SERVER_NAME_LABEL from synapse.util.clock import Clock +from synapse.util.duration import Duration if TYPE_CHECKING: from synapse.server import HomeServer @@ -175,7 +176,7 @@ async def _process_queue(self, key: Hashable) -> None: # pattern is to call `add_to_queue` multiple times at once, and # deferring to the next reactor tick allows us to batch all of # those up. - await self._clock.sleep(0) + await self._clock.sleep(Duration(seconds=0)) next_values = self._next_values.pop(key, []) if not next_values: diff --git a/synapse/util/caches/expiringcache.py b/synapse/util/caches/expiringcache.py index 528e4bb852..87870f4223 100644 --- a/synapse/util/caches/expiringcache.py +++ b/synapse/util/caches/expiringcache.py @@ -38,6 +38,7 @@ from synapse.config import cache as cache_config from synapse.util.caches import EvictionReason, register_cache from synapse.util.clock import Clock +from synapse.util.duration import Duration if TYPE_CHECKING: from synapse.server import HomeServer @@ -112,7 +113,7 @@ def __init__( def f() -> "defer.Deferred[None]": return hs.run_as_background_process("prune_cache", self._prune_cache) - self._clock.looping_call(f, self._expiry_ms / 2) + self._clock.looping_call(f, Duration(milliseconds=self._expiry_ms / 2)) def __setitem__(self, key: KT, value: VT) -> None: now = self._clock.time_msec() diff --git a/synapse/util/caches/lrucache.py b/synapse/util/caches/lrucache.py index d304e804e9..a3e7bd4d03 100644 --- 
a/synapse/util/caches/lrucache.py +++ b/synapse/util/caches/lrucache.py @@ -50,6 +50,7 @@ iterate_tree_cache_items, ) from synapse.util.clock import Clock +from synapse.util.duration import Duration from synapse.util.linked_list import ListNode if TYPE_CHECKING: @@ -202,9 +203,9 @@ async def _internal_expire_old_entries( if (i + 1) % 10000 == 0: logger.debug("Waiting during drop") if node.last_access_ts_secs > now - expiry_seconds: - await clock.sleep(0.5) + await clock.sleep(Duration(milliseconds=500)) else: - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) logger.debug("Waking during drop") node = next_node @@ -248,7 +249,7 @@ def setup_expire_lru_cache_entries(hs: "HomeServer") -> None: clock = hs.get_clock() clock.looping_call( _expire_old_entries, - 30 * 1000, + Duration(seconds=30), server_name, hs, clock, diff --git a/synapse/util/caches/response_cache.py b/synapse/util/caches/response_cache.py index b1cdc81dda..0289e13f6a 100644 --- a/synapse/util/caches/response_cache.py +++ b/synapse/util/caches/response_cache.py @@ -42,6 +42,7 @@ from synapse.util.async_helpers import AbstractObservableDeferred, ObservableDeferred from synapse.util.caches import EvictionReason, register_cache from synapse.util.clock import Clock +from synapse.util.duration import Duration logger = logging.getLogger(__name__) @@ -120,7 +121,7 @@ def __init__( self._result_cache: dict[KV, ResponseCacheEntry] = {} self.clock = clock - self.timeout_sec = timeout_ms / 1000.0 + self.timeout = Duration(milliseconds=timeout_ms) self._name = name self._metrics = register_cache( @@ -195,9 +196,9 @@ def on_complete(r: RV) -> RV: # if this cache has a non-zero timeout, and the callback has not cleared # the should_cache bit, we leave it in the cache for now and schedule # its removal later. 
- if self.timeout_sec and context.should_cache: + if self.timeout and context.should_cache: self.clock.call_later( - self.timeout_sec, + self.timeout, self._entry_timeout, key, # We don't need to track these calls since they don't hold any strong diff --git a/synapse/util/clock.py b/synapse/util/clock.py index 52ac5dcef3..4355704f8a 100644 --- a/synapse/util/clock.py +++ b/synapse/util/clock.py @@ -32,6 +32,7 @@ from synapse.logging.loggers import ExplicitlyConfiguredLogger from synapse.types import ISynapseThreadlessReactor from synapse.util import log_failure +from synapse.util.duration import Duration from synapse.util.stringutils import random_string_insecure_fast P = ParamSpec("P") @@ -104,14 +105,14 @@ def shutdown(self) -> None: self.cancel_all_looping_calls() self.cancel_all_delayed_calls() - async def sleep(self, seconds: float) -> None: + async def sleep(self, duration: Duration) -> None: d: defer.Deferred[float] = defer.Deferred() # Start task in the `sentinel` logcontext, to avoid leaking the current context # into the reactor once it finishes. with context.PreserveLoggingContext(): # We can ignore the lint here since this class is the one location callLater should # be called. - self._reactor.callLater(seconds, d.callback, seconds) # type: ignore[call-later-not-tracked] + self._reactor.callLater(duration.as_secs(), d.callback, duration.as_secs()) # type: ignore[call-later-not-tracked] await d def time(self) -> float: @@ -125,13 +126,13 @@ def time_msec(self) -> int: def looping_call( self, f: Callable[P, object], - msec: float, + duration: Duration, *args: P.args, **kwargs: P.kwargs, ) -> LoopingCall: """Call a function repeatedly. - Waits `msec` initially before calling `f` for the first time. + Waits `duration` initially before calling `f` for the first time. If the function given to `looping_call` returns an awaitable/deferred, the next call isn't scheduled until after the returned awaitable has finished. 
We get @@ -144,16 +145,16 @@ def looping_call( Args: f: The function to call repeatedly. - msec: How long to wait between calls in milliseconds. + duration: How long to wait between calls. *args: Positional arguments to pass to function. **kwargs: Key arguments to pass to function. """ - return self._looping_call_common(f, msec, False, *args, **kwargs) + return self._looping_call_common(f, duration, False, *args, **kwargs) def looping_call_now( self, f: Callable[P, object], - msec: float, + duration: Duration, *args: P.args, **kwargs: P.kwargs, ) -> LoopingCall: @@ -168,16 +169,16 @@ def looping_call_now( Args: f: The function to call repeatedly. - msec: How long to wait between calls in milliseconds. + duration: How long to wait between calls. *args: Positional arguments to pass to function. **kwargs: Key arguments to pass to function. """ - return self._looping_call_common(f, msec, True, *args, **kwargs) + return self._looping_call_common(f, duration, True, *args, **kwargs) def _looping_call_common( self, f: Callable[P, object], - msec: float, + duration: Duration, now: bool, *args: P.args, **kwargs: P.kwargs, @@ -237,7 +238,7 @@ def wrapped_f(*args: P.args, **kwargs: P.kwargs) -> Deferred: # We want to start the task in the `sentinel` logcontext, to avoid leaking the # current context into the reactor after the function finishes. with context.PreserveLoggingContext(): - d = call.start(msec / 1000.0, now=now) + d = call.start(duration.as_secs(), now=now) d.addErrback(log_failure, "Looping call died", consumeErrors=False) self._looping_calls.append(call) @@ -245,7 +246,7 @@ def wrapped_f(*args: P.args, **kwargs: P.kwargs) -> Deferred: "%s(%s): Scheduled looping call every %sms later", looping_call_context_string, instance_id, - msec, + duration.as_millis(), # Find out who is scheduling the call which makes it easy to follow in the # logs. 
stack_info=True, @@ -271,7 +272,7 @@ def cancel_all_looping_calls(self, consumeErrors: bool = True) -> None: def call_later( self, - delay: float, + delay: Duration, callback: Callable, *args: Any, call_later_cancel_on_shutdown: bool = True, @@ -284,7 +285,7 @@ def call_later( `run_as_background_process` to give it more specific label and track metrics. Args: - delay: How long to wait in seconds. + delay: How long to wait. callback: Function to call *args: Postional arguments to pass to function. call_later_cancel_on_shutdown: Whether this call should be tracked for cleanup during @@ -342,7 +343,9 @@ def wrapped_callback(*args: Any, **kwargs: Any) -> None: # We can ignore the lint here since this class is the one location callLater should # be called. - call = self._reactor.callLater(delay, wrapped_callback, *args, **kwargs) # type: ignore[call-later-not-tracked] + call = self._reactor.callLater( + delay.as_secs(), wrapped_callback, *args, **kwargs + ) # type: ignore[call-later-not-tracked] clock_debug_logger.debug( "call_later(%s): Scheduled call for %ss later (tracked for shutdown: %s)", diff --git a/synapse/util/constants.py b/synapse/util/constants.py deleted file mode 100644 index 7a3d073df5..0000000000 --- a/synapse/util/constants.py +++ /dev/null @@ -1,22 +0,0 @@ -# -# This file is licensed under the Affero General Public License (AGPL) version 3. -# -# Copyright (C) 2025 New Vector, Ltd -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as -# published by the Free Software Foundation, either version 3 of the -# License, or (at your option) any later version. -# -# See the GNU Affero General Public License for more details: -# . -# - -# Time-based constants. -# -# Laying these out incrementally, even if only some are required, helps with -# readability and catching bugs. 
-ONE_MINUTE_SECONDS = 60 -ONE_HOUR_SECONDS = 60 * ONE_MINUTE_SECONDS - -MILLISECONDS_PER_SECOND = 1000 diff --git a/synapse/util/duration.py b/synapse/util/duration.py new file mode 100644 index 0000000000..135b980852 --- /dev/null +++ b/synapse/util/duration.py @@ -0,0 +1,117 @@ +# +# This file is licensed under the Affero General Public License (AGPL) version 3. +# +# Copyright (C) 2025 Element Creations Ltd +# +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU Affero General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# See the GNU Affero General Public License for more details: +# . +# + +from datetime import timedelta +from typing import overload + +# Constant so we don't keep creating new timedelta objects when calling +# `.as_millis()`. +_ONE_MILLISECOND = timedelta(milliseconds=1) + + +class Duration(timedelta): + """A subclass of timedelta that adds a convenience method for getting + the duration in milliseconds. 
+ + Examples: + + ``` + duration = Duration(hours=2) + print(duration.as_millis()) # Outputs: 7200000 + ``` + """ + + def as_millis(self) -> int: + """Returns the duration in milliseconds.""" + return int(self / _ONE_MILLISECOND) + + def as_secs(self) -> float: + """Returns the duration in seconds.""" + return self.total_seconds() + + # Override arithmetic operations to return Duration instances + + def __add__(self, other: timedelta) -> "Duration": + """Add two durations together, returning a Duration.""" + result = super().__add__(other) + return Duration(seconds=result.total_seconds()) + + def __radd__(self, other: timedelta) -> "Duration": + """Add two durations together (reversed), returning a Duration.""" + result = super().__radd__(other) + return Duration(seconds=result.total_seconds()) + + def __sub__(self, other: timedelta) -> "Duration": + """Subtract two durations, returning a Duration.""" + result = super().__sub__(other) + return Duration(seconds=result.total_seconds()) + + def __rsub__(self, other: timedelta) -> "Duration": + """Subtract two durations (reversed), returning a Duration.""" + result = super().__rsub__(other) + return Duration(seconds=result.total_seconds()) + + def __mul__(self, other: float) -> "Duration": + """Multiply a duration by a scalar, returning a Duration.""" + result = super().__mul__(other) + return Duration(seconds=result.total_seconds()) + + def __rmul__(self, other: float) -> "Duration": + """Multiply a duration by a scalar (reversed), returning a Duration.""" + result = super().__rmul__(other) + return Duration(seconds=result.total_seconds()) + + @overload + def __truediv__(self, other: timedelta) -> float: ... + + @overload + def __truediv__(self, other: float) -> "Duration": ... + + def __truediv__(self, other: float | timedelta) -> "Duration | float": + """Divide a duration by a scalar or another duration. + + If dividing by a scalar, returns a Duration. + If dividing by a timedelta, returns a float ratio. 
+ """ + result = super().__truediv__(other) + if isinstance(other, timedelta): + # Dividing by a timedelta gives a float ratio + assert isinstance(result, float) + return result + else: + # Dividing by a scalar gives a Duration + assert isinstance(result, timedelta) + return Duration(seconds=result.total_seconds()) + + @overload + def __floordiv__(self, other: timedelta) -> int: ... + + @overload + def __floordiv__(self, other: int) -> "Duration": ... + + def __floordiv__(self, other: int | timedelta) -> "Duration | int": + """Floor divide a duration by a scalar or another duration. + + If dividing by a scalar, returns a Duration. + If dividing by a timedelta, returns an int ratio. + """ + result = super().__floordiv__(other) + if isinstance(other, timedelta): + # Dividing by a timedelta gives an int ratio + assert isinstance(result, int) + return result + else: + # Dividing by a scalar gives a Duration + assert isinstance(result, timedelta) + return Duration(seconds=result.total_seconds()) diff --git a/synapse/util/ratelimitutils.py b/synapse/util/ratelimitutils.py index 024706d9cf..d1053d227b 100644 --- a/synapse/util/ratelimitutils.py +++ b/synapse/util/ratelimitutils.py @@ -48,6 +48,7 @@ from synapse.logging.opentracing import start_active_span from synapse.metrics import SERVER_NAME_LABEL, Histogram, LaterGauge from synapse.util.clock import Clock +from synapse.util.duration import Duration if typing.TYPE_CHECKING: from contextlib import _GeneratorContextManager @@ -353,7 +354,9 @@ def queue_request() -> "defer.Deferred[None]": rate_limiter_name=self.metrics_name, **{SERVER_NAME_LABEL: self.our_server_name}, ).inc() - ret_defer = run_in_background(self.clock.sleep, self.sleep_sec) + ret_defer = run_in_background( + self.clock.sleep, Duration(seconds=self.sleep_sec) + ) self.sleeping_requests.add(request_id) @@ -414,6 +417,6 @@ def start_next_request() -> None: pass self.clock.call_later( - 0.0, + Duration(seconds=0), start_next_request, ) diff --git 
a/synapse/util/rust.py b/synapse/util/rust.py index 63b53b917f..d1e1a259e4 100644 --- a/synapse/util/rust.py +++ b/synapse/util/rust.py @@ -111,7 +111,38 @@ def get_synapse_source_directory() -> str | None: # c.f. https://packaging.python.org/en/latest/specifications/direct-url/ direct_url_json = package.read_text("direct_url.json") if direct_url_json is None: - return None + # No direct url metadata. Check if this is an egg-info install. + # + # An egg-info install is when there exists a `matrix_synapse.egg-info` + # directory alongside the source tree, containing the package metadata. + # This allows discovering packages in the current directory, without + # installing them properly to the environment wide `site-packages` + # directory. + # + # When searching for a package, Python will look for `.egg-info` files + # in the current working directory before looking in `site-packages`. + # This means that when running Synapse (or the tests) from the source + # tree Python will pick up the synapse package from the egg-info + # install. + # + # Poetry will create an egg-info install when running `poetry install`. + # + # The combination of the above means that it is very common for + # developers (e.g. running tests) to encounter egg-info installs. + # + # In this case we can find the source tree by looking for the + # `matrix_synapse.egg-info/PKG-INFO` file, and going up two directories + # from there. + + metadata_path = package.locate_file("matrix_synapse.egg-info/PKG-INFO") + if not os.path.exists(str(metadata_path)): + # Not an egg-info install. + return None + + # `metadata_path` points to the egg-info/PKG-INFO file, so go up two + # directories to get the root of the source tree. + source_dir = metadata_path.parent.parent + return os.fspath(source_dir) # c.f. 
https://packaging.python.org/en/latest/specifications/direct-url/ for # the format diff --git a/synapse/util/task_scheduler.py b/synapse/util/task_scheduler.py index 3b4423a1ff..353ddb70bc 100644 --- a/synapse/util/task_scheduler.py +++ b/synapse/util/task_scheduler.py @@ -35,6 +35,7 @@ wrap_as_background_process, ) from synapse.types import JsonMapping, ScheduledTask, TaskStatus +from synapse.util.duration import Duration from synapse.util.stringutils import random_string if TYPE_CHECKING: @@ -92,8 +93,8 @@ class TaskScheduler: """ # Precision of the scheduler, evaluation of tasks to run will only happen - # every `SCHEDULE_INTERVAL_MS` ms - SCHEDULE_INTERVAL_MS = 1 * 60 * 1000 # 1mn + # every `SCHEDULE_INTERVAL` + SCHEDULE_INTERVAL = Duration(minutes=1) # How often to clean up old tasks. CLEANUP_INTERVAL_MS = 30 * 60 * 1000 # Time before a complete or failed task is deleted from the DB @@ -103,7 +104,7 @@ class TaskScheduler: # Time from the last task update after which we will log a warning LAST_UPDATE_BEFORE_WARNING_MS = 24 * 60 * 60 * 1000 # 24hrs # Report a running task's status and usage every so often. 
- OCCASIONAL_REPORT_INTERVAL_MS = 5 * 60 * 1000 # 5 minutes + OCCASIONAL_REPORT_INTERVAL = Duration(minutes=5) def __init__(self, hs: "HomeServer"): self.hs = hs # nb must be called this for @wrap_as_background_process @@ -127,11 +128,11 @@ def __init__(self, hs: "HomeServer"): if self._run_background_tasks: self._clock.looping_call( self._launch_scheduled_tasks, - TaskScheduler.SCHEDULE_INTERVAL_MS, + TaskScheduler.SCHEDULE_INTERVAL, ) self._clock.looping_call( self._clean_scheduled_tasks, - TaskScheduler.SCHEDULE_INTERVAL_MS, + TaskScheduler.SCHEDULE_INTERVAL, ) running_tasks_gauge.register_hook( @@ -433,7 +434,7 @@ async def wrapper() -> None: start_time = self._clock.time() occasional_status_call = self._clock.looping_call( _occasional_report, - TaskScheduler.OCCASIONAL_REPORT_INTERVAL_MS, + TaskScheduler.OCCASIONAL_REPORT_INTERVAL, log_context, start_time, ) @@ -468,7 +469,7 @@ async def wrapper() -> None: # Try launch a new task since we've finished with this one. self._clock.call_later( - 0.1, + Duration(milliseconds=100), self._launch_scheduled_tasks, ) diff --git a/synapse/visibility.py b/synapse/visibility.py index 16b39e6200..bfa0db5670 100644 --- a/synapse/visibility.py +++ b/synapse/visibility.py @@ -33,6 +33,7 @@ EventTypes, EventUnsignedContentFields, HistoryVisibility, + JoinRules, Membership, ) from synapse.events import EventBase @@ -111,7 +112,17 @@ async def filter_events_for_client( # happen within the function. 
events_before_filtering = events.copy() # Default case is to *exclude* soft-failed events - events = [e for e in events if not e.internal_metadata.is_soft_failed()] + events = [] + found_call_invite = False + for event in events_before_filtering: + if event.internal_metadata.is_soft_failed(): + continue + + if event.type == EventTypes.CallInvite and not event.is_state(): + found_call_invite = True + + events.append(event) + client_config = await storage.main.get_admin_client_config_for_user(user_id) if filter_send_to_client and await storage.main.is_server_admin(user_id): if client_config.return_soft_failed_events: @@ -139,7 +150,11 @@ async def filter_events_for_client( [event.event_id for event in events], ) - types = (_HISTORY_VIS_KEY, (EventTypes.Member, user_id)) + types = [_HISTORY_VIS_KEY, (EventTypes.Member, user_id)] + if found_call_invite: + # We need to fetch the room's join rules state to determine + # whether to allow call invites in public rooms. + types.append((EventTypes.JoinRules, "")) # we exclude outliers at this point, and then handle them separately later event_id_to_state = await storage.state.get_state_for_events( @@ -178,6 +193,25 @@ def allowed(event: EventBase) -> EventBase | None: if filtered is None: return None + # Filter out call invites in public rooms, as this would potentially + # ring a lot of users. + if event.type == EventTypes.CallInvite and not event.is_state(): + # `state_after_event` should only be None if the event is an outlier, + # and earlier code should filter out outliers entirely. + # + # In addition, we only create outliers locally for out-of-band + # invite rejections, invites received over federation, or state + # events needed to authorise other events. None of this applies to + # call invites. 
+ assert state_after_event is not None + + room_join_rules = state_after_event.get((EventTypes.JoinRules, "")) + if ( + room_join_rules is not None + and room_join_rules.content.get("join_rule") == JoinRules.PUBLIC + ): + return None + # Annotate the event with the user's membership after the event. # # Normally we just look in `state_after_event`, but if the event is an outlier diff --git a/synmark/suites/logging.py b/synmark/suites/logging.py index d89f487d3d..243f9dbca0 100644 --- a/synmark/suites/logging.py +++ b/synmark/suites/logging.py @@ -37,6 +37,7 @@ from synapse.synapse_rust import reset_logging_config from synapse.types import ISynapseReactor from synapse.util.clock import Clock +from synapse.util.duration import Duration class LineCounter(LineOnlyReceiver): @@ -141,7 +142,7 @@ class _logging: if len(handler._buffer) == handler.maximum_buffer: while len(handler._buffer) > handler.maximum_buffer / 2: - await clock.sleep(0.01) + await clock.sleep(Duration(milliseconds=10)) await logger_factory.on_done diff --git a/tests/federation/test_federation_server.py b/tests/federation/test_federation_server.py index 28f892c487..84d961e2dc 100644 --- a/tests/federation/test_federation_server.py +++ b/tests/federation/test_federation_server.py @@ -30,6 +30,7 @@ from synapse.api.errors import FederationError from synapse.api.room_versions import KNOWN_ROOM_VERSIONS, RoomVersions from synapse.config.server import DEFAULT_ROOM_VERSION +from synapse.crypto.event_signing import add_hashes_and_signatures from synapse.events import EventBase, make_event_from_dict from synapse.federation.federation_base import event_from_pdu_json from synapse.http.types import QueryParams @@ -356,19 +357,44 @@ def _make_join(self, user_id: str) -> JsonDict: self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) return channel.json_body - def test_send_join(self) -> None: + def _test_send_join_common(self, room_version: str) -> None: """happy-path test of send_join""" + 
creator_user_id = self.register_user(f"kermit_v{room_version}", "test") + tok = self.login(f"kermit_v{room_version}", "test") + room_id = self.helper.create_room_as( + room_creator=creator_user_id, tok=tok, room_version=room_version + ) + + # Second member joins + second_member_user_id = self.register_user(f"fozzie_v{room_version}", "bear") + tok2 = self.login(f"fozzie_v{room_version}", "bear") + self.helper.join(room_id, second_member_user_id, tok=tok2) + + # Make join for remote user joining_user = "@misspiggy:" + self.OTHER_SERVER_NAME - join_result = self._make_join(joining_user) + channel = self.make_signed_federation_request( + "GET", + f"/_matrix/federation/v1/make_join/{room_id}/{joining_user}?ver={room_version}", + ) + self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) + join_result = channel.json_body + # Sign and send the join join_event_dict = join_result["event"] self.add_hashes_and_signatures_from_other_server( join_event_dict, - KNOWN_ROOM_VERSIONS[DEFAULT_ROOM_VERSION], + KNOWN_ROOM_VERSIONS[room_version], ) + if room_version in ["1", "2"]: + add_hashes_and_signatures( + KNOWN_ROOM_VERSIONS[room_version], + join_event_dict, + signature_name=self.hs.hostname, + signing_key=self.hs.signing_key, + ) channel = self.make_signed_federation_request( "PUT", - f"/_matrix/federation/v2/send_join/{self._room_id}/x", + f"/_matrix/federation/v2/send_join/{room_id}/x", content=join_event_dict, ) self.assertEqual(channel.code, HTTPStatus.OK, channel.json_body) @@ -384,8 +410,8 @@ def test_send_join(self) -> None: ("m.room.power_levels", ""), ("m.room.join_rules", ""), ("m.room.history_visibility", ""), - ("m.room.member", "@kermit:test"), - ("m.room.member", "@fozzie:test"), + ("m.room.member", f"@kermit_v{room_version}:test"), + ("m.room.member", f"@fozzie_v{room_version}:test"), # nb: *not* the joining user ], ) @@ -398,18 +424,28 @@ def test_send_join(self) -> None: returned_auth_chain_events, [ ("m.room.create", ""), - ("m.room.member", 
"@kermit:test"), + ("m.room.member", f"@kermit_v{room_version}:test"), ("m.room.power_levels", ""), ("m.room.join_rules", ""), ], ) # the room should show that the new user is a member - r = self.get_success( - self._storage_controllers.state.get_current_state(self._room_id) - ) + r = self.get_success(self._storage_controllers.state.get_current_state(room_id)) self.assertEqual(r[("m.room.member", joining_user)].membership, "join") + @parameterized.expand([(k,) for k in KNOWN_ROOM_VERSIONS.keys()]) + @override_config({"use_frozen_dicts": True}) + def test_send_join_with_frozen_dicts(self, room_version: str) -> None: + """Test send_join with USE_FROZEN_DICTS=True""" + self._test_send_join_common(room_version) + + @parameterized.expand([(k,) for k in KNOWN_ROOM_VERSIONS.keys()]) + @override_config({"use_frozen_dicts": False}) + def test_send_join_without_frozen_dicts(self, room_version: str) -> None: + """Test send_join with USE_FROZEN_DICTS=False""" + self._test_send_join_common(room_version) + def test_send_join_partial_state(self) -> None: """/send_join should return partial state, if requested""" joining_user = "@misspiggy:" + self.OTHER_SERVER_NAME diff --git a/tests/federation/transport/server/test__base.py b/tests/federation/transport/server/test__base.py index 3c553e6e40..00a9c2064c 100644 --- a/tests/federation/transport/server/test__base.py +++ b/tests/federation/transport/server/test__base.py @@ -30,6 +30,7 @@ from synapse.server import HomeServer from synapse.types import JsonDict from synapse.util.cancellation import cancellable +from synapse.util.duration import Duration from synapse.util.ratelimitutils import FederationRateLimiter from tests import unittest @@ -53,13 +54,13 @@ def __init__( async def on_GET( self, origin: str, content: None, query: dict[bytes, list[bytes]] ) -> tuple[int, JsonDict]: - await self.clock.sleep(1.0) + await self.clock.sleep(Duration(seconds=1)) return HTTPStatus.OK, {"result": True} async def on_POST( self, origin: str, 
content: JsonDict, query: dict[bytes, list[bytes]] ) -> tuple[int, JsonDict]: - await self.clock.sleep(1.0) + await self.clock.sleep(Duration(seconds=1)) return HTTPStatus.OK, {"result": True} diff --git a/tests/handlers/test_device.py b/tests/handlers/test_device.py index acd37a1c71..183234b8a0 100644 --- a/tests/handlers/test_device.py +++ b/tests/handlers/test_device.py @@ -250,7 +250,7 @@ def test_delete_device_and_big_device_inbox(self) -> None: self.assertEqual(10, len(res)) # wait for the task scheduler to do a second delete pass - self.reactor.advance(TaskScheduler.SCHEDULE_INTERVAL_MS / 1000) + self.reactor.advance(TaskScheduler.SCHEDULE_INTERVAL.as_secs()) # remaining messages should now be deleted res = self.get_success( @@ -449,6 +449,33 @@ def test_on_federation_query_user_devices_appservice(self) -> None: ], ) + def test_delete_device_removes_refresh_tokens(self) -> None: + """Deleting a device should also purge any refresh tokens for it.""" + self._record_users() + + self.get_success( + self.store.add_refresh_token_to_user( + user_id=user1, + token="refresh_token", + device_id="abc", + expiry_ts=None, + ultimate_session_expiry_ts=None, + ) + ) + + self.get_success(self.handler.delete_devices(user1, ["abc"])) + + remaining_refresh_token = self.get_success( + self.store.db_pool.simple_select_one( + table="refresh_tokens", + keyvalues={"user_id": user1, "device_id": "abc"}, + retcols=("id",), + desc="get_refresh_token_for_device", + allow_none=True, + ) + ) + self.assertIsNone(remaining_refresh_token) + class DehydrationTestCase(unittest.HomeserverTestCase): servlets = [ diff --git a/tests/handlers/test_room_member.py b/tests/handlers/test_room_member.py index 8f9e27603e..d8d7caaf1b 100644 --- a/tests/handlers/test_room_member.py +++ b/tests/handlers/test_room_member.py @@ -458,7 +458,9 @@ def test_deduplicate_joins(self) -> None: self.assertEqual(initial_count, new_count) -class TestInviteFiltering(FederatingHomeserverTestCase): +class 
TestMSC4155InviteFiltering(FederatingHomeserverTestCase): + """Tests for MSC4155-style invite filtering.""" + servlets = [ synapse.rest.admin.register_servlets, synapse.rest.client.login.register_servlets, @@ -618,3 +620,145 @@ def test_msc4155_block_invite_remote_server(self) -> None: ).value self.assertEqual(f.code, 403) self.assertEqual(f.errcode, "ORG.MATRIX.MSC4155.M_INVITE_BLOCKED") + + +class TestMSC4380InviteBlocking(FederatingHomeserverTestCase): + """Tests for MSC4380-style invite filtering.""" + + servlets = [ + synapse.rest.admin.register_servlets, + synapse.rest.client.login.register_servlets, + synapse.rest.client.room.register_servlets, + ] + + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + self.handler = hs.get_room_member_handler() + self.fed_handler = hs.get_federation_handler() + self.store = hs.get_datastores().main + + # Create two users. + self.alice = self.register_user("alice", "pass") + self.alice_token = self.login("alice", "pass") + self.bob = self.register_user("bob", "pass") + self.bob_token = self.login("bob", "pass") + + @override_config({"experimental_features": {"msc4380_enabled": True}}) + def test_misc4380_block_invite_local(self) -> None: + """Test that MSC4380 will block a user from being invited to a room""" + room_id = self.helper.create_room_as(self.alice, tok=self.alice_token) + + self.get_success( + self.store.add_account_data_for_user( + self.bob, + AccountDataTypes.MSC4380_INVITE_PERMISSION_CONFIG, + { + "default_action": "block", + }, + ) + ) + + f = self.get_failure( + self.handler.update_membership( + requester=create_requester(self.alice), + target=UserID.from_string(self.bob), + room_id=room_id, + action=Membership.INVITE, + ), + SynapseError, + ).value + self.assertEqual(f.code, 403) + self.assertEqual(f.errcode, "ORG.MATRIX.MSC4155.M_INVITE_BLOCKED") + + @override_config({"experimental_features": {"msc4380_enabled": True}}) + def test_misc4380_non_string_setting(self) -> None: + 
"""Test that `default_action` being set to something non-stringy is the same as "accept".""" + room_id = self.helper.create_room_as(self.alice, tok=self.alice_token) + + self.get_success( + self.store.add_account_data_for_user( + self.bob, + AccountDataTypes.MSC4380_INVITE_PERMISSION_CONFIG, + { + "default_action": 1, + }, + ) + ) + + self.get_success( + self.handler.update_membership( + requester=create_requester(self.alice), + target=UserID.from_string(self.bob), + room_id=room_id, + action=Membership.INVITE, + ) + ) + + @override_config({"experimental_features": {"msc4380_enabled": False}}) + def test_msc4380_disabled_allow_invite_local(self) -> None: + """Test that, when MSC4380 is not enabled, invites are accepted as normal""" + room_id = self.helper.create_room_as(self.alice, tok=self.alice_token) + + self.get_success( + self.store.add_account_data_for_user( + self.bob, + AccountDataTypes.MSC4380_INVITE_PERMISSION_CONFIG, + { + "default_action": "block", + }, + ) + ) + + self.get_success( + self.handler.update_membership( + requester=create_requester(self.alice), + target=UserID.from_string(self.bob), + room_id=room_id, + action=Membership.INVITE, + ), + ) + + @override_config({"experimental_features": {"msc4380_enabled": True}}) + def test_msc4380_block_invite_remote(self) -> None: + """Test that MSC4380 will block a user from being invited to a room by a remote user.""" + # A remote user who sends the invite + remote_server = "otherserver" + remote_user = "@otheruser:" + remote_server + + self.get_success( + self.store.add_account_data_for_user( + self.bob, + AccountDataTypes.MSC4380_INVITE_PERMISSION_CONFIG, + {"default_action": "block"}, + ) + ) + + room_id = self.helper.create_room_as( + room_creator=self.alice, tok=self.alice_token + ) + room_version = self.get_success(self.store.get_room_version(room_id)) + + invite_event = event_from_pdu_json( + { + "type": EventTypes.Member, + "content": {"membership": "invite"}, + "room_id": room_id, + "sender": 
remote_user, + "state_key": self.bob, + "depth": 32, + "prev_events": [], + "auth_events": [], + "origin_server_ts": self.clock.time_msec(), + }, + room_version, + ) + + f = self.get_failure( + self.fed_handler.on_invite_request( + remote_server, + invite_event, + invite_event.room_version, + ), + SynapseError, + ).value + self.assertEqual(f.code, 403) + self.assertEqual(f.errcode, "ORG.MATRIX.MSC4155.M_INVITE_BLOCKED") diff --git a/tests/handlers/test_typing.py b/tests/handlers/test_typing.py index 70557a4a5f..623eef0ecb 100644 --- a/tests/handlers/test_typing.py +++ b/tests/handlers/test_typing.py @@ -544,7 +544,7 @@ def test_prune_typing_replication(self) -> None: ) self.assertEqual(rows, [(2, [ROOM_ID, []])]) - self.reactor.advance(FORGET_TIMEOUT) + self.reactor.advance(FORGET_TIMEOUT.as_secs()) rows, _, _ = self.get_success( self.handler.get_all_typing_updates( diff --git a/tests/http/test_servlet.py b/tests/http/test_servlet.py index 5bf8305d05..2f1c8f03c6 100644 --- a/tests/http/test_servlet.py +++ b/tests/http/test_servlet.py @@ -34,6 +34,7 @@ from synapse.server import HomeServer from synapse.types import JsonDict from synapse.util.cancellation import cancellable +from synapse.util.duration import Duration from tests import unittest from tests.http.server._base import test_disconnect @@ -108,11 +109,11 @@ def __init__(self, hs: HomeServer): @cancellable async def on_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: - await self.clock.sleep(1.0) + await self.clock.sleep(Duration(seconds=1)) return HTTPStatus.OK, {"result": True} async def on_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: - await self.clock.sleep(1.0) + await self.clock.sleep(Duration(seconds=1)) return HTTPStatus.OK, {"result": True} diff --git a/tests/logging/test_opentracing.py b/tests/logging/test_opentracing.py index 3aaa743265..d5e643585d 100644 --- a/tests/logging/test_opentracing.py +++ b/tests/logging/test_opentracing.py @@ -37,6 +37,7 @@ ) from 
synapse.metrics.background_process_metrics import run_as_background_process from synapse.util.clock import Clock +from synapse.util.duration import Duration from tests.server import get_clock @@ -184,7 +185,7 @@ async def task(i: int) -> None: scopes.append(scope) self.assertEqual(self._tracer.active_span, scope.span) - await clock.sleep(4) + await clock.sleep(Duration(seconds=4)) self.assertEqual(self._tracer.active_span, scope.span) scope.close() @@ -194,7 +195,7 @@ async def root() -> None: scopes.append(root_scope) d1 = run_in_background(task, 1) - await clock.sleep(2) + await clock.sleep(Duration(seconds=2)) d2 = run_in_background(task, 2) # because we did run_in_background, the active span should still be the @@ -351,7 +352,7 @@ async def bg_task() -> None: # Now wait for the background process to finish while not callback_finished: - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self.assertTrue( callback_finished, @@ -418,7 +419,7 @@ async def bg_task() -> None: # Now wait for the background process to finish while not callback_finished: - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self.assertTrue( callback_finished, diff --git a/tests/metrics/test_common_usage_metrics.py b/tests/metrics/test_common_usage_metrics.py index b0a75768a6..8666754eb5 100644 --- a/tests/metrics/test_common_usage_metrics.py +++ b/tests/metrics/test_common_usage_metrics.py @@ -4,6 +4,7 @@ from synapse.server import HomeServer from synapse.types import create_requester from synapse.util.clock import Clock +from synapse.util.duration import Duration from tests.unittest import FederatingHomeserverTestCase @@ -159,7 +160,7 @@ def test_retained_users_gauge_update(self) -> None: # start the user_daily_visits table update loop self.clock.looping_call( self.hs.get_datastores().main.generate_user_daily_visits, - 5 * 60 * 1000, + Duration(minutes=5), ) metrics = self.get_success(self.manager.get_metrics()) diff --git a/tests/metrics/test_phone_home_stats.py 
b/tests/metrics/test_phone_home_stats.py index 4462385dae..dfb88588cd 100644 --- a/tests/metrics/test_phone_home_stats.py +++ b/tests/metrics/test_phone_home_stats.py @@ -17,7 +17,7 @@ from twisted.internet.testing import MemoryReactor from synapse.app.phone_stats_home import ( - PHONE_HOME_INTERVAL_SECONDS, + PHONE_HOME_INTERVAL, start_phone_stats_home, ) from synapse.rest import admin, login, register, room @@ -78,7 +78,7 @@ def prepare( def _get_latest_phone_home_stats(self) -> JsonDict: # Wait for `phone_stats_home` to be called again + a healthy margin (50s). - self.reactor.advance(2 * PHONE_HOME_INTERVAL_SECONDS + 50) + self.reactor.advance(2 * PHONE_HOME_INTERVAL.as_secs() + 50) # Extract the reported stats from our http client mock mock_calls = self.put_json_mock.call_args_list diff --git a/tests/replication/http/test__base.py b/tests/replication/http/test__base.py index b757c6428a..1c7e7e997b 100644 --- a/tests/replication/http/test__base.py +++ b/tests/replication/http/test__base.py @@ -30,6 +30,7 @@ from synapse.server import HomeServer from synapse.types import JsonDict from synapse.util.cancellation import cancellable +from synapse.util.duration import Duration from tests import unittest from tests.http.server._base import test_disconnect @@ -52,7 +53,7 @@ async def _serialize_payload(**kwargs: ReplicationEndpoint) -> JsonDict: async def _handle_request( # type: ignore[override] self, request: Request, content: JsonDict ) -> tuple[int, JsonDict]: - await self.clock.sleep(1.0) + await self.clock.sleep(Duration(seconds=1)) return HTTPStatus.OK, {"result": True} @@ -73,7 +74,7 @@ async def _serialize_payload(**kwargs: ReplicationEndpoint) -> JsonDict: async def _handle_request( # type: ignore[override] self, request: Request, content: JsonDict ) -> tuple[int, JsonDict]: - await self.clock.sleep(1.0) + await self.clock.sleep(Duration(seconds=1)) return HTTPStatus.OK, {"result": True} diff --git a/tests/rest/admin/test_background_updates.py 
b/tests/rest/admin/test_background_updates.py index 25112baaa2..a4a3112e20 100644 --- a/tests/rest/admin/test_background_updates.py +++ b/tests/rest/admin/test_background_updates.py @@ -31,6 +31,7 @@ from synapse.storage.background_updates import BackgroundUpdater from synapse.types import JsonDict from synapse.util.clock import Clock +from synapse.util.duration import Duration from tests import unittest @@ -105,7 +106,7 @@ def _register_bg_update(self) -> None: "Adds a bg update but doesn't start it" async def _fake_update(progress: JsonDict, batch_size: int) -> int: - await self.clock.sleep(0.2) + await self.clock.sleep(Duration(milliseconds=200)) return batch_size self.store.db_pool.updates.register_background_update_handler( diff --git a/tests/rest/admin/test_room.py b/tests/rest/admin/test_room.py index 7daf13ad22..1c340efa0c 100644 --- a/tests/rest/admin/test_room.py +++ b/tests/rest/admin/test_room.py @@ -44,6 +44,7 @@ ) from synapse.types import UserID from synapse.util.clock import Clock +from synapse.util.duration import Duration from synapse.util.task_scheduler import TaskScheduler from tests import unittest @@ -1161,7 +1162,7 @@ def test_delete_same_room_twice(self) -> None: # Mock PaginationHandler.purge_room to sleep for 100s, so we have time to do a second call # before the purge is over. Note that it doesn't purge anymore, but we don't care. 
async def purge_room(room_id: str, force: bool) -> None: - await self.hs.get_clock().sleep(100) + await self.hs.get_clock().sleep(Duration(seconds=100)) self.pagination_handler.purge_room = AsyncMock(side_effect=purge_room) # type: ignore[method-assign] @@ -1464,7 +1465,7 @@ def test_scheduled_purge_room(self) -> None: self._is_purged(room_id) # Wait for next scheduler run - self.reactor.advance(TaskScheduler.SCHEDULE_INTERVAL_MS) + self.reactor.advance(TaskScheduler.SCHEDULE_INTERVAL.as_secs()) self._is_purged(room_id) @@ -1501,7 +1502,7 @@ def test_schedule_shutdown_room(self) -> None: self._is_purged(room_id) # Wait for next scheduler run - self.reactor.advance(TaskScheduler.SCHEDULE_INTERVAL_MS) + self.reactor.advance(TaskScheduler.SCHEDULE_INTERVAL.as_secs()) # Test that all users has been kicked (room is shutdown) self._has_no_members(room_id) diff --git a/tests/rest/client/sliding_sync/test_connection_tracking.py b/tests/rest/client/sliding_sync/test_connection_tracking.py index 16d13fcc86..44e7fa4726 100644 --- a/tests/rest/client/sliding_sync/test_connection_tracking.py +++ b/tests/rest/client/sliding_sync/test_connection_tracking.py @@ -12,6 +12,7 @@ # . 
# import logging +from unittest.mock import patch from parameterized import parameterized, parameterized_class @@ -19,8 +20,11 @@ import synapse.rest.admin from synapse.api.constants import EventTypes +from synapse.api.errors import Codes +from synapse.handlers.sliding_sync import room_lists from synapse.rest.client import login, room, sync from synapse.server import HomeServer +from synapse.storage.databases.main.sliding_sync import CONNECTION_EXPIRY from synapse.util.clock import Clock from tests.rest.client.sliding_sync.test_sliding_sync import SlidingSyncBase @@ -395,3 +399,107 @@ def test_rooms_timeline_incremental_sync_NEVER(self) -> None: ) self.assertEqual(response_body["rooms"][room_id1]["limited"], True) self.assertEqual(response_body["rooms"][room_id1]["initial"], True) + + @patch("synapse.handlers.sliding_sync.room_lists.NUM_ROOMS_THRESHOLD", new=5) + def test_sliding_sync_connection_expires_with_too_much_data(self) -> None: + """ + Test that if we have too much data to send down for incremental sync, + we expire the connection and ask the client to do a full resync. + + Connections are only expired if they have not been used for a minimum + amount of time (MINIMUM_NOT_USED_AGE_EXPIRY) to avoid expiring + connections that are actively being used. 
+ """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + # Create enough rooms that we can later trigger the too much data case + room_ids = [] + for _ in range(room_lists.NUM_ROOMS_THRESHOLD + 2): + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id, user1_id, tok=user1_tok) + room_ids.append(room_id) + + # Make sure we don't hit ratelimits + self.reactor.advance(60 * 1000) + + # Make the Sliding Sync request + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1000]], + "required_state": [], + "timeline_limit": 1, + } + } + } + + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Check we got all the rooms down + for room_id in room_ids: + self.assertIn(room_id, response_body["rooms"]) + + # Send a lot of events to cause the connection to expire + for room_id in room_ids: + self.helper.send(room_id, "msg", tok=user2_tok) + + # If we don't advance the clock then we won't expire the connection. + response_body, from_token = self.do_sync(sync_body, tok=user1_tok) + + # Send some more events. + for room_id in room_ids: + self.helper.send(room_id, "msg", tok=user2_tok) + + # Advance the clock to ensure that the last_used_ts is old enough + self.reactor.advance(2 * room_lists.MINIMUM_NOT_USED_AGE_EXPIRY.as_secs()) + + # This sync should now raise SlidingSyncUnknownPosition + channel = self.make_sync_request(sync_body, since=from_token, tok=user1_tok) + self.assertEqual(channel.code, 400) + self.assertEqual(channel.json_body["errcode"], Codes.UNKNOWN_POS) + + def test_sliding_sync_connection_expires_after_time(self) -> None: + """ + Test that if we don't use a sliding sync connection for a long time, + we expire the connection and ask the client to do a full resync. 
+ """ + + user1_id = self.register_user("user1", "pass") + user1_tok = self.login(user1_id, "pass") + user2_id = self.register_user("user2", "pass") + user2_tok = self.login(user2_id, "pass") + + room_id = self.helper.create_room_as(user2_id, tok=user2_tok) + self.helper.join(room_id, user1_id, tok=user1_tok) + + # Make the Sliding Sync request + sync_body = { + "lists": { + "foo-list": { + "ranges": [[0, 1000]], + "required_state": [], + "timeline_limit": 1, + } + } + } + + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # We can keep syncing so long as the interval between requests is less + # than CONNECTION_EXPIRY + for _ in range(5): + self.reactor.advance(0.5 * CONNECTION_EXPIRY.as_secs()) + + _, from_token = self.do_sync(sync_body, tok=user1_tok) + + # ... but if we wait too long, the connection expires + self.reactor.advance(1 + CONNECTION_EXPIRY.as_secs()) + + # This sync should now raise SlidingSyncUnknownPosition + channel = self.make_sync_request(sync_body, since=from_token, tok=user1_tok) + self.assertEqual(channel.code, 400) + self.assertEqual(channel.json_body["errcode"], Codes.UNKNOWN_POS) diff --git a/tests/rest/client/sliding_sync/test_sliding_sync.py b/tests/rest/client/sliding_sync/test_sliding_sync.py index c27a712088..bcd22d15ca 100644 --- a/tests/rest/client/sliding_sync/test_sliding_sync.py +++ b/tests/rest/client/sliding_sync/test_sliding_sync.py @@ -46,7 +46,7 @@ from synapse.util.stringutils import random_string from tests import unittest -from tests.server import TimedOutException +from tests.server import FakeChannel, TimedOutException from tests.test_utils.event_injection import create_event logger = logging.getLogger(__name__) @@ -80,12 +80,10 @@ def default_config(self) -> JsonDict: config["experimental_features"] = {"msc3575_enabled": True} return config - def do_sync( + def make_sync_request( self, sync_body: JsonDict, *, since: str | None = None, tok: str - ) -> tuple[JsonDict, str]: - """Do a sliding sync request with 
given body. - - Asserts the request was successful. + ) -> FakeChannel: + """Make a sliding sync request with given body. Attributes: sync_body: The full request body to use @@ -106,6 +104,24 @@ def do_sync( content=sync_body, access_token=tok, ) + return channel + + def do_sync( + self, sync_body: JsonDict, *, since: str | None = None, tok: str + ) -> tuple[JsonDict, str]: + """Do a sliding sync request with given body. + + Asserts the request was successful. + + Attributes: + sync_body: The full request body to use + since: Optional since token + tok: Access token to use + + Returns: + A tuple of the response body and the `pos` field. + """ + channel = self.make_sync_request(sync_body, since=since, tok=tok) self.assertEqual(channel.code, 200, channel.json_body) return channel.json_body, channel.json_body["pos"] diff --git a/tests/rest/client/test_mutual_rooms.py b/tests/rest/client/test_mutual_rooms.py index 8580d09006..ea063707aa 100644 --- a/tests/rest/client/test_mutual_rooms.py +++ b/tests/rest/client/test_mutual_rooms.py @@ -43,6 +43,12 @@ class UserMutualRoomsTest(unittest.HomeserverTestCase): mutual_rooms.register_servlets, ] + def default_config(self) -> dict: + config = super().default_config() + experimental = config.setdefault("experimental_features", {}) + experimental.setdefault("msc2666_enabled", True) + return config + def make_homeserver(self, reactor: MemoryReactor, clock: Clock) -> HomeServer: config = self.default_config() return self.setup_test_homeserver(config=config) @@ -58,6 +64,21 @@ def _get_mutual_rooms(self, token: str, other_user: str) -> FakeChannel: access_token=token, ) + @unittest.override_config({"experimental_features": {"msc2666_enabled": False}}) + def test_mutual_rooms_no_experimental_flag(self) -> None: + """ + The endpoint should 404 if the experimental flag is not enabled. + """ + # Register a user. 
+ u1 = self.register_user("user1", "pass") + u1_token = self.login(u1, "pass") + + # Check that we're unable to query the endpoint due to the endpoint + # being unrecognised. + channel = self._get_mutual_rooms(u1_token, "@not-used:test") + self.assertEqual(404, channel.code, channel.result) + self.assertEqual("M_UNRECOGNIZED", channel.json_body["errcode"], channel.result) + def test_shared_room_list_public(self) -> None: """ A room should show up in the shared list of rooms between two users diff --git a/tests/rest/client/test_rooms.py b/tests/rest/client/test_rooms.py index 68e09afc54..926560afd6 100644 --- a/tests/rest/client/test_rooms.py +++ b/tests/rest/client/test_rooms.py @@ -3880,9 +3880,11 @@ def test_bad_data(self) -> None: self._set_canonical_alias({"alt_aliases": False}, expected_code=400) self._set_canonical_alias({"alt_aliases": True}, expected_code=400) self._set_canonical_alias({"alt_aliases": {}}, expected_code=400) + self._set_canonical_alias({"alt_aliases": [0]}, expected_code=400) def test_bad_alias(self) -> None: """An alias which does not point to the room raises a SynapseError.""" + self._set_canonical_alias({"alias": {"@unknown:test": "a"}}, expected_code=400) self._set_canonical_alias({"alias": "@unknown:test"}, expected_code=400) self._set_canonical_alias({"alt_aliases": ["@unknown:test"]}, expected_code=400) diff --git a/tests/rest/client/test_transactions.py b/tests/rest/client/test_transactions.py index 64d22d485a..31586a451f 100644 --- a/tests/rest/client/test_transactions.py +++ b/tests/rest/client/test_transactions.py @@ -26,12 +26,10 @@ from twisted.internet import defer, reactor as _reactor from synapse.logging.context import SENTINEL_CONTEXT, LoggingContext, current_context -from synapse.rest.client.transactions import CLEANUP_PERIOD_MS, HttpTransactionCache +from synapse.rest.client.transactions import CLEANUP_PERIOD, HttpTransactionCache from synapse.types import ISynapseReactor, JsonDict from synapse.util.clock import Clock 
-from synapse.util.constants import ( - MILLISECONDS_PER_SECOND, -) +from synapse.util.duration import Duration from tests import unittest from tests.server import get_clock @@ -96,7 +94,7 @@ def cb() -> Generator["defer.Deferred[object]", object, tuple[int, JsonDict]]: # Ignore `multiple-internal-clocks` linter error here since we are creating a `Clock` # for testing purposes. yield defer.ensureDeferred( - Clock(reactor, server_name="test_server").sleep(0) # type: ignore[multiple-internal-clocks] + Clock(reactor, server_name="test_server").sleep(Duration(seconds=0)) # type: ignore[multiple-internal-clocks] ) return 1, {} @@ -187,7 +185,7 @@ def test_cleans_up(self) -> Generator["defer.Deferred[Any]", object, None]: ) # Advance time just under the cleanup period. # Should NOT have cleaned up yet - self.reactor.advance((CLEANUP_PERIOD_MS - 1) / MILLISECONDS_PER_SECOND) + self.reactor.advance(CLEANUP_PERIOD.as_secs() - 1) yield self.cache.fetch_or_execute_request( self.mock_request, self.mock_requester, cb, "an arg" @@ -196,7 +194,7 @@ def test_cleans_up(self) -> Generator["defer.Deferred[Any]", object, None]: cb.assert_called_once_with("an arg") # Advance time just after the cleanup period. - self.reactor.advance(2 / MILLISECONDS_PER_SECOND) + self.reactor.advance(2) yield self.cache.fetch_or_execute_request( self.mock_request, self.mock_requester, cb, "an arg" diff --git a/tests/server_notices/__init__.py b/tests/server_notices/__init__.py index eca52930db..19bda218e3 100644 --- a/tests/server_notices/__init__.py +++ b/tests/server_notices/__init__.py @@ -20,6 +20,7 @@ from synapse.server import HomeServer from synapse.types import JsonDict from synapse.util.clock import Clock +from synapse.util.duration import Duration from tests import unittest from tests.unittest import override_config @@ -131,7 +132,7 @@ def _check_user_received_server_notice( break # Sleep and try again. 
- self.get_success(self.clock.sleep(0.1)) + self.get_success(self.clock.sleep(Duration(milliseconds=100))) else: self.fail( f"Failed to join the server notices room. No 'join' field in sync_body['rooms']: {sync_body['rooms']}" diff --git a/tests/state/test_v2.py b/tests/state/test_v2.py index 7db710846d..85ce5bede2 100644 --- a/tests/state/test_v2.py +++ b/tests/state/test_v2.py @@ -42,6 +42,7 @@ ) from synapse.storage.databases.main.event_federation import StateDifference from synapse.types import EventID, StateMap +from synapse.util.duration import Duration from tests import unittest @@ -61,7 +62,7 @@ class FakeClock: - async def sleep(self, msec: float) -> None: + async def sleep(self, duration: Duration) -> None: return None diff --git a/tests/state/test_v21.py b/tests/state/test_v21.py index b17773fb56..58d800f921 100644 --- a/tests/state/test_v21.py +++ b/tests/state/test_v21.py @@ -39,6 +39,7 @@ ) from synapse.types import StateMap from synapse.util.clock import Clock +from synapse.util.duration import Duration from tests import unittest from tests.state.test_v2 import TestStateResolutionStore @@ -66,7 +67,7 @@ def monotonic_timestamp() -> int: class FakeClock: - async def sleep(self, duration_ms: float) -> None: + async def sleep(self, duration: Duration) -> None: defer.succeed(None) diff --git a/tests/storage/databases/main/test_deviceinbox.py b/tests/storage/databases/main/test_deviceinbox.py index dbf362a3cc..be585068fe 100644 --- a/tests/storage/databases/main/test_deviceinbox.py +++ b/tests/storage/databases/main/test_deviceinbox.py @@ -28,7 +28,7 @@ from synapse.rest.client import devices from synapse.server import HomeServer from synapse.storage.databases.main.deviceinbox import ( - DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS, + DEVICE_FEDERATION_INBOX_CLEANUP_DELAY, ) from synapse.util.clock import Clock @@ -191,7 +191,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: self.db_pool = self.store.db_pool # Advance time 
to ensure we are past the cleanup delay - self.reactor.advance(DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS * 2 / 1000) + self.reactor.advance(DEVICE_FEDERATION_INBOX_CLEANUP_DELAY.as_secs() * 2) def test_delete_old_federation_inbox_rows_skips_if_no_index(self) -> None: """Test that we don't delete rows if the index hasn't been created yet.""" @@ -245,7 +245,7 @@ def test_delete_old_federation_inbox_rows(self) -> None: ) ) - self.reactor.advance(2 * DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS / 1000) + self.reactor.advance(2 * DEVICE_FEDERATION_INBOX_CLEANUP_DELAY.as_secs()) # Insert new messages for i in range(5): @@ -293,7 +293,7 @@ def test_delete_old_federation_inbox_rows_batch_limit(self) -> None: ) # Advance time to ensure we are past the cleanup delay - self.reactor.advance(2 * DEVICE_FEDERATION_INBOX_CLEANUP_DELAY_MS / 1000) + self.reactor.advance(2 * DEVICE_FEDERATION_INBOX_CLEANUP_DELAY.as_millis()) # Run the cleanup - it should delete in batches and sleep between them deferred = defer.ensureDeferred( diff --git a/tests/storage/databases/main/test_lock.py b/tests/storage/databases/main/test_lock.py index 3743a4a386..622eb96ded 100644 --- a/tests/storage/databases/main/test_lock.py +++ b/tests/storage/databases/main/test_lock.py @@ -26,7 +26,7 @@ from twisted.internet.testing import MemoryReactor from synapse.server import HomeServer -from synapse.storage.databases.main.lock import _LOCK_TIMEOUT_MS, _RENEWAL_INTERVAL_MS +from synapse.storage.databases.main.lock import _LOCK_TIMEOUT_MS, _RENEWAL_INTERVAL from synapse.util.clock import Clock from tests import unittest @@ -377,7 +377,7 @@ def test_maintain_lock(self) -> None: # Wait for ages with the lock, we should not be able to get the lock. 
for _ in range(10): - self.reactor.advance((_RENEWAL_INTERVAL_MS / 1000)) + self.reactor.advance((_RENEWAL_INTERVAL.as_secs())) lock2 = self.get_success( self.store.try_acquire_read_write_lock("name", "key", write=True) diff --git a/tests/storage/test_background_update.py b/tests/storage/test_background_update.py index 3505423691..e3f79d7670 100644 --- a/tests/storage/test_background_update.py +++ b/tests/storage/test_background_update.py @@ -38,6 +38,7 @@ from synapse.storage.engines import PostgresEngine, Sqlite3Engine from synapse.types import JsonDict from synapse.util.clock import Clock +from synapse.util.duration import Duration from tests import unittest from tests.unittest import override_config @@ -59,7 +60,7 @@ def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: async def update(self, progress: JsonDict, count: int) -> int: duration_ms = 10 - await self.clock.sleep((count * duration_ms) / 1000) + await self.clock.sleep(Duration(milliseconds=count * duration_ms)) progress = {"my_key": progress["my_key"] + 1} await self.store.db_pool.runInteraction( "update_progress", @@ -309,7 +310,7 @@ def test_background_update_min_batch_set_in_config(self) -> None: # Run the update with the long-running update item async def update_long(progress: JsonDict, count: int) -> int: - await self.clock.sleep((count * duration_ms) / 1000) + await self.clock.sleep(Duration(milliseconds=count * duration_ms)) progress = {"my_key": progress["my_key"] + 1} await self.store.db_pool.runInteraction( "update_progress", diff --git a/tests/storage/test_invite_rule.py b/tests/storage/test_invite_rule.py index 38c97ecaa3..ae99907704 100644 --- a/tests/storage/test_invite_rule.py +++ b/tests/storage/test_invite_rule.py @@ -1,4 +1,8 @@ -from synapse.storage.invite_rule import InviteRule, InviteRulesConfig +from synapse.storage.invite_rule import ( + AllowAllInviteRulesConfig, + InviteRule, + MSC4155InviteRulesConfig, +) from synapse.types import UserID from tests 
import unittest @@ -10,23 +14,23 @@ class InviteFilterTestCase(unittest.TestCase): - def test_empty(self) -> None: + def test_allow_all(self) -> None: """Permit by default""" - config = InviteRulesConfig(None) + config = AllowAllInviteRulesConfig() self.assertEqual( config.get_invite_rule(regular_user.to_string()), InviteRule.ALLOW ) def test_ignore_invalid(self) -> None: """Invalid strings are ignored""" - config = InviteRulesConfig({"blocked_users": ["not a user"]}) + config = MSC4155InviteRulesConfig({"blocked_users": ["not a user"]}) self.assertEqual( config.get_invite_rule(blocked_user.to_string()), InviteRule.ALLOW ) def test_user_blocked(self) -> None: """Permit all, except explicitly blocked users""" - config = InviteRulesConfig({"blocked_users": [blocked_user.to_string()]}) + config = MSC4155InviteRulesConfig({"blocked_users": [blocked_user.to_string()]}) self.assertEqual( config.get_invite_rule(blocked_user.to_string()), InviteRule.BLOCK ) @@ -36,7 +40,7 @@ def test_user_blocked(self) -> None: def test_user_ignored(self) -> None: """Permit all, except explicitly ignored users""" - config = InviteRulesConfig({"ignored_users": [ignored_user.to_string()]}) + config = MSC4155InviteRulesConfig({"ignored_users": [ignored_user.to_string()]}) self.assertEqual( config.get_invite_rule(ignored_user.to_string()), InviteRule.IGNORE ) @@ -46,7 +50,7 @@ def test_user_ignored(self) -> None: def test_user_precedence(self) -> None: """Always take allowed over ignored, ignored over blocked, and then block.""" - config = InviteRulesConfig( + config = MSC4155InviteRulesConfig( { "allowed_users": [allowed_user.to_string()], "ignored_users": [allowed_user.to_string(), ignored_user.to_string()], @@ -70,7 +74,7 @@ def test_user_precedence(self) -> None: def test_server_blocked(self) -> None: """Block all users on the server except those allowed.""" user_on_same_server = UserID("blocked", allowed_user.domain) - config = InviteRulesConfig( + config = MSC4155InviteRulesConfig( { 
"allowed_users": [allowed_user.to_string()], "blocked_servers": [allowed_user.domain], @@ -86,7 +90,7 @@ def test_server_blocked(self) -> None: def test_server_ignored(self) -> None: """Ignore all users on the server except those allowed.""" user_on_same_server = UserID("ignored", allowed_user.domain) - config = InviteRulesConfig( + config = MSC4155InviteRulesConfig( { "allowed_users": [allowed_user.to_string()], "ignored_servers": [allowed_user.domain], @@ -104,7 +108,7 @@ def test_server_allow(self) -> None: blocked_user_on_same_server = UserID("blocked", allowed_user.domain) ignored_user_on_same_server = UserID("ignored", allowed_user.domain) allowed_user_on_same_server = UserID("another", allowed_user.domain) - config = InviteRulesConfig( + config = MSC4155InviteRulesConfig( { "ignored_users": [ignored_user_on_same_server.to_string()], "blocked_users": [blocked_user_on_same_server.to_string()], @@ -129,7 +133,7 @@ def test_server_allow(self) -> None: def test_server_precedence(self) -> None: """Always take allowed over ignored, ignored over blocked, and then block.""" - config = InviteRulesConfig( + config = MSC4155InviteRulesConfig( { "allowed_servers": [allowed_user.domain], "ignored_servers": [allowed_user.domain, ignored_user.domain], @@ -152,7 +156,7 @@ def test_server_precedence(self) -> None: def test_server_glob(self) -> None: """Test that glob patterns match""" - config = InviteRulesConfig({"blocked_servers": ["*.example.org"]}) + config = MSC4155InviteRulesConfig({"blocked_servers": ["*.example.org"]}) self.assertEqual( config.get_invite_rule(allowed_user.to_string()), InviteRule.BLOCK ) diff --git a/tests/storage/test_state.py b/tests/storage/test_state.py index 8e821c6d18..dbbede812d 100644 --- a/tests/storage/test_state.py +++ b/tests/storage/test_state.py @@ -19,6 +19,7 @@ # # +import json import logging from typing import cast @@ -33,6 +34,7 @@ from synapse.types import JsonDict, RoomID, StateMap, UserID from synapse.types.state import 
StateFilter from synapse.util.clock import Clock +from synapse.util.stringutils import random_string from tests.unittest import HomeserverTestCase @@ -643,3 +645,315 @@ def test_batched_state_group_storing(self) -> None: ), ) self.assertEqual(context.state_group_before_event, groups[0][0]) + + +class CurrentStateDeltaStreamTestCase(HomeserverTestCase): + def prepare(self, reactor: MemoryReactor, clock: Clock, hs: HomeServer) -> None: + super().prepare(reactor, clock, hs) + self.store = hs.get_datastores().main + self.storage = hs.get_storage_controllers() + self.state_datastore = self.storage.state.stores.state + self.event_creation_handler = hs.get_event_creation_handler() + self.event_builder_factory = hs.get_event_builder_factory() + + # Create a made-up room and a user. + self.alice_user_id = UserID.from_string("@alice:test") + self.room = RoomID.from_string("!abc1234:test") + + self.get_success( + self.store.store_room( + self.room.to_string(), + room_creator_user_id="@creator:text", + is_public=True, + room_version=RoomVersions.V1, + ) + ) + + def inject_state_event( + self, room: RoomID, sender: UserID, typ: str, state_key: str, content: JsonDict + ) -> EventBase: + builder = self.event_builder_factory.for_room_version( + RoomVersions.V1, + { + "type": typ, + "sender": sender.to_string(), + "state_key": state_key, + "room_id": room.to_string(), + "content": content, + }, + ) + + event, unpersisted_context = self.get_success( + self.event_creation_handler.create_new_client_event(builder) + ) + + context = self.get_success(unpersisted_context.persist(event)) + + assert self.storage.persistence is not None + self.get_success(self.storage.persistence.persist_event(event, context)) + + return event + + def test_get_partial_current_state_deltas_limit(self) -> None: + """ + Tests that `get_partial_current_state_deltas` actually returns `limit` rows. + + Regression test for https://github.com/element-hq/synapse/pull/18960. 
+ """ + # Inject a create event which other events can auth with. + self.inject_state_event( + self.room, self.alice_user_id, EventTypes.Create, "", {} + ) + + limit = 2 + + # Make N*2 state changes in the room, resulting in 2N+1 total state + # events (including the create event) in the room. + for i in range(limit * 2): + self.inject_state_event( + self.room, + self.alice_user_id, + EventTypes.Name, + "", + {"name": f"rename #{i}"}, + ) + + # Call the function under test. This must return <= `limit` rows. + max_stream_id = self.store.get_room_max_stream_ordering() + clipped_stream_id, deltas = self.get_success( + self.store.get_partial_current_state_deltas( + prev_stream_id=0, + max_stream_id=max_stream_id, + limit=limit, + ) + ) + + self.assertLessEqual( + len(deltas), limit, f"Returned {len(deltas)} rows, expected at most {limit}" + ) + + # Advancing from the clipped point should eventually drain the remainder. + # Make sure we make progress and don’t get stuck. + if deltas: + next_prev = clipped_stream_id + next_clipped, next_deltas = self.get_success( + self.store.get_partial_current_state_deltas( + prev_stream_id=next_prev, max_stream_id=max_stream_id, limit=limit + ) + ) + self.assertNotEqual( + next_clipped, clipped_stream_id, "Did not advance clipped_stream_id" + ) + # Still should respect the limit. + self.assertLessEqual(len(next_deltas), limit) + + def test_non_unique_stream_ids_in_current_state_delta_stream(self) -> None: + """ + Tests that `get_partial_current_state_deltas` always returns entire + groups of state deltas (grouped by `stream_id`), and never part of one. + + We check by passing a `limit` that to the function that, if followed + blindly, would split a group of state deltas that share a `stream_id`. + The test passes if that group is not returned at all (because doing so + would overshoot the limit of returned state deltas). + + Regression test for https://github.com/element-hq/synapse/pull/18960. 
+ """ + # Inject a create event to start with. + self.inject_state_event( + self.room, self.alice_user_id, EventTypes.Create, "", {} + ) + + # Then inject one "real" m.room.name event. This will give us a stream_id that + # we can create some more (fake) events with. + self.inject_state_event( + self.room, + self.alice_user_id, + EventTypes.Name, + "", + {"name": "rename #1"}, + ) + + # Get the stream_id of the last-inserted event. + max_stream_id = self.store.get_room_max_stream_ordering() + + # Make 3 more state changes in the room, resulting in 5 total state + # events (including the create event, and the first name update) in + # the room. + # + # All of these state deltas have the same `stream_id` as the original name event. + # Do so by editing the table directly as that's the simplest way to have + # all share the same `stream_id`. + self.get_success( + self.store.db_pool.simple_insert_many( + "current_state_delta_stream", + keys=( + "stream_id", + "room_id", + "type", + "state_key", + "event_id", + "prev_event_id", + "instance_name", + ), + values=[ + ( + max_stream_id, + self.room.to_string(), + EventTypes.Name, + "", + f"${random_string(5)}:test", + json.dumps({"name": f"rename #{i}"}), + "master", + ) + for i in range(3) + ], + desc="inject_room_name_state_events", + ) + ) + + # Call the function under test with a limit of 4. Without the limit, we + # would return 5 state deltas: + # + # C N N N N + # 1 2 3 4 5 + # + # C = m.room.create + # N = m.room.name + # + # With the limit, we should return only the create event, as returning 4 + # state deltas would result in splitting a group: + # + # 2 3 3 3 3 - state IDs/groups + # C N N N N + # 1 2 3 4 X + + clipped_stream_id, deltas = self.get_success( + self.store.get_partial_current_state_deltas( + prev_stream_id=0, + max_stream_id=max_stream_id, + limit=4, + ) + ) + + # 2 is the stream ID of the m.room.create event. 
+ self.assertEqual(clipped_stream_id, 2) + self.assertEqual( + len(deltas), + 1, + f"Returned {len(deltas)} rows, expected only one (the create event): {deltas}", + ) + + # Advance once more with our limit of 4. We should now get all 4 + # `m.room.name` state deltas as they can fit under the limit. + clipped_stream_id, next_deltas = self.get_success( + self.store.get_partial_current_state_deltas( + prev_stream_id=clipped_stream_id, max_stream_id=max_stream_id, limit=4 + ) + ) + self.assertEqual( + clipped_stream_id, 3 + ) # The stream ID of the 4 m.room.name events. + + self.assertEqual( + len(next_deltas), + 4, + f"Returned {len(next_deltas)} rows, expected all 4 m.room.name events: {next_deltas}", + ) + + def test_get_partial_current_state_deltas_does_not_enter_infinite_loop( + self, + ) -> None: + """ + Tests that `get_partial_current_state_deltas` does not repeatedly return + zero entries due to the passed `limit` parameter being less than the + size of the next group of state deltas from the given `prev_stream_id`. + """ + # Inject a create event to start with. + self.inject_state_event( + self.room, self.alice_user_id, EventTypes.Create, "", {} + ) + + # Then inject one "real" m.room.name event. This will give us a stream_id that + # we can create some more (fake) events with. + self.inject_state_event( + self.room, + self.alice_user_id, + EventTypes.Name, + "", + {"name": "rename #1"}, + ) + + # Get the stream_id of the last-inserted event. + max_stream_id = self.store.get_room_max_stream_ordering() + + # Make 3 more state changes in the room, resulting in 5 total state + # events (including the create event, and the first name update) in + # the room. + # + # All of these state deltas have the same `stream_id` as the original name event. + # Do so by editing the table directly as that's the simplest way to have + # all share the same `stream_id`. 
+ self.get_success( + self.store.db_pool.simple_insert_many( + "current_state_delta_stream", + keys=( + "stream_id", + "room_id", + "type", + "state_key", + "event_id", + "prev_event_id", + "instance_name", + ), + values=[ + ( + max_stream_id, + self.room.to_string(), + EventTypes.Name, + "", + f"${random_string(5)}:test", + json.dumps({"name": f"rename #{i}"}), + "master", + ) + for i in range(3) + ], + desc="inject_room_name_state_events", + ) + ) + + # Call the function under test with a limit of 4. Without the limit, we would return + # 5 state deltas: + # + # C N N N N + # 1 2 3 4 5 + # + # C = m.room.create + # N = m.room.name + # + # With the limit, we should return only the create event, as returning 4 + # state deltas would result in splitting a group: + # + # 2 3 3 3 3 - state IDs/groups + # C N N N N + # 1 2 3 4 X + + clipped_stream_id, deltas = self.get_success( + self.store.get_partial_current_state_deltas( + prev_stream_id=2, # Start after the create event (which has stream_id 2). + max_stream_id=max_stream_id, + limit=2, # Less than the size of the next group (which is 4). + ) + ) + + self.assertEqual( + clipped_stream_id, 3 + ) # The stream ID of the 4 m.room.name events. + + # We should get all 4 `m.room.name` state deltas, instead of 0, which + # would result in the caller entering an infinite loop. 
+ self.assertEqual( + len(deltas), + 4, + f"Returned {len(deltas)} rows, expected 4 even though it broke our limit: {deltas}", + ) diff --git a/tests/test_server.py b/tests/test_server.py index 2df6bdfa44..ec31b6cc5f 100644 --- a/tests/test_server.py +++ b/tests/test_server.py @@ -38,6 +38,7 @@ from synapse.types import JsonDict from synapse.util.cancellation import cancellable from synapse.util.clock import Clock +from synapse.util.duration import Duration from tests import unittest from tests.http.server._base import test_disconnect @@ -406,11 +407,11 @@ def __init__(self, clock: Clock): @cancellable async def _async_render_GET(self, request: SynapseRequest) -> tuple[int, JsonDict]: - await self.clock.sleep(1.0) + await self.clock.sleep(Duration(seconds=1)) return HTTPStatus.OK, {"result": True} async def _async_render_POST(self, request: SynapseRequest) -> tuple[int, JsonDict]: - await self.clock.sleep(1.0) + await self.clock.sleep(Duration(seconds=1)) return HTTPStatus.OK, {"result": True} @@ -423,11 +424,11 @@ def __init__(self, clock: Clock): @cancellable async def _async_render_GET(self, request: SynapseRequest) -> tuple[int, bytes]: - await self.clock.sleep(1.0) + await self.clock.sleep(Duration(seconds=1)) return HTTPStatus.OK, b"ok" async def _async_render_POST(self, request: SynapseRequest) -> tuple[int, bytes]: - await self.clock.sleep(1.0) + await self.clock.sleep(Duration(seconds=1)) return HTTPStatus.OK, b"ok" diff --git a/tests/util/caches/test_response_cache.py b/tests/util/caches/test_response_cache.py index 30cd6ef0e4..def5c817db 100644 --- a/tests/util/caches/test_response_cache.py +++ b/tests/util/caches/test_response_cache.py @@ -26,6 +26,7 @@ from twisted.internet import defer from synapse.util.caches.response_cache import ResponseCache, ResponseCacheContext +from synapse.util.duration import Duration from tests.server import get_clock from tests.unittest import TestCase @@ -55,7 +56,7 @@ async def instant_return(o: str) -> str: return o 
async def delayed_return(self, o: str) -> str: - await self.clock.sleep(1) + await self.clock.sleep(Duration(seconds=1)) return o def test_cache_hit(self) -> None: @@ -182,7 +183,7 @@ def test_cache_context_nocache(self, should_cache: bool) -> None: async def non_caching(o: str, cache_context: ResponseCacheContext[int]) -> str: nonlocal call_count call_count += 1 - await self.clock.sleep(1) + await self.clock.sleep(Duration(seconds=1)) cache_context.should_cache = should_cache return o diff --git a/tests/util/test_logcontext.py b/tests/util/test_logcontext.py index ca805bb20a..a4114cdfcc 100644 --- a/tests/util/test_logcontext.py +++ b/tests/util/test_logcontext.py @@ -37,6 +37,7 @@ ) from synapse.types import ISynapseReactor from synapse.util.clock import Clock +from synapse.util.duration import Duration from tests import unittest from tests.unittest import logcontext_clean @@ -82,7 +83,7 @@ async def competing_callback() -> None: self._check_test_key("sentinel") with LoggingContext(name="competing", server_name="test_server"): - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self._check_test_key("competing") self._check_test_key("sentinel") @@ -94,9 +95,9 @@ async def competing_callback() -> None: reactor.callLater(0, lambda: defer.ensureDeferred(competing_callback())) # type: ignore[call-later-not-tracked] with LoggingContext(name="foo", server_name="test_server"): - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self._check_test_key("foo") - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self._check_test_key("foo") self.assertTrue( @@ -128,7 +129,7 @@ async def competing_callback() -> None: self._check_test_key("looping_call") with LoggingContext(name="competing", server_name="test_server"): - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self._check_test_key("competing") self._check_test_key("looping_call") @@ -139,12 +140,12 @@ async def competing_callback() -> None: with LoggingContext(name="foo", 
server_name="test_server"): lc = clock.looping_call( - lambda: defer.ensureDeferred(competing_callback()), 0 + lambda: defer.ensureDeferred(competing_callback()), Duration(seconds=0) ) self._check_test_key("foo") - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self._check_test_key("foo") - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self._check_test_key("foo") self.assertTrue( @@ -179,7 +180,7 @@ async def competing_callback() -> None: self._check_test_key("looping_call") with LoggingContext(name="competing", server_name="test_server"): - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self._check_test_key("competing") self._check_test_key("looping_call") @@ -190,10 +191,10 @@ async def competing_callback() -> None: with LoggingContext(name="foo", server_name="test_server"): lc = clock.looping_call_now( - lambda: defer.ensureDeferred(competing_callback()), 0 + lambda: defer.ensureDeferred(competing_callback()), Duration(seconds=0) ) self._check_test_key("foo") - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self._check_test_key("foo") self.assertTrue( @@ -228,7 +229,7 @@ async def competing_callback() -> None: self._check_test_key("call_later") with LoggingContext(name="competing", server_name="test_server"): - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self._check_test_key("competing") self._check_test_key("call_later") @@ -238,11 +239,13 @@ async def competing_callback() -> None: callback_finished = True with LoggingContext(name="foo", server_name="test_server"): - clock.call_later(0, lambda: defer.ensureDeferred(competing_callback())) + clock.call_later( + Duration(seconds=0), lambda: defer.ensureDeferred(competing_callback()) + ) self._check_test_key("foo") - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self._check_test_key("foo") - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self._check_test_key("foo") self.assertTrue( @@ -280,7 +283,7 @@ async 
def competing_callback() -> None: self._check_test_key("foo") with LoggingContext(name="competing", server_name="test_server"): - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self._check_test_key("competing") self._check_test_key("foo") @@ -303,7 +306,7 @@ async def competing_callback() -> None: await d self._check_test_key("foo") - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self.assertTrue( callback_finished, @@ -338,7 +341,7 @@ async def competing_callback() -> None: self._check_test_key("sentinel") with LoggingContext(name="competing", server_name="test_server"): - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self._check_test_key("competing") self._check_test_key("sentinel") @@ -364,7 +367,7 @@ async def competing_callback() -> None: d.callback(None) self._check_test_key("foo") - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self.assertTrue( callback_finished, @@ -400,7 +403,7 @@ async def competing_callback() -> None: self._check_test_key("foo") with LoggingContext(name="competing", server_name="test_server"): - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self._check_test_key("competing") self._check_test_key("foo") @@ -446,7 +449,7 @@ async def competing_callback() -> None: run_in_background(lambda: (d.callback(None), d)[1]) # type: ignore[call-overload, func-returns-value] self._check_test_key("foo") - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self.assertTrue( callback_finished, @@ -486,7 +489,7 @@ def callback(result: object) -> object: # Now wait for the function under test to have run, and check that # the logcontext is left in a sane state. 
while not callback_finished: - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self._check_test_key("foo") self.assertTrue( @@ -501,7 +504,7 @@ def callback(result: object) -> object: async def test_run_in_background_with_blocking_fn(self) -> None: async def blocking_function() -> None: # Ignore linter error since we are creating a `Clock` for testing purposes. - await Clock(reactor, server_name="test_server").sleep(0) # type: ignore[multiple-internal-clocks] + await Clock(reactor, server_name="test_server").sleep(Duration(seconds=0)) # type: ignore[multiple-internal-clocks] await self._test_run_in_background(blocking_function) @@ -535,7 +538,9 @@ async def test_run_in_background_with_coroutine(self) -> None: async def testfunc() -> None: self._check_test_key("foo") # Ignore linter error since we are creating a `Clock` for testing purposes. - d = defer.ensureDeferred(Clock(reactor, server_name="test_server").sleep(0)) # type: ignore[multiple-internal-clocks] + d = defer.ensureDeferred( + Clock(reactor, server_name="test_server").sleep(Duration(seconds=0)) # type: ignore[multiple-internal-clocks] + ) self.assertIs(current_context(), SENTINEL_CONTEXT) await d self._check_test_key("foo") @@ -579,7 +584,7 @@ async def competing_callback() -> None: self._check_test_key("foo") with LoggingContext(name="competing", server_name="test_server"): - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self._check_test_key("competing") self._check_test_key("foo") @@ -591,7 +596,7 @@ async def competing_callback() -> None: with LoggingContext(name="foo", server_name="test_server"): run_coroutine_in_background(competing_callback()) self._check_test_key("foo") - await clock.sleep(0) + await clock.sleep(Duration(seconds=0)) self._check_test_key("foo") self.assertTrue( diff --git a/tests/util/test_task_scheduler.py b/tests/util/test_task_scheduler.py index e33ded8a7f..2c8e21b339 100644 --- a/tests/util/test_task_scheduler.py +++ 
b/tests/util/test_task_scheduler.py @@ -26,6 +26,7 @@ from synapse.server import HomeServer from synapse.types import JsonMapping, ScheduledTask, TaskStatus from synapse.util.clock import Clock +from synapse.util.duration import Duration from synapse.util.task_scheduler import TaskScheduler from tests.replication._base import BaseMultiWorkerStreamTestCase @@ -68,7 +69,7 @@ def test_schedule_task(self) -> None: # The timestamp being 30s after now the task should been executed # after the first scheduling loop is run - self.reactor.advance(TaskScheduler.SCHEDULE_INTERVAL_MS / 1000) + self.reactor.advance(TaskScheduler.SCHEDULE_INTERVAL.as_secs()) task = self.get_success(self.task_scheduler.get_task(task_id)) assert task is not None @@ -87,7 +88,7 @@ async def _sleeping_task( self, task: ScheduledTask ) -> tuple[TaskStatus, JsonMapping | None, str | None]: # Sleep for a second - await self.hs.get_clock().sleep(1) + await self.hs.get_clock().sleep(Duration(seconds=1)) return TaskStatus.COMPLETE, None, None def test_schedule_lot_of_tasks(self) -> None: @@ -187,7 +188,7 @@ def test_schedule_resumable_task(self) -> None: # Simulate a synapse restart by emptying the list of running tasks self.task_scheduler._running_tasks = set() - self.reactor.advance((TaskScheduler.SCHEDULE_INTERVAL_MS / 1000)) + self.reactor.advance((TaskScheduler.SCHEDULE_INTERVAL.as_secs())) task = self.get_success(self.task_scheduler.get_task(task_id)) assert task is not None